/**
@file
@brief Support for legacy Linux kernel versions
@details Copyright (c) 2018-2021 Acronis International GmbH
@author Mikhail Krivtsov (mikhail.krivtsov@acronis.com)
@since $Id: $
*/
#include "compat.h"
#include "debug.h"
#include <linux/magic.h>
/*
* 'get_task_exe_file()' appeared in 'stable/v4.8',
* it was backported in RedHat/CentOS 7 (kernel v3.10)
*/
#ifndef HAVE_GET_TASK_EXE
#ifndef KERNEL_MOCK
#include <linux/sched.h> // task_lock()
#else
#include <mock/mock_sched.h>
#endif
// 'get_mm_exe_file' is not 'exported'
/*
 * Pre-v4.8 fallback: return the file backing this mm's executable mapping.
 * On success the file's refcount is elevated (via get_file()); the caller
 * owns that reference and must fput() it. Returns NULL if the mm has no
 * exe_file.
 */
static struct file *get_mm_exe_file_impl(struct mm_struct *mm)
{
struct file *exe_file;
/*
 * We need mmap_sem to protect against races with removal of
 * VM_EXECUTABLE vmas
 */
down_read(&mm->mmap_sem);
exe_file = mm->exe_file;
if (exe_file)
get_file(exe_file); // take the reference while mmap_sem is still held
up_read(&mm->mmap_sem);
return exe_file;
}
/*
 * Compat implementation of get_task_exe_file(): resolve the executable
 * file of 'task'. Returns a referenced struct file (caller must fput())
 * or NULL when the task has no mm (e.g. kernel thread) or no exe_file.
 */
struct file *get_task_exe_file_impl(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	struct file *exe_file;

	if (!mm)
		return NULL;

	exe_file = get_mm_exe_file_impl(mm);
	mmput(mm); /* drop the mm reference taken by get_task_mm() */
	return exe_file;
}
#elif defined(GET_TASK_EXE_NOT_EXPORTED)
#ifndef HAVE_GET_FILE_RCU_DOUBLE_POINTER
//copy from 5.15
/*
 * Lockless variant, copied from kernel v5.15 (single-pointer get_file_rcu()).
 * mm->exe_file is read under RCU; get_file_rcu() returning zero means a
 * reference could not be taken, so report "no exe file" instead of handing
 * out an unpinned pointer. On success the caller owns the reference.
 */
static struct file *get_mm_exe_file_impl(struct mm_struct *mm)
{
struct file *exe_file;
rcu_read_lock();
exe_file = rcu_dereference(mm->exe_file);
if (exe_file && !get_file_rcu(exe_file))
exe_file = NULL; // could not pin the file; treat as absent
rcu_read_unlock();
return exe_file;
}
#else
//copy from 6.7
/*
 * Lockless variant, copied from kernel v6.7: here get_file_rcu() takes a
 * double pointer and itself returns the pinned file (or NULL). On success
 * the caller owns the reference and must fput() it.
 */
static struct file *get_mm_exe_file_impl(struct mm_struct *mm)
{
struct file *exe_file;
rcu_read_lock();
exe_file = get_file_rcu(&mm->exe_file);
rcu_read_unlock();
return exe_file;
}
#endif
/*
 * Compat implementation of get_task_exe_file() for kernels where the real
 * symbol exists but is not exported. Returns a referenced struct file
 * (caller must fput()) or NULL.
 */
struct file *get_task_exe_file_impl(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	/* A kernel thread (PF_KTHREAD) may temporarily borrow a user mm,
	 * but it has no executable of its own — skip it. */
	if (mm && !(task->flags & PF_KTHREAD))
		exe_file = get_mm_exe_file_impl(mm);
	task_unlock(task);
	return exe_file;
}
#endif
#ifndef HAVE_KALLSYMS_LOOKUP_NAME
// {
#include <linux/kallsyms.h> // kallsyms_on_each_symbol()
typedef struct {
const char *name;
unsigned long kallsyms_address;
} kallsyms_lookup_name_ctx_t;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
#include <linux/kprobes.h>
static unsigned long kprobe_find_symbol(const char *symbol_name)
{
void *symbol_ptr = 0;
char symbol_name_buffer[NAME_MAX] = {0};
struct kprobe kp = {
.symbol_name = symbol_name,
.addr = 0,
};
IPRINTF("searching for [%s] by kprobe", symbol_name);
register_kprobe(&kp);
symbol_ptr = (void *)kp.addr;
unregister_kprobe(&kp);
IPRINTF("%s: [0x%p] [%pS]", symbol_name, symbol_ptr, symbol_ptr);
snprintf(symbol_name_buffer, NAME_MAX, "%ps", symbol_ptr);
if (strncmp(symbol_name_buffer, symbol_name, strlen(symbol_name)) != 0)
{
EPRINTF("%s address [%p] is wrong", symbol_name, (void *)symbol_ptr);
return 0;
}
return (unsigned long)symbol_ptr;
}
#else
/*
 * kallsyms_on_each_symbol() callback: record the address of the symbol
 * whose name matches ctx->name. Returning non-zero stops the iteration.
 */
static int kallsyms_lookup_name_on_each_symbol_cb(void *data,
	const char *namebuf, struct module *module,
	unsigned long kallsyms_address)
{
	kallsyms_lookup_name_ctx_t *ctx = data;

	(void) module;
	if (strcmp(namebuf, ctx->name) != 0)
		return 0; /* not ours — keep iterating */
	ctx->kallsyms_address = kallsyms_address;
	return 1; /* found — stop the walk */
}
#endif
/*
 * Compat replacement for kallsyms_lookup_name(), which stopped being
 * exported in v5.7. On newer kernels fall back to the kprobe trick;
 * on older ones walk the symbol table via kallsyms_on_each_symbol().
 * Returns the symbol address, or 0 when not found.
 *
 * Fix: removed an unreachable trailing 'return 0;' in the pre-5.7 branch
 * (both arms of the preceding if/else already return).
 */
unsigned long compat_kallsyms_lookup_name(const char *name)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
	return kprobe_find_symbol(name);
#else
	kallsyms_lookup_name_ctx_t ctx = {
		.name = name,
		.kallsyms_address = 0,
	};
	if (kallsyms_on_each_symbol(kallsyms_lookup_name_on_each_symbol_cb, &ctx)) {
		DPRINTF("name='%s' address=0x%lx", name, ctx.kallsyms_address);
		return ctx.kallsyms_address;
	} else {
		DPRINTF("name='%s' not found", name);
		return 0;
	}
#endif
}
// }
#endif
// tracepoint registration interface was modified in 'stable/v3.15'
#ifdef HAVE_TRACEPOINT_PROBE_REGISTER_STRUCT
// {
#include <linux/tracepoint.h> // for_each_kernel_tracepoint(),
// tracepoint_probe_register(),
// tracepoint_probe_unregister()
typedef struct {
const char *name;
struct tracepoint *tracepoint;
} lookup_tracepoint_ctx_t;
/*
 * for_each_kernel_tracepoint() callback: remember the tracepoint whose
 * name matches ctx->name (the iteration itself cannot be aborted).
 */
static void lookup_tracepoint_on_each_tracepoint_cb(struct tracepoint *tp, void *data)
{
	lookup_tracepoint_ctx_t *ctx = data;

	if (strcmp(tp->name, ctx->name) != 0)
		return;
	ctx->tracepoint = tp;
}
static struct tracepoint *lookup_tracepoint(const char *name)
{
lookup_tracepoint_ctx_t ctx = {
.name = name,
.tracepoint = 0,
};
for_each_kernel_tracepoint(lookup_tracepoint_on_each_tracepoint_cb, &ctx);
return ctx.tracepoint;
}
/*
 * Register 'probe' on the tracepoint called 'name' using the >= v3.15
 * struct-based API. Returns -ENOSYS when the tracepoint does not exist,
 * otherwise the result of tracepoint_probe_register().
 */
int tracepoint_probe_register_compat(const char *name, void *probe, void *data)
{
	struct tracepoint *tp = lookup_tracepoint(name);

	if (!tp)
		return -ENOSYS;
	return tracepoint_probe_register(tp, probe, data);
}
/*
 * Unregister 'probe' from the tracepoint called 'name'. Returns -ENOSYS
 * when the tracepoint does not exist, otherwise the result of
 * tracepoint_probe_unregister().
 */
int tracepoint_probe_unregister_compat(const char *name, void *probe, void *data)
{
	struct tracepoint *tp = lookup_tracepoint(name);

	if (!tp)
		return -ENOSYS;
	return tracepoint_probe_unregister(tp, probe, data);
}
// }
#endif
// rbtree postorder iteration functions appeared in 'stable/v3.12'
#if !defined(HAVE_RB_FIRST_POSTORDER) || !defined(HAVE_RB_NEXT_POSTORDER)
/*
 * Descend to the deepest leaf, preferring the left child at every level.
 * This is the first node of a postorder traversal of the subtree rooted
 * at 'node'. The const is cast away to match the non-const return type
 * callers expect.
 */
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
{
	while (node->rb_left || node->rb_right)
		node = node->rb_left ? node->rb_left : node->rb_right;
	return (struct rb_node *)node;
}
#endif
#ifndef HAVE_RB_NEXT_POSTORDER
/*
 * Postorder successor of 'node' (backport of the >= v3.12 kernel helper;
 * kept verbatim to match upstream). Returns NULL once the traversal is
 * exhausted (i.e. after the root, whose parent is NULL).
 */
struct rb_node *rb_next_postorder(const struct rb_node *node)
{
const struct rb_node *parent;
if (!node)
return NULL;
parent = rb_parent(node);
/* If we're sitting on node, we've already seen our children */
if (parent && node == parent->rb_left && parent->rb_right) {
/* If we are the parent's left node, go to the parent's right
 * node then all the way down to the left */
return rb_left_deepest_node(parent->rb_right);
} else
/* Otherwise we are the parent's right node, and the parent
 * should be next */
return (struct rb_node *)parent;
}
#endif
#ifndef HAVE_RB_FIRST_POSTORDER
/*
 * First node of a postorder traversal of 'root' (backport of the
 * >= v3.12 kernel helper). Returns NULL for an empty tree.
 */
struct rb_node *rb_first_postorder(const struct rb_root *root)
{
	return root->rb_node ? rb_left_deepest_node(root->rb_node) : NULL;
}
#endif
// Cached address of the kernel's non-exported d_absolute_path(); NULL until
// compat_init() resolves it (and stays NULL if the lookup fails).
d_absolute_path_compat_fn g_d_absolute_path_compat = NULL;
/*
 * One-time initialization of the compat layer: resolve symbols that are
 * not exported to modules via kallsyms lookup.
 */
void compat_init(void)
{
g_d_absolute_path_compat = (d_absolute_path_compat_fn) compat_kallsyms_lookup_name("d_absolute_path");
}
#ifdef STATX_BTIME
// Filter by magics that can support btime and can get it quickly
// Local fses that support btime
#ifndef BTRFS_SUPER_MAGIC
#define BTRFS_SUPER_MAGIC 0x9123683E
#endif
#ifndef EXFAT_SUPER_MAGIC
#define EXFAT_SUPER_MAGIC 0x2011BAB0
#endif
#ifndef EXT4_SUPER_MAGIC
#define EXT4_SUPER_MAGIC 0xEF53
#endif
#ifndef F2FS_SUPER_MAGIC
#define F2FS_SUPER_MAGIC 0xF2F52010
#endif
#ifndef MSDOS_SUPER_MAGIC
#define MSDOS_SUPER_MAGIC 0x4d44
#endif
#ifndef HFSPLUS_VOLHEAD_SIG
#define HFSPLUS_VOLHEAD_SIG 0x482B
#endif
#ifndef XFS_SUPER_MAGIC
#define XFS_SUPER_MAGIC 0x58465342
#endif
// Network fses that support btime and can get it quickly by means of AT_STATX_DONT_SYNC
#ifndef NFS_SUPER_MAGIC
#define NFS_SUPER_MAGIC 0x6969
#endif
#ifndef FUSE_SUPER_MAGIC
#define FUSE_SUPER_MAGIC 0x65735546
#endif
#ifndef CIFS_SUPER_MAGIC
#define CIFS_SUPER_MAGIC 0xFF534D42
#endif
/*
 * Whitelist check: does this inode's filesystem support btime and allow
 * retrieving it cheaply? Local filesystems are always eligible; network
 * filesystems only on kernel versions where they honor
 * AT_STATX_DONT_SYNC (NFS >= 4.16, FUSE >= 4.18, CIFS >= 5.7).
 */
static inline bool is_magic_allowed_for_fast_getattr(const struct inode* ino)
{
	switch (ino->i_sb->s_magic) {
	/* local filesystems with btime support */
	case BTRFS_SUPER_MAGIC:
	case EXFAT_SUPER_MAGIC:
	case EXT4_SUPER_MAGIC:
	case F2FS_SUPER_MAGIC:
	case MSDOS_SUPER_MAGIC:
	case HFSPLUS_VOLHEAD_SIG:
	case XFS_SUPER_MAGIC:
		return true;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
	case NFS_SUPER_MAGIC:
		return true;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
	case FUSE_SUPER_MAGIC:
		return true;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
	case CIFS_SUPER_MAGIC:
		return true;
#endif
	default:
		return false;
	}
}
/*
 * Fetch basic attributes plus btime for 'path' without forcing a remote
 * sync (AT_STATX_DONT_SYNC). Returns -EINVAL when the filesystem is not
 * on the fast-getattr whitelist, otherwise the vfs_getattr_nosec() result.
 */
int vfs_getattr_basic_fast(const struct path *path, struct kstat *stat)
{
	const unsigned int request_mask = STATX_TYPE | STATX_MODE
		| STATX_UID | STATX_GID
		| STATX_ATIME | STATX_MTIME | STATX_CTIME
		| STATX_SIZE | STATX_BTIME;

	if (!is_magic_allowed_for_fast_getattr(path->dentry->d_inode))
		return -EINVAL;
	return vfs_getattr_nosec(path, stat, request_mask, AT_STATX_DONT_SYNC);
}
#endif