/* snumbd26.c
Copyright (C) Acronis, 2004
Written by Vladimir Simonov
$Id: snumbd26.c 1262030 2018-01-11 12:33:11Z marina $
*/
#ifdef HAVE_LINUX_CONFIG
#include <linux/config.h>
#elif defined(HAVE_LINUX_AUTOCONF)
#include <linux/autoconf.h>
#elif defined(HAVE_GENERATED_AUTOCONF)
#include <generated/autoconf.h>
#else
#warning "neither linux/config.h nor linux/autoconf.h or generated/autoconf.h found"
#endif
#ifdef HAVE_SCHED_SIGNAL_H
#include <linux/sched/signal.h>
#endif
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
#include <asm/system.h>
#endif
#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/interrupt.h> /* for in_interrupt */
#include <linux/poll.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#ifdef HAVE_IOCTL32_CONVERSIONS
#include <linux/ioctl32.h>
#endif
#include "snumbd.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
#define sn_request_queue request_queue_t
#else
#define sn_request_queue struct request_queue
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
#define sn_bio_endio(x) bio_endio(x, sn_bio_bi_size(x), 0)
#define sn_bio_io_error(x) bio_io_error(x, sn_bio_bi_size(x))
#else
#ifdef HAVE_BIO_ENDIO_2ARGS
#define sn_bio_endio(x) bio_endio(x, 0)
#else
#define sn_bio_endio(x) bio_endio(x)
#endif /* HAVE_BIO_ENDIO_2ARGS */
#define sn_bio_io_error(x) bio_io_error(x)
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
#define USE_KERNEL_THREAD
#endif
#ifdef HAVE_KMAP_ATOMIC_2ARGS
#define sn_kmap_atomic(a) kmap_atomic(a, KM_USER0)
#define sn_kunmap_atomic(a) kunmap_atomic(a, KM_USER0)
#else /* 1 argument */
#define sn_kmap_atomic(a) kmap_atomic(a)
#define sn_kunmap_atomic(a) kunmap_atomic(a)
#endif
#ifdef HAVE_ASM_HAVE_SET_MB
#define sn_set_mb set_mb
#else
#define sn_set_mb smp_store_mb
#endif
#ifndef HAVE_REQ_WRITE
#define REQ_WRITE (1 << BIO_RW)
#endif
#ifndef HAVE_PAGE_CACHE_RELEASE
#define page_cache_release(page) put_page(page)
#endif
#define MAX_MINOR 255
#ifndef BIO_MAX_PAGES
#define BIO_MAX_PAGES 256
#endif
#define SN_MMAP_SIZE (BIO_MAX_PAGES << PAGE_SHIFT)
#define DEBUG 0
#define DEBUG_API (1 << 1)
#define DEBUG_ALLOC (1 << 2)
#define DEBUG_BIO (1 << 3)
#define DEBUG_INTERNALS (1 << 4)
#define DEBUG_LEVEL (DEBUG_API)
#if DEBUG
#define inline
#define sa_debug(level, fmt, arg...) \
do { \
static const char *func = __FUNCTION__; \
if (((level) & DEBUG_LEVEL) && snumbd_printk_rate_limit())\
printk(KERN_DEBUG "%s(%s,%d): " fmt, func, \
current->comm, current->pid, ##arg); \
} while (0)
#else
#define sa_debug(fmt,arg...) do { } while (0)
#endif
#define sa_kdebug(fmt, arg...) \
do { \
static const char *func= __FUNCTION__; \
if (snumbd_printk_rate_limit()) \
printk(KERN_DEBUG "%s(%s,%d): " fmt, func,\
current->comm, current->pid, ##arg); \
} while (0)
#define sa_info(fmt, arg...) \
do { \
static const char *func = __FUNCTION__; \
if (snumbd_printk_rate_limit()) \
printk(KERN_INFO "%s(%s,%d): " fmt, func,\
current->comm, current->pid, ##arg); \
} while (0)
#define sa_warn(fmt, arg...) \
do { \
static const char *func = __FUNCTION__; \
if (snumbd_printk_rate_limit()) \
printk(KERN_WARNING "%s(%s,%d): " fmt, func,\
current->comm, current->pid, ##arg); \
} while (0)
#define sa_error(fmt, arg...) \
do { \
static const char *func = __FUNCTION__; \
if (snumbd_printk_rate_limit()) \
printk(KERN_ERR "%s(%s,%d): " fmt, func,\
current->comm, current->pid, ##arg); \
} while (0)
#define sa_BUG(fmt, arg...) \
do { \
static const char *func = __FUNCTION__; \
printk(KERN_CRIT "%s(%s,%d): " fmt, func, \
current->comm, current->pid, ##arg); \
BUG(); \
} while (0)
#define sn_is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
#if defined(__x86_64) && defined (HAVE_IOCTL32_H) && defined(CONFIG_COMPAT) && !defined(HAVE_COMPAT_IOCTL)
#define HAVE_IOCTL32_CONVERSION
#endif
#ifdef HAVE_FOPS_RELEASE_VOID
#define FILE_OPS_RELEASE_RETURN_VALUE void
#define FILE_OPS_RELEASE_RETURN_STATUS
#else
#define FILE_OPS_RELEASE_RETURN_VALUE int
#define FILE_OPS_RELEASE_RETURN_STATUS 0
#endif
static int snumbd_init_ok;
static int snumbdctl_major;
static int snumbd_major;
static LIST_HEAD(sessions_list);
static LIST_HEAD(notinited_list);
static int sessions_count;
/* sessions_list & noninit_sessions_list protection */
#ifdef HAVE_SPIN_LOCK_UNLOCKED
static spinlock_t sessions_lock = SPIN_LOCK_UNLOCKED;
#else
static DEFINE_SPINLOCK(sessions_lock);
#endif
/* protects 'session->s_disk->private_data' */
#ifdef HAVE_SPIN_LOCK_UNLOCKED
static spinlock_t disk_lock = SPIN_LOCK_UNLOCKED;
#else
static DEFINE_SPINLOCK(disk_lock);
#endif
#ifdef __GFP_HIGHIO
#define GFP_SNAPHIGH (__GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
#else
#define GFP_SNAPHIGH (__GFP_IO | __GFP_FS | __GFP_HIGHMEM)
#endif
#define TIMER_INTERVAL (900*HZ)
#ifndef HAVE_FMODE_T
typedef unsigned int fmode_t;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
#define HAVE_BIO_BVEC_ITER 1
#endif
#ifdef HAVE_BIO_BVEC_ITER
#define bio_for_each_segment4(bv, bvp, b, i) \
bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
#else
#define bio_for_each_segment4(bv, bvp, b, i) \
bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;
#endif
#ifdef HAVE_VM_FAULT_2ARGS
#define snumbdctl_vm_fault(a, b) snumbdctl_vm_fault(a, b)
#else
#define snumbdctl_vm_fault(a, b) snumbdctl_vm_fault(b)
#endif
#ifdef HAVE_VMFAULT_T
#define VMFAULT_RETURN_VALUE vm_fault_t
#else
#define VMFAULT_RETURN_VALUE int
#endif
/*
 * Return the starting sector of @bio, hiding the bvec-iterator API split:
 * newer kernels keep it in bio->bi_iter, older ones directly in the bio.
 */
static inline sector_t sn_bio_bi_sector(struct bio *bio)
{
#ifdef HAVE_BVEC_ITER
return bio->bi_iter.bi_sector;
#else
return bio->bi_sector;
#endif
}
/*
 * Return the remaining byte count of @bio, hiding the bvec-iterator
 * API split (bi_iter vs. legacy bi_size field).
 */
static inline unsigned int sn_bio_bi_size(struct bio *bio)
{
#ifdef HAVE_BVEC_ITER
return bio->bi_iter.bi_size;
#else
return bio->bi_size;
#endif
}
/*
 * Return the device number the bio is targeted at; on newer kernels the
 * bio carries a disk pointer and bio_dev() must be used instead of bi_bdev.
 */
static dev_t sn_bio_dev(struct bio *bio)
{
#ifndef HAVE_BIO_SET_DEV
return bio->bi_bdev->bd_dev;
#else
return bio_dev(bio);
#endif
}
/*
 * Per-control-fd session state.  Created in snumbdctl_open(), initialised by
 * SNUMBDCTL_INIT, and freed by put_session() when the last reference drops.
 * Also backs the exported block device via s_disk->private_data.
 */
struct session_struct {
struct list_head s_list; /* under sessions_lock */
dev_t s_kdev; /* device number of the exported disk (0 until inited) */
unsigned long long s_scount; /* sectors count */
unsigned int s_sector_size; /* sector size */
volatile unsigned int s_state; /* SNUM_NOTINITED/SNUM_INITED/SNUM_SESSION_ERR */
atomic_t s_users; /* reference count; kfree on last put */
int s_ro; /* read-only mode */
unsigned int s_hpid; /* pid of the creating (handler) process */
struct gendisk * s_disk; /* exported disk, NULL after unregister_device() */
struct bio * s_bio;
spinlock_t s_misc_lock; /* protects from here to */
/* s_vma */
unsigned int s_ioctlcnt; /* state data */
unsigned int s_ioctlcnt_prev;
struct vm_area_struct * s_vma; /* userspace mapping of s_mpages, if any */
struct page * s_mpages[BIO_MAX_PAGES];/* mmapped pages */
struct semaphore s_sem; /* session_struct access serialization */
unsigned int s_gpages; /* got pages */
unsigned int s_ppages; /* put pages */
unsigned int s_reads; /* total reads count */
pid_t s_apgrp[2]; /* allowed pgrps */
wait_queue_head_t req_put_wq; /* kernel waits for space to put request */
wait_queue_head_t req_get_wq; /* userspace waits for request to handle */
#define MT_REQ_MAX 4
struct bio * bio[MT_REQ_MAX]; /* in-flight bios awaiting userspace handling */
pid_t tag[MT_REQ_MAX]; /* pid of the userspace thread servicing bio[i] */
unsigned bio_count; /* number of occupied bio[] slots */
unsigned tag_count; /* number of claimed tag[] slots */
};
static void unregister_device(struct session_struct * s);
/* A session accepts I/O and requests only while in the SNUM_INITED state. */
inline static int is_session_usable(struct session_struct *s)
{
	return s->s_state == SNUM_INITED;
}
/*
 * Move the session into the error state and abort all queued bios with
 * -EIO, then wake both wait queues so any sleepers (kernel submitters and
 * userspace readers) notice the state change and bail out.
 */
static void shutdown_session(struct session_struct *s)
{
unsigned i;
down(&s->s_sem);
/* publish the error state before failing requests */
sn_set_mb(s->s_state, SNUM_SESSION_ERR);
/* fail all requests*/
for (i = 0; i < MT_REQ_MAX; ++i) {
struct bio *bio = s->bio[i];
if (bio) {
s->bio[i] = NULL;
s->tag[i] = 0;
sn_bio_io_error(bio);
}
}
s->bio_count = 0;
s->tag_count = 0;
wake_up_all(&s->req_put_wq);
wake_up_all(&s->req_get_wq);
up(&s->s_sem);
}
/* Take a reference on the session; dropped with put_session(). */
static void get_session(struct session_struct *s)
{
atomic_inc(&s->s_users);
}
/*
 * Drop a session reference.  On the last put, tear down the exported disk,
 * unlink the session from whichever global list holds it (sessions_list or
 * notinited_list) and free it.
 */
static void put_session(struct session_struct *s)
{
if (atomic_dec_and_test(&s->s_users)) {
/* unregister before unlinking so opens via the disk see NULL private_data */
unregister_device(s);
spin_lock(&sessions_lock);
list_del_init(&s->s_list);
s->s_kdev = 0;
sessions_count--;
spin_unlock(&sessions_lock);
kfree(s);
}
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
#include <linux/nsproxy.h>
/*
 * Return the process group of 'current' as seen in its own pid namespace.
 * Falls back to 1 (init's pgrp) when the task has no nsproxy, which can
 * happen during task exit.
 */
static pid_t sn_current_pgrp(void)
{
if (!current->nsproxy)
return 1;
#ifdef HAVE_PID_NS_CHILDREN
return task_pgrp_nr_ns(current, current->nsproxy->pid_ns_for_children);
#else
return task_pgrp_nr_ns(current, current->nsproxy->pid_ns);
#endif
}
#else
/* Pre-2.6.24 kernels: no pid namespaces, use the plain process group. */
static pid_t sn_current_pgrp(void)
{
return process_group(current);
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
/*
 * Configure the queue's block size.  Since 2.6.31 logical/physical sizes
 * and minimum I/O size are set separately; all three are set to @size.
 */
static void sn_set_queue_block_size(struct request_queue *q, unsigned short size)
{
blk_queue_logical_block_size(q, size);
blk_queue_physical_block_size(q, size);
blk_queue_io_min(q, size);
}
#else
/* Older kernels expose a single "hardsect" size knob. */
static void sn_set_queue_block_size(struct request_queue *q, unsigned short size)
{
blk_queue_hardsect_size(q, size);
}
#endif
/*
 * Nonzero when @bio is a write.  Uses op_is_write()/bio_op() where
 * available, otherwise tests the legacy REQ_WRITE bit in bi_rw.
 */
static int sn_op_is_write(struct bio *bio)
{
#ifndef HAVE_OP_IS_WRITE
return bio->bi_rw & REQ_WRITE;
#else
return op_is_write(bio_op(bio));
#endif
}
#ifdef HAVE_OP_IS_WRITE
#define BIO_RW_RETURN_VALUE unsigned int
#else
#define BIO_RW_RETURN_VALUE unsigned long
#endif
#if (DEBUG != 0)
/* Debug-only: return the raw request flags word (bi_opf or legacy bi_rw). */
static BIO_RW_RETURN_VALUE get_bio_req_flags(struct bio *bio)
{
#ifdef HAVE_BIO_OPF
return bio->bi_opf;
#else
return bio->bi_rw;
#endif
}
#endif
/*
 * Simple printk rate limiter: allow at most 10 messages per burst, and
 * reset the budget once more than a second has passed since the last
 * emitted message.  Returns 1 when the caller may print, 0 otherwise.
 */
static int snumbd_printk_rate_limit(void)
{
	static unsigned long emitted, stamp;

	if (jiffies - stamp > HZ)
		emitted = 0;
	if (emitted < 10) {
		stamp = jiffies;
		emitted++;
		return 1;
	}
	return 0;
}
/*
 * Pick the smallest minor number (>= 1) not already used by a registered
 * session.  Caller must hold sessions_lock to keep the list stable.
 */
static inline int get_free_minor(void)
{
	int minor = 0;
	int busy;

	do {
		struct list_head *pos;

		minor++;
		busy = 0;
		list_for_each(pos, &sessions_list) {
			struct session_struct *ss;

			ss = list_entry(pos, struct session_struct, s_list);
			if (ss->s_kdev == MKDEV(snumbd_major, minor)) {
				busy = 1;
				break;
			}
		}
	} while (busy);
	return minor;
}
/*
 * Block device ioctl handler: the exported disk accepts no ioctls of its
 * own, so everything is rejected with -ENOTTY (debug builds log the call).
 */
static int snumbd_ioctl_blk(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
{
#if DEBUG
struct session_struct *s = bdev->bd_disk->private_data;
if (s)
sa_debug(DEBUG_API, "s=%p dev=%x\n", s, s->s_kdev);
#endif
return -ENOTTY;
}
/*
 * Return 1 when a task with pid @pid is an ancestor of 'current', walking
 * the parent chain up to init under rcu_read_lock().  On CONFIG_PREEMPT_RCU
 * kernels the walk is skipped entirely and 0 is returned
 * (NOTE(review): presumably to avoid sleeping/preemption hazards in the
 * callers' context — confirm the original rationale).
 */
static int snumbd_is_parent_task(unsigned int pid)
{
#ifdef CONFIG_PREEMPT_RCU
return 0;
#else
int ret = 0;
struct task_struct* task;
rcu_read_lock();
task = current;
while (task && (task != &init_task)) {
if (task->pid == pid) {
ret = 1;
break;
}
task = rcu_dereference(task->parent);
}
rcu_read_unlock();
return ret;
#endif
}
/*
 * Block device 'open' handler.
 *
 * Looks up the owning session via disk->private_data (cleared under
 * disk_lock during teardown), rejects processes outside the creator's
 * process groups, takes a session reference and, when the cached bdev
 * size disagrees with the session geometry (e.g. on first open), publishes
 * the device size, block size and read-only flag.
 *
 * Returns 0 on success or a negative errno.  The matching put_session()
 * is done in snumbd_release_blk().
 */
static int snumbd_open_blk(struct block_device *bdev, fmode_t mode)
{
	int users;
	pid_t pgrp;
	struct session_struct *s;
	loff_t old_size;
	loff_t new_size;

	spin_lock(&disk_lock);
	s = bdev->bd_disk->private_data;
	if (!s) {
		spin_unlock(&disk_lock);
		return -ENODEV;
	}
	users = atomic_read(&s->s_users);
	spin_unlock(&disk_lock);
	if (users == 0)
	{
		sa_info("dying session detected...(%p, %x)\n", s, s->s_kdev);
		return -ENODEV;
	}
	pgrp = sn_current_pgrp();
	/*
	Allow to open device only programs in device creator's group.
	This eliminates problems with device access(reference) from
	udev, multipathd, automount and others.
	*/
	if ((pgrp != s->s_apgrp[0]) && (pgrp != s->s_apgrp[1]) && (!snumbd_is_parent_task(s->s_hpid))) {
		sa_info("Disable access (%d,%d,%d)...\n", pgrp, s->s_apgrp[0], s->s_apgrp[1]);
		return -EACCES;
	}
	get_session(s);
	sa_debug(DEBUG_API, "s=%p dev=%x users=%d\n", s, s->s_kdev, users);
	/* Note: On first open bdev has zero size */
	old_size = i_size_read(bdev->bd_inode);
	new_size = s->s_scount * s->s_sector_size;
	if (old_size != new_size) {
#ifdef HAVE_SET_CAPACITY
		/*
		 * BUGFIX: set_capacity() takes the size in 512-byte sectors,
		 * not bytes (cf. register_device() and the
		 * bd_set_nr_sectors() branch below).  Passing the byte count
		 * directly inflated the advertised capacity 512-fold.
		 */
		set_capacity(bdev->bd_disk, new_size >> 9);
#elif defined(HAVE_BD_SET_NR_SECTORS)
		bd_set_nr_sectors(bdev, new_size >> SECTOR_SHIFT);
#else
		bd_set_size(bdev, new_size);
#endif
		set_blocksize(bdev, s->s_sector_size);
#ifdef HAVE_SET_DEVICE_RO
		set_device_ro(bdev, (s->s_ro != 0));
#else
		bdev->bd_read_only = (s->s_ro != 0);
#endif
	}
	/* Note: 'put_session()' will be done in 'snumbd_release_blk()' */
	return 0;
}
/*
 * Block device 'release' handler: drop the reference taken in
 * snumbd_open_blk().  If private_data is already NULL the device is being
 * torn down and there is nothing to release.
 */
static FILE_OPS_RELEASE_RETURN_VALUE snumbd_release_blk(struct gendisk *disk, fmode_t mode)
{
struct session_struct *s;
spin_lock(&disk_lock);
s = disk->private_data;
if (!s) {
spin_unlock(&disk_lock);
return FILE_OPS_RELEASE_RETURN_STATUS;
}
spin_unlock(&disk_lock);
sa_debug(DEBUG_API, "s=%p dev=%x\n", s, s->s_kdev);
/* Note: respective 'get_session()' has been done in 'snumbd_open_blk()' */
put_session(s);
return FILE_OPS_RELEASE_RETURN_STATUS;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
/*
 * Pre-2.6.28 block_device_operations take (inode, file) rather than
 * (block_device/gendisk, fmode_t); these shims adapt the old signatures
 * to the *_blk implementations above, passing mode = 1.
 */
static int snumbd_open(struct inode *inode, struct file *file)
{
struct block_device *bdev;
bdev = inode->i_bdev;
return snumbd_open_blk(bdev, 1);
}
static FILE_OPS_RELEASE_RETURN_VALUE snumbd_release(struct inode *inode, struct file *file)
{
struct gendisk *disk;
disk = inode->i_bdev->bd_disk;
return snumbd_release_blk(disk, 1);
}
static int snumbd_ioctl(struct inode *inode, struct file *file, unsigned cmd,
unsigned long arg)
{
struct block_device *bdev;
bdev = inode->i_bdev;
return snumbd_ioctl_blk(bdev, 1, cmd, arg);
}
#else
/* Modern kernels: the *_blk functions match the expected signatures. */
#define snumbd_open snumbd_open_blk
#define snumbd_ioctl snumbd_ioctl_blk
#define snumbd_release snumbd_release_blk
#endif
#if defined (HAVE_BDOPS_SUBMIT_BIO_BLK_QC_T) || defined(HAVE_BDOPS_SUBMIT_BIO_VOID)
#define HAVE_BDOPS_SUBMIT_BIO 1
#endif
#ifdef HAVE_BDOPS_SUBMIT_BIO
#include "kernel_config.h"
#ifdef HAVE_BDOPS_SUBMIT_BIO_VOID
#define MAKE_REQUEST_RETURN_VALUE void
#define MAKE_REQUEST_EXIT_STATUS
#else
#define MAKE_REQUEST_RETURN_VALUE blk_qc_t
#define MAKE_REQUEST_EXIT_STATUS 0
#endif
typedef MAKE_REQUEST_RETURN_VALUE (make_request_fn) (struct bio *bio);
#else
#ifdef HAVE_MAKE_REQUEST_INT
#define MAKE_REQUEST_EXIT_STATUS 0
#define MAKE_REQUEST_RETURN_VALUE int
#elif defined(HAVE_MAKE_REQUEST_BLK_QC_T)
#define MAKE_REQUEST_EXIT_STATUS 0
#define MAKE_REQUEST_RETURN_VALUE blk_qc_t
#else
#define MAKE_REQUEST_EXIT_STATUS
#define MAKE_REQUEST_RETURN_VALUE void
#endif
#endif
#ifndef HAVE_BDOPS_SUBMIT_BIO
static MAKE_REQUEST_RETURN_VALUE snumbd_make_request(sn_request_queue *q, struct bio *bio);
#else
static MAKE_REQUEST_RETURN_VALUE snumbd_make_request(struct bio *bio);
#endif
/* Operations of the exported block device (/dev/snumbdNd). */
static struct block_device_operations snumbd_bdops = {
.owner = THIS_MODULE,
.open = snumbd_open,
.ioctl = snumbd_ioctl,
.release = snumbd_release,
#ifdef HAVE_BDOPS_SUBMIT_BIO
/* bio submission moved into block_device_operations on newer kernels */
.submit_bio = snumbd_make_request,
#endif
};
/*
 * Allocate and publish the block device for an initialised session:
 * request queue + gendisk (via the kernel-version-appropriate API),
 * device numbers/name, capacity and queue limits, then add_disk().
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int register_device(struct session_struct * s)
{
sn_request_queue *queue;
int ret;
ret = -ENOMEM;
sa_debug(DEBUG_API, "s=%p\n", s);
#ifndef HAVE_BLK_ALLOC_DISK
/* Legacy path: queue and disk are allocated separately. */
#ifdef HAVE_BLK_ALLOC_QUEUE_ONE_ARG
queue = blk_alloc_queue(GFP_KERNEL);
#elif defined (HAVE_BLK_ALLOC_QUEUE_RH)
queue = blk_alloc_queue_rh(snumbd_make_request, NUMA_NO_NODE);
#else
queue = blk_alloc_queue(snumbd_make_request, NUMA_NO_NODE);
#endif
if (!queue) {
sa_info("%s\n", "Alloc queue failed");
return ret;
}
s->s_disk = alloc_disk(1);
if (!s->s_disk) {
sa_info("%s\n", "Alloc disk failed");
blk_cleanup_queue(queue);
goto out;
}
s->s_disk->queue = queue;
#else
/* Modern path: blk_alloc_disk() allocates the queue together with the disk. */
s->s_disk = blk_alloc_disk(NUMA_NO_NODE);
if (!s->s_disk) {
sa_info("%s\n", "Alloc disk failed");
goto out;
}
s->s_disk->minors = 1;
queue = s->s_disk->queue;
#endif
s->s_disk->major = MAJOR(s->s_kdev);
s->s_disk->first_minor = MINOR(s->s_kdev);
sprintf(s->s_disk->disk_name, SNUMBD_NAME"%dd", MINOR(s->s_kdev));
s->s_disk->private_data = s;
s->s_disk->fops = &snumbd_bdops;
sa_debug(DEBUG_INTERNALS, "s=%p(%d) users=%d\n", s, s->s_state,
atomic_read(&s->s_users));
/* capacity is in 512-byte sectors: s_scount is in s_sector_size units */
set_capacity(s->s_disk, s->s_scount * (s->s_sector_size / 512));
#ifdef HAVE_BLK_QUEUE_MAKE_REQUEST
/* Note: 'blk_queue_make_request()' resets queue's settings */
blk_queue_make_request(queue, snumbd_make_request);
#endif
sn_set_queue_block_size(queue, s->s_sector_size);
#ifdef HAVE_QUEUE_MAX_HW_SECTORS
blk_queue_max_hw_sectors(queue, 2048);
#else
blk_queue_max_sectors(queue, 2048);
#endif
add_disk(s->s_disk);
return 0;
out:
return ret;
}
/*
 * Tear down the exported disk, if any: detach the session pointer under
 * disk_lock (so concurrent opens fail with -ENODEV), remove the gendisk
 * and release queue/disk via the kernel-version-appropriate API.
 */
static void unregister_device(struct session_struct * s)
{
sa_debug(DEBUG_API, "s=%p\n", s);
if (s->s_disk) {
#ifndef HAVE_BLK_CLEANUP_DISK
#ifdef HAVE_BLK_CLEANUP_QUEUE
sn_request_queue *queue;
#endif
#endif
sa_debug(DEBUG_INTERNALS, "s=%p(%d) users=%d\n", s, s->s_state,
atomic_read(&s->s_users));
spin_lock(&disk_lock);
s->s_disk->private_data = 0;
spin_unlock(&disk_lock);
del_gendisk(s->s_disk);
#ifdef HAVE_BLK_CLEANUP_DISK
blk_cleanup_disk(s->s_disk);
#else
#ifdef HAVE_BLK_CLEANUP_QUEUE
queue = s->s_disk->queue;
if (queue)
blk_cleanup_queue(queue);
#endif
put_disk(s->s_disk);
#endif
s->s_disk = NULL;
}
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
#define SN_NOPAGE_SIGBUS NOPAGE_SIGBUS
#else
#define SN_NOPAGE_SIGBUS VM_FAULT_ERROR
#endif
#if 0
/* Debug helper (currently compiled out): dump an sn_state snapshot to the log. */
static void session_stat(struct sn_state *sn)
{
sa_info("dev=%x:%x, state=%d, blksize=%d, mmapsize=%d\n",
sn->major, sn->minor, sn->state,
sn->blksize, sn->mmapsize);
sa_info("psize=%u, pstrt=%u, mshft=%d, ioctls=%u\n",
sn->partsize, sn->partstrt, sn->minorshft,
sn->ioctlcnt);
sa_info("bhpgs=%d, bhcnt=%d, abhs=%u, fbhs=%u, dbhs=%u\n",
sn->bhpages, sn->bhcount,
sn->abhs, sn->fbhs, sn->dbhs);
sa_info("gpgs=%u, ppgs=%u, emmax=%d, emmin=%d, emcur=%d, cached=%d\n",
sn->gpages, sn->ppages, sn->emmax, sn->emmin,
sn->emcur, sn->cachepages);
sa_info("rblocks=%u, cblocks=%u, rcblocks=%u, rwcolls=%u\n",
sn->rblocks, sn->cblocks, sn->rcblocks,
sn->rwcolls);
}
#endif
/*
 * Snapshot the externally visible session state into @out for the
 * SNUMBDCTL_STATE/STATES ioctls.  @out->sessions is left zero; callers
 * fill it under sessions_lock.
 */
static void fill_state(struct session_struct *s, struct snumbd_state *out)
{
	memset(out, 0, sizeof(*out));
	/* driver version packed as major.minor.subminor */
	out->version = (SNUMBD_VMAJOR << 16) + (SNUMBD_VMINOR << 8) +
		SNUMBD_VSUBMINOR;
	out->state = s->s_state;
	out->major = MAJOR(s->s_kdev);
	out->minor = MINOR(s->s_kdev);
	out->hpid = s->s_hpid;
	out->scount = s->s_scount;
	out->ioctlcnt = s->s_ioctlcnt;
	out->mmapsize = SN_MMAP_SIZE;
	out->users = atomic_read(&s->s_users);
}
/*
 * Final teardown for a control fd: fail outstanding requests via
 * shutdown_session(), release all mmap backing pages, then drop the
 * fd's own reference (which may free the session).
 */
static void close_session(struct session_struct *s)
{
int i;
shutdown_session(s);
down(&s->s_sem);
sa_debug(DEBUG_API, "s=%p, state=%d, users=%d\n", s,
s->s_state, atomic_read(&s->s_users));
for (i = 0; i < BIO_MAX_PAGES; i++) {
if (s->s_mpages[i]) {
sa_debug(DEBUG_ALLOC, "s=%p, page_release=%p(%d)\n", s,
s->s_mpages[i], page_count(s->s_mpages[i]));
s->s_ppages++;
page_cache_release(s->s_mpages[i]);
s->s_mpages[i] = NULL;
}
}
up(&s->s_sem);
put_session(s);
}
#if (DEBUG != 0) && (DEBUG_LEVEL & DEBUG_BIO)
/* Debug-only: log all interesting fields of a bio with a caller-given prefix. */
static void print_bio(struct bio *bio, char *pref)
{
sa_warn("%s bio=%p, dev=%x, sector=%llu, bi_flags=%lx"
" bi_rw=%lx bi_size=%d bi_vcnt=%d bi_io_vec=%p"
" bi_max_vecs=%d\n", pref, bio,
bio->bi_bdev ? bio->bi_bdev->bd_dev : -1,
(unsigned long long)sn_bio_bi_sector(bio), bio->bi_flags,
get_bio_req_flags(bio), sn_bio_bi_size(bio), bio->bi_vcnt, bio->bi_io_vec,
bio->bi_max_vecs);
}
#define dump_bio(x, y) print_bio(x, y)
#else
/* compiled out unless bio debugging is enabled */
#define dump_bio(x, y)
#endif
#ifndef HAVE_BDOPS_SUBMIT_BIO
/*
 * Bio submission entry point for the exported device.  Oversized bios are
 * split (where the kernel supports it) or failed; the owning session is
 * looked up under disk_lock; the bio is then parked in a free bio[] slot
 * for a userspace handler, waiting on req_put_wq when all slots are busy.
 */
static MAKE_REQUEST_RETURN_VALUE snumbd_make_request(sn_request_queue *q, struct bio *bio)
{
#else
static MAKE_REQUEST_RETURN_VALUE snumbd_make_request(struct bio *bio)
{
#endif
struct session_struct *s;
dev_t dev;
int write;
unsigned i;
dev = sn_bio_dev(bio);
if (bio->bi_vcnt > 1)
dump_bio(bio, "snumbd_make_request:");
#ifdef HAVE_BLK_QUEUE_SPLIT
/* try to split bios larger than the shared mmap window */
if (sn_bio_bi_size(bio) > SN_MMAP_SIZE)
#ifdef HAVE_BDOPS_SUBMIT_BIO
blk_queue_split(&bio);
#else
#ifdef HAVE_BLK_QUEUE_SPLIT_3ARGS
blk_queue_split(q, &bio, q->bio_split);
#else
blk_queue_split(q, &bio);
#endif
#endif /* HAVE_BDOPS_SUBMIT_BIO */
#endif
/* still too big (or splitting unavailable): fail the bio */
if (sn_bio_bi_size(bio) > SN_MMAP_SIZE) {
sa_warn("Request size=0x%X exceeds limit=0x%X, bio=%p dev=%x\n", sn_bio_bi_size(bio), SN_MMAP_SIZE, bio, dev);
sn_bio_io_error(bio);
return MAKE_REQUEST_EXIT_STATUS;
}
spin_lock(&disk_lock);
#ifdef HAVE_BIO_BI_BDEV
s = (struct session_struct *)(bio->bi_bdev->bd_disk->private_data);
#else
s = (struct session_struct *)(bio->bi_disk->private_data);
#endif
if (!s) {
spin_unlock(&disk_lock);
sa_warn("Can't find session, bio=%p dev=%x.\n", bio, dev);
sn_bio_io_error(bio);
return MAKE_REQUEST_EXIT_STATUS;
}
/* pin the session before dropping disk_lock */
get_session(s);
spin_unlock(&disk_lock);
sa_debug(DEBUG_INTERNALS, "s=%p state=%d %s(%lu) sector=%llu"
" bi_size=%d \n", s, s->s_state,
(sn_op_is_write(bio)) ? "WRITE" : "READ",
get_bio_req_flags(bio),
(unsigned long long)
sn_bio_bi_sector(bio), sn_bio_bi_size(bio));
write = sn_op_is_write(bio);
retry:
if (!is_session_usable(s)) {
sa_warn("Session is in unusable state=%u, bio=%p dev=%x.\n", s->s_state, bio, dev);
put_session(s);
sn_bio_io_error(bio);
return MAKE_REQUEST_EXIT_STATUS;
}
down(&s->s_sem);
if (s->bio_count >= MT_REQ_MAX) {
/* all slots busy: wait for userspace to complete one, then retry */
up(&s->s_sem);
wait_event(s->req_put_wq, !is_session_usable(s) || s->bio_count < MT_REQ_MAX);
goto retry;
}
/* bio_count < MT_REQ_MAX guarantees a free slot exists */
for (i = 0; i < MT_REQ_MAX; ++i) {
if (!s->bio[i])
break;
}
s->bio[i] = bio;
++s->bio_count;
/* wake a userspace reader blocked in session_req() */
wake_up(&s->req_get_wq);
up(&s->s_sem);
put_session(s);
return MAKE_REQUEST_EXIT_STATUS;
}
/*
 * SNUMBDCTL_INIT(_V2) backend: validate the geometry, allocate the mmap
 * backing pages, claim a minor number, move the session from
 * notinited_list to sessions_list and register the block device.
 * On success the session enters SNUM_INITED.
 *
 * @size: device length in sectors of @sector_size bytes
 * @ro: nonzero for a read-only device
 * @sector_size: must be a power of two in [512, PAGE_SIZE]
 * Returns 0 or a negative errno.
 */
static int session_init(struct session_struct * s, unsigned long long size,
int ro, unsigned int sector_size)
{
int ret;
int minor;
int i;
sa_debug(DEBUG_API, "len=%llu.\n", size);
ret = -EINVAL;
down(&s->s_sem);
if (s->s_state != SNUM_NOTINITED)
goto out;
if (!sn_is_power_of_2(sector_size) || sector_size < 512 || sector_size > PAGE_SIZE) {
sa_warn("Block size is invalid: size=%d", sector_size);
goto out;
}
s->s_scount = size;
s->s_sector_size = sector_size;
s->s_ro = ro;
s->s_hpid = current->pid;
/* both allowed-pgrp slots start as the creator's process group */
s->s_apgrp[0] = sn_current_pgrp();
s->s_apgrp[1] = sn_current_pgrp();
ret = -ENOMEM;
for (i = 0; i < BIO_MAX_PAGES; i++) {
s->s_mpages[i] = alloc_page(GFP_KERNEL);
if (!s->s_mpages[i])
goto out_free;
}
spin_lock(&sessions_lock);
minor = get_free_minor();
ret = -ENODEV;
if (minor > MAX_MINOR) {
spin_unlock(&sessions_lock);
goto out;
}
/* move from notinited_list to sessions_list with the claimed minor */
list_del_init(&s->s_list);
s->s_kdev = MKDEV(snumbd_major, minor);
list_add_tail(&s->s_list, &sessions_list);
spin_unlock(&sessions_lock);
ret = register_device(s);
if (ret) {
/* registration failed: return the session to the uninitialised list */
spin_lock(&sessions_lock);
list_del_init(&s->s_list);
s->s_kdev = 0;
list_add(&s->s_list, &notinited_list);
spin_unlock(&sessions_lock);
goto out_free;
}
sa_kdebug("OK. kdev=%x:%x, len=%llu sect=%u s=%p pgrp=(%d).\n", MAJOR(s->s_kdev),
MINOR(s->s_kdev), s->s_scount, s->s_sector_size, s, s->s_apgrp[0]);
sn_set_mb(s->s_state, SNUM_INITED);
goto out;
out_free:
for (i = 0; i < BIO_MAX_PAGES; i++) {
if (s->s_mpages[i]) {
page_cache_release(s->s_mpages[i]);
s->s_mpages[i] = NULL;
}
}
out:
up(&s->s_sem);
return ret;
}
/*
 * Copy a WRITE bio's payload into the session's mmap pages so userspace
 * can read it.  Bio segments are laid out back-to-back starting at page 0;
 * a segment crossing a page boundary is split across two s_mpages.
 */
static void noinline sn_data_to_user(struct session_struct *s, struct bio *bio)
{
unsigned len;
struct bio_vec bv, *bvp = &bv;
bvec_iterator_t iter;
len = 0;
bio_for_each_segment4(bv, bvp, bio, iter) {
char *kaddr;
int i, count, rest, m_off;
/* destination page index and offset for the bytes copied so far */
i = len >> PAGE_SHIFT;
m_off = len % PAGE_SIZE;
rest = PAGE_SIZE - m_off;
count = (rest < bvp->bv_len) ? rest : bvp->bv_len;
kaddr = sn_kmap_atomic(bvp->bv_page);
memcpy(page_address(s->s_mpages[i]) + m_off,
kaddr + bvp->bv_offset, count);
if (count < bvp->bv_len) {
/* place rest on the next page */
rest = bvp->bv_len - count;
memcpy(page_address(s->s_mpages[i + 1]),
kaddr + bvp->bv_offset + count, rest);
}
len += bvp->bv_len;
sn_kunmap_atomic(kaddr);
}
if (len != sn_bio_bi_size(bio))
sa_warn("Strange bio: s=%p kdev=%x, total(%u)!=size(%u)\n",
s, s->s_kdev, len, sn_bio_bi_size(bio));
}
/*
 * Fill a READ bio's payload from the session's mmap pages after userspace
 * has written the data there.  Mirror image of sn_data_to_user(): segments
 * are consumed back-to-back from page 0, crossing page boundaries as needed.
 */
static void noinline sn_data_from_user(struct session_struct *s, struct bio *bio)
{
unsigned len;
struct bio_vec bv, *bvp = &bv;
bvec_iterator_t iter;
len = 0;
bio_for_each_segment4(bv, bvp, bio, iter) {
char *kaddr;
int i, count, rest, m_off;
/* source page index and offset for the bytes copied so far */
i = len >> PAGE_SHIFT;
m_off = len % PAGE_SIZE;
rest = PAGE_SIZE - m_off;
count = (rest < bvp->bv_len) ? rest : bvp->bv_len;
kaddr = sn_kmap_atomic(bvp->bv_page);
memcpy(kaddr + bvp->bv_offset,
page_address(s->s_mpages[i]) + m_off, count);
if (count < bvp->bv_len) {
/* get rest from the next page */
rest = bvp->bv_len - count;
memcpy(kaddr + bvp->bv_offset + count,
page_address(s->s_mpages[i + 1]), rest);
}
len += bvp->bv_len;
sn_kunmap_atomic(kaddr);
}
if (len != sn_bio_bi_size(bio))
sa_warn("Strange bio: s=%p kdev=%x, total(%u)!=size(%u)\n",
s, s->s_kdev, len, sn_bio_bi_size(bio));
}
/*
 * Hand the next pending bio to a userspace handler (SNUMBDCTL_REQ / read()).
 * Blocks interruptibly until an unclaimed bio exists, tags it with the
 * caller's pid, and copies a struct snumbd_req describing it to @req.
 * For WRITEs the payload is staged into the mmap pages first.
 * Returns 0, -EIO (session unusable), -EACCES (copyout failed) or the
 * wait_event_interruptible() error.
 */
static int session_req(struct session_struct *s, unsigned int size,
void *req)
{
int ret;
struct snumbd_req kreq;
struct bio *bio;
int i;
int write;
retry:
down(&s->s_sem);
if (!is_session_usable(s)) {
up(&s->s_sem);
return -EIO;
}
if (s->tag_count >= s->bio_count) {
/* every queued bio is already claimed: wait for a new one */
up(&s->s_sem);
ret = wait_event_interruptible(s->req_get_wq, !is_session_usable(s) || s->tag_count < s->bio_count);
if (ret) {
goto out;
}
goto retry;
}
/* tag_count < bio_count guarantees an untagged bio slot exists */
for (i = 0; i < MT_REQ_MAX; ++i) {
if (!s->tag[i] && s->bio[i])
break;
}
s->tag[i] = current->pid;
++s->tag_count;
bio = s->bio[i];
write = sn_op_is_write(bio);
kreq.cmd = (write) ? WRITE_DATA : READ_DATA;
kreq.sno = sn_bio_bi_sector(bio);
kreq.offset = 0;
kreq.len = sn_bio_bi_size(bio);
sa_debug(DEBUG_INTERNALS, "s=%p, kdev=%x, size=%u, cmd=%d, state=%d, "
"users=%d.\n", s, s->s_kdev, size, kreq.cmd,
s->s_state, atomic_read(&s->s_users));
if (write) {
/* stage the write payload where userspace can mmap it */
sn_data_to_user(s, bio);
}
if (size > sizeof(kreq))
size = sizeof(kreq);
ret = copy_to_user(req, &kreq, size);
if (ret)
ret = -EACCES;
up(&s->s_sem);
out:
return ret;
}
/*
 * Complete the bio previously claimed by this thread via session_req()
 * (SNUMBDCTL_DATAREADY / write()).  The slot is found by matching the
 * caller's pid against tag[].  For READs the payload is pulled from the
 * mmap pages before bio_endio().  An ERROR_FLAG from userspace (or a
 * failed copyin) shuts the whole session down.
 * Returns 0, -EACCES, -ENOSPC or -EIO.
 */
static int session_dataready(struct session_struct *s, unsigned int size,
const void *req)
{
int ret;
struct snumbd_req kreq;
struct bio *bio;
pid_t tag;
int i;
int write;
if (size > sizeof(kreq))
size = sizeof(kreq);
ret = copy_from_user(&kreq, req, size);
if (ret) {
ret = -EACCES;
shutdown_session(s);
goto out;
}
sa_debug(DEBUG_INTERNALS, "s=%p kdev=%x, size=%u, cmd=%d, state=%d, "
"users=%d.\n", s, s->s_kdev, size, kreq.cmd,
s->s_state, atomic_read(&s->s_users));
if (kreq.cmd & ERROR_FLAG) {
/* userspace could not service the request: fail everything */
ret = -ENOSPC;
shutdown_session(s);
goto out;
}
ret = -EIO;
down(&s->s_sem);
if (!is_session_usable(s)) {
goto unlock;
}
/* locate the slot this thread claimed in session_req() */
tag = current->pid;
for (i = 0; i < MT_REQ_MAX; ++i) {
if (s->tag[i] == tag) {
break;
}
}
if (i >= MT_REQ_MAX) {
goto unlock;
}
bio = s->bio[i];
s->bio[i] = NULL;
s->tag[i] = 0;
--s->tag_count;
--s->bio_count;
write = sn_op_is_write(bio);
if (!write) {
/* READ: pull the data userspace left in the mmap pages */
sn_data_from_user(s, bio);
}
sn_bio_endio(bio);
/* a slot freed up: wake both submitters and readers */
wake_up(&s->req_get_wq);
wake_up(&s->req_put_wq);
ret = 0;
unlock:
up(&s->s_sem);
out:
return ret;
}
/*
 * SNUMBDCTL_STATE backend: copy a snapshot of this session's state
 * (plus the global session count) to userspace, truncated to @size.
 * Returns 0 or -EACCES when the copyout fails.
 */
static int session_state(struct session_struct *s, int size, void *state)
{
	struct snumbd_state st;
	int err;

	fill_state(s, &st);
	spin_lock(&sessions_lock);
	st.sessions = sessions_count;
	spin_unlock(&sessions_lock);
	if (size > sizeof(st))
		size = sizeof(st);
	err = copy_to_user(state, &st, size) ? -EACCES : 0;
	return err;
}
/*
 * SNUMBDCTL_STATES backend: dump the state of every session (initialised
 * first, then uninitialised) into the userspace array at @state, bounded
 * by @size bytes.  Returns 0, -ENOSPC when the buffer is too small, or
 * -EACCES when a copyout fails.
 *
 * NOTE(review): copy_to_user() is called while holding the sessions_lock
 * spinlock; copy_to_user may fault and sleep, which is not allowed in
 * atomic context — confirm whether this needs to be restructured to copy
 * outside the lock.
 */
static int session_states(struct session_struct *s, int size, void *state)
{
struct snumbd_state st;
struct snumbd_state *out;
struct list_head *tmp;
int len;
int ret;
sa_debug(DEBUG_API, "s=%p, size=%d, state=%p\n", s, size, state);
out = state;
len = 0;
ret = -ENOSPC;
spin_lock(&sessions_lock);
list_for_each(tmp, &sessions_list) {
struct session_struct *ss;
ss = list_entry(tmp, struct session_struct, s_list);
fill_state(ss, &st);
st.sessions = sessions_count;
if (size - len < sizeof(st))
goto err_unlock;
sa_debug(DEBUG_INTERNALS, "out=%p, len=%d\n", out, len);
ret = copy_to_user(out, &st, sizeof(st));
if (ret) {
ret = -EACCES;
goto err_unlock;
}
len += sizeof(st);
out++;
}
/* same walk for the sessions that have not been initialised yet */
list_for_each(tmp, &notinited_list) {
struct session_struct *s;
s = list_entry(tmp, struct session_struct, s_list);
fill_state(s, &st);
st.sessions = sessions_count;
if (size - len < sizeof(st))
goto err_unlock;
sa_debug(DEBUG_INTERNALS, "out=%p, len=%d\n", out, len);
ret = copy_to_user(out, &st, sizeof(st));
if (ret) {
ret = -EACCES;
goto err_unlock;
}
len += sizeof(st);
out++;
}
ret = 0;
err_unlock:
spin_unlock(&sessions_lock);
return ret;
}
/*
 * SNUMBDCTL_PGRP backend: record @pgrp as the newest allowed process
 * group, keeping the previous value as a second allowed group.
 * Returns 0, or -EINVAL for non-positive pgrps.
 */
static int session_allowed_pgrp(struct session_struct *s, pid_t pgrp)
{
	if (pgrp > 0) {
		/* slot 0 holds the newest group, slot 1 the previous one */
		s->s_apgrp[1] = s->s_apgrp[0];
		s->s_apgrp[0] = pgrp;
		return 0;
	}
	return -EINVAL;
}
/*
 * Control-device ioctl dispatcher.  Each command copies its argument
 * struct from userspace and forwards to the matching session_* backend.
 * A failed copy_from_user() leaves err at the initial -EFAULT.
 * Returns the backend's result, -EFAULT, -EPERM (module not initialised),
 * -EINVAL (no session on the fd) or -ENOTTY (unknown command).
 */
static long snumbdctl3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int err;
struct session_struct * ss;
if (!snumbd_init_ok)
return -EPERM;
ss = file->private_data;
if (!ss)
return -EINVAL;
err = -EFAULT;
/* bump the ioctl counter reported through SNUMBDCTL_STATE */
spin_lock(&ss->s_misc_lock);
ss->s_ioctlcnt++;
spin_unlock(&ss->s_misc_lock);
switch(cmd) {
case SNUMBDCTL_INIT: {
struct snumbdctl_init s;
if (copy_from_user(&s, (void *)arg, sizeof(s)))
break;
/* legacy init: sector size fixed at 512 bytes */
err = session_init(ss, s.scount, s.dev_ro, 512);
}
break;
case SNUMBDCTL_INIT_V2: {
struct snumbdctl_init_v2 s;
if (copy_from_user(&s, (void *)arg, sizeof(s)))
break;
err = session_init(ss, s.scount, s.dev_ro, s.sector_size);
}
break;
case SNUMBDCTL_REQ: {
struct snumbdctl_req s;
if (copy_from_user(&s, (void *)arg, sizeof(s)))
break;
err = session_req(ss, s.size, s.req);
}
break;
case SNUMBDCTL_DATAREADY: {
struct snumbdctl_dataready s;
if (copy_from_user(&s, (void *)arg, sizeof(s)))
break;
err = session_dataready(ss, s.size, s.req);
}
break;
case SNUMBDCTL_STATE: {
struct snumbdctl_state s;
if (copy_from_user(&s, (void *)arg, sizeof(s)))
break;
err = session_state(ss, s.size, s.state);
}
break;
case SNUMBDCTL_STATES: {
struct snumbdctl_state s;
if (copy_from_user(&s, (void *)arg, sizeof(s)))
break;
err = session_states(ss, s.size, s.state);
}
break;
case SNUMBDCTL_PGRP: {
struct snumbdctl_pgrp s;
if (copy_from_user(&s, (void *)arg, sizeof(s)))
break;
err = session_allowed_pgrp(ss, s.allowed_pgrp);
}
break;
default:
err = -ENOTTY;
break;
}
sa_debug(DEBUG_API, "err=%d\n", -err);
return err;
}
#ifndef HAVE_UNLOCKED_IOCTL
/* Legacy (BKL-era) ioctl signature: drop the inode and forward. */
static int snumbdctl4_ioctl(struct inode *ino, struct file *file, unsigned int cmd,
unsigned long arg)
{
return snumbdctl3_ioctl(file, cmd, arg);
}
#endif /* 2.6.37 */
#ifdef HAVE_IOCTL32_CONVERSION
/* 32-bit compat shim for the old register_ioctl32_conversion() mechanism. */
static int
snumbdctl_compat_ioctl(unsigned int fd, unsigned int cmd,
unsigned long arg, struct file *filep)
{
return snumbdctl3_ioctl(filep, cmd, arg);
}
#endif
#ifdef HAVE_COMPAT_IOCTL
/* 32-bit compat shim for the file_operations.compat_ioctl mechanism. */
static long
snumbdctl_compat_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
return snumbdctl3_ioctl(filep, cmd, arg);
}
#endif
static int snumbdctl_open(struct inode *ino, struct file *file)
{
struct session_struct * s;
sa_debug(DEBUG_API,"%s\n","enter");
if (!snumbd_init_ok)
return -EPERM;
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
memset(s, 0, sizeof(*s));
INIT_LIST_HEAD(&s->s_list);
sema_init(&s->s_sem, 1);
spin_lock_init(&s->s_misc_lock);
atomic_set(&s->s_users, 1);
init_waitqueue_head(&s->req_put_wq);
init_waitqueue_head(&s->req_get_wq);
spin_lock(&sessions_lock);
list_add(&s->s_list, ¬inited_list);
sessions_count++;
spin_unlock(&sessions_lock);
file->private_data = s;
sa_kdebug("%s s=%p\n", "OK", s);
return 0;
}
/*
 * /dev/snumbdctl 'release': detach the session from the file and tear it
 * down via close_session(), which drops the fd's reference.
 * Returns 0, or -EINVAL if no session was attached.
 */
static int snumbdctl_release(struct inode *ino, struct file *file)
{
	struct session_struct *s = file->private_data;

	if (!s)
		return -EINVAL;
	file->private_data = NULL;
	sa_debug(DEBUG_API,"%s\n","enter");
	close_session(s);
	/* only the pointer value is logged; s may already be freed */
	sa_kdebug("%s s=%p\n", "OK", s);
	return 0;
}
/*
 * Page-fault backend for the control device's mmap: map the faulting
 * address to the corresponding session page in s_mpages and return it
 * with an extra reference.  Returns SN_NOPAGE_SIGBUS on an address
 * outside the window or a vma without a file.
 */
static struct page * snumbdctl_vm_nopage(struct vm_area_struct * vma,
unsigned long address, int *unused)
{
unsigned int i;
struct session_struct *s;
if (!vma->vm_file) {
sa_warn("vma does not have a file attached.%s", "\n");
return (struct page *)SN_NOPAGE_SIGBUS;
}
/* NOTE(review): s is not NULL-checked; presumably snumbdctl_mmap()
 * guarantees private_data is set before vm_ops are installed — confirm. */
s = vma->vm_file->private_data;
if (address - vma->vm_start >= SN_MMAP_SIZE) {
sa_warn("Incorrect address.%s", "\n");
return (struct page *)SN_NOPAGE_SIGBUS;
}
i = (address - vma->vm_start) >> PAGE_SHIFT;
/* caller (fault handler) consumes this reference */
get_page(s->s_mpages[i]);
sa_debug(DEBUG_ALLOC, "s=%p, nopage=%p(%d)\n", s,
s->s_mpages[i], page_count(s->s_mpages[i]));
s->s_gpages++;
return s->s_mpages[i];
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
/*
 * .fault adapter for kernels that replaced .nopage: extract the faulting
 * address from the vm_fault and delegate to snumbdctl_vm_nopage().
 */
static VMFAULT_RETURN_VALUE snumbdctl_vm_fault(struct vm_area_struct * vma,
struct vm_fault *vmf)
{
#ifdef HAVE_VMFAULT_VIRTUAL_ADDRESS
unsigned long address = (unsigned long) vmf->virtual_address;
#else
unsigned long address = (unsigned long) vmf->address;
#endif
#ifdef HAVE_VM_FAULT_2ARGS
vmf->page = snumbdctl_vm_nopage(vma, address, 0);
#else
vmf->page = snumbdctl_vm_nopage(vmf->vma, address, 0);
#endif
if (vmf->page == (struct page *)SN_NOPAGE_SIGBUS)
return VM_FAULT_ERROR;
return 0;
}
#endif
/*
 * vma close hook: account the mmap pages as returned (the MM layer itself
 * drops the per-mapping page references) and forget the vma pointer.
 */
static void snumbdctl_vm_close(struct vm_area_struct * vma)
{
unsigned int i;
struct session_struct *s;
if (!vma->vm_file) {
sa_warn("vma does not have a file attached.%s", "\n");
return;
}
s = vma->vm_file->private_data;
for (i = 0; i < BIO_MAX_PAGES; i++) {
if (s->s_mpages[i]) {
sa_debug(DEBUG_ALLOC, "s=%p, put page=%p(%d)\n", s,
s->s_mpages[i], page_count(s->s_mpages[i]));
/* page was put by upper level */
s->s_ppages++;
}
}
s->s_vma = NULL;
}
/*
 * VM operations for the control-device mapping.  C99 designated
 * initializers (.field =) replace the deprecated GNU "label:" form,
 * matching standard kernel style.
 */
static struct vm_operations_struct snumbdctl_vm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
	.nopage = snumbdctl_vm_nopage,
#else
	.fault = snumbdctl_vm_fault,
#endif
	.close = snumbdctl_vm_close,
};
/*
 * mmap() handler for the control device: bind the caller's vma to the
 * session so the preallocated data pages can be faulted in.
 * Requires a shared mapping of exactly SN_MMAP_SIZE bytes at the
 * per-minor page offset, on an initialized session with no existing
 * mapping.  Returns 0 on success, -EBADF/-EINVAL/-ENOMEM on failure.
 */
static int snumbdctl_mmap(struct file * file, struct vm_area_struct * vma)
{
struct session_struct *s;
int ret;
unsigned long pg_off;
s = file->private_data;
sa_debug(DEBUG_API,"s=%p, vma=%p,%lx-%lx %lx %lx\n", s, vma,
vma->vm_start, vma->vm_end,
vma->vm_flags, vma->vm_pgoff);
if (!s)
return -EBADF;
/* The window is shared with the kernel; a private copy is useless. */
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
ret = -EINVAL;
down(&s->s_sem);
/* Expected pgoff is derived from the device minor (minor 1 -> 0). */
pg_off = (SN_MMAP_SIZE * (MINOR(s->s_kdev) - 1)) >> PAGE_SHIFT;
/* Refuse a second mapping, an uninitialized session or a bad offset. */
if (s->s_vma || s->s_state < SNUM_INITED || vma->vm_pgoff != pg_off)
goto out_up;
ret = -ENOMEM;
/* The mapping must cover the whole window, no more, no less. */
if (vma->vm_end - vma->vm_start != SN_MMAP_SIZE)
goto out_up;
ret = 0;
s->s_vma = vma;
vma->vm_ops = &snumbdctl_vm_ops;
out_up:
up(&s->s_sem);
return ret;
}
/*
 * Read one request descriptor from the session's queue.  User space
 * must read exactly sizeof(struct snumbd_req) bytes per call; on
 * success the full count is returned, otherwise a negative errno.
 */
ssize_t
snumbdctl_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
{
	struct session_struct *sess;
	ssize_t err;

	if (count != sizeof(struct snumbd_req))
		return -EINVAL;
	sess = filp->private_data;
	if (sess == NULL)
		return -EBADF;
	sa_debug(DEBUG_INTERNALS,"s=%p, buf=%p, count=%zu, ppos=%lld, state=%d\n",
		sess, buf, count, *ppos, sess->s_state);
	err = session_req(sess, count, buf);
	return err ? err : (ssize_t)count;
}
/*
 * Deliver completed request data from user space to the session.
 * On success the full count is returned, otherwise a negative errno.
 */
ssize_t
snumbdctl_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
{
	struct session_struct *sess = filp->private_data;
	ssize_t err;

	if (sess == NULL)
		return -EBADF;
	sa_debug(DEBUG_INTERNALS,"s=%p, buf=%p, count=%zu, ppos=%lld, state=%d\n",
		sess, buf, count, *ppos, sess->s_state);
	err = session_dataready(sess, count, buf);
	return err ? err : (ssize_t)count;
}
/*
 * poll() handler: POLLERR when the session is absent or unusable,
 * POLLIN|POLLRDNORM when queued requests are waiting to be read.
 */
static unsigned int snumbdctl_poll(struct file *filp, poll_table *wait)
{
	unsigned int events = 0;
	struct session_struct *sess = filp->private_data;

	sa_debug(DEBUG_INTERNALS, "s=%p\n", sess);
	if (sess == NULL || IS_ERROR_STATE(sess->s_state))
		return POLLERR;
	poll_wait(filp, &sess->req_get_wq, wait);
	down(&sess->s_sem);
	if (!is_session_usable(sess))
		events |= POLLERR;
	/* Readable while not every queued bio has been handed out yet. */
	if (sess->tag_count < sess->bio_count)
		events |= POLLIN | POLLRDNORM;
	up(&sess->s_sem);
	return events;
}
/*
 * File operations for the /dev control device.  C99 designated
 * initializers (.field =) replace the deprecated GNU "label:" form,
 * matching standard kernel style.
 */
static struct file_operations snumbdctl_fops = {
	.owner = THIS_MODULE,
#ifdef HAVE_UNLOCKED_IOCTL
	.unlocked_ioctl = snumbdctl3_ioctl,
#else
	.ioctl = snumbdctl4_ioctl,
#endif
	.open = snumbdctl_open,
	.read = snumbdctl_read,
	.write = snumbdctl_write,
	.poll = snumbdctl_poll,
	.mmap = snumbdctl_mmap,
	.release = snumbdctl_release,
#ifdef HAVE_COMPAT_IOCTL
	.compat_ioctl = snumbdctl_compat_ioctl,
#endif
};
/*
 * Module init: allocate a char major for the control device and a
 * block major for the snapshot devices, then (optionally) register
 * 32-bit ioctl translations.  A status line is printed on every
 * path; on failure the already-registered char device is undone and
 * the negative errno is returned.
 */
static int __init snumbd_init(void)
{
	int ret;

	ret = register_chrdev(0, SNUMBDCTL_NAME, &snumbdctl_fops);
	if (ret < 0)
		goto out_info;
	snumbdctl_major = ret;

	ret = register_blkdev(0, SNUMBD_NAME);
	if (ret < 0) {
		unregister_chrdev(snumbdctl_major, SNUMBDCTL_NAME);
		goto out_info;
	}
	snumbd_major = ret;

#ifdef HAVE_IOCTL32_CONVERSION
	/* NOTE(review): the header include above is guarded by
	 * HAVE_IOCTL32_CONVERSIONS (plural) -- confirm the two macros
	 * are always defined together by the configure step. */
	register_ioctl32_conversion(SNUMBDCTL_INIT, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_INIT_V2, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_STOP, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_REQ, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_DATAREADY, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_STATE, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_STATES, snumbdctl_compat_ioctl);
#endif
	snumbd_init_ok = 1;
	ret = 0;
out_info:
	sa_info("Snumbd(v.%d.%d.%d) init %s. Ctl major %d, blk major %d.\n",
		SNUMBD_VMAJOR, SNUMBD_VMINOR, SNUMBD_VSUBMINOR,
		snumbd_init_ok ? "OK" : "failed",
		snumbdctl_major, snumbd_major);
	return ret;
}
/*
 * Module unload: undo everything snumbd_init() registered, in the
 * same order (char major, block major, ioctl32 translations).
 */
static void __exit snumbd_exit(void)
{
unregister_chrdev(snumbdctl_major, SNUMBDCTL_NAME);
unregister_blkdev(snumbd_major, SNUMBD_NAME);
#ifdef HAVE_IOCTL32_CONVERSION
/* Drop the 32-bit ioctl translation entries added at init. */
unregister_ioctl32_conversion(SNUMBDCTL_INIT);
unregister_ioctl32_conversion(SNUMBDCTL_INIT_V2);
unregister_ioctl32_conversion(SNUMBDCTL_STOP);
unregister_ioctl32_conversion(SNUMBDCTL_REQ);
unregister_ioctl32_conversion(SNUMBDCTL_DATAREADY);
unregister_ioctl32_conversion(SNUMBDCTL_STATE);
unregister_ioctl32_conversion(SNUMBDCTL_STATES);
#endif
sa_info("Snumbd unloading...%s", "\n");
}
module_init(snumbd_init);
module_exit(snumbd_exit);
/* Module identity and metadata exported via modinfo. */
MODULE_AUTHOR("Acronis");
MODULE_DESCRIPTION("Acronis User Mode Block Device");
MODULE_LICENSE("GPL");
MODULE_VERSION(SNUMBD_COMMON_MOD_VERSION);
MODULE_INFO(supported, "external");