From c36b7ce1ead3bfbcd7248bcce6290950467121aa Mon Sep 17 00:00:00 2001 From: Benjamin Schieder Date: Sun, 18 Apr 2004 00:55:57 +0000 Subject: [PATCH] Benjamin Schieder: cleaner implementation of [2004030219054226079] adds packages loop-aes and ciphers [2004041220104619881] (https://www.rocklinux.net/submaster) git-svn-id: http://www.rocklinux.org/svn/rock-linux/trunk@2756 c5f82cb5-29bc-0310-9cd0-bff59a50e3bc --- ...-support.patch => cryptoloop-support.diff} | 0 package/base/util-linux/util-linux.conf | 1 + package/blindcoder/ciphers/ciphers.conf | 1 + package/blindcoder/ciphers/ciphers.desc | 42 + package/blindcoder/ciphers/no_depmod.patch | 13 + package/blindcoder/loop-aes/config.hlp | 8 + package/blindcoder/loop-aes/config.in | 5 + .../loop-aes/linux24_cryptoloop.diff | 1787 +++++++++++++++ .../loop-aes/linux26_cryptoloop.diff | 1943 +++++++++++++++++ package/blindcoder/loop-aes/loop-aes.conf | 10 + package/blindcoder/loop-aes/loop-aes.desc | 42 + .../loop-aes/pkg_linux24-src_post.conf | 1 + .../blindcoder/loop-aes/pkg_linux24_post.conf | 1 + .../loop-aes/pkg_linux26-src_post.conf | 1 + .../blindcoder/loop-aes/pkg_linux26_post.conf | 1 + .../loop-aes/pkg_util-linux_post.conf | 2 + 16 files changed, 3858 insertions(+) rename package/base/util-linux/{cryptoloop-support.patch => cryptoloop-support.diff} (100%) create mode 100644 package/blindcoder/ciphers/ciphers.conf create mode 100644 package/blindcoder/ciphers/ciphers.desc create mode 100644 package/blindcoder/ciphers/no_depmod.patch create mode 100644 package/blindcoder/loop-aes/config.hlp create mode 100644 package/blindcoder/loop-aes/config.in create mode 100644 package/blindcoder/loop-aes/linux24_cryptoloop.diff create mode 100644 package/blindcoder/loop-aes/linux26_cryptoloop.diff create mode 100644 package/blindcoder/loop-aes/loop-aes.conf create mode 100644 package/blindcoder/loop-aes/loop-aes.desc create mode 100644 package/blindcoder/loop-aes/pkg_linux24-src_post.conf create mode 100644 package/blindcoder/loop-aes/pkg_linux24_post.conf create mode 100644 package/blindcoder/loop-aes/pkg_linux26-src_post.conf create mode 100644 package/blindcoder/loop-aes/pkg_linux26_post.conf create mode 100644 package/blindcoder/loop-aes/pkg_util-linux_post.conf diff --git a/package/base/util-linux/cryptoloop-support.patch b/package/base/util-linux/cryptoloop-support.diff similarity index 100% rename from package/base/util-linux/cryptoloop-support.patch rename to package/base/util-linux/cryptoloop-support.diff diff --git a/package/base/util-linux/util-linux.conf b/package/base/util-linux/util-linux.conf index 294b30216..3a6641a90 100644 --- a/package/base/util-linux/util-linux.conf +++ b/package/base/util-linux/util-linux.conf @@ -53,4 +53,5 @@ util_linux_postmake() { } postmake="util_linux_postmake" +var_append patchfiles " " "${confdir}/cryptoloop-support.diff" diff --git a/package/blindcoder/ciphers/ciphers.conf b/package/blindcoder/ciphers/ciphers.conf new file mode 100644 index 000000000..76db6451f --- /dev/null +++ b/package/blindcoder/ciphers/ciphers.conf @@ -0,0 +1 @@ +custmain="make" diff --git a/package/blindcoder/ciphers/ciphers.desc b/package/blindcoder/ciphers/ciphers.desc new file mode 100644 index 000000000..e80336805 --- /dev/null +++ b/package/blindcoder/ciphers/ciphers.desc @@ -0,0 +1,42 @@ + +[COPY] --- ROCK-COPYRIGHT-NOTE-BEGIN --- +[COPY] +[COPY] This copyright note is auto-generated by ./scripts/Create-CopyPatch. 
+[COPY] Please add additional copyright information _after_ the line containing +[COPY] the ROCK-COPYRIGHT-NOTE-END tag. Otherwise it might get removed by +[COPY] the ./scripts/Create-CopyPatch script. Do not edit this copyright text! +[COPY] +[COPY] ROCK Linux: rock-src/package/blindcoder/ciphers/ciphers.desc +[COPY] ROCK Linux is Copyright (C) 1998 - 2003 Clifford Wolf +[COPY] +[COPY] This program is free software; you can redistribute it and/or modify +[COPY] it under the terms of the GNU General Public License as published by +[COPY] the Free Software Foundation; either version 2 of the License, or +[COPY] (at your option) any later version. A copy of the GNU General Public +[COPY] License can be found at Documentation/COPYING. +[COPY] +[COPY] Many people helped and are helping developing ROCK Linux. Please +[COPY] have a look at http://www.rocklinux.org/ and the Documentation/TEAM +[COPY] file for details. +[COPY] +[COPY] --- ROCK-COPYRIGHT-NOTE-END --- + +[I] Kernel modules for loop-aes + +[T] These are the cipher modules for twofish, blowfish and serpent +[T] encryption used by loop-aes. + +[U] http://loop-aes.sourceforge.net/ + +[A] Jari Ruusu +[M] Benjamin Schieder + +[C] extra/crypto + +[L] GPL +[S] Stable +[V] 2.0f +[P] O -----5---9 800.000 + +[D] 1818785949 ciphers-v2.0f.tar.bz2 http://loop-aes.sourceforge.net/ciphers/ + diff --git a/package/blindcoder/ciphers/no_depmod.patch b/package/blindcoder/ciphers/no_depmod.patch new file mode 100644 index 000000000..e8dade9c2 --- /dev/null +++ b/package/blindcoder/ciphers/no_depmod.patch @@ -0,0 +1,13 @@ +diff -ruN ciphers-v2.0f_orig/Makefile ciphers-v2.0f/Makefile +--- ciphers-v2.0f_orig/Makefile 2004-03-02 12:50:45.000000000 +0100 ++++ ciphers-v2.0f/Makefile 2004-03-02 12:52:04.000000000 +0100 +@@ -99,9 +99,6 @@ + modules: clean $(MLIST) + mkdir -p $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)/block + cp -p $(MLIST) $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)/block +-ifeq ($(RUNDM),y) +- $(DEPMOD) -a $(DMOPTS) +-endif + sync + @echo "Currently running kernel is " $(KR) + @echo "Modules were built for kernel" $(KERNELRELEASE) diff --git a/package/blindcoder/loop-aes/config.hlp b/package/blindcoder/loop-aes/config.hlp new file mode 100644 index 000000000..69768869c --- /dev/null +++ b/package/blindcoder/loop-aes/config.hlp @@ -0,0 +1,8 @@ +ROCKCFG_PKG_LOOP_AES_USEIT + Use this option to enable or disable loop-aes. + loop-aes is a "patched" loop kernel module that enables the use + of AES (Advanced Encryption Standard) encrypted files/filesystems. + The supporting modules loop_blowfish, loop_twofish and loop_serpent + are in the package "ciphers". 
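(Editor's note, not part of the packaged help text: as a rough illustration of
what "using loop-aes" means at the ioctl level, here is a hedged userspace
sketch driving the LOOP_SET_FD and LOOP_SET_STATUS64 calls that this patch set
backports. The helper name, the cipher number and the availability of
struct loop_info64 in <linux/loop.h> are assumptions; key handling and
descriptor cleanup are omitted.)

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/loop.h>                /* struct loop_info64, LO_NAME_SIZE */

#ifndef LOOP_SET_STATUS64
#define LOOP_SET_STATUS64 0x4C04       /* same value the kernel patch defines */
#endif

/* hypothetical helper: bind a backing file and select an encryption type */
static int attach_encrypted(const char *loopdev, const char *backing)
{
        struct loop_info64 info;
        int lfd = open(loopdev, O_RDWR);
        int ffd = open(backing, O_RDWR);

        if (lfd < 0 || ffd < 0)
                return -1;
        if (ioctl(lfd, LOOP_SET_FD, ffd) < 0)  /* bind backing file */
                return -1;
        memset(&info, 0, sizeof(info));
        strncpy((char *)info.lo_file_name, backing, LO_NAME_SIZE - 1);
        info.lo_encrypt_type = 16;     /* LO_CRYPT_AES in loop-AES (assumed) */
        /* lo_encrypt_key / lo_encrypt_key_size would be filled in here */
        return ioctl(lfd, LOOP_SET_STATUS64, &info);
}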
+ For more information see: + http://loop-aes.sourceforge.net/loop-AES.README diff --git a/package/blindcoder/loop-aes/config.in b/package/blindcoder/loop-aes/config.in new file mode 100644 index 000000000..0b1d25550 --- /dev/null +++ b/package/blindcoder/loop-aes/config.in @@ -0,0 +1,5 @@ +menu_begin MENU_LOOP_AES "Loop-AES Options" + bool "Use Loop-AES" ROCKCFG_PKG_LOOP_AES_USEIT 0 + [ "${ROCKCFG_PKG_LOOP_AES_USEIT}" == "0" ] && pkgdisable loop-aes ciphers + [ "${ROCKCFG_PKG_LOOP_AES_USEIT}" == "1" ] && pkgenable loop-aes ciphers +menu_end diff --git a/package/blindcoder/loop-aes/linux24_cryptoloop.diff b/package/blindcoder/loop-aes/linux24_cryptoloop.diff new file mode 100644 index 000000000..3d2b1ae9d --- /dev/null +++ b/package/blindcoder/loop-aes/linux24_cryptoloop.diff @@ -0,0 +1,1787 @@ +--- linux-2.4.25/drivers/block/loop.c 2003-08-25 13:44:41.000000000 +0200 ++++ linux-2.4.25/drivers/block/loop.c 2004-02-08 16:51:25.000000000 +0100 +@@ -39,21 +39,29 @@ + * Support up to 256 loop devices + * Heinz Mauelshagen , Feb 2002 + * ++ * IV is now passed as (512 byte) sector number. ++ * Jari Ruusu, May 18 2001 ++ * ++ * External encryption module locking bug fixed. ++ * Ingo Rohloff , June 21 2001 ++ * ++ * Make device backed loop work with swap (pre-allocated buffers + queue rewrite). ++ * Jari Ruusu, September 2 2001 ++ * ++ * File backed code now uses file->f_op->read/write. Based on Andrew Morton's idea. ++ * Jari Ruusu, May 23 2002 ++ * ++ * Backported struct loop_info64 ioctls from 2.6 kernels (64 bit offsets and ++ * 64 bit sizelimits). Added support for removing offset from IV computations. ++ * Jari Ruusu, September 21 2003 ++ * ++ * + * Still To Fix: + * - Advisory locking is ignored here. + * - Should use an own CAP_* category instead of CAP_SYS_ADMIN +- * +- * WARNING/FIXME: +- * - The block number as IV passing to low level transfer functions is broken: +- * it passes the underlying device's block number instead of the +- * offset. This makes it change for a given block when the file is +- * moved/restored/copied and also doesn't work over NFS. +- * AV, Feb 12, 2000: we pass the logical block number now. It fixes the +- * problem above. Encryption modules that used to rely on the old scheme +- * should just call ->i_mapping->bmap() to calculate the physical block +- * number. 
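(Editor's note, not part of the patch: the WARNING/FIXME text above is being
removed because the patch switches to passing the IV as a 512-byte sector
number relative to the start of the loop device, as the new header comment
states — so the IV no longer depends on where the backing file sits on disk.
A minimal sketch of what a CBC-style transfer function does with that
convention; the function name and the commented-out cipher call are
illustrative, not code from loop-AES:)

static int example_cbc_transfer(struct loop_device *lo, int cmd,
                                char *raw_buf, char *loop_buf,
                                int size, int sector)
{
        int s;

        /* derive one unique IV per 512-byte sector, so moving or copying
         * the backing file cannot silently change the IVs */
        for (s = 0; s < (size >> 9); s++) {
                u_int32_t iv[4] = { (u_int32_t)(sector + s), 0, 0, 0 };
                /* cipher_run(lo, cmd, raw_buf + (s << 9),
                 *            loop_buf + (s << 9), 512, iv); */
        }
        return 0;
}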
+ */ + ++#include + #include + #include + +@@ -71,6 +79,9 @@ + #include + #include + #include ++#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_SOFTWARE_SUSPEND2) ++#include ++#endif + + #include + +@@ -79,24 +90,44 @@ + #define MAJOR_NR LOOP_MAJOR + + static int max_loop = 8; +-static struct loop_device *loop_dev; + static int *loop_sizes; + static int *loop_blksizes; ++static int *loop_hardsizes; + static devfs_handle_t devfs_handle; /* For the directory */ + ++struct loopinfo64 { ++ __u64 lo_device; /* ioctl r/o */ ++ __u64 lo_inode; /* ioctl r/o */ ++ __u64 lo_rdevice; /* ioctl r/o */ ++ __u64 lo_offset; ++ __u64 lo_sizelimit;/* bytes, 0 == max available */ ++ __u32 lo_number; /* ioctl r/o */ ++ __u32 lo_encrypt_type; ++ __u32 lo_encrypt_key_size; /* ioctl w/o */ ++ __u32 lo_flags; /* ioctl r/o */ ++ __u8 lo_file_name[LO_NAME_SIZE]; ++ __u8 lo_crypt_name[LO_NAME_SIZE]; ++ __u8 lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ ++ __u64 lo_init[2]; ++}; ++#if !defined(LOOP_SET_STATUS64) ++# define LOOP_SET_STATUS64 0x4C04 ++#endif ++#if !defined(LOOP_GET_STATUS64) ++# define LOOP_GET_STATUS64 0x4C05 ++#endif ++ + /* + * Transfer functions + */ + static int transfer_none(struct loop_device *lo, int cmd, char *raw_buf, + char *loop_buf, int size, int real_block) + { +- if (raw_buf != loop_buf) { +- if (cmd == READ) +- memcpy(loop_buf, raw_buf, size); +- else +- memcpy(raw_buf, loop_buf, size); +- } ++ /* this code is only called from file backed loop */ ++ /* and that code expects this function to be no-op */ + ++ if (current->need_resched) ++ {set_current_state(TASK_RUNNING);schedule();} + return 0; + } + +@@ -118,12 +149,13 @@ static int transfer_xor(struct loop_devi + keysize = lo->lo_encrypt_key_size; + for (i = 0; i < size; i++) + *out++ = *in++ ^ key[(i & 511) % keysize]; ++ if (current->need_resched) ++ {set_current_state(TASK_RUNNING);schedule();} + return 0; + } + + static int none_status(struct loop_device *lo, struct loop_info *info) + { +- lo->lo_flags |= LO_FLAGS_BH_REMAP; + return 0; + } + +@@ -136,13 +168,13 @@ static int xor_status(struct loop_device + + struct loop_func_table none_funcs = { + number: LO_CRYPT_NONE, +- transfer: transfer_none, ++ transfer: (void *)transfer_none, + init: none_status, + }; + + struct loop_func_table xor_funcs = { + number: LO_CRYPT_XOR, +- transfer: transfer_xor, ++ transfer: (void *)transfer_xor, + init: xor_status + }; + +@@ -152,325 +184,420 @@ struct loop_func_table *xfer_funcs[MAX_L + &xor_funcs + }; + +-#define MAX_DISK_SIZE 1024*1024*1024 ++/* ++ * First number of 'lo_prealloc' is the default number of RAM pages ++ * to pre-allocate for each device backed loop. Every (configured) ++ * device backed loop pre-allocates this amount of RAM pages unless ++ * later 'lo_prealloc' numbers provide an override. 
'lo_prealloc' ++ * overrides are defined in pairs: loop_index,number_of_pages ++ */ ++static int lo_prealloc[9] = { 125, 999, 0, 999, 0, 999, 0, 999, 0 }; ++#define LO_PREALLOC_MIN 4 /* minimum user defined pre-allocated RAM pages */ ++#define LO_PREALLOC_MAX 512 /* maximum user defined pre-allocated RAM pages */ + +-static int compute_loop_size(struct loop_device *lo, struct dentry * lo_dentry, kdev_t lodev) +-{ +- if (S_ISREG(lo_dentry->d_inode->i_mode)) +- return (lo_dentry->d_inode->i_size - lo->lo_offset) >> BLOCK_SIZE_BITS; +- if (blk_size[MAJOR(lodev)]) +- return blk_size[MAJOR(lodev)][MINOR(lodev)] - +- (lo->lo_offset >> BLOCK_SIZE_BITS); +- return MAX_DISK_SIZE; +-} ++MODULE_PARM(lo_prealloc, "1-9i"); ++MODULE_PARM_DESC(lo_prealloc, "Number of pre-allocated pages [,index,pages]..."); + +-static void figure_loop_size(struct loop_device *lo) +-{ +- loop_sizes[lo->lo_number] = compute_loop_size(lo, +- lo->lo_backing_file->f_dentry, +- lo->lo_device); +-} ++/* ++ * This is loop helper thread nice value in range ++ * from 0 (low priority) to -20 (high priority). ++ */ ++#if defined(DEF_NICE) && defined(DEF_COUNTER) ++static int lo_nice = -20; /* old scheduler default */ ++#else ++static int lo_nice = -1; /* O(1) scheduler default */ ++#endif + +-static int lo_send(struct loop_device *lo, struct buffer_head *bh, int bsize, +- loff_t pos) +-{ +- struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */ +- struct address_space *mapping = file->f_dentry->d_inode->i_mapping; +- struct address_space_operations *aops = mapping->a_ops; +- struct page *page; +- char *kaddr, *data; +- unsigned long index; +- unsigned size, offset; +- int len; ++MODULE_PARM(lo_nice, "1i"); ++MODULE_PARM_DESC(lo_nice, "Loop thread scheduler nice (0 ... -20)"); + +- down(&mapping->host->i_sem); +- index = pos >> PAGE_CACHE_SHIFT; +- offset = pos & (PAGE_CACHE_SIZE - 1); +- len = bh->b_size; +- data = bh->b_data; +- while (len > 0) { +- int IV = index * (PAGE_CACHE_SIZE/bsize) + offset/bsize; +- int transfer_result; ++typedef struct { ++ struct loop_device lo_orig; ++ struct buffer_head *lo_bh_que0; ++ struct buffer_head *lo_bh_que1; ++ struct buffer_head *lo_bh_que2; ++ struct buffer_head *lo_bh_free; ++ int lo_bh_flsh; ++ int lo_bh_need; ++ wait_queue_head_t lo_bh_wait; ++ loff_t lo_offset; ++ loff_t lo_sizelimit; ++ unsigned long lo_offs_sec; ++ unsigned long lo_iv_remove; ++ unsigned char lo_crypt_name[LO_NAME_SIZE]; ++} LoDevExt; ++static LoDevExt *loop_dev; ++ ++#define LDE_lo_bh_que0 (((LoDevExt *)lo)->lo_bh_que0) ++#define LDE_lo_bh_que1 (((LoDevExt *)lo)->lo_bh_que1) ++#define LDE_lo_bh_que2 (((LoDevExt *)lo)->lo_bh_que2) ++#define LDE_lo_bh_free (((LoDevExt *)lo)->lo_bh_free) ++#define LDE_lo_bh_flsh (((LoDevExt *)lo)->lo_bh_flsh) ++#define LDE_lo_bh_need (((LoDevExt *)lo)->lo_bh_need) ++#define LDE_lo_bh_wait (((LoDevExt *)lo)->lo_bh_wait) ++#define LDE_lo_offset (((LoDevExt *)lo)->lo_offset) ++#define LDE_lo_sizelimit (((LoDevExt *)lo)->lo_sizelimit) ++#define LDE_lo_offs_sec (((LoDevExt *)lo)->lo_offs_sec) ++#define LDE_lo_iv_remove (((LoDevExt *)lo)->lo_iv_remove) ++#define LDE_lo_crypt_name (((LoDevExt *)lo)->lo_crypt_name) ++ ++typedef struct { ++ struct buffer_head **q0; ++ struct buffer_head **q1; ++ struct buffer_head **q2; ++ int x0; ++ int x1; ++ int x2; ++} que_look_up_table; + +- size = PAGE_CACHE_SIZE - offset; +- if (size > len) +- size = len; ++static void loop_prealloc_cleanup(struct loop_device *lo) ++{ ++ struct buffer_head *bh; + +- page = grab_cache_page(mapping, index); +- if 
(!page) +- goto fail; +- kaddr = kmap(page); +- if (aops->prepare_write(file, page, offset, offset+size)) +- goto unlock; +- flush_dcache_page(page); +- transfer_result = lo_do_transfer(lo, WRITE, kaddr + offset, data, size, IV); +- if (transfer_result) { +- /* +- * The transfer failed, but we still write the data to +- * keep prepare/commit calls balanced. +- */ +- printk(KERN_ERR "loop: transfer error block %ld\n", index); +- memset(kaddr + offset, 0, size); +- } +- if (aops->commit_write(file, page, offset, offset+size)) +- goto unlock; +- if (transfer_result) +- goto unlock; +- kunmap(page); +- data += size; +- len -= size; +- offset = 0; +- index++; +- pos += size; +- UnlockPage(page); +- page_cache_release(page); ++ while ((bh = LDE_lo_bh_free)) { ++ __free_page(bh->b_page); ++ LDE_lo_bh_free = bh->b_reqnext; ++ bh->b_reqnext = NULL; ++ kmem_cache_free(bh_cachep, bh); + } +- up(&mapping->host->i_sem); +- return 0; +- +-unlock: +- kunmap(page); +- UnlockPage(page); +- page_cache_release(page); +-fail: +- up(&mapping->host->i_sem); +- return -1; + } + +-struct lo_read_data { +- struct loop_device *lo; +- char *data; +- int bsize; +-}; +- +-static int lo_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size) ++static int loop_prealloc_init(struct loop_device *lo, int y) + { +- char *kaddr; +- unsigned long count = desc->count; +- struct lo_read_data *p = (struct lo_read_data*)desc->buf; +- struct loop_device *lo = p->lo; +- int IV = page->index * (PAGE_CACHE_SIZE/p->bsize) + offset/p->bsize; ++ struct buffer_head *bh; ++ int x; + +- if (size > count) +- size = count; ++ if(!y) { ++ y = lo_prealloc[0]; ++ for (x = 1; x < (sizeof(lo_prealloc) / sizeof(int)); x += 2) { ++ if (lo_prealloc[x + 1] && (lo->lo_number == lo_prealloc[x])) { ++ y = lo_prealloc[x + 1]; ++ break; ++ } ++ } ++ } ++ LDE_lo_bh_flsh = (y * 3) / 4; + +- kaddr = kmap(page); +- if (lo_do_transfer(lo, READ, kaddr + offset, p->data, size, IV)) { +- size = 0; +- printk(KERN_ERR "loop: transfer error block %ld\n",page->index); +- desc->error = -EINVAL; ++ for (x = 0; x < y; x++) { ++ bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL); ++ if (!bh) { ++ loop_prealloc_cleanup(lo); ++ return 1; ++ } ++ bh->b_page = alloc_page(GFP_KERNEL); ++ if (!bh->b_page) { ++ bh->b_reqnext = NULL; ++ kmem_cache_free(bh_cachep, bh); ++ loop_prealloc_cleanup(lo); ++ return 1; ++ } ++ bh->b_reqnext = LDE_lo_bh_free; ++ LDE_lo_bh_free = bh; + } +- kunmap(page); +- +- desc->count = count - size; +- desc->written += size; +- p->data += size; +- return size; +-} +- +-static int lo_receive(struct loop_device *lo, struct buffer_head *bh, int bsize, +- loff_t pos) +-{ +- struct lo_read_data cookie; +- read_descriptor_t desc; +- struct file *file; +- +- cookie.lo = lo; +- cookie.data = bh->b_data; +- cookie.bsize = bsize; +- desc.written = 0; +- desc.count = bh->b_size; +- desc.buf = (char*)&cookie; +- desc.error = 0; +- spin_lock_irq(&lo->lo_lock); +- file = lo->lo_backing_file; +- spin_unlock_irq(&lo->lo_lock); +- do_generic_file_read(file, &pos, &desc, lo_read_actor); +- return desc.error; ++ return 0; + } + +-static inline int loop_get_bs(struct loop_device *lo) ++static void loop_add_queue_last(struct loop_device *lo, struct buffer_head *bh, struct buffer_head **q) + { +- int bs = 0; ++ unsigned long flags; + +- if (blksize_size[MAJOR(lo->lo_device)]) +- bs = blksize_size[MAJOR(lo->lo_device)][MINOR(lo->lo_device)]; +- if (!bs) +- bs = BLOCK_SIZE; ++ spin_lock_irqsave(&lo->lo_lock, flags); ++ if (*q) { ++ 
bh->b_reqnext = (*q)->b_reqnext; ++ (*q)->b_reqnext = bh; ++ } else { ++ bh->b_reqnext = bh; ++ } ++ *q = bh; ++ spin_unlock_irqrestore(&lo->lo_lock, flags); + +- return bs; ++ if (waitqueue_active(&LDE_lo_bh_wait)) ++ wake_up_interruptible(&LDE_lo_bh_wait); + } + +-static inline unsigned long loop_get_iv(struct loop_device *lo, +- unsigned long sector) ++static void loop_add_queue_first(struct loop_device *lo, struct buffer_head *bh, struct buffer_head **q) + { +- int bs = loop_get_bs(lo); +- unsigned long offset, IV; +- +- IV = sector / (bs >> 9) + lo->lo_offset / bs; +- offset = ((sector % (bs >> 9)) << 9) + lo->lo_offset % bs; +- if (offset >= bs) +- IV++; +- +- return IV; ++ spin_lock_irq(&lo->lo_lock); ++ if (*q) { ++ bh->b_reqnext = (*q)->b_reqnext; ++ (*q)->b_reqnext = bh; ++ } else { ++ bh->b_reqnext = bh; ++ *q = bh; ++ } ++ spin_unlock_irq(&lo->lo_lock); + } + +-static int do_bh_filebacked(struct loop_device *lo, struct buffer_head *bh, int rw) ++static struct buffer_head *loop_get_bh(struct loop_device *lo, int *list_nr, ++ que_look_up_table *qt) + { +- loff_t pos; +- int ret; ++ struct buffer_head *bh = NULL, *last; + +- pos = ((loff_t) bh->b_rsector << 9) + lo->lo_offset; +- +- if (rw == WRITE) +- ret = lo_send(lo, bh, loop_get_bs(lo), pos); +- else +- ret = lo_receive(lo, bh, loop_get_bs(lo), pos); +- +- return ret; +-} +- +-static void loop_end_io_transfer(struct buffer_head *bh, int uptodate); +-static void loop_put_buffer(struct buffer_head *bh) +-{ +- /* +- * check b_end_io, may just be a remapped bh and not an allocated one +- */ +- if (bh && bh->b_end_io == loop_end_io_transfer) { +- __free_page(bh->b_page); +- kmem_cache_free(bh_cachep, bh); ++ spin_lock_irq(&lo->lo_lock); ++ if ((last = *qt->q0)) { ++ bh = last->b_reqnext; ++ if (bh == last) ++ *qt->q0 = NULL; ++ else ++ last->b_reqnext = bh->b_reqnext; ++ bh->b_reqnext = NULL; ++ *list_nr = qt->x0; ++ } else if ((last = *qt->q1)) { ++ bh = last->b_reqnext; ++ if (bh == last) ++ *qt->q1 = NULL; ++ else ++ last->b_reqnext = bh->b_reqnext; ++ bh->b_reqnext = NULL; ++ *list_nr = qt->x1; ++ } else if ((last = *qt->q2)) { ++ bh = last->b_reqnext; ++ if (bh == last) ++ *qt->q2 = NULL; ++ else ++ last->b_reqnext = bh->b_reqnext; ++ bh->b_reqnext = NULL; ++ *list_nr = qt->x2; + } ++ spin_unlock_irq(&lo->lo_lock); ++ return bh; + } + +-/* +- * Add buffer_head to back of pending list +- */ +-static void loop_add_bh(struct loop_device *lo, struct buffer_head *bh) ++static void loop_put_buffer(struct loop_device *lo, struct buffer_head *b) + { + unsigned long flags; ++ int wk; + + spin_lock_irqsave(&lo->lo_lock, flags); +- if (lo->lo_bhtail) { +- lo->lo_bhtail->b_reqnext = bh; +- lo->lo_bhtail = bh; +- } else +- lo->lo_bh = lo->lo_bhtail = bh; ++ b->b_reqnext = LDE_lo_bh_free; ++ LDE_lo_bh_free = b; ++ wk = LDE_lo_bh_need; + spin_unlock_irqrestore(&lo->lo_lock, flags); + +- up(&lo->lo_bh_mutex); ++ if (wk && waitqueue_active(&LDE_lo_bh_wait)) ++ wake_up_interruptible(&LDE_lo_bh_wait); + } + +-/* +- * Grab first pending buffer +- */ +-static struct buffer_head *loop_get_bh(struct loop_device *lo) ++static void loop_end_io_transfer_wr(struct buffer_head *bh, int uptodate) + { +- struct buffer_head *bh; +- +- spin_lock_irq(&lo->lo_lock); +- if ((bh = lo->lo_bh)) { +- if (bh == lo->lo_bhtail) +- lo->lo_bhtail = NULL; +- lo->lo_bh = bh->b_reqnext; +- bh->b_reqnext = NULL; +- } +- spin_unlock_irq(&lo->lo_lock); ++ struct loop_device *lo = (struct loop_device *)(&loop_dev[MINOR(bh->b_dev)]); ++ struct buffer_head *rbh = bh->b_private; 
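(Editor's note, not part of the patch: the queue helpers above share one trick
worth spelling out — each queue pointer stores the TAIL of a circular singly
linked list chained through b_reqnext, so the head is always tail->b_reqnext
and a single pointer gives O(1) add-at-tail and pop-at-head. A standalone
sketch of the same discipline, with the spinlock dropped:)

struct node { struct node *next; };

static void ring_add_last(struct node **q, struct node *n)
{
        if (*q) {
                n->next = (*q)->next;  /* new node points at current head */
                (*q)->next = n;
        } else {
                n->next = n;           /* one-element ring */
        }
        *q = n;                        /* new node becomes the tail */
}

static struct node *ring_pop_first(struct node **q)
{
        struct node *tail = *q, *head;

        if (!tail)
                return NULL;
        head = tail->next;
        if (head == tail)
                *q = NULL;             /* ring emptied */
        else
                tail->next = head->next;  /* unlink head */
        head->next = NULL;
        return head;
}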
+ +- return bh; ++ rbh->b_reqnext = NULL; ++ rbh->b_end_io(rbh, uptodate); ++ loop_put_buffer(lo, bh); ++ if (atomic_dec_and_test(&lo->lo_pending)) ++ wake_up_interruptible(&LDE_lo_bh_wait); + } + +-/* +- * when buffer i/o has completed. if BH_Dirty is set, this was a WRITE +- * and lo->transfer stuff has already been done. if not, it was a READ +- * so queue it for the loop thread and let it do the transfer out of +- * b_end_io context (we don't want to do decrypt of a page with irqs +- * disabled) +- */ +-static void loop_end_io_transfer(struct buffer_head *bh, int uptodate) ++static void loop_end_io_transfer_rd(struct buffer_head *bh, int uptodate) + { +- struct loop_device *lo = &loop_dev[MINOR(bh->b_dev)]; +- +- if (!uptodate || test_bit(BH_Dirty, &bh->b_state)) { +- struct buffer_head *rbh = bh->b_private; ++ struct loop_device *lo = (struct loop_device *)(&loop_dev[MINOR(bh->b_dev)]); + +- rbh->b_end_io(rbh, uptodate); +- if (atomic_dec_and_test(&lo->lo_pending)) +- up(&lo->lo_bh_mutex); +- loop_put_buffer(bh); +- } else +- loop_add_bh(lo, bh); ++ if (!uptodate) ++ loop_end_io_transfer_wr(bh, uptodate); ++ else ++ loop_add_queue_last(lo, bh, &LDE_lo_bh_que0); + } + + static struct buffer_head *loop_get_buffer(struct loop_device *lo, +- struct buffer_head *rbh) ++ struct buffer_head *rbh, int from_thread, int rw) + { + struct buffer_head *bh; ++ struct page *p; ++ unsigned long flags; + +- /* +- * for xfer_funcs that can operate on the same bh, do that +- */ +- if (lo->lo_flags & LO_FLAGS_BH_REMAP) { +- bh = rbh; +- goto out_bh; ++ spin_lock_irqsave(&lo->lo_lock, flags); ++ bh = LDE_lo_bh_free; ++ if (bh) { ++ LDE_lo_bh_free = bh->b_reqnext; ++ if (from_thread) ++ LDE_lo_bh_need = 0; ++ } else { ++ if (from_thread) ++ LDE_lo_bh_need = 1; + } ++ spin_unlock_irqrestore(&lo->lo_lock, flags); ++ if (!bh) ++ return (struct buffer_head *)0; + +- do { +- bh = kmem_cache_alloc(bh_cachep, SLAB_NOIO); +- if (bh) +- break; +- +- run_task_queue(&tq_disk); +- set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(HZ); +- } while (1); +- memset(bh, 0, sizeof(*bh)); ++ p = bh->b_page; ++ memset(bh, 0, sizeof(struct buffer_head)); ++ bh->b_page = p; + ++ bh->b_private = rbh; + bh->b_size = rbh->b_size; + bh->b_dev = rbh->b_rdev; ++ bh->b_rdev = lo->lo_device; + bh->b_state = (1 << BH_Req) | (1 << BH_Mapped) | (1 << BH_Lock); ++ bh->b_data = page_address(bh->b_page); ++ bh->b_end_io = (rw == WRITE) ? 
loop_end_io_transfer_wr : loop_end_io_transfer_rd; ++ bh->b_rsector = rbh->b_rsector + LDE_lo_offs_sec; ++ init_waitqueue_head(&bh->b_wait); ++ ++ return bh; ++} ++ ++static int figure_loop_size(struct loop_device *lo) ++{ ++ loff_t size, offs; ++ unsigned int x; ++ int err = 0; ++ kdev_t lodev = lo->lo_device; ++ ++ offs = LDE_lo_offset; ++ if (S_ISREG(lo->lo_backing_file->f_dentry->d_inode->i_mode)) { ++ size = lo->lo_backing_file->f_dentry->d_inode->i_size; ++ } else { ++ offs &= ~((loff_t)511); ++ if (blk_size[MAJOR(lodev)]) ++ size = (loff_t)(blk_size[MAJOR(lodev)][MINOR(lodev)]) << BLOCK_SIZE_BITS; ++ else ++ size = 1024*1024*1024; /* unknown size */ ++ } ++ if ((offs > 0) && (offs < size)) { ++ size -= offs; ++ } else { ++ if (offs) ++ err = -EINVAL; ++ LDE_lo_offset = 0; ++ LDE_lo_offs_sec = LDE_lo_iv_remove = 0; ++ } ++ if ((LDE_lo_sizelimit > 0) && (LDE_lo_sizelimit <= size)) { ++ size = LDE_lo_sizelimit; ++ } else { ++ if (LDE_lo_sizelimit) ++ err = -EINVAL; ++ LDE_lo_sizelimit = 0; ++ } ++ size >>= BLOCK_SIZE_BITS; + + /* +- * easy way out, although it does waste some memory for < PAGE_SIZE +- * blocks... if highmem bounce buffering can get away with it, +- * so can we :-) ++ * Unfortunately, if we want to do I/O on the device, ++ * the number of 1024-byte blocks has to fit into unsigned int + */ +- do { +- bh->b_page = alloc_page(GFP_NOIO); +- if (bh->b_page) +- break; ++ x = (unsigned int)size; ++ if ((loff_t)x != size) { ++ err = -EFBIG; ++ size = 0; ++ } + +- run_task_queue(&tq_disk); +- set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(HZ); +- } while (1); ++ loop_sizes[lo->lo_number] = size; ++ return err; ++} + +- bh->b_data = page_address(bh->b_page); +- bh->b_end_io = loop_end_io_transfer; +- bh->b_private = rbh; +- init_waitqueue_head(&bh->b_wait); ++static inline int lo_do_Transfer(struct loop_device *lo, int cmd, char *rbuf, ++ char *lbuf, int size, int rblock) ++{ ++ if (!lo->transfer) ++ return 0; + +-out_bh: +- bh->b_rsector = rbh->b_rsector + (lo->lo_offset >> 9); +- spin_lock_irq(&lo->lo_lock); +- bh->b_rdev = lo->lo_device; +- spin_unlock_irq(&lo->lo_lock); ++ /* this ugly cast is needed to work around (possible) kmap damage in function prototype */ ++ /* should be: return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock); */ ++ return ((int (*)(struct loop_device *, int, char *, char *, int, int))lo->transfer)(lo, cmd, rbuf, lbuf, size, rblock); ++} + +- return bh; ++static int loop_file_io(struct file *file, char *buf, int size, loff_t *ppos, int w) ++{ ++ mm_segment_t fs; ++ int x, y, z; ++ ++ y = 0; ++ do { ++ z = size - y; ++ fs = get_fs(); ++ set_fs(get_ds()); ++ if (w) { ++ x = file->f_op->write(file, buf + y, z, ppos); ++ set_fs(fs); ++ } else { ++ x = file->f_op->read(file, buf + y, z, ppos); ++ set_fs(fs); ++ if (!x) ++ return 1; ++ } ++ if (x < 0) { ++ if ((x == -EAGAIN) || (x == -ENOMEM) || (x == -ERESTART) || (x == -EINTR)) { ++ run_task_queue(&tq_disk); ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(HZ / 2); ++ continue; ++ } ++ return 1; ++ } ++ y += x; ++ } while (y < size); ++ return 0; ++} ++ ++static int do_bh_filebacked(struct loop_device *lo, struct buffer_head *bh, int rw) ++{ ++ loff_t pos; ++ struct file *file = lo->lo_backing_file; ++ char *data, *buf; ++ unsigned int size, len; ++ unsigned long IV; ++ ++ pos = ((loff_t) bh->b_rsector << 9) + LDE_lo_offset; ++ buf = page_address(LDE_lo_bh_free->b_page); ++ len = bh->b_size; ++ data = bh_kmap(bh); ++ IV = bh->b_rsector; ++ if (!LDE_lo_iv_remove) ++ IV += 
LDE_lo_offs_sec; ++ while (len > 0) { ++ if (lo->lo_encrypt_type == LO_CRYPT_NONE) { ++ /* this code relies that NONE transfer is a no-op */ ++ buf = data; ++ } ++ size = PAGE_SIZE; ++ if (size > len) ++ size = len; ++ if (rw == WRITE) { ++ if (lo_do_Transfer(lo, WRITE, buf, data, size, IV)) { ++ printk(KERN_ERR "loop%d: write transfer error, sector %lu\n", lo->lo_number, IV); ++ goto kunmap_and_out; ++ } ++ if (loop_file_io(file, buf, size, &pos, 1)) { ++ printk(KERN_ERR "loop%d: write i/o error, sector %lu\n", lo->lo_number, IV); ++ goto kunmap_and_out; ++ } ++ } else { ++ if (loop_file_io(file, buf, size, &pos, 0)) { ++ printk(KERN_ERR "loop%d: read i/o error, sector %lu\n", lo->lo_number, IV); ++ goto kunmap_and_out; ++ } ++ if (lo_do_Transfer(lo, READ, buf, data, size, IV)) { ++ printk(KERN_ERR "loop%d: read transfer error, sector %lu\n", lo->lo_number, IV); ++ goto kunmap_and_out; ++ } ++ } ++ data += size; ++ len -= size; ++ IV += size >> 9; ++ } ++ bh_kunmap(bh); ++ return 0; ++ ++kunmap_and_out: ++ bh_kunmap(bh); ++ return 1; + } + + static int loop_make_request(request_queue_t *q, int rw, struct buffer_head *rbh) + { +- struct buffer_head *bh = NULL; ++ struct buffer_head *bh; + struct loop_device *lo; +- unsigned long IV; ++ char *md; + ++ set_current_state(TASK_RUNNING); + if (!buffer_locked(rbh)) + BUG(); + + if (MINOR(rbh->b_rdev) >= max_loop) + goto out; + +- lo = &loop_dev[MINOR(rbh->b_rdev)]; ++ lo = (struct loop_device *)(&loop_dev[MINOR(rbh->b_rdev)]); + spin_lock_irq(&lo->lo_lock); + if (lo->lo_state != Lo_bound) + goto inactive; +@@ -483,45 +610,55 @@ static int loop_make_request(request_que + } else if (rw == READA) { + rw = READ; + } else if (rw != READ) { +- printk(KERN_ERR "loop: unknown command (%d)\n", rw); ++ printk(KERN_ERR "loop%d: unknown command (%d)\n", lo->lo_number, rw); + goto err; + } + +- rbh = blk_queue_bounce(q, rw, rbh); +- + /* + * file backed, queue for loop_thread to handle + */ + if (lo->lo_flags & LO_FLAGS_DO_BMAP) { +- /* +- * rbh locked at this point, noone else should clear +- * the dirty flag +- */ +- if (rw == WRITE) +- set_bit(BH_Dirty, &rbh->b_state); +- loop_add_bh(lo, rbh); ++ loop_add_queue_last(lo, rbh, (rw == WRITE) ? &LDE_lo_bh_que1 : &LDE_lo_bh_que0); + return 0; + } + + /* +- * piggy old buffer on original, and submit for I/O ++ * device backed, just remap rdev & rsector for NONE transfer + */ +- bh = loop_get_buffer(lo, rbh); +- IV = loop_get_iv(lo, rbh->b_rsector); ++ if (lo->lo_encrypt_type == LO_CRYPT_NONE) { ++ rbh->b_rsector += LDE_lo_offs_sec; ++ rbh->b_rdev = lo->lo_device; ++ generic_make_request(rw, rbh); ++ if (atomic_dec_and_test(&lo->lo_pending)) ++ wake_up_interruptible(&LDE_lo_bh_wait); ++ return 0; ++ } ++ ++ /* ++ * device backed, start reads and writes now if buffer available ++ */ ++ bh = loop_get_buffer(lo, rbh, 0, rw); ++ if (!bh) { ++ /* just queue request and let thread handle alloc later */ ++ loop_add_queue_last(lo, rbh, (rw == WRITE) ? 
&LDE_lo_bh_que1 : &LDE_lo_bh_que2); ++ return 0; ++ } + if (rw == WRITE) { +- set_bit(BH_Dirty, &bh->b_state); +- if (lo_do_transfer(lo, WRITE, bh->b_data, rbh->b_data, +- bh->b_size, IV)) ++ int trv; ++ md = bh_kmap(rbh); ++ trv = lo_do_Transfer(lo, WRITE, bh->b_data, md, bh->b_size, bh->b_rsector - LDE_lo_iv_remove); ++ bh_kunmap(rbh); ++ if (trv) { ++ loop_put_buffer(lo, bh); + goto err; ++ } + } +- + generic_make_request(rw, bh); + return 0; + + err: + if (atomic_dec_and_test(&lo->lo_pending)) +- up(&lo->lo_bh_mutex); +- loop_put_buffer(bh); ++ wake_up_interruptible(&LDE_lo_bh_wait); + out: + buffer_IO_error(rbh); + return 0; +@@ -530,30 +667,6 @@ inactive: + goto out; + } + +-static inline void loop_handle_bh(struct loop_device *lo,struct buffer_head *bh) +-{ +- int ret; +- +- /* +- * For block backed loop, we know this is a READ +- */ +- if (lo->lo_flags & LO_FLAGS_DO_BMAP) { +- int rw = !!test_and_clear_bit(BH_Dirty, &bh->b_state); +- +- ret = do_bh_filebacked(lo, bh, rw); +- bh->b_end_io(bh, !ret); +- } else { +- struct buffer_head *rbh = bh->b_private; +- unsigned long IV = loop_get_iv(lo, rbh->b_rsector); +- +- ret = lo_do_transfer(lo, READ, bh->b_data, rbh->b_data, +- bh->b_size, IV); +- +- rbh->b_end_io(rbh, !ret); +- loop_put_buffer(bh); +- } +-} +- + /* + * worker thread that handles reads/writes to file backed loop devices, + * to avoid blocking in our make_request_fn. it also does loop decrypting +@@ -563,25 +676,71 @@ static inline void loop_handle_bh(struct + static int loop_thread(void *data) + { + struct loop_device *lo = data; +- struct buffer_head *bh; ++ struct buffer_head *bh, *xbh; ++ int x, rw, qi = 0, flushcnt = 0; ++ wait_queue_t waitq; ++ que_look_up_table qt[4] = { ++ { &LDE_lo_bh_que0, &LDE_lo_bh_que1, &LDE_lo_bh_que2, 0, 1, 2 }, ++ { &LDE_lo_bh_que2, &LDE_lo_bh_que0, &LDE_lo_bh_que1, 2, 0, 1 }, ++ { &LDE_lo_bh_que0, &LDE_lo_bh_que2, &LDE_lo_bh_que1, 0, 2, 1 }, ++ { &LDE_lo_bh_que1, &LDE_lo_bh_que0, &LDE_lo_bh_que2, 1, 0, 2 } ++ }; ++ char *md; ++ static const struct rlimit loop_rlim_defaults[RLIM_NLIMITS] = INIT_RLIMITS; + ++ init_waitqueue_entry(&waitq, current); ++ memcpy(¤t->rlim[0], &loop_rlim_defaults[0], sizeof(current->rlim)); + daemonize(); + exit_files(current); ++#if !defined(NO_REPARENT_TO_INIT) + reparent_to_init(); ++#endif + + sprintf(current->comm, "loop%d", lo->lo_number); + ++#if !defined(NO_TASK_STRUCT_SIGMASK_LOCK) + spin_lock_irq(¤t->sigmask_lock); ++#elif NO_TASK_STRUCT_SIGMASK_LOCK == 1 ++ spin_lock_irq(¤t->sighand->siglock); ++#else ++ spin_lock_irq(¤t->sig->siglock); ++#endif + sigfillset(¤t->blocked); + flush_signals(current); ++#if !defined(NO_TASK_STRUCT_SIGMASK_LOCK) + spin_unlock_irq(¤t->sigmask_lock); ++#elif NO_TASK_STRUCT_SIGMASK_LOCK == 1 ++ spin_unlock_irq(¤t->sighand->siglock); ++#else ++ spin_unlock_irq(¤t->sig->siglock); ++#endif ++ ++ if (lo_nice > 0) ++ lo_nice = 0; ++ if (lo_nice < -20) ++ lo_nice = -20; ++#if defined(DEF_NICE) && defined(DEF_COUNTER) ++ /* old scheduler syntax */ ++ current->policy = SCHED_OTHER; ++ current->nice = lo_nice; ++#else ++ /* O(1) scheduler syntax */ ++ set_user_nice(current, lo_nice); ++#endif + + spin_lock_irq(&lo->lo_lock); + lo->lo_state = Lo_bound; + atomic_inc(&lo->lo_pending); + spin_unlock_irq(&lo->lo_lock); + ++#if defined(PF_NOIO) + current->flags |= PF_NOIO; ++#endif ++#if defined(PF_NOFREEZE) ++ current->flags |= PF_NOFREEZE; ++#elif defined(PF_IOTHREAD) ++ current->flags |= PF_IOTHREAD; ++#endif + + /* + * up sem, we are running +@@ -589,23 +748,110 @@ static int 
loop_thread(void *data) + up(&lo->lo_sem); + + for (;;) { +- down_interruptible(&lo->lo_bh_mutex); ++ add_wait_queue(&LDE_lo_bh_wait, &waitq); ++ for (;;) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ if (!atomic_read(&lo->lo_pending)) ++ break; ++ ++ x = 0; ++ spin_lock_irq(&lo->lo_lock); ++ if (LDE_lo_bh_que0) { ++ x = 1; ++ } else if (LDE_lo_bh_que1 || LDE_lo_bh_que2) { ++ /* file backed works too because LDE_lo_bh_need == 0 */ ++ if (LDE_lo_bh_free || !LDE_lo_bh_need) ++ x = 1; ++ } ++ spin_unlock_irq(&lo->lo_lock); ++ if (x) ++ break; ++ ++ schedule(); ++ } ++ set_current_state(TASK_RUNNING); ++ remove_wait_queue(&LDE_lo_bh_wait, &waitq); ++ + /* +- * could be upped because of tear-down, not because of ++ * could be woken because of tear-down, not because of + * pending work + */ + if (!atomic_read(&lo->lo_pending)) + break; + +- bh = loop_get_bh(lo); +- if (!bh) { +- printk("loop: missing bh\n"); ++ /* ++ * read queues using alternating order to prevent starvation ++ */ ++ bh = loop_get_bh(lo, &x, &qt[++qi & 3]); ++ if (!bh) ++ continue; ++ ++ /* ++ * x list tag usage(buffer-allocated) ++ * --- -------------- ----------------------- ++ * 0 LDE_lo_bh_que0 dev-read(y) / file-read ++ * 1 LDE_lo_bh_que1 dev-write(n) / file-write ++ * 2 LDE_lo_bh_que2 dev-read(n) ++ */ ++ rw = (x == 1) ? WRITE : READ; ++ if ((x >= 1) && !(lo->lo_flags & LO_FLAGS_DO_BMAP)) { ++ /* loop_make_request didn't allocate a buffer, do that now */ ++ xbh = loop_get_buffer(lo, bh, 1, rw); ++ if (!xbh) { ++ run_task_queue(&tq_disk); ++ flushcnt = 0; ++ loop_add_queue_first(lo, bh, (rw == WRITE) ? &LDE_lo_bh_que1 : &LDE_lo_bh_que2); ++ /* LDE_lo_bh_need should be 1 now, go back to sleep */ ++ continue; ++ } ++ if (rw == WRITE) { ++ int trv; ++ md = bh_kmap(bh); ++ trv = lo_do_Transfer(lo, WRITE, xbh->b_data, md, xbh->b_size, xbh->b_rsector - LDE_lo_iv_remove); ++ bh_kunmap(bh); ++ if (trv) { ++ loop_put_buffer(lo, xbh); ++ buffer_IO_error(bh); ++ atomic_dec(&lo->lo_pending); ++ continue; ++ } ++ } ++ generic_make_request(rw, xbh); ++ ++ /* start I/O if there are no more requests lacking buffers */ ++ x = 0; ++ spin_lock_irq(&lo->lo_lock); ++ if (!LDE_lo_bh_que1 && !LDE_lo_bh_que2) ++ x = 1; ++ spin_unlock_irq(&lo->lo_lock); ++ if (x || (++flushcnt >= LDE_lo_bh_flsh)) { ++ run_task_queue(&tq_disk); ++ flushcnt = 0; ++ } ++ ++ /* request not completely processed yet */ + continue; + } +- loop_handle_bh(lo, bh); ++ if (lo->lo_flags & LO_FLAGS_DO_BMAP) { ++ /* request is for file backed device */ ++ x = do_bh_filebacked(lo, bh, rw); ++ bh->b_reqnext = NULL; ++ bh->b_end_io(bh, !x); ++ } else { ++ /* device backed read has completed, do decrypt now */ ++ xbh = bh->b_private; ++ /* must not use bh->b_rsector as IV, as it may be modified by LVM at this point */ ++ /* instead, recompute IV from original request */ ++ md = bh_kmap(xbh); ++ x = lo_do_Transfer(lo, READ, bh->b_data, md, bh->b_size, xbh->b_rsector + LDE_lo_offs_sec - LDE_lo_iv_remove); ++ bh_kunmap(xbh); ++ xbh->b_reqnext = NULL; ++ xbh->b_end_io(xbh, !x); ++ loop_put_buffer(lo, bh); ++ } + + /* +- * upped both for pending work and tear-down, lo_pending ++ * woken both for pending work and tear-down, lo_pending + * will hit zero then + */ + if (atomic_dec_and_test(&lo->lo_pending)) +@@ -616,15 +862,34 @@ static int loop_thread(void *data) + return 0; + } + ++static void loop_set_softblksz(struct loop_device *lo, kdev_t dev) ++{ ++ int bs = 0, x; ++ ++ if (blksize_size[MAJOR(lo->lo_device)]) ++ bs = blksize_size[MAJOR(lo->lo_device)][MINOR(lo->lo_device)]; ++ if 
(!bs) ++ bs = BLOCK_SIZE; ++ if (lo->lo_flags & LO_FLAGS_DO_BMAP) { ++ x = loop_sizes[lo->lo_number]; ++ if ((bs == 8192) && (x & 7)) ++ bs = 4096; ++ if ((bs == 4096) && (x & 3)) ++ bs = 2048; ++ if ((bs == 2048) && (x & 1)) ++ bs = 1024; ++ } ++ set_blocksize(dev, bs); ++} ++ + static int loop_set_fd(struct loop_device *lo, struct file *lo_file, kdev_t dev, + unsigned int arg) + { + struct file *file; + struct inode *inode; + kdev_t lo_device; +- int lo_flags = 0; ++ int lo_flags = 0, hardsz = 512; + int error; +- int bs; + + MOD_INC_USE_COUNT; + +@@ -643,33 +908,46 @@ static int loop_set_fd(struct loop_devic + if (!(file->f_mode & FMODE_WRITE)) + lo_flags |= LO_FLAGS_READ_ONLY; + ++ LDE_lo_offset = LDE_lo_sizelimit = 0; ++ LDE_lo_offs_sec = LDE_lo_iv_remove = 0; ++ LDE_lo_bh_free = LDE_lo_bh_que2 = LDE_lo_bh_que1 = LDE_lo_bh_que0 = NULL; ++ LDE_lo_bh_need = LDE_lo_bh_flsh = 0; ++ init_waitqueue_head(&LDE_lo_bh_wait); + if (S_ISBLK(inode->i_mode)) { + lo_device = inode->i_rdev; + if (lo_device == dev) { + error = -EBUSY; + goto out_putf; + } ++ if (loop_prealloc_init(lo, 0)) { ++ error = -ENOMEM; ++ goto out_putf; ++ } ++ hardsz = get_hardsect_size(lo_device); + } else if (S_ISREG(inode->i_mode)) { +- struct address_space_operations *aops = inode->i_mapping->a_ops; + /* + * If we can't read - sorry. If we only can't write - well, + * it's going to be read-only. + */ +- if (!aops->readpage) ++ if (!file->f_op || !file->f_op->read) + goto out_putf; + +- if (!aops->prepare_write || !aops->commit_write) ++ if (!file->f_op->write) + lo_flags |= LO_FLAGS_READ_ONLY; + + lo_device = inode->i_dev; + lo_flags |= LO_FLAGS_DO_BMAP; ++ if (loop_prealloc_init(lo, 1)) { ++ error = -ENOMEM; ++ goto out_putf; ++ } + error = 0; + } else + goto out_putf; + + get_file(file); + +- if (IS_RDONLY (inode) || is_read_only(lo_device) ++ if ((S_ISREG(inode->i_mode) && IS_RDONLY(inode)) || is_read_only(lo_device) + || !(lo_file->f_mode & FMODE_WRITE)) + lo_flags |= LO_FLAGS_READ_ONLY; + +@@ -677,28 +955,40 @@ static int loop_set_fd(struct loop_devic + + lo->lo_device = lo_device; + lo->lo_flags = lo_flags; ++ if(lo_flags & LO_FLAGS_READ_ONLY) ++ lo->lo_flags |= 0x200000; /* export to user space */ + lo->lo_backing_file = file; + lo->transfer = NULL; + lo->ioctl = NULL; +- figure_loop_size(lo); +- lo->old_gfp_mask = inode->i_mapping->gfp_mask; +- inode->i_mapping->gfp_mask &= ~(__GFP_IO|__GFP_FS); +- +- bs = 0; +- if (blksize_size[MAJOR(lo_device)]) +- bs = blksize_size[MAJOR(lo_device)][MINOR(lo_device)]; +- if (!bs) +- bs = BLOCK_SIZE; ++ if (figure_loop_size(lo)) { ++ error = -EFBIG; ++ goto out_cleanup; ++ } + +- set_blocksize(dev, bs); ++ if (lo_flags & LO_FLAGS_DO_BMAP) { ++ lo->old_gfp_mask = inode->i_mapping->gfp_mask; ++ inode->i_mapping->gfp_mask &= ~(__GFP_IO|__GFP_FS); ++ inode->i_mapping->gfp_mask |= __GFP_HIGH; ++ } else { ++ lo->old_gfp_mask = -1; ++ } + +- lo->lo_bh = lo->lo_bhtail = NULL; +- kernel_thread(loop_thread, lo, CLONE_FS | CLONE_FILES | CLONE_SIGHAND); +- down(&lo->lo_sem); ++ loop_hardsizes[MINOR(dev)] = hardsz; ++ loop_set_softblksz(lo, dev); + ++ error = kernel_thread(loop_thread, lo, CLONE_FS | CLONE_FILES | CLONE_SIGHAND); ++ if(error < 0) ++ goto out_mapping; ++ down(&lo->lo_sem); + fput(file); + return 0; + ++ out_mapping: ++ if(lo->old_gfp_mask != -1) ++ inode->i_mapping->gfp_mask = lo->old_gfp_mask; ++ out_cleanup: ++ loop_prealloc_cleanup(lo); ++ fput(file); + out_putf: + fput(file); + out: +@@ -711,6 +1001,7 @@ static int loop_release_xfer(struct loop + int err = 0; + if 
(lo->lo_encrypt_type) { + struct loop_func_table *xfer= xfer_funcs[lo->lo_encrypt_type]; ++ lo->transfer = NULL; + if (xfer && xfer->release) + err = xfer->release(lo); + if (xfer && xfer->unlock) +@@ -736,7 +1027,11 @@ static int loop_init_xfer(struct loop_de + return err; + } + ++#if LINUX_VERSION_CODE >= 0x2040C + static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) ++#else ++static int loop_clr_fd(struct loop_device *lo, kdev_t dev) ++#endif + { + struct file *filp = lo->lo_backing_file; + int gfp = lo->old_gfp_mask; +@@ -751,11 +1046,12 @@ static int loop_clr_fd(struct loop_devic + spin_lock_irq(&lo->lo_lock); + lo->lo_state = Lo_rundown; + if (atomic_dec_and_test(&lo->lo_pending)) +- up(&lo->lo_bh_mutex); ++ wake_up_interruptible(&LDE_lo_bh_wait); + spin_unlock_irq(&lo->lo_lock); + + down(&lo->lo_sem); + ++ loop_prealloc_cleanup(lo); + lo->lo_backing_file = NULL; + + loop_release_xfer(lo); +@@ -763,23 +1059,81 @@ static int loop_clr_fd(struct loop_devic + lo->ioctl = NULL; + lo->lo_device = 0; + lo->lo_encrypt_type = 0; +- lo->lo_offset = 0; ++ LDE_lo_offset = LDE_lo_sizelimit = 0; ++ LDE_lo_offs_sec = LDE_lo_iv_remove = 0; + lo->lo_encrypt_key_size = 0; + lo->lo_flags = 0; + memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); + memset(lo->lo_name, 0, LO_NAME_SIZE); ++ memset(LDE_lo_crypt_name, 0, LO_NAME_SIZE); + loop_sizes[lo->lo_number] = 0; ++#if LINUX_VERSION_CODE >= 0x2040C + invalidate_bdev(bdev, 0); +- filp->f_dentry->d_inode->i_mapping->gfp_mask = gfp; ++#else ++ invalidate_buffers(dev); ++#endif ++ if (gfp != -1) ++ filp->f_dentry->d_inode->i_mapping->gfp_mask = gfp; + lo->lo_state = Lo_unbound; + fput(filp); + MOD_DEC_USE_COUNT; + return 0; + } + +-static int loop_set_status(struct loop_device *lo, struct loop_info *arg) ++static void ++loop_info64_from_old(const struct loop_info *info, struct loopinfo64 *info64) ++{ ++ memset(info64, 0, sizeof(*info64)); ++ info64->lo_number = info->lo_number; ++ info64->lo_device = info->lo_device; ++ info64->lo_inode = info->lo_inode; ++ info64->lo_rdevice = info->lo_rdevice; ++ info64->lo_offset = info->lo_offset; ++ info64->lo_encrypt_type = info->lo_encrypt_type; ++ info64->lo_encrypt_key_size = info->lo_encrypt_key_size; ++ info64->lo_flags = info->lo_flags; ++ info64->lo_init[0] = info->lo_init[0]; ++ info64->lo_init[1] = info->lo_init[1]; ++ if (info->lo_encrypt_type == 18) /* LO_CRYPT_CRYPTOAPI */ ++ memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE); ++ else ++ memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE); ++ memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE); ++} ++ ++static int ++loop_info64_to_old(struct loopinfo64 *info64, struct loop_info *info) ++{ ++ memset(info, 0, sizeof(*info)); ++ info->lo_number = info64->lo_number; ++ info->lo_device = info64->lo_device; ++ info->lo_inode = info64->lo_inode; ++ info->lo_rdevice = info64->lo_rdevice; ++ info->lo_offset = info64->lo_offset; ++ info->lo_encrypt_type = info64->lo_encrypt_type; ++ info->lo_encrypt_key_size = info64->lo_encrypt_key_size; ++ info->lo_flags = info64->lo_flags; ++ info->lo_init[0] = info64->lo_init[0]; ++ info->lo_init[1] = info64->lo_init[1]; ++ if (info->lo_encrypt_type == 18) /* LO_CRYPT_CRYPTOAPI */ ++ memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE); ++ else ++ memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE); ++ memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); ++ ++ /* error in case values were truncated */ ++ if (info->lo_device != info64->lo_device || ++ 
info->lo_rdevice != info64->lo_rdevice || ++ info->lo_inode != info64->lo_inode || ++ info->lo_offset != info64->lo_offset || ++ info64->lo_sizelimit) ++ return -EOVERFLOW; ++ ++ return 0; ++} ++ ++static int loop_set_status(struct loop_device *lo, kdev_t dev, struct loopinfo64 *info, struct loop_info *oldinfo) + { +- struct loop_info info; + int err; + unsigned int type; + +@@ -788,62 +1142,137 @@ static int loop_set_status(struct loop_d + return -EPERM; + if (lo->lo_state != Lo_bound) + return -ENXIO; +- if (copy_from_user(&info, arg, sizeof (struct loop_info))) +- return -EFAULT; +- if ((unsigned int) info.lo_encrypt_key_size > LO_KEY_SIZE) ++ if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) + return -EINVAL; +- type = info.lo_encrypt_type; ++ type = info->lo_encrypt_type; + if (type >= MAX_LO_CRYPT || xfer_funcs[type] == NULL) + return -EINVAL; +- if (type == LO_CRYPT_XOR && info.lo_encrypt_key_size == 0) ++ if (type == LO_CRYPT_XOR && info->lo_encrypt_key_size == 0) + return -EINVAL; + err = loop_release_xfer(lo); +- if (!err) +- err = loop_init_xfer(lo, type, &info); + if (err) + return err; + +- lo->lo_offset = info.lo_offset; +- strncpy(lo->lo_name, info.lo_name, LO_NAME_SIZE); ++ if ((loff_t)info->lo_offset < 0) { ++ /* negative offset == remove offset from IV computations */ ++ LDE_lo_offset = -(info->lo_offset); ++ LDE_lo_iv_remove = LDE_lo_offset >> 9; ++ } else { ++ /* positive offset == include offset in IV computations */ ++ LDE_lo_offset = info->lo_offset; ++ LDE_lo_iv_remove = 0; ++ } ++ LDE_lo_offs_sec = LDE_lo_offset >> 9; ++ LDE_lo_sizelimit = info->lo_sizelimit; ++ err = figure_loop_size(lo); ++ if (err) ++ return err; ++ loop_set_softblksz(lo, dev); ++ ++ /* transfer init function for 2.4 kernels takes old style struct */ ++ err = loop_init_xfer(lo, type, oldinfo); ++ /* copy key -- just in case transfer init func modified it */ ++ memcpy(info->lo_encrypt_key, oldinfo->lo_encrypt_key, sizeof(info->lo_encrypt_key)); ++ if (err) ++ return err; + ++ strncpy(lo->lo_name, info->lo_file_name, LO_NAME_SIZE); ++ strncpy(LDE_lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); + lo->transfer = xfer_funcs[type]->transfer; + lo->ioctl = xfer_funcs[type]->ioctl; +- lo->lo_encrypt_key_size = info.lo_encrypt_key_size; +- lo->lo_init[0] = info.lo_init[0]; +- lo->lo_init[1] = info.lo_init[1]; +- if (info.lo_encrypt_key_size) { +- memcpy(lo->lo_encrypt_key, info.lo_encrypt_key, +- info.lo_encrypt_key_size); ++ lo->lo_encrypt_key_size = info->lo_encrypt_key_size; ++ lo->lo_init[0] = info->lo_init[0]; ++ lo->lo_init[1] = info->lo_init[1]; ++ if (info->lo_encrypt_key_size) { ++ memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, ++ info->lo_encrypt_key_size); + lo->lo_key_owner = current->uid; +- } +- figure_loop_size(lo); ++ } ++ + return 0; + } + +-static int loop_get_status(struct loop_device *lo, struct loop_info *arg) ++static int loop_get_status(struct loop_device *lo, struct loopinfo64 *info) + { +- struct loop_info info; + struct file *file = lo->lo_backing_file; + + if (lo->lo_state != Lo_bound) + return -ENXIO; +- if (!arg) +- return -EINVAL; +- memset(&info, 0, sizeof(info)); +- info.lo_number = lo->lo_number; +- info.lo_device = kdev_t_to_nr(file->f_dentry->d_inode->i_dev); +- info.lo_inode = file->f_dentry->d_inode->i_ino; +- info.lo_rdevice = kdev_t_to_nr(lo->lo_device); +- info.lo_offset = lo->lo_offset; +- info.lo_flags = lo->lo_flags; +- strncpy(info.lo_name, lo->lo_name, LO_NAME_SIZE); +- info.lo_encrypt_type = lo->lo_encrypt_type; ++ memset(info, 0, 
sizeof(*info)); ++ info->lo_number = lo->lo_number; ++ info->lo_device = kdev_t_to_nr(file->f_dentry->d_inode->i_dev); ++ info->lo_inode = file->f_dentry->d_inode->i_ino; ++ info->lo_rdevice = kdev_t_to_nr(lo->lo_device); ++ info->lo_offset = LDE_lo_iv_remove ? -(LDE_lo_offset) : LDE_lo_offset; ++ info->lo_sizelimit = LDE_lo_sizelimit; ++ info->lo_flags = lo->lo_flags; ++ strncpy(info->lo_file_name, lo->lo_name, LO_NAME_SIZE); ++ strncpy(info->lo_crypt_name, LDE_lo_crypt_name, LO_NAME_SIZE); ++ info->lo_encrypt_type = lo->lo_encrypt_type; + if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { +- info.lo_encrypt_key_size = lo->lo_encrypt_key_size; +- memcpy(info.lo_encrypt_key, lo->lo_encrypt_key, ++ info->lo_encrypt_key_size = lo->lo_encrypt_key_size; ++ memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, + lo->lo_encrypt_key_size); ++ info->lo_init[0] = lo->lo_init[0]; ++ info->lo_init[1] = lo->lo_init[1]; + } +- return copy_to_user(arg, &info, sizeof(info)) ? -EFAULT : 0; ++ return 0; ++} ++ ++static int ++loop_set_status_n(struct loop_device *lo, kdev_t dev, void *arg, int n) ++{ ++ struct loop_info info; ++ struct loopinfo64 info64; ++ int err; ++ ++ if (n) { ++ if (copy_from_user(&info64, arg, sizeof (struct loopinfo64))) ++ return -EFAULT; ++ /* truncation errors can be ignored here as transfer init func only wants key bits */ ++ loop_info64_to_old(&info64, &info); ++ } else { ++ if (copy_from_user(&info, arg, sizeof (struct loop_info))) ++ return -EFAULT; ++ loop_info64_from_old(&info, &info64); ++ } ++ err = loop_set_status(lo, dev, &info64, &info); ++ memset(&info.lo_encrypt_key[0], 0, sizeof(info.lo_encrypt_key)); ++ memset(&info64.lo_encrypt_key[0], 0, sizeof(info64.lo_encrypt_key)); ++ return err; ++} ++ ++static int ++loop_get_status_old(struct loop_device *lo, struct loop_info *arg) { ++ struct loop_info info; ++ struct loopinfo64 info64; ++ int err = 0; ++ ++ if (!arg) ++ err = -EINVAL; ++ if (!err) ++ err = loop_get_status(lo, &info64); ++ if (!err) ++ err = loop_info64_to_old(&info64, &info); ++ if (!err && copy_to_user(arg, &info, sizeof(info))) ++ err = -EFAULT; ++ ++ return err; ++} ++ ++static int ++loop_get_status64(struct loop_device *lo, struct loopinfo64 *arg) { ++ struct loopinfo64 info64; ++ int err = 0; ++ ++ if (!arg) ++ err = -EINVAL; ++ if (!err) ++ err = loop_get_status(lo, &info64); ++ if (!err && copy_to_user(arg, &info64, sizeof(info64))) ++ err = -EFAULT; ++ ++ return err; + } + + static int lo_ioctl(struct inode * inode, struct file * file, +@@ -862,20 +1291,30 @@ static int lo_ioctl(struct inode * inode + dev = MINOR(inode->i_rdev); + if (dev >= max_loop) + return -ENODEV; +- lo = &loop_dev[dev]; ++ lo = (struct loop_device *)(&loop_dev[dev]); + down(&lo->lo_ctl_mutex); + switch (cmd) { + case LOOP_SET_FD: + err = loop_set_fd(lo, file, inode->i_rdev, arg); + break; + case LOOP_CLR_FD: ++#if LINUX_VERSION_CODE >= 0x2040C + err = loop_clr_fd(lo, inode->i_bdev); ++#else ++ err = loop_clr_fd(lo, inode->i_rdev); ++#endif + break; + case LOOP_SET_STATUS: +- err = loop_set_status(lo, (struct loop_info *) arg); ++ err = loop_set_status_n(lo, inode->i_rdev, (void *) arg, 0); + break; + case LOOP_GET_STATUS: +- err = loop_get_status(lo, (struct loop_info *) arg); ++ err = loop_get_status_old(lo, (struct loop_info *) arg); ++ break; ++ case LOOP_SET_STATUS64: ++ err = loop_set_status_n(lo, inode->i_rdev, (void *) arg, 1); ++ break; ++ case LOOP_GET_STATUS64: ++ err = loop_get_status64(lo, (struct loopinfo64 *) arg); + break; + case BLKGETSIZE: + if (lo->lo_state 
!= Lo_bound) { +@@ -884,6 +1323,7 @@ static int lo_ioctl(struct inode * inode + } + err = put_user((unsigned long)loop_sizes[lo->lo_number] << 1, (unsigned long *) arg); + break; ++#if defined(BLKGETSIZE64) + case BLKGETSIZE64: + if (lo->lo_state != Lo_bound) { + err = -ENXIO; +@@ -891,9 +1331,18 @@ static int lo_ioctl(struct inode * inode + } + err = put_user((u64)loop_sizes[lo->lo_number] << 10, (u64*)arg); + break; ++#endif ++#if defined(BLKBSZGET) + case BLKBSZGET: ++#endif ++#if defined(BLKBSZSET) + case BLKBSZSET: ++#endif ++#if defined(BLKSSZGET) + case BLKSSZGET: ++#endif ++ case BLKROGET: ++ case BLKROSET: + err = blk_ioctl(inode->i_rdev, cmd, arg); + break; + default: +@@ -906,7 +1355,7 @@ static int lo_ioctl(struct inode * inode + static int lo_open(struct inode *inode, struct file *file) + { + struct loop_device *lo; +- int dev, type; ++ int dev; + + if (!inode) + return -EINVAL; +@@ -918,13 +1367,9 @@ static int lo_open(struct inode *inode, + if (dev >= max_loop) + return -ENODEV; + +- lo = &loop_dev[dev]; ++ lo = (struct loop_device *)(&loop_dev[dev]); + MOD_INC_USE_COUNT; + down(&lo->lo_ctl_mutex); +- +- type = lo->lo_encrypt_type; +- if (type && xfer_funcs[type] && xfer_funcs[type]->lock) +- xfer_funcs[type]->lock(lo); + lo->lo_refcnt++; + up(&lo->lo_ctl_mutex); + return 0; +@@ -933,7 +1378,7 @@ static int lo_open(struct inode *inode, + static int lo_release(struct inode *inode, struct file *file) + { + struct loop_device *lo; +- int dev, type; ++ int dev; + + if (!inode) + return 0; +@@ -946,20 +1391,18 @@ static int lo_release(struct inode *inod + if (dev >= max_loop) + return 0; + +- lo = &loop_dev[dev]; ++ lo = (struct loop_device *)(&loop_dev[dev]); + down(&lo->lo_ctl_mutex); +- type = lo->lo_encrypt_type; + --lo->lo_refcnt; +- if (xfer_funcs[type] && xfer_funcs[type]->unlock) +- xfer_funcs[type]->unlock(lo); +- + up(&lo->lo_ctl_mutex); + MOD_DEC_USE_COUNT; + return 0; + } + + static struct block_device_operations lo_fops = { ++#if !defined(NO_BLOCK_DEVICE_OPERATIONS_OWNER) + owner: THIS_MODULE, ++#endif + open: lo_open, + release: lo_release, + ioctl: lo_ioctl, +@@ -970,11 +1413,13 @@ static struct block_device_operations lo + */ + MODULE_PARM(max_loop, "i"); + MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)"); ++#if defined(MODULE_LICENSE) + MODULE_LICENSE("GPL"); ++#endif + + int loop_register_transfer(struct loop_func_table *funcs) + { +- if ((unsigned)funcs->number > MAX_LO_CRYPT || xfer_funcs[funcs->number]) ++ if ((unsigned)funcs->number >= MAX_LO_CRYPT || xfer_funcs[funcs->number]) + return -EINVAL; + xfer_funcs[funcs->number] = funcs; + return 0; +@@ -983,15 +1428,15 @@ int loop_register_transfer(struct loop_f + int loop_unregister_transfer(int number) + { + struct loop_device *lo; ++ int x, type; + + if ((unsigned)number >= MAX_LO_CRYPT) + return -EINVAL; +- for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) { +- int type = lo->lo_encrypt_type; ++ for (x = 0; x < max_loop; x++) { ++ lo = (struct loop_device *)(&loop_dev[x]); ++ type = lo->lo_encrypt_type; + if (type == number) { +- xfer_funcs[type]->release(lo); +- lo->transfer = NULL; +- lo->lo_encrypt_type = 0; ++ loop_release_xfer(lo); + } + } + xfer_funcs[number] = NULL; +@@ -1017,10 +1462,9 @@ int __init loop_init(void) + return -EIO; + } + +- +- loop_dev = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL); ++ loop_dev = kmalloc(max_loop * sizeof(LoDevExt), GFP_KERNEL); + if (!loop_dev) +- return -ENOMEM; ++ goto out_dev; + + loop_sizes = kmalloc(max_loop * sizeof(int), 
GFP_KERNEL); + if (!loop_sizes) +@@ -1030,25 +1474,40 @@ int __init loop_init(void) + if (!loop_blksizes) + goto out_blksizes; + ++ loop_hardsizes = kmalloc(max_loop * sizeof(int), GFP_KERNEL); ++ if (!loop_hardsizes) ++ goto out_hardsizes; ++ + blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), loop_make_request); + + for (i = 0; i < max_loop; i++) { +- struct loop_device *lo = &loop_dev[i]; +- memset(lo, 0, sizeof(struct loop_device)); ++ struct loop_device *lo = (struct loop_device *)(&loop_dev[i]); ++ memset(lo, 0, sizeof(LoDevExt)); + init_MUTEX(&lo->lo_ctl_mutex); + init_MUTEX_LOCKED(&lo->lo_sem); +- init_MUTEX_LOCKED(&lo->lo_bh_mutex); + lo->lo_number = i; + spin_lock_init(&lo->lo_lock); + } + + memset(loop_sizes, 0, max_loop * sizeof(int)); + memset(loop_blksizes, 0, max_loop * sizeof(int)); ++ memset(loop_hardsizes, 0, max_loop * sizeof(int)); + blk_size[MAJOR_NR] = loop_sizes; + blksize_size[MAJOR_NR] = loop_blksizes; ++ hardsect_size[MAJOR_NR] = loop_hardsizes; + for (i = 0; i < max_loop; i++) + register_disk(NULL, MKDEV(MAJOR_NR, i), 1, &lo_fops, 0); + ++ { extern int init_module_aes(void); init_module_aes(); } ++ for (i = 0; i < (sizeof(lo_prealloc) / sizeof(int)); i += 2) { ++ if (!lo_prealloc[i]) ++ continue; ++ if (lo_prealloc[i] < LO_PREALLOC_MIN) ++ lo_prealloc[i] = LO_PREALLOC_MIN; ++ if (lo_prealloc[i] > LO_PREALLOC_MAX) ++ lo_prealloc[i] = LO_PREALLOC_MAX; ++ } ++ + devfs_handle = devfs_mk_dir(NULL, "loop", NULL); + devfs_register_series(devfs_handle, "%u", max_loop, DEVFS_FL_DEFAULT, + MAJOR_NR, 0, +@@ -1058,10 +1517,13 @@ int __init loop_init(void) + printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop); + return 0; + ++out_hardsizes: ++ kfree(loop_blksizes); + out_blksizes: + kfree(loop_sizes); + out_sizes: + kfree(loop_dev); ++out_dev: + if (devfs_unregister_blkdev(MAJOR_NR, "loop")) + printk(KERN_WARNING "loop: cannot unregister blkdev\n"); + printk(KERN_ERR "loop: ran out of memory\n"); +@@ -1070,12 +1532,18 @@ out_sizes: + + void loop_exit(void) + { ++ { extern void cleanup_module_aes(void); cleanup_module_aes(); } + devfs_unregister(devfs_handle); + if (devfs_unregister_blkdev(MAJOR_NR, "loop")) + printk(KERN_WARNING "loop: cannot unregister blkdev\n"); ++ ++ blk_size[MAJOR_NR] = 0; ++ blksize_size[MAJOR_NR] = 0; ++ hardsect_size[MAJOR_NR] = 0; + kfree(loop_dev); + kfree(loop_sizes); + kfree(loop_blksizes); ++ kfree(loop_hardsizes); + } + + module_init(loop_init); +@@ -1090,3 +1558,10 @@ static int __init max_loop_setup(char *s + + __setup("max_loop=", max_loop_setup); + #endif ++ ++extern void loop_compute_sector_iv(int, u_int32_t *); ++EXPORT_SYMBOL(loop_compute_sector_iv); ++extern void loop_compute_md5_iv(int, u_int32_t *, u_int32_t *); ++EXPORT_SYMBOL(loop_compute_md5_iv); ++extern void md5_transform_CPUbyteorder(u_int32_t *, u_int32_t const *); ++EXPORT_SYMBOL_NOVERS(md5_transform_CPUbyteorder); diff --git a/package/blindcoder/loop-aes/linux26_cryptoloop.diff b/package/blindcoder/loop-aes/linux26_cryptoloop.diff new file mode 100644 index 000000000..ed1942fac --- /dev/null +++ b/package/blindcoder/loop-aes/linux26_cryptoloop.diff @@ -0,0 +1,1943 @@ +--- linux-2.6.4/drivers/block/loop.c 2004-03-11 03:55:29.000000000 +0100 ++++ linux-2.6.4/drivers/block/loop.c 2004-02-08 16:51:25.000000000 +0100 +@@ -2,7 +2,7 @@ + * linux/drivers/block/loop.c + * + * Written by Theodore Ts'o, 3/29/93 +- * ++ * + * Copyright 1993 by Theodore Ts'o. Redistribution of this file is + * permitted under the GNU General Public License. 
+ * +@@ -21,12 +21,12 @@ + * Loadable modules and other fixes by AK, 1998 + * + * Make real block number available to downstream transfer functions, enables +- * CBC (and relatives) mode encryption requiring unique IVs per data block. ++ * CBC (and relatives) mode encryption requiring unique IVs per data block. + * Reed H. Petty, rhp@draper.net + * + * Maximum number of loop devices now dynamic via max_loop module parameter. + * Russell Kroll 19990701 +- * ++ * + * Maximum number of loop devices when compiled-in now selectable by passing + * max_loop=<1-255> to the kernel on boot. + * Erik I. Bolsų, , Oct 31, 1999 +@@ -39,18 +39,43 @@ + * Support up to 256 loop devices + * Heinz Mauelshagen , Feb 2002 + * +- * Still To Fix: +- * - Advisory locking is ignored here. +- * - Should use an own CAP_* category instead of CAP_SYS_ADMIN ++ * IV is now passed as (512 byte) sector number. ++ * Jari Ruusu, May 18 2001 + * +- */ ++ * External encryption module locking bug fixed. ++ * Ingo Rohloff , June 21 2001 ++ * ++ * Make device backed loop work with swap (pre-allocated buffers + queue rewrite). ++ * Jari Ruusu, September 2 2001 ++ * ++ * Ported 'pre-allocated buffers + queue rewrite' to BIO for 2.5 kernels ++ * Ben Slusky , March 1 2002 ++ * Jari Ruusu, March 27 2002 ++ * ++ * File backed code now uses file->f_op->read/write. Based on Andrew Morton's idea. ++ * Jari Ruusu, May 23 2002 ++ * ++ * Exported hard sector size correctly, fixed file-backed-loop-on-tmpfs bug, ++ * plus many more enhancements and optimizations. ++ * Adam J. Richter , Aug 2002 ++ * ++ * Added support for removing offset from IV computations. ++ * Jari Ruusu, September 21 2003 ++ * ++ * ++ * Still To Fix: ++ * - Advisory locking is ignored here. ++ * - Should use an own CAP_* category instead of CAP_SYS_ADMIN ++ */ + ++#include + #include + #include + + #include + #include + #include ++#include + #include + #include + #include +@@ -69,41 +94,34 @@ + + #include + ++#if !defined(LO_FLAGS_DO_BMAP) ++# define LO_FLAGS_DO_BMAP 0x80000 ++#endif ++#if !defined(LO_FLAGS_READ_ONLY) ++# define LO_FLAGS_READ_ONLY 0x40000 ++#endif ++ + static int max_loop = 8; +-static struct loop_device *loop_dev; + static struct gendisk **disks; + + /* + * Transfer functions + */ +-static int transfer_none(struct loop_device *lo, int cmd, +- struct page *raw_page, unsigned raw_off, +- struct page *loop_page, unsigned loop_off, +- int size, sector_t real_block) ++static int transfer_none(struct loop_device *lo, int cmd, char *raw_buf, ++ char *loop_buf, int size, sector_t real_block) + { +- char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off; +- char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off; +- +- if (cmd == READ) +- memcpy(loop_buf, raw_buf, size); +- else +- memcpy(raw_buf, loop_buf, size); ++ /* this code is only called from file backed loop */ ++ /* and that code expects this function to be no-op */ + +- kunmap_atomic(raw_buf, KM_USER0); +- kunmap_atomic(loop_buf, KM_USER1); + cond_resched(); + return 0; + } + +-static int transfer_xor(struct loop_device *lo, int cmd, +- struct page *raw_page, unsigned raw_off, +- struct page *loop_page, unsigned loop_off, +- int size, sector_t real_block) +-{ +- char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off; +- char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off; +- char *in, *out, *key; +- int i, keysize; ++static int transfer_xor(struct loop_device *lo, int cmd, char *raw_buf, ++ char *loop_buf, int size, sector_t real_block) ++{ ++ char *in, *out, *key; ++ int i, 
keysize; + + if (cmd == READ) { + in = raw_buf; +@@ -117,324 +135,611 @@ static int transfer_xor(struct loop_devi + keysize = lo->lo_encrypt_key_size; + for (i = 0; i < size; i++) + *out++ = *in++ ^ key[(i & 511) % keysize]; +- +- kunmap_atomic(raw_buf, KM_USER0); +- kunmap_atomic(loop_buf, KM_USER1); + cond_resched(); + return 0; + } + +-static int xor_init(struct loop_device *lo, const struct loop_info64 *info) ++static int xor_init(struct loop_device *lo, struct loop_info64 *info) + { + if (info->lo_encrypt_key_size <= 0) + return -EINVAL; + return 0; + } + +-static struct loop_func_table none_funcs = { ++static struct loop_func_table none_funcs = { + .number = LO_CRYPT_NONE, +- .transfer = transfer_none, +-}; ++ .transfer = (void *)transfer_none, ++}; + +-static struct loop_func_table xor_funcs = { ++static struct loop_func_table xor_funcs = { + .number = LO_CRYPT_XOR, +- .transfer = transfer_xor, +- .init = xor_init +-}; ++ .transfer = (void *)transfer_xor, ++ .init = (void *)xor_init, ++}; + +-/* xfer_funcs[0] is special - its release function is never called */ ++/* xfer_funcs[0] is special - its release function is never called */ + static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = { + &none_funcs, +- &xor_funcs ++ &xor_funcs, + }; + +-static int +-figure_loop_size(struct loop_device *lo) +-{ +- loff_t size, offset, loopsize; +- sector_t x; ++/* ++ * First number of 'lo_prealloc' is the default number of RAM pages ++ * to pre-allocate for each device backed loop. Every (configured) ++ * device backed loop pre-allocates this amount of RAM pages unless ++ * later 'lo_prealloc' numbers provide an override. 'lo_prealloc' ++ * overrides are defined in pairs: loop_index,number_of_pages ++ */ ++static int lo_prealloc[9] = { 125, -1, 0, -1, 0, -1, 0, -1, 0 }; ++#define LO_PREALLOC_MIN 4 /* minimum user defined pre-allocated RAM pages */ ++#define LO_PREALLOC_MAX 512 /* maximum user defined pre-allocated RAM pages */ + +- /* Compute loopsize in bytes */ +- size = i_size_read(lo->lo_backing_file->f_mapping->host); +- offset = lo->lo_offset; +- loopsize = size - offset; +- if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) +- loopsize = lo->lo_sizelimit; ++MODULE_PARM(lo_prealloc, "1-9i"); ++MODULE_PARM_DESC(lo_prealloc, "Number of pre-allocated pages [,index,pages]..."); + +- /* +- * Unfortunately, if we want to do I/O on the device, +- * the number of 512-byte sectors has to fit into a sector_t. +- */ +- size = loopsize >> 9; +- x = (sector_t)size; ++/* ++ * This is loop helper thread nice value in range ++ * from 0 (low priority) to -20 (high priority). ++ */ ++static int lo_nice = -1; + +- if ((loff_t)x != size) +- return -EFBIG; ++MODULE_PARM(lo_nice, "1i"); ++MODULE_PARM_DESC(lo_nice, "Loop thread scheduler nice (0 ... 
-20)"); + +- set_capacity(disks[lo->lo_number], x); +- return 0; +-} ++struct loop_bio_extension { ++ struct bio *bioext_merge; ++ struct loop_device *bioext_loop; ++ sector_t bioext_iv; ++ int bioext_index; ++ int bioext_size; ++}; ++ ++typedef struct { ++ struct loop_device lo_orig; ++ struct bio *lo_bio_que0; ++ struct bio *lo_bio_que1; ++ struct bio *lo_bio_que2; ++ struct bio *lo_bio_free0; ++ struct bio *lo_bio_free1; ++ atomic_t lo_bio_barr; ++ int lo_bio_flsh; ++ int lo_bio_need; ++ wait_queue_head_t lo_bio_wait; ++ sector_t lo_offs_sec; ++ sector_t lo_iv_remove; ++} LoDevExt; ++static struct loop_device **loop_dev_ptr_arr; ++ ++#define LDE_lo_bio_que0 (((LoDevExt *)lo)->lo_bio_que0) ++#define LDE_lo_bio_que1 (((LoDevExt *)lo)->lo_bio_que1) ++#define LDE_lo_bio_que2 (((LoDevExt *)lo)->lo_bio_que2) ++#define LDE_lo_bio_free0 (((LoDevExt *)lo)->lo_bio_free0) ++#define LDE_lo_bio_free1 (((LoDevExt *)lo)->lo_bio_free1) ++#define LDE_lo_bio_barr (((LoDevExt *)lo)->lo_bio_barr) ++#define LDE_lo_bio_flsh (((LoDevExt *)lo)->lo_bio_flsh) ++#define LDE_lo_bio_need (((LoDevExt *)lo)->lo_bio_need) ++#define LDE_lo_bio_wait (((LoDevExt *)lo)->lo_bio_wait) ++#define LDE_lo_offs_sec (((LoDevExt *)lo)->lo_offs_sec) ++#define LDE_lo_iv_remove (((LoDevExt *)lo)->lo_iv_remove) + +-static inline int +-lo_do_transfer(struct loop_device *lo, int cmd, +- struct page *rpage, unsigned roffs, +- struct page *lpage, unsigned loffs, +- int size, sector_t rblock) ++static void loop_prealloc_cleanup(struct loop_device *lo) + { +- if (!lo->transfer) +- return 0; ++ struct bio *bio; + +- return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); ++ while ((bio = LDE_lo_bio_free0)) { ++ LDE_lo_bio_free0 = bio->bi_next; ++ __free_page(bio->bi_io_vec[0].bv_page); ++ kfree(bio->bi_private); ++ bio->bi_next = NULL; ++ bio_put(bio); ++ } ++ while ((bio = LDE_lo_bio_free1)) { ++ LDE_lo_bio_free1 = bio->bi_next; ++ /* bi_flags was used for other purpose */ ++ bio->bi_flags = 0; ++ /* bi_cnt was used for other purpose */ ++ atomic_set(&bio->bi_cnt, 1); ++ bio->bi_next = NULL; ++ bio_put(bio); ++ } + } + +-static int +-do_lo_send(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos) ++static int loop_prealloc_init(struct loop_device *lo, int y) + { +- struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */ +- struct address_space *mapping = file->f_mapping; +- struct address_space_operations *aops = mapping->a_ops; +- struct page *page; +- pgoff_t index; +- unsigned size, offset, bv_offs; +- int len; +- int ret = 0; ++ struct bio *bio; ++ int x; + +- down(&mapping->host->i_sem); +- index = pos >> PAGE_CACHE_SHIFT; +- offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1); +- bv_offs = bvec->bv_offset; +- len = bvec->bv_len; +- while (len > 0) { +- sector_t IV; +- int transfer_result; +- +- IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9); +- +- size = PAGE_CACHE_SIZE - offset; +- if (size > len) +- size = len; +- +- page = grab_cache_page(mapping, index); +- if (!page) +- goto fail; +- if (aops->prepare_write(file, page, offset, offset+size)) +- goto unlock; +- transfer_result = lo_do_transfer(lo, WRITE, page, offset, +- bvec->bv_page, bv_offs, +- size, IV); +- if (transfer_result) { +- char *kaddr; +- +- /* +- * The transfer failed, but we still write the data to +- * keep prepare/commit calls balanced. 
+- */ +- printk(KERN_ERR "loop: transfer error block %llu\n", +- (unsigned long long)index); +- kaddr = kmap_atomic(page, KM_USER0); +- memset(kaddr + offset, 0, size); +- kunmap_atomic(kaddr, KM_USER0); ++ if(!y) { ++ y = lo_prealloc[0]; ++ for (x = 1; x < (sizeof(lo_prealloc) / sizeof(int)); x += 2) { ++ if (lo_prealloc[x + 1] && (lo->lo_number == lo_prealloc[x])) { ++ y = lo_prealloc[x + 1]; ++ break; ++ } + } +- flush_dcache_page(page); +- if (aops->commit_write(file, page, offset, offset+size)) +- goto unlock; +- if (transfer_result) +- goto unlock; +- bv_offs += size; +- len -= size; +- offset = 0; +- index++; +- pos += size; +- unlock_page(page); +- page_cache_release(page); + } +- up(&mapping->host->i_sem); +-out: +- return ret; ++ LDE_lo_bio_flsh = (y * 3) / 4; + +-unlock: +- unlock_page(page); +- page_cache_release(page); +-fail: +- up(&mapping->host->i_sem); +- ret = -1; +- goto out; ++ for (x = 0; x < y; x++) { ++ bio = bio_alloc(GFP_KERNEL, 1); ++ if (!bio) { ++ fail1: ++ loop_prealloc_cleanup(lo); ++ return 1; ++ } ++ bio->bi_io_vec[0].bv_page = alloc_page(GFP_KERNEL); ++ if (!bio->bi_io_vec[0].bv_page) { ++ fail2: ++ bio->bi_next = NULL; ++ bio_put(bio); ++ goto fail1; ++ } ++ bio->bi_vcnt = 1; ++ bio->bi_private = kmalloc(sizeof(struct loop_bio_extension), GFP_KERNEL); ++ if (!bio->bi_private) ++ goto fail2; ++ bio->bi_next = LDE_lo_bio_free0; ++ LDE_lo_bio_free0 = bio; ++ ++ bio = bio_alloc(GFP_KERNEL, 1); ++ if (!bio) ++ goto fail1; ++ bio->bi_vcnt = 1; ++ bio->bi_next = LDE_lo_bio_free1; ++ LDE_lo_bio_free1 = bio; ++ } ++ return 0; + } + +-static int +-lo_send(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) ++static void loop_add_queue_last(struct loop_device *lo, struct bio *bio, struct bio **q) + { +- struct bio_vec *bvec; +- int i, ret = 0; ++ unsigned long flags; + +- bio_for_each_segment(bvec, bio, i) { +- ret = do_lo_send(lo, bvec, bsize, pos); +- if (ret < 0) +- break; +- pos += bvec->bv_len; ++ spin_lock_irqsave(&lo->lo_lock, flags); ++ if (*q) { ++ bio->bi_next = (*q)->bi_next; ++ (*q)->bi_next = bio; ++ } else { ++ bio->bi_next = bio; + } +- return ret; +-} ++ *q = bio; ++ spin_unlock_irqrestore(&lo->lo_lock, flags); + +-struct lo_read_data { +- struct loop_device *lo; +- struct page *page; +- unsigned offset; +- int bsize; +-}; ++ if (waitqueue_active(&LDE_lo_bio_wait)) ++ wake_up_interruptible(&LDE_lo_bio_wait); ++} + +-static int +-lo_read_actor(read_descriptor_t *desc, struct page *page, +- unsigned long offset, unsigned long size) ++static void loop_add_queue_first(struct loop_device *lo, struct bio *bio, struct bio **q) + { +- unsigned long count = desc->count; +- struct lo_read_data *p = (struct lo_read_data*)desc->buf; +- struct loop_device *lo = p->lo; +- sector_t IV; +- +- IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9); +- +- if (size > count) +- size = count; +- +- if (lo_do_transfer(lo, READ, page, offset, p->page, p->offset, size, IV)) { +- size = 0; +- printk(KERN_ERR "loop: transfer error block %ld\n", +- page->index); +- desc->error = -EINVAL; ++ spin_lock_irq(&lo->lo_lock); ++ if (*q) { ++ bio->bi_next = (*q)->bi_next; ++ (*q)->bi_next = bio; ++ } else { ++ bio->bi_next = bio; ++ *q = bio; + } +- +- desc->count = count - size; +- desc->written += size; +- p->offset += size; +- return size; ++ spin_unlock_irq(&lo->lo_lock); + } + +-static int +-do_lo_receive(struct loop_device *lo, +- struct bio_vec *bvec, int bsize, loff_t pos) ++static struct bio *loop_get_bio(struct loop_device *lo, int *list_nr) + { +- 
struct lo_read_data cookie; +- struct file *file; +- int retval; +- +- cookie.lo = lo; +- cookie.page = bvec->bv_page; +- cookie.offset = bvec->bv_offset; +- cookie.bsize = bsize; +- file = lo->lo_backing_file; +- retval = file->f_op->sendfile(file, &pos, bvec->bv_len, +- lo_read_actor, &cookie); +- return (retval < 0)? retval: 0; ++ struct bio *bio = NULL, *last; ++ ++ spin_lock_irq(&lo->lo_lock); ++ if ((last = LDE_lo_bio_que0)) { ++ bio = last->bi_next; ++ if (bio == last) ++ LDE_lo_bio_que0 = NULL; ++ else ++ last->bi_next = bio->bi_next; ++ bio->bi_next = NULL; ++ *list_nr = 0; ++ } else if ((last = LDE_lo_bio_que1)) { ++ bio = last->bi_next; ++ if (bio == last) ++ LDE_lo_bio_que1 = NULL; ++ else ++ last->bi_next = bio->bi_next; ++ bio->bi_next = NULL; ++ *list_nr = 1; ++ } else if ((last = LDE_lo_bio_que2)) { ++ bio = last->bi_next; ++ if (bio == last) ++ LDE_lo_bio_que2 = NULL; ++ else ++ last->bi_next = bio->bi_next; ++ bio->bi_next = NULL; ++ *list_nr = 2; ++ } ++ spin_unlock_irq(&lo->lo_lock); ++ return bio; + } + +-static int +-lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) ++static void loop_put_buffer(struct loop_device *lo, struct bio *b, int flist) + { +- struct bio_vec *bvec; +- int i, ret = 0; ++ unsigned long flags; ++ int wk; + +- bio_for_each_segment(bvec, bio, i) { +- ret = do_lo_receive(lo, bvec, bsize, pos); +- if (ret < 0) +- break; +- pos += bvec->bv_len; ++ spin_lock_irqsave(&lo->lo_lock, flags); ++ if(!flist) { ++ b->bi_next = LDE_lo_bio_free0; ++ LDE_lo_bio_free0 = b; ++ wk = LDE_lo_bio_need & 1; ++ } else { ++ b->bi_next = LDE_lo_bio_free1; ++ LDE_lo_bio_free1 = b; ++ wk = LDE_lo_bio_need & 2; + } +- return ret; ++ spin_unlock_irqrestore(&lo->lo_lock, flags); ++ ++ if (wk && waitqueue_active(&LDE_lo_bio_wait)) ++ wake_up_interruptible(&LDE_lo_bio_wait); + } + +-static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) ++static int loop_end_io_transfer(struct bio *bio, unsigned int bytes_done, int err) + { +- loff_t pos; +- int ret; ++ struct loop_bio_extension *extension = bio->bi_private; ++ struct bio *merge = extension->bioext_merge; ++ struct loop_device *lo = extension->bioext_loop; ++ struct bio *origbio = merge->bi_private; + +- pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; +- if (bio_rw(bio) == WRITE) +- ret = lo_send(lo, bio, lo->lo_blocksize, pos); +- else +- ret = lo_receive(lo, bio, lo->lo_blocksize, pos); +- return ret; ++ if (err) ++ clear_bit(0, &merge->bi_flags); ++ if (bio->bi_size) ++ return 1; ++ if (bio_rw(bio) == WRITE) { ++ loop_put_buffer(lo, bio, 0); ++ if (!atomic_dec_and_test(&merge->bi_cnt)) ++ return 0; ++ if (bio_barrier(origbio)) ++ atomic_dec(&LDE_lo_bio_barr); ++ origbio->bi_next = NULL; ++ bio_endio(origbio, origbio->bi_size, test_bit(0, &merge->bi_flags) ? 
0 : -EIO); ++ loop_put_buffer(lo, merge, 1); ++ if (atomic_dec_and_test(&lo->lo_pending)) ++ wake_up_interruptible(&LDE_lo_bio_wait); ++ } else { ++ loop_add_queue_last(lo, bio, &LDE_lo_bio_que0); ++ } ++ return 0; + } + +-/* +- * Add bio to back of pending list +- */ +-static void loop_add_bio(struct loop_device *lo, struct bio *bio) ++static struct bio *loop_get_buffer(struct loop_device *lo, ++ struct bio *orig_bio, int from_thread, struct bio **merge_ptr) + { ++ struct bio *bio = NULL, *merge = *merge_ptr; ++ struct loop_bio_extension *extension; + unsigned long flags; ++ int len; ++ ++ /* ++ * If called from make_request and if there are unprocessed ++ * barrier requests, fail allocation so that request is ++ * inserted to end of no-merge-allocated list. This guarantees ++ * FIFO processing order of requests. ++ */ ++ if (!from_thread && atomic_read(&LDE_lo_bio_barr)) ++ return NULL; + + spin_lock_irqsave(&lo->lo_lock, flags); +- if (lo->lo_biotail) { +- lo->lo_biotail->bi_next = bio; +- lo->lo_biotail = bio; +- } else +- lo->lo_bio = lo->lo_biotail = bio; ++ if (!merge) { ++ merge = LDE_lo_bio_free1; ++ if (merge) { ++ LDE_lo_bio_free1 = merge->bi_next; ++ if (from_thread) ++ LDE_lo_bio_need = 0; ++ } else { ++ if (from_thread) ++ LDE_lo_bio_need = 2; ++ } ++ } ++ ++ /* ++ * If there are unprocessed barrier requests and a merge-bio was just ++ * allocated, do not allocate a buffer-bio yet. This causes request ++ * to be moved from head of no-merge-allocated list to end of ++ * merge-allocated list. This guarantees FIFO processing order ++ * of requests. ++ */ ++ if (merge && (*merge_ptr || !atomic_read(&LDE_lo_bio_barr))) { ++ bio = LDE_lo_bio_free0; ++ if (bio) { ++ LDE_lo_bio_free0 = bio->bi_next; ++ if (from_thread) ++ LDE_lo_bio_need = 0; ++ } else { ++ if (from_thread) ++ LDE_lo_bio_need = 1; ++ } ++ } + spin_unlock_irqrestore(&lo->lo_lock, flags); + +- up(&lo->lo_bh_mutex); ++ if (!(*merge_ptr) && merge) { ++ /* ++ * initialize "merge-bio" which is used as ++ * rendezvous point among multiple vecs ++ */ ++ *merge_ptr = merge; ++ merge->bi_sector = orig_bio->bi_sector + LDE_lo_offs_sec; ++ set_bit(0, &merge->bi_flags); ++ merge->bi_idx = orig_bio->bi_idx; ++ atomic_set(&merge->bi_cnt, orig_bio->bi_vcnt - orig_bio->bi_idx); ++ merge->bi_private = orig_bio; ++ } ++ ++ if (!bio) ++ return NULL; ++ ++ /* ++ * initialize one page "buffer-bio" ++ */ ++ bio->bi_sector = merge->bi_sector; ++ bio->bi_next = NULL; ++ bio->bi_bdev = lo->lo_device; ++ bio->bi_flags = 0; ++ bio->bi_rw = orig_bio->bi_rw & ~(1 << BIO_RW_BARRIER); ++ if (bio_barrier(orig_bio) && ((merge->bi_idx == orig_bio->bi_idx) || (merge->bi_idx == (orig_bio->bi_vcnt - 1)))) ++ bio->bi_rw |= (1 << BIO_RW_BARRIER); ++ bio->bi_vcnt = 1; ++ bio->bi_idx = 0; ++ bio->bi_phys_segments = 0; ++ bio->bi_hw_segments = 0; ++ bio->bi_size = len = orig_bio->bi_io_vec[merge->bi_idx].bv_len; ++ /* bio->bi_max_vecs not touched */ ++ bio->bi_io_vec[0].bv_len = len; ++ bio->bi_io_vec[0].bv_offset = 0; ++ bio->bi_end_io = loop_end_io_transfer; ++ /* bio->bi_cnt not touched */ ++ /* bio->bi_private not touched */ ++ /* bio->bi_destructor not touched */ ++ ++ /* ++ * initialize "buffer-bio" extension. 
This extension is ++ * permanently glued to above "buffer-bio" via bio->bi_private ++ */ ++ extension = bio->bi_private; ++ extension->bioext_merge = merge; ++ extension->bioext_loop = lo; ++ extension->bioext_iv = merge->bi_sector - LDE_lo_iv_remove; ++ extension->bioext_index = merge->bi_idx; ++ extension->bioext_size = len; ++ ++ /* ++ * prepare "merge-bio" for next vec ++ */ ++ merge->bi_sector += len >> 9; ++ merge->bi_idx++; ++ ++ return bio; + } + +-/* +- * Grab first pending buffer +- */ +-static struct bio *loop_get_bio(struct loop_device *lo) ++static int figure_loop_size(struct loop_device *lo, struct block_device *bdev) + { +- struct bio *bio; ++ loff_t size, offs; ++ sector_t x; ++ int err = 0; + +- spin_lock_irq(&lo->lo_lock); +- if ((bio = lo->lo_bio)) { +- if (bio == lo->lo_biotail) +- lo->lo_biotail = NULL; +- lo->lo_bio = bio->bi_next; +- bio->bi_next = NULL; ++ size = i_size_read(lo->lo_backing_file->f_dentry->d_inode->i_mapping->host); ++ offs = lo->lo_offset; ++ if (!(lo->lo_flags & LO_FLAGS_DO_BMAP)) ++ offs &= ~((loff_t)511); ++ if ((offs > 0) && (offs < size)) { ++ size -= offs; ++ } else { ++ if (offs) ++ err = -EINVAL; ++ lo->lo_offset = 0; ++ LDE_lo_offs_sec = LDE_lo_iv_remove = 0; + } +- spin_unlock_irq(&lo->lo_lock); ++ if ((lo->lo_sizelimit > 0) && (lo->lo_sizelimit <= size)) { ++ size = lo->lo_sizelimit; ++ } else { ++ if (lo->lo_sizelimit) ++ err = -EINVAL; ++ lo->lo_sizelimit = 0; ++ } ++ size >>= 9; + +- return bio; ++ /* ++ * Unfortunately, if we want to do I/O on the device, ++ * the number of 512-byte sectors has to fit into a sector_t. ++ */ ++ x = (sector_t)size; ++ if ((loff_t)x != size) { ++ err = -EFBIG; ++ size = 0; ++ } ++ ++ bdev->bd_inode->i_size = size << 9; /* byte units */ ++ set_capacity(disks[lo->lo_number], size); /* 512 byte units */ ++ return err; ++} ++ ++static inline int lo_do_transfer(struct loop_device *lo, int cmd, char *rbuf, ++ char *lbuf, int size, sector_t rblock) ++{ ++ if (!lo->transfer) ++ return 0; ++ ++ /* this ugly cast is needed to work around (possible) kmap damage in function prototype */ ++ /* should be: return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock); */ ++ return ((int (*)(struct loop_device *, int, char *, char *, int, sector_t))lo->transfer)(lo, cmd, rbuf, lbuf, size, rblock); ++} ++ ++static int loop_file_io(struct file *file, char *buf, int size, loff_t *ppos, int w) ++{ ++ mm_segment_t fs; ++ int x, y, z; ++ ++ y = 0; ++ do { ++ z = size - y; ++ fs = get_fs(); ++ set_fs(get_ds()); ++ if (w) { ++ x = file->f_op->write(file, buf + y, z, ppos); ++ set_fs(fs); ++ } else { ++ x = file->f_op->read(file, buf + y, z, ppos); ++ set_fs(fs); ++ if (!x) ++ return 1; ++ } ++ if (x < 0) { ++ if ((x == -EAGAIN) || (x == -ENOMEM) || (x == -ERESTART) || (x == -EINTR)) { ++ blk_run_queues(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(HZ / 2); ++ continue; ++ } ++ return 1; ++ } ++ y += x; ++ } while (y < size); ++ return 0; ++} ++ ++static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) ++{ ++ loff_t pos; ++ struct file *file = lo->lo_backing_file; ++ char *data, *buf; ++ unsigned int size, len; ++ sector_t IV; ++ struct page *pg; ++ ++ pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; ++ buf = page_address(LDE_lo_bio_free0->bi_io_vec[0].bv_page); ++ IV = bio->bi_sector; ++ if (!LDE_lo_iv_remove) ++ IV += LDE_lo_offs_sec; ++ do { ++ pg = bio->bi_io_vec[bio->bi_idx].bv_page; ++ len = bio->bi_io_vec[bio->bi_idx].bv_len; ++ data = kmap(pg) + bio->bi_io_vec[bio->bi_idx].bv_offset; ++ 
while (len > 0) { ++ if (!lo->lo_encryption) { ++ /* this code relies that NONE transfer is a no-op */ ++ buf = data; ++ } ++ size = PAGE_SIZE; ++ if (size > len) ++ size = len; ++ if (bio_rw(bio) == WRITE) { ++ if (lo_do_transfer(lo, WRITE, buf, data, size, IV)) { ++ printk(KERN_ERR "loop%d: write transfer error, sector %llu\n", lo->lo_number, (unsigned long long)IV); ++ goto kunmap_and_out; ++ } ++ if (loop_file_io(file, buf, size, &pos, 1)) { ++ printk(KERN_ERR "loop%d: write i/o error, sector %llu\n", lo->lo_number, (unsigned long long)IV); ++ goto kunmap_and_out; ++ } ++ } else { ++ if (loop_file_io(file, buf, size, &pos, 0)) { ++ printk(KERN_ERR "loop%d: read i/o error, sector %llu\n", lo->lo_number, (unsigned long long)IV); ++ goto kunmap_and_out; ++ } ++ if (lo_do_transfer(lo, READ, buf, data, size, IV)) { ++ printk(KERN_ERR "loop%d: read transfer error, sector %llu\n", lo->lo_number, (unsigned long long)IV); ++ goto kunmap_and_out; ++ } ++ } ++ data += size; ++ len -= size; ++ IV += size >> 9; ++ } ++ kunmap(pg); ++ } while (++bio->bi_idx < bio->bi_vcnt); ++ return 0; ++ ++kunmap_and_out: ++ kunmap(pg); ++ return -EIO; ++} ++ ++static int loop_make_request_err(request_queue_t *q, struct bio *old_bio) ++{ ++ old_bio->bi_next = NULL; ++ bio_io_error(old_bio, old_bio->bi_size); ++ return 0; + } + +-static int loop_make_request(request_queue_t *q, struct bio *old_bio) ++static int loop_make_request_real(request_queue_t *q, struct bio *old_bio) + { ++ struct bio *new_bio, *merge; + struct loop_device *lo = q->queuedata; +- int rw = bio_rw(old_bio); ++ struct loop_bio_extension *extension; ++ int rw = bio_rw(old_bio), y; ++ char *md; + ++ set_current_state(TASK_RUNNING); + if (!lo) + goto out; +- +- spin_lock_irq(&lo->lo_lock); +- if (lo->lo_state != Lo_bound) +- goto inactive; ++ if ((rw == WRITE) && (lo->lo_flags & LO_FLAGS_READ_ONLY)) ++ goto out; + atomic_inc(&lo->lo_pending); +- spin_unlock_irq(&lo->lo_lock); + ++ /* ++ * file backed, queue for loop_thread to handle ++ */ ++ if (lo->lo_flags & LO_FLAGS_DO_BMAP) { ++ loop_add_queue_last(lo, old_bio, &LDE_lo_bio_que0); ++ return 0; ++ } ++ ++ /* ++ * device backed, just remap bdev & sector for NONE transfer ++ */ ++ if (!lo->lo_encryption) { ++ old_bio->bi_sector += LDE_lo_offs_sec; ++ old_bio->bi_bdev = lo->lo_device; ++ generic_make_request(old_bio); ++ if (atomic_dec_and_test(&lo->lo_pending)) ++ wake_up_interruptible(&LDE_lo_bio_wait); ++ return 0; ++ } ++ ++ /* ++ * device backed, start reads and writes now if buffer available ++ */ ++ merge = NULL; ++ if (bio_barrier(old_bio)) ++ atomic_inc(&LDE_lo_bio_barr); ++ try_next_old_bio_vec: ++ new_bio = loop_get_buffer(lo, old_bio, 0, &merge); ++ if (!new_bio) { ++ /* just queue request and let thread handle allocs later */ ++ if (merge) ++ loop_add_queue_last(lo, merge, &LDE_lo_bio_que1); ++ else ++ loop_add_queue_last(lo, old_bio, &LDE_lo_bio_que2); ++ return 0; ++ } + if (rw == WRITE) { +- if (lo->lo_flags & LO_FLAGS_READ_ONLY) +- goto err; +- } else if (rw == READA) { +- rw = READ; +- } else if (rw != READ) { +- printk(KERN_ERR "loop: unknown command (%x)\n", rw); +- goto err; ++ extension = new_bio->bi_private; ++ y = extension->bioext_index; ++ md = kmap(old_bio->bi_io_vec[y].bv_page) + old_bio->bi_io_vec[y].bv_offset; ++ if (lo_do_transfer(lo, WRITE, page_address(new_bio->bi_io_vec[0].bv_page), md, extension->bioext_size, extension->bioext_iv)) { ++ clear_bit(0, &merge->bi_flags); ++ } ++ kunmap(old_bio->bi_io_vec[y].bv_page); + } +- loop_add_bio(lo, old_bio); ++ ++ /* merge & 
old_bio may vanish during generic_make_request() */ ++ /* if last vec gets processed before function returns */ ++ y = (merge->bi_idx < old_bio->bi_vcnt) ? 1 : 0; ++ generic_make_request(new_bio); ++ ++ /* other vecs may need processing too */ ++ if (y) ++ goto try_next_old_bio_vec; + return 0; +-err: +- if (atomic_dec_and_test(&lo->lo_pending)) +- up(&lo->lo_bh_mutex); ++ + out: ++ old_bio->bi_next = NULL; + bio_io_error(old_bio, old_bio->bi_size); + return 0; +-inactive: +- spin_unlock_irq(&lo->lo_lock); +- goto out; +-} +- +-static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio) +-{ +- int ret; +- +- ret = do_bio_filebacked(lo, bio); +- bio_endio(bio, bio->bi_size, ret); + } + + /* +@@ -446,8 +751,15 @@ static inline void loop_handle_bio(struc + static int loop_thread(void *data) + { + struct loop_device *lo = data; +- struct bio *bio; ++ struct bio *bio, *xbio, *merge; ++ struct loop_bio_extension *extension; ++ int x, y, flushcnt = 0; ++ wait_queue_t waitq; ++ char *md; ++ static const struct rlimit loop_rlim_defaults[RLIM_NLIMITS] = INIT_RLIMITS; + ++ init_waitqueue_entry(&waitq, current); ++ memcpy(¤t->rlim[0], &loop_rlim_defaults[0], sizeof(current->rlim)); + daemonize("loop%d", lo->lo_number); + + /* +@@ -455,11 +767,19 @@ static int loop_thread(void *data) + * hence, it mustn't be stopped at all + * because it could be indirectly used during suspension + */ ++#if defined(PF_NOFREEZE) ++ current->flags |= PF_NOFREEZE; ++#elif defined(PF_IOTHREAD) + current->flags |= PF_IOTHREAD; ++#endif ++ current->flags |= PF_LESS_THROTTLE; + +- set_user_nice(current, -20); ++ if (lo_nice > 0) ++ lo_nice = 0; ++ if (lo_nice < -20) ++ lo_nice = -20; ++ set_user_nice(current, lo_nice); + +- lo->lo_state = Lo_bound; + atomic_inc(&lo->lo_pending); + + /* +@@ -468,23 +788,138 @@ static int loop_thread(void *data) + up(&lo->lo_sem); + + for (;;) { +- down_interruptible(&lo->lo_bh_mutex); ++ add_wait_queue(&LDE_lo_bio_wait, &waitq); ++ for (;;) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ if (!atomic_read(&lo->lo_pending)) ++ break; ++ ++ x = 0; ++ spin_lock_irq(&lo->lo_lock); ++ if (LDE_lo_bio_que0) { ++ /* don't sleep if device backed READ needs processing */ ++ /* don't sleep if file backed READ/WRITE needs processing */ ++ x = 1; ++ } else if (LDE_lo_bio_que1) { ++ /* don't sleep if a buffer-bio is available */ ++ /* don't sleep if need-buffer-bio request is not set */ ++ if (LDE_lo_bio_free0 || !(LDE_lo_bio_need & 1)) ++ x = 1; ++ } else if (LDE_lo_bio_que2) { ++ /* don't sleep if a merge-bio is available */ ++ /* don't sleep if need-merge-bio request is not set */ ++ if (LDE_lo_bio_free1 || !(LDE_lo_bio_need & 2)) ++ x = 1; ++ } ++ spin_unlock_irq(&lo->lo_lock); ++ if (x) ++ break; ++ ++ schedule(); ++ } ++ set_current_state(TASK_RUNNING); ++ remove_wait_queue(&LDE_lo_bio_wait, &waitq); ++ + /* +- * could be upped because of tear-down, not because of ++ * could be woken because of tear-down, not because of + * pending work + */ + if (!atomic_read(&lo->lo_pending)) + break; + +- bio = loop_get_bio(lo); +- if (!bio) { +- printk("loop: missing bio\n"); ++ bio = loop_get_bio(lo, &x); ++ if (!bio) + continue; ++ ++ /* ++ * x list tag usage(has-buffer,has-merge) ++ * --- --------------- --------------------------- ++ * 0 LDE_lo_bio_que0 dev-r(y,y) / file-rw ++ * 1 LDE_lo_bio_que1 dev-rw(n,y) ++ * 2 LDE_lo_bio_que2 dev-rw(n,n) ++ */ ++ if (x >= 1) { ++ /* loop_make_request_real didn't allocate a buffer, do that now */ ++ if (x == 1) { ++ merge = bio; ++ bio = merge->bi_private; ++ 
} else { ++ merge = NULL; ++ } ++ try_next_bio_vec: ++ xbio = loop_get_buffer(lo, bio, 1, &merge); ++ if (!xbio) { ++ blk_run_queues(); ++ flushcnt = 0; ++ if (merge) ++ loop_add_queue_first(lo, merge, &LDE_lo_bio_que1); ++ else ++ loop_add_queue_first(lo, bio, &LDE_lo_bio_que2); ++ /* LDE_lo_bio_need should be non-zero now, go back to sleep */ ++ continue; ++ } ++ if (bio_rw(bio) == WRITE) { ++ extension = xbio->bi_private; ++ y = extension->bioext_index; ++ md = kmap(bio->bi_io_vec[y].bv_page) + bio->bi_io_vec[y].bv_offset; ++ if (lo_do_transfer(lo, WRITE, page_address(xbio->bi_io_vec[0].bv_page), md, extension->bioext_size, extension->bioext_iv)) { ++ clear_bit(0, &merge->bi_flags); ++ } ++ kunmap(bio->bi_io_vec[y].bv_page); ++ } ++ ++ /* merge & bio may vanish during generic_make_request() */ ++ /* if last vec gets processed before function returns */ ++ y = (merge->bi_idx < bio->bi_vcnt) ? 1 : 0; ++ generic_make_request(xbio); ++ ++ /* start I/O if there are no more requests lacking buffers */ ++ x = 0; ++ spin_lock_irq(&lo->lo_lock); ++ if (!y && !LDE_lo_bio_que1 && !LDE_lo_bio_que2) ++ x = 1; ++ spin_unlock_irq(&lo->lo_lock); ++ if (x || (++flushcnt >= LDE_lo_bio_flsh)) { ++ blk_run_queues(); ++ flushcnt = 0; ++ } ++ ++ /* other vecs may need processing too */ ++ if (y) ++ goto try_next_bio_vec; ++ ++ /* request not completely processed yet */ ++ continue; ++ } ++ ++ if (lo->lo_flags & LO_FLAGS_DO_BMAP) { ++ /* request is for file backed device */ ++ y = do_bio_filebacked(lo, bio); ++ bio->bi_next = NULL; ++ bio_endio(bio, bio->bi_size, y); ++ } else { ++ /* device backed read has completed, do decrypt now */ ++ extension = bio->bi_private; ++ merge = extension->bioext_merge; ++ y = extension->bioext_index; ++ xbio = merge->bi_private; ++ md = kmap(xbio->bi_io_vec[y].bv_page) + xbio->bi_io_vec[y].bv_offset; ++ if (lo_do_transfer(lo, READ, page_address(bio->bi_io_vec[0].bv_page), md, extension->bioext_size, extension->bioext_iv)) { ++ clear_bit(0, &merge->bi_flags); ++ } ++ kunmap(xbio->bi_io_vec[y].bv_page); ++ loop_put_buffer(lo, bio, 0); ++ if (!atomic_dec_and_test(&merge->bi_cnt)) ++ continue; ++ if (bio_barrier(xbio)) ++ atomic_dec(&LDE_lo_bio_barr); ++ xbio->bi_next = NULL; ++ bio_endio(xbio, xbio->bi_size, test_bit(0, &merge->bi_flags) ? 0 : -EIO); ++ loop_put_buffer(lo, merge, 1); + } +- loop_handle_bio(lo, bio); + + /* +- * upped both for pending work and tear-down, lo_pending ++ * woken both for pending work and tear-down, lo_pending + * will hit zero then + */ + if (atomic_dec_and_test(&lo->lo_pending)) +@@ -495,125 +930,200 @@ static int loop_thread(void *data) + return 0; + } + ++static void loop_set_softblksz(struct loop_device *lo, struct block_device *bdev) ++{ ++ int bs, x; ++ ++ if (lo->lo_device) ++ bs = block_size(lo->lo_device); ++ else ++ bs = PAGE_SIZE; ++ if (lo->lo_flags & LO_FLAGS_DO_BMAP) { ++ x = (int) bdev->bd_inode->i_size; ++ if ((bs == 8192) && (x & 0x1E00)) ++ bs = 4096; ++ if ((bs == 4096) && (x & 0x0E00)) ++ bs = 2048; ++ if ((bs == 2048) && (x & 0x0600)) ++ bs = 1024; ++ if ((bs == 1024) && (x & 0x0200)) ++ bs = 512; ++ } ++ set_blocksize(bdev, bs); ++} ++ + static int loop_set_fd(struct loop_device *lo, struct file *lo_file, + struct block_device *bdev, unsigned int arg) + { + struct file *file; + struct inode *inode; + struct block_device *lo_device = NULL; +- struct address_space *mapping; +- unsigned lo_blocksize; + int lo_flags = 0; + int error; + +- /* This is safe, since we have a reference from open(). 
*/ +- __module_get(THIS_MODULE); +- +- error = -EBUSY; +- if (lo->lo_state != Lo_unbound) +- goto out; +- + error = -EBADF; + file = fget(arg); + if (!file) + goto out; + +- mapping = file->f_mapping; +- inode = mapping->host; ++ error = -EINVAL; ++ inode = file->f_dentry->d_inode; + + if (!(file->f_mode & FMODE_WRITE)) + lo_flags |= LO_FLAGS_READ_ONLY; + +- error = -EINVAL; +- if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) { +- struct address_space_operations *aops = mapping->a_ops; ++ init_MUTEX_LOCKED(&lo->lo_sem); ++ spin_lock_init(&lo->lo_lock); ++ init_waitqueue_head(&LDE_lo_bio_wait); ++ atomic_set(&lo->lo_pending, 0); ++ atomic_set(&LDE_lo_bio_barr, 0); ++ lo->lo_offset = lo->lo_sizelimit = 0; ++ LDE_lo_offs_sec = LDE_lo_iv_remove = 0; ++ lo->lo_encryption = NULL; ++ lo->lo_encrypt_key_size = 0; ++ lo->transfer = NULL; ++ lo->lo_crypt_name[0] = 0; ++ lo->lo_file_name[0] = 0; ++ lo->lo_init[1] = lo->lo_init[0] = 0; ++ lo->lo_key_owner = 0; ++ lo->ioctl = NULL; ++ lo->key_data = NULL; ++ LDE_lo_bio_que2 = LDE_lo_bio_que1 = LDE_lo_bio_que0 = NULL; ++ LDE_lo_bio_free1 = LDE_lo_bio_free0 = NULL; ++ LDE_lo_bio_flsh = LDE_lo_bio_need = 0; ++ ++ if (S_ISBLK(inode->i_mode)) { ++ lo_device = inode->i_bdev; ++ if (lo_device == bdev) { ++ error = -EBUSY; ++ goto out_putf; ++ } ++ if (loop_prealloc_init(lo, 0)) { ++ error = -ENOMEM; ++ goto out_putf; ++ } ++ if (bdev_read_only(lo_device)) ++ lo_flags |= LO_FLAGS_READ_ONLY; ++ else ++ filemap_fdatawrite(inode->i_mapping); ++ } else if (S_ISREG(inode->i_mode)) { + /* + * If we can't read - sorry. If we only can't write - well, + * it's going to be read-only. + */ +- if (!lo_file->f_op->sendfile) ++ if (!file->f_op || !file->f_op->read) + goto out_putf; + +- if (!aops->prepare_write || !aops->commit_write) ++ if (!file->f_op->write) + lo_flags |= LO_FLAGS_READ_ONLY; + +- lo_blocksize = inode->i_blksize; +- error = 0; +- } else { ++ lo_flags |= LO_FLAGS_DO_BMAP; ++ if (loop_prealloc_init(lo, 1)) { ++ error = -ENOMEM; ++ goto out_putf; ++ } ++ } else + goto out_putf; +- } ++ ++ get_file(file); + + if (!(lo_file->f_mode & FMODE_WRITE)) + lo_flags |= LO_FLAGS_READ_ONLY; + + set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); + +- lo->lo_blocksize = lo_blocksize; + lo->lo_device = lo_device; + lo->lo_flags = lo_flags; ++ if(lo_flags & LO_FLAGS_READ_ONLY) ++ lo->lo_flags |= 0x200000; /* export to user space */ + lo->lo_backing_file = file; +- lo->transfer = NULL; +- lo->ioctl = NULL; +- lo->lo_sizelimit = 0; +- if (figure_loop_size(lo)) { ++ if (figure_loop_size(lo, bdev)) { + error = -EFBIG; +- goto out_putf; ++ goto out_cleanup; + } +- lo->old_gfp_mask = mapping_gfp_mask(mapping); +- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); +- +- lo->lo_bio = lo->lo_biotail = NULL; + + /* + * set queue make_request_fn, and add limits based on lower level + * device + */ +- blk_queue_make_request(lo->lo_queue, loop_make_request); +- lo->lo_queue->queuedata = lo; ++ blk_queue_make_request(lo->lo_queue, loop_make_request_err); ++ blk_queue_bounce_limit(lo->lo_queue, BLK_BOUNCE_ANY); ++ blk_queue_max_segment_size(lo->lo_queue, MAX_SEGMENT_SIZE); + +- set_blocksize(bdev, lo_blocksize); ++ /* ++ * we remap to a block device, make sure we correctly stack limits ++ */ ++ if (S_ISBLK(inode->i_mode) && lo_device) { ++ request_queue_t *q = bdev_get_queue(lo_device); + +- kernel_thread(loop_thread, lo, CLONE_KERNEL); ++ blk_queue_max_sectors(lo->lo_queue, q->max_sectors); ++ 
blk_queue_max_phys_segments(lo->lo_queue,q->max_phys_segments); ++ blk_queue_max_hw_segments(lo->lo_queue, q->max_hw_segments); ++ blk_queue_max_segment_size(lo->lo_queue, q->max_segment_size); ++ blk_queue_segment_boundary(lo->lo_queue, q->seg_boundary_mask); ++ blk_queue_merge_bvec(lo->lo_queue, q->merge_bvec_fn); ++ blk_queue_hardsect_size(lo->lo_queue, q->hardsect_size); ++ } ++ ++ if (lo_flags & LO_FLAGS_DO_BMAP) { ++ lo->old_gfp_mask = mapping_gfp_mask(inode->i_mapping); ++ mapping_set_gfp_mask(inode->i_mapping, (lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)) | __GFP_HIGH); ++ } else { ++ lo->old_gfp_mask = -1; ++ } ++ ++ loop_set_softblksz(lo, bdev); ++ ++ error = kernel_thread(loop_thread, lo, CLONE_KERNEL); ++ if(error < 0) ++ goto out_mapping; + down(&lo->lo_sem); ++ fput(file); ++ lo->lo_queue->queuedata = lo; ++ __module_get(THIS_MODULE); + return 0; + ++ out_mapping: ++ if(lo->old_gfp_mask != -1) ++ mapping_set_gfp_mask(inode->i_mapping, lo->old_gfp_mask); ++ out_cleanup: ++ loop_prealloc_cleanup(lo); ++ fput(file); + out_putf: + fput(file); + out: +- /* This is safe: open() is still holding a reference. */ +- module_put(THIS_MODULE); + return error; + } + +-static int +-loop_release_xfer(struct loop_device *lo) ++static int loop_release_xfer(struct loop_device *lo) + { + int err = 0; +- struct loop_func_table *xfer = lo->lo_encryption; ++ struct loop_func_table *xfer = lo->lo_encryption; + + if (xfer) { ++ lo->transfer = NULL; + if (xfer->release) +- err = xfer->release(lo); +- lo->transfer = NULL; ++ err = xfer->release(lo); + lo->lo_encryption = NULL; + module_put(xfer->owner); + } + return err; + } + +-static int +-loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, +- const struct loop_info64 *i) ++static int loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, struct loop_info64 *i) + { +- int err = 0; ++ int err = 0; + + if (xfer) { + struct module *owner = xfer->owner; + +- if (!try_module_get(owner)) ++ if(!try_module_get(owner)) + return -EINVAL; +- if (xfer->init) +- err = xfer->init(lo, i); ++ if (xfer->init) { ++ /* this ugly cast is needed to work around 'const' damage in function prototype */ ++ /* should be: err = xfer->init(lo, i); */ ++ err = ((int (*)(struct loop_device *, struct loop_info64 *))xfer->init)(lo, i); ++ } + if (err) + module_put(owner); + else +@@ -627,58 +1137,51 @@ static int loop_clr_fd(struct loop_devic + struct file *filp = lo->lo_backing_file; + int gfp = lo->old_gfp_mask; + +- if (lo->lo_state != Lo_bound) +- return -ENXIO; +- +- if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */ ++ if (bdev->bd_openers != 1) /* one for this fd being open */ + return -EBUSY; +- +- if (filp == NULL) ++ if (filp==NULL) + return -EINVAL; + +- spin_lock_irq(&lo->lo_lock); +- lo->lo_state = Lo_rundown; ++ lo->lo_queue->queuedata = NULL; ++ lo->lo_queue->make_request_fn = loop_make_request_err; + if (atomic_dec_and_test(&lo->lo_pending)) +- up(&lo->lo_bh_mutex); +- spin_unlock_irq(&lo->lo_lock); +- ++ wake_up_interruptible(&LDE_lo_bio_wait); + down(&lo->lo_sem); + ++ loop_prealloc_cleanup(lo); + lo->lo_backing_file = NULL; +- + loop_release_xfer(lo); + lo->transfer = NULL; + lo->ioctl = NULL; + lo->lo_device = NULL; + lo->lo_encryption = NULL; +- lo->lo_offset = 0; +- lo->lo_sizelimit = 0; ++ lo->lo_offset = lo->lo_sizelimit = 0; ++ LDE_lo_offs_sec = LDE_lo_iv_remove = 0; + lo->lo_encrypt_key_size = 0; + lo->lo_flags = 0; ++ lo->lo_init[1] = lo->lo_init[0] = 0; ++ lo->lo_key_owner = 0; ++ lo->key_data = NULL; + 
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); + memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); + memset(lo->lo_file_name, 0, LO_NAME_SIZE); + invalidate_bdev(bdev, 0); + set_capacity(disks[lo->lo_number], 0); +- mapping_set_gfp_mask(filp->f_mapping, gfp); +- lo->lo_state = Lo_unbound; ++ if (gfp != -1) ++ mapping_set_gfp_mask(filp->f_dentry->d_inode->i_mapping, gfp); + fput(filp); +- /* This is safe: open() is still holding a reference. */ + module_put(THIS_MODULE); + return 0; + } + +-static int +-loop_set_status(struct loop_device *lo, const struct loop_info64 *info) ++static int loop_set_status(struct loop_device *lo, struct block_device *bdev, struct loop_info64 *info) + { + int err; +- struct loop_func_table *xfer; ++ struct loop_func_table *xfer = NULL; + + if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid && + !capable(CAP_SYS_ADMIN)) + return -EPERM; +- if (lo->lo_state != Lo_bound) +- return -ENXIO; + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) + return -EINVAL; + +@@ -686,6 +1189,22 @@ loop_set_status(struct loop_device *lo, + if (err) + return err; + ++ if ((loff_t)info->lo_offset < 0) { ++ /* negative offset == remove offset from IV computations */ ++ lo->lo_offset = -(info->lo_offset); ++ LDE_lo_iv_remove = lo->lo_offset >> 9; ++ } else { ++ /* positive offset == include offset in IV computations */ ++ lo->lo_offset = info->lo_offset; ++ LDE_lo_iv_remove = 0; ++ } ++ LDE_lo_offs_sec = lo->lo_offset >> 9; ++ lo->lo_sizelimit = info->lo_sizelimit; ++ err = figure_loop_size(lo, bdev); ++ if (err) ++ return err; ++ loop_set_softblksz(lo, bdev); ++ + if (info->lo_encrypt_type) { + unsigned int type = info->lo_encrypt_type; + +@@ -694,31 +1213,20 @@ loop_set_status(struct loop_device *lo, + xfer = xfer_funcs[type]; + if (xfer == NULL) + return -EINVAL; +- } else +- xfer = NULL; +- ++ } + err = loop_init_xfer(lo, xfer, info); + if (err) + return err; + +- if (lo->lo_offset != info->lo_offset || +- lo->lo_sizelimit != info->lo_sizelimit) { +- lo->lo_offset = info->lo_offset; +- lo->lo_sizelimit = info->lo_sizelimit; +- if (figure_loop_size(lo)) +- return -EFBIG; +- } +- +- memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); +- memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); +- lo->lo_file_name[LO_NAME_SIZE-1] = 0; +- lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; +- + if (!xfer) + xfer = &none_funcs; + lo->transfer = xfer->transfer; + lo->ioctl = xfer->ioctl; +- ++ ++ memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); ++ memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); ++ lo->lo_file_name[LO_NAME_SIZE-1] = 0; ++ lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; + lo->lo_encrypt_key_size = info->lo_encrypt_key_size; + lo->lo_init[0] = info->lo_init[0]; + lo->lo_init[1] = info->lo_init[1]; +@@ -728,18 +1236,16 @@ loop_set_status(struct loop_device *lo, + lo->lo_key_owner = current->uid; + } + ++ lo->lo_queue->make_request_fn = loop_make_request_real; + return 0; + } + +-static int +-loop_get_status(struct loop_device *lo, struct loop_info64 *info) ++static int loop_get_status(struct loop_device *lo, struct loop_info64 *info) + { + struct file *file = lo->lo_backing_file; + struct kstat stat; + int error; + +- if (lo->lo_state != Lo_bound) +- return -ENXIO; + error = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat); + if (error) + return error; +@@ -748,17 +1254,18 @@ loop_get_status(struct loop_device *lo, + info->lo_device = huge_encode_dev(stat.dev); + info->lo_inode = stat.ino; + info->lo_rdevice = huge_encode_dev(lo->lo_device ? 
stat.rdev : stat.dev); +- info->lo_offset = lo->lo_offset; ++ info->lo_offset = LDE_lo_iv_remove ? -(lo->lo_offset) : lo->lo_offset; + info->lo_sizelimit = lo->lo_sizelimit; + info->lo_flags = lo->lo_flags; + memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); + memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); +- info->lo_encrypt_type = +- lo->lo_encryption ? lo->lo_encryption->number : 0; ++ info->lo_encrypt_type = lo->lo_encryption ? lo->lo_encryption->number : 0; + if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { + info->lo_encrypt_key_size = lo->lo_encrypt_key_size; + memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, + lo->lo_encrypt_key_size); ++ info->lo_init[0] = lo->lo_init[0]; ++ info->lo_init[1] = lo->lo_init[1]; + } + return 0; + } +@@ -772,7 +1279,6 @@ loop_info64_from_old(const struct loop_i + info64->lo_inode = info->lo_inode; + info64->lo_rdevice = info->lo_rdevice; + info64->lo_offset = info->lo_offset; +- info64->lo_sizelimit = 0; + info64->lo_encrypt_type = info->lo_encrypt_type; + info64->lo_encrypt_key_size = info->lo_encrypt_key_size; + info64->lo_flags = info->lo_flags; +@@ -786,7 +1292,7 @@ loop_info64_from_old(const struct loop_i + } + + static int +-loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info) ++loop_info64_to_old(struct loop_info64 *info64, struct loop_info *info) + { + memset(info, 0, sizeof(*info)); + info->lo_number = info64->lo_number; +@@ -809,14 +1315,15 @@ loop_info64_to_old(const struct loop_inf + if (info->lo_device != info64->lo_device || + info->lo_rdevice != info64->lo_rdevice || + info->lo_inode != info64->lo_inode || +- info->lo_offset != info64->lo_offset) ++ info->lo_offset != info64->lo_offset || ++ info64->lo_sizelimit) + return -EOVERFLOW; + + return 0; + } + + static int +-loop_set_status_old(struct loop_device *lo, const struct loop_info *arg) ++loop_set_status_old(struct loop_device *lo, struct block_device *bdev, const struct loop_info *arg) + { + struct loop_info info; + struct loop_info64 info64; +@@ -824,17 +1331,18 @@ loop_set_status_old(struct loop_device * + if (copy_from_user(&info, arg, sizeof (struct loop_info))) + return -EFAULT; + loop_info64_from_old(&info, &info64); +- return loop_set_status(lo, &info64); ++ memset(&info.lo_encrypt_key[0], 0, sizeof(info.lo_encrypt_key)); ++ return loop_set_status(lo, bdev, &info64); + } + + static int +-loop_set_status64(struct loop_device *lo, const struct loop_info64 *arg) ++loop_set_status64(struct loop_device *lo, struct block_device *bdev, struct loop_info64 *arg) + { + struct loop_info64 info64; + + if (copy_from_user(&info64, arg, sizeof (struct loop_info64))) + return -EFAULT; +- return loop_set_status(lo, &info64); ++ return loop_set_status(lo, bdev, &info64); + } + + static int +@@ -870,28 +1378,50 @@ loop_get_status64(struct loop_device *lo + return err; + } + +-static int lo_ioctl(struct inode * inode, struct file * file, +- unsigned int cmd, unsigned long arg) ++#if !defined(NEW_BLOCK_DRIVER_INTERFACE) ++static int lo_ioctl(struct inode *inode, struct file * file, unsigned int cmd, unsigned long arg) + { +- struct loop_device *lo = inode->i_bdev->bd_disk->private_data; ++ struct block_device *bdev = inode->i_bdev; ++#else ++static int lo_ioctl(struct block_device *bdev, struct file * file, unsigned int cmd, unsigned long arg) ++{ ++#endif ++ struct loop_device *lo = bdev->bd_disk->private_data; + int err; + +- down(&lo->lo_ctl_mutex); ++ down(&bdev->bd_sem); ++ ++ /* ++ * LOOP_SET_FD can only be called when no device is 
attached. ++ * All other ioctls can only be called when a device is attached. ++ */ ++ if (bdev->bd_disk->queue->queuedata != NULL) { ++ if (cmd == LOOP_SET_FD) { ++ err = -EBUSY; ++ goto out_err; ++ } ++ } else { ++ if (cmd != LOOP_SET_FD) { ++ err = -ENXIO; ++ goto out_err; ++ } ++ } ++ + switch (cmd) { + case LOOP_SET_FD: +- err = loop_set_fd(lo, file, inode->i_bdev, arg); ++ err = loop_set_fd(lo, file, bdev, arg); + break; + case LOOP_CLR_FD: +- err = loop_clr_fd(lo, inode->i_bdev); ++ err = loop_clr_fd(lo, bdev); + break; + case LOOP_SET_STATUS: +- err = loop_set_status_old(lo, (struct loop_info *) arg); ++ err = loop_set_status_old(lo, bdev, (struct loop_info *) arg); + break; + case LOOP_GET_STATUS: + err = loop_get_status_old(lo, (struct loop_info *) arg); + break; + case LOOP_SET_STATUS64: +- err = loop_set_status64(lo, (struct loop_info64 *) arg); ++ err = loop_set_status64(lo, bdev, (struct loop_info64 *) arg); + break; + case LOOP_GET_STATUS64: + err = loop_get_status64(lo, (struct loop_info64 *) arg); +@@ -899,29 +1429,28 @@ static int lo_ioctl(struct inode * inode + default: + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; + } +- up(&lo->lo_ctl_mutex); ++out_err: ++ up(&bdev->bd_sem); + return err; + } + ++#if !defined(NEW_BLOCK_DRIVER_INTERFACE) + static int lo_open(struct inode *inode, struct file *file) ++#else ++static int lo_open(struct block_device *bdev, struct file *file) ++#endif + { +- struct loop_device *lo = inode->i_bdev->bd_disk->private_data; +- +- down(&lo->lo_ctl_mutex); +- lo->lo_refcnt++; +- up(&lo->lo_ctl_mutex); +- + return 0; + } + ++#if !defined(NEW_BLOCK_DRIVER_INTERFACE) + static int lo_release(struct inode *inode, struct file *file) + { +- struct loop_device *lo = inode->i_bdev->bd_disk->private_data; +- +- down(&lo->lo_ctl_mutex); +- --lo->lo_refcnt; +- up(&lo->lo_ctl_mutex); +- ++ sync_blockdev(inode->i_bdev); ++#else ++static int lo_release(struct gendisk *disk) ++{ ++#endif + return 0; + } + +@@ -938,7 +1467,12 @@ static struct block_device_operations lo + MODULE_PARM(max_loop, "i"); + MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)"); + MODULE_LICENSE("GPL"); ++ ++#if !defined(OLD_REQUEST_MODULE_INTERFACE) + MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); ++#else ++MODULE_ALIAS("block-major-7"); ++#endif + + int loop_register_transfer(struct loop_func_table *funcs) + { +@@ -953,34 +1487,31 @@ int loop_register_transfer(struct loop_f + int loop_unregister_transfer(int number) + { + unsigned int n = number; +- struct loop_device *lo; ++ struct loop_device *lo; + struct loop_func_table *xfer; ++ int x; + + if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) +- return -EINVAL; +- ++ return -EINVAL; + xfer_funcs[n] = NULL; +- +- for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) { +- down(&lo->lo_ctl_mutex); +- ++ for (x = 0; x < max_loop; x++) { ++ lo = loop_dev_ptr_arr[x]; ++ if (!lo) ++ continue; + if (lo->lo_encryption == xfer) + loop_release_xfer(lo); +- +- up(&lo->lo_ctl_mutex); + } +- +- return 0; ++ return 0; + } + + EXPORT_SYMBOL(loop_register_transfer); + EXPORT_SYMBOL(loop_unregister_transfer); + +-int __init loop_init(void) ++int __init loop_init(void) + { + int i; + +- if (max_loop < 1 || max_loop > 256) { ++ if ((max_loop < 1) || (max_loop > 256)) { + printk(KERN_WARNING "loop: invalid max_loop (must be between" + " 1 and 256), using default (8)\n"); + max_loop = 8; +@@ -989,62 +1520,78 @@ int __init loop_init(void) + if (register_blkdev(LOOP_MAJOR, "loop")) + return -EIO; + +- loop_dev = 
kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL); +- if (!loop_dev) ++ loop_dev_ptr_arr = kmalloc(max_loop * sizeof(struct loop_device *), GFP_KERNEL); ++ if (!loop_dev_ptr_arr) + goto out_mem1; +- memset(loop_dev, 0, max_loop * sizeof(struct loop_device)); + + disks = kmalloc(max_loop * sizeof(struct gendisk *), GFP_KERNEL); + if (!disks) + goto out_mem2; + + for (i = 0; i < max_loop; i++) { ++ loop_dev_ptr_arr[i] = kmalloc(sizeof(LoDevExt), GFP_KERNEL); ++ if (!loop_dev_ptr_arr[i]) ++ goto out_mem3; ++ } ++ ++ for (i = 0; i < max_loop; i++) { + disks[i] = alloc_disk(1); + if (!disks[i]) +- goto out_mem3; ++ goto out_mem4; ++ } ++ ++ for (i = 0; i < max_loop; i++) { ++ disks[i]->queue = blk_alloc_queue(GFP_KERNEL); ++ if (!disks[i]->queue) ++ goto out_mem5; ++ disks[i]->queue->queuedata = NULL; ++ blk_queue_make_request(disks[i]->queue, loop_make_request_err); ++ } ++ ++ { extern int init_module_aes(void); init_module_aes(); } ++ for (i = 0; i < (sizeof(lo_prealloc) / sizeof(int)); i += 2) { ++ if (!lo_prealloc[i]) ++ continue; ++ if (lo_prealloc[i] < LO_PREALLOC_MIN) ++ lo_prealloc[i] = LO_PREALLOC_MIN; ++ if (lo_prealloc[i] > LO_PREALLOC_MAX) ++ lo_prealloc[i] = LO_PREALLOC_MAX; + } + + devfs_mk_dir("loop"); + + for (i = 0; i < max_loop; i++) { +- struct loop_device *lo = &loop_dev[i]; ++ struct loop_device *lo = loop_dev_ptr_arr[i]; + struct gendisk *disk = disks[i]; +- +- memset(lo, 0, sizeof(*lo)); +- lo->lo_queue = blk_alloc_queue(GFP_KERNEL); +- if (!lo->lo_queue) +- goto out_mem4; +- init_MUTEX(&lo->lo_ctl_mutex); +- init_MUTEX_LOCKED(&lo->lo_sem); +- init_MUTEX_LOCKED(&lo->lo_bh_mutex); ++ memset(lo, 0, sizeof(LoDevExt)); + lo->lo_number = i; +- spin_lock_init(&lo->lo_lock); ++ lo->lo_queue = disk->queue; + disk->major = LOOP_MAJOR; + disk->first_minor = i; + disk->fops = &lo_fops; + sprintf(disk->disk_name, "loop%d", i); + sprintf(disk->devfs_name, "loop/%d", i); + disk->private_data = lo; +- disk->queue = lo->lo_queue; ++ add_disk(disk); + } + +- /* We cannot fail after we call this, so another loop!*/ +- for (i = 0; i < max_loop; i++) +- add_disk(disks[i]); + printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop); + return 0; + ++out_mem5: ++ while (i--) ++ blk_put_queue(disks[i]->queue); ++ i = max_loop; + out_mem4: + while (i--) +- blk_put_queue(loop_dev[i].lo_queue); +- devfs_remove("loop"); ++ put_disk(disks[i]); + i = max_loop; + out_mem3: + while (i--) +- put_disk(disks[i]); ++ kfree(loop_dev_ptr_arr[i]); + kfree(disks); + out_mem2: +- kfree(loop_dev); ++ kfree(loop_dev_ptr_arr); + out_mem1: + unregister_blkdev(LOOP_MAJOR, "loop"); + printk(KERN_ERR "loop: ran out of memory\n"); +@@ -1055,17 +1602,17 @@ void loop_exit(void) + { + int i; + ++ { extern void cleanup_module_aes(void); cleanup_module_aes(); } + for (i = 0; i < max_loop; i++) { + del_gendisk(disks[i]); +- blk_put_queue(loop_dev[i].lo_queue); + put_disk(disks[i]); ++ blk_put_queue(loop_dev_ptr_arr[i]->lo_queue); ++ kfree(loop_dev_ptr_arr[i]); + } + devfs_remove("loop"); +- if (unregister_blkdev(LOOP_MAJOR, "loop")) +- printk(KERN_WARNING "loop: cannot unregister blkdev\n"); +- ++ unregister_blkdev(LOOP_MAJOR, "loop"); + kfree(disks); +- kfree(loop_dev); ++ kfree(loop_dev_ptr_arr); + } + + module_init(loop_init); +@@ -1080,3 +1627,10 @@ static int __init max_loop_setup(char *s + + __setup("max_loop=", max_loop_setup); + #endif ++ ++extern void loop_compute_sector_iv(sector_t, u_int32_t *); ++EXPORT_SYMBOL(loop_compute_sector_iv); ++extern void loop_compute_md5_iv(sector_t, u_int32_t *, u_int32_t 
*);
+EXPORT_SYMBOL(loop_compute_md5_iv);
+extern void md5_transform_CPUbyteorder(u_int32_t *, u_int32_t const *);
+EXPORT_SYMBOL_NOVERS(md5_transform_CPUbyteorder);
diff --git a/package/blindcoder/loop-aes/loop-aes.conf b/package/blindcoder/loop-aes/loop-aes.conf
new file mode 100644
index 000000000..475f24fa6
--- /dev/null
+++ b/package/blindcoder/loop-aes/loop-aes.conf
@@ -0,0 +1,10 @@
+
+loop_aes_main () {
+	cd ${root}/usr/src
+	rm -rf loop-AES-v${ver}
+	tar --use-compress-program=bzip2 -xf ${archdir}/loop-AES-v${ver}.tar.bz2
+}
+
+createdocs=0
+autoextract=0
+custmain="loop_aes_main"
diff --git a/package/blindcoder/loop-aes/loop-aes.desc b/package/blindcoder/loop-aes/loop-aes.desc
new file mode 100644
index 000000000..680c11abc
--- /dev/null
+++ b/package/blindcoder/loop-aes/loop-aes.desc
@@ -0,0 +1,42 @@
+
+[COPY] --- ROCK-COPYRIGHT-NOTE-BEGIN ---
+[COPY]
+[COPY] This copyright note is auto-generated by ./scripts/Create-CopyPatch.
+[COPY] Please add additional copyright information _after_ the line containing
+[COPY] the ROCK-COPYRIGHT-NOTE-END tag. Otherwise it might get removed by
+[COPY] the ./scripts/Create-CopyPatch script. Do not edit this copyright text!
+[COPY]
+[COPY] ROCK Linux: rock-src/package/blindcoder/loop-aes/loop-aes.desc
+[COPY] ROCK Linux is Copyright (C) 1998 - 2003 Clifford Wolf
+[COPY]
+[COPY] This program is free software; you can redistribute it and/or modify
+[COPY] it under the terms of the GNU General Public License as published by
+[COPY] the Free Software Foundation; either version 2 of the License, or
+[COPY] (at your option) any later version. A copy of the GNU General Public
+[COPY] License can be found at Documentation/COPYING.
+[COPY]
+[COPY] Many people helped and are helping developing ROCK Linux. Please
+[COPY] have a look at http://www.rocklinux.org/ and the Documentation/TEAM
+[COPY] file for details.
+[COPY]
+[COPY] --- ROCK-COPYRIGHT-NOTE-END ---
+
+[I] Source for loop-aes
+
+[T] This is the source for the patched loop.{ko,o} kernel module
+[T] which supports AES encryption.
+
+[U] http://loop-aes.sourceforge.net/
+
+[A] Jari Ruusu
+[M] Benjamin Schieder
+
+[C] extra/crypto
+
+[L] GPL
+[S] Stable
+[V] 2.0f
+[P] O -1---5---9 104.000
+
+[D] 2422820419 loop-AES-v2.0f.tar.bz2 http://loop-aes.sourceforge.net/loop-AES/
+
diff --git a/package/blindcoder/loop-aes/pkg_linux24-src_post.conf b/package/blindcoder/loop-aes/pkg_linux24-src_post.conf
new file mode 100644
index 000000000..708124283
--- /dev/null
+++ b/package/blindcoder/loop-aes/pkg_linux24-src_post.conf
@@ -0,0 +1 @@
+var_append patchfiles " " "${base}/package/blindcoder/loop-aes/linux24_cryptoloop.diff"
diff --git a/package/blindcoder/loop-aes/pkg_linux24_post.conf b/package/blindcoder/loop-aes/pkg_linux24_post.conf
new file mode 100644
index 000000000..708124283
--- /dev/null
+++ b/package/blindcoder/loop-aes/pkg_linux24_post.conf
@@ -0,0 +1 @@
+var_append patchfiles " " "${base}/package/blindcoder/loop-aes/linux24_cryptoloop.diff"
diff --git a/package/blindcoder/loop-aes/pkg_linux26-src_post.conf b/package/blindcoder/loop-aes/pkg_linux26-src_post.conf
new file mode 100644
index 000000000..bd52ea5d2
--- /dev/null
+++ b/package/blindcoder/loop-aes/pkg_linux26-src_post.conf
@@ -0,0 +1 @@
+var_append patchfiles " " "${base}/package/blindcoder/loop-aes/linux26_cryptoloop.diff"
diff --git a/package/blindcoder/loop-aes/pkg_linux26_post.conf b/package/blindcoder/loop-aes/pkg_linux26_post.conf
new file mode 100644
index 000000000..bd52ea5d2
--- /dev/null
+++ b/package/blindcoder/loop-aes/pkg_linux26_post.conf
@@ -0,0 +1 @@
+var_append patchfiles " " "${base}/package/blindcoder/loop-aes/linux26_cryptoloop.diff"
diff --git a/package/blindcoder/loop-aes/pkg_util-linux_post.conf b/package/blindcoder/loop-aes/pkg_util-linux_post.conf
new file mode 100644
index 000000000..6ee1d33e9
--- /dev/null
+++ b/package/blindcoder/loop-aes/pkg_util-linux_post.conf
@@ -0,0 +1,2 @@
+var_remove patchfiles " " "${confdir}/cryptoloop-support.diff"
+var_append patchfiles " " "${root}/usr/src/loop-AES-v2.0f/util-linux-2.12.diff"
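The pkg_*_post.conf hooks above rely on ROCK's var_append/var_remove helpers
to edit another package's whitespace-separated patchfiles list at build time.
Their real implementations live in the ROCK build scripts; the sketch below
only approximates the behaviour relied upon here (a single-space separator,
values without glob characters), so treat the details as illustrative:

    # Approximation of ROCK's list helpers as used by the hooks above.
    # Assumes the separator is a single space and values contain no globs.
    var_append() {  # var_append NAME SEP VALUE
        local name="$1" sep="$2" value="$3"
        if [ -z "$(eval echo \"\$$name\")" ]; then
            eval "$name=\"\$value\""
        else
            eval "$name=\"\$$name\$sep\$value\""
        fi
    }
    var_remove() {  # var_remove NAME SEP VALUE
        local name="$1" sep="$2" value="$3" item result=""
        for item in $(eval echo \"\$$name\"); do
            [ "$item" = "$value" ] && continue
            if [ -z "$result" ]; then result="$item"
            else result="$result$sep$item"; fi
        done
        eval "$name=\"\$result\""
    }

    # Example: what pkg_util-linux_post.conf effectively does to the
    # util-linux build -- swap the old cryptoloop patch for the one
    # shipped inside the extracted loop-AES source tree.
    var_remove patchfiles " " "${confdir}/cryptoloop-support.diff"
    var_append patchfiles " " "${root}/usr/src/loop-AES-v2.0f/util-linux-2.12.diff"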
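For completeness, this is roughly how the pieces fit together at runtime once
the patched kernel, the ciphers modules and the loop-AES-patched util-linux
losetup are installed. The commands follow the loop-AES v2.0f documentation
rather than anything in this patch, and all device, file and mount-point
names are made up:

    modprobe loop                 # the loop-AES patched loop driver
    modprobe loop_twofish         # optional extra cipher from the ciphers package

    dd if=/dev/urandom of=/root/secret.img bs=1024k count=16
    losetup -e AES128 /dev/loop0 /root/secret.img   # asks for a passphrase
    mkfs -t ext2 /dev/loop0
    mount /dev/loop0 /mnt/crypto

    umount /mnt/crypto
    losetup -d /dev/loop0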