author     Richard Yao <ryao@gentoo.org>  2012-06-25 21:03:27 +0000
committer  Richard Yao <ryao@gentoo.org>  2012-06-25 21:03:27 +0000
commit     be3ad2890ae33fe58030d904e88068bd4294ad77 (patch)
tree       72256bd3c3153dee9a4e2c0f73f2a419758e6370 /sys-fs/zfs
parent     whitespace (diff)
download   gentoo-2-be3ad2890ae33fe58030d904e88068bd4294ad77.tar.gz
           gentoo-2-be3ad2890ae33fe58030d904e88068bd4294ad77.tar.bz2
           gentoo-2-be3ad2890ae33fe58030d904e88068bd4294ad77.zip
Fix additional deadlock fix regression
(Portage version: 2.1.10.49/cvs/Linux x86_64)
Diffstat (limited to 'sys-fs/zfs')
-rw-r--r--  sys-fs/zfs/ChangeLog                                              |   9
-rw-r--r--  sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch   | 161
-rw-r--r--  sys-fs/zfs/zfs-0.6.0_rc9-r3.ebuild (renamed from sys-fs/zfs/zfs-0.6.0_rc9-r2.ebuild) |   2
3 files changed, 84 insertions, 88 deletions
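
For context on the diff below: the bundled patch changes zfs_range_lock() so that the caller, rather than the lock routine, supplies the rl_t structure. Synchronous callers can then keep the lock on the stack, avoiding the KM_SLEEP allocation that could deadlock zvols. The following is a minimal userspace sketch of that ownership pattern; the names, fields, and function bodies are simplified assumptions for illustration, not the actual ZFS declarations.

    /*
     * Minimal userspace sketch of the caller-allocates pattern described in
     * the bundled patch.  Simplified placeholders, not the real ZFS code.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct rl {
            uint64_t r_off;   /* start of the locked range */
            uint64_t r_len;   /* length of the locked range */
            int      r_type;  /* reader or writer */
    } rl_t;

    /* New style: the caller owns the storage; no allocator call in here. */
    static void
    range_lock(rl_t *rl, uint64_t off, uint64_t len, int type)
    {
            rl->r_off = off;
            rl->r_len = len;
            rl->r_type = type;
            /* the real function would insert rl into the per-znode AVL tree */
    }

    static void
    range_unlock(rl_t *rl)
    {
            /* the real function removes rl from the tree and wakes waiters */
            (void) rl;
    }

    int
    main(void)
    {
            rl_t rl;  /* stack allocation: nothing here can sleep or deadlock */

            range_lock(&rl, 0, 4096, 0);
            printf("locked range [%llu, %llu)\n",
                (unsigned long long)rl.r_off,
                (unsigned long long)(rl.r_off + rl.r_len));
            range_unlock(&rl);
            return (0);
    }

Callers that hand the lock to asynchronous completion code cannot use the stack this way; the -r3 revision of the patch exists precisely to handle that case (see the note after the patch diff).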
diff --git a/sys-fs/zfs/ChangeLog b/sys-fs/zfs/ChangeLog
index d693068c29cf..e61c829dff2d 100644
--- a/sys-fs/zfs/ChangeLog
+++ b/sys-fs/zfs/ChangeLog
@@ -1,6 +1,13 @@
# ChangeLog for sys-fs/zfs
# Copyright 1999-2012 Gentoo Foundation; Distributed under the GPL v2
-# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/ChangeLog,v 1.30 2012/06/25 20:21:55 ryao Exp $
+# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/ChangeLog,v 1.31 2012/06/25 21:03:27 ryao Exp $
+
+*zfs-0.6.0_rc9-r3 (25 Jun 2012)
+
+ 25 Jun 2012; Richard Yao <ryao@gentoo.org> +zfs-0.6.0_rc9-r3.ebuild,
+ -zfs-0.6.0_rc9-r2.ebuild,
+ files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch:
+ Fix additional deadlock fix regression
*zfs-0.6.0_rc9-r2 (25 Jun 2012)
diff --git a/sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch b/sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch
index 4312f84fd5a7..465e72740f67 100644
--- a/sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch
+++ b/sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch
@@ -1,36 +1,28 @@
-From fc1f1d3940f4d2e5b1b85481d900d8198cf4b6f3 Mon Sep 17 00:00:00 2001
-From: Richard Yao <ryao@cs.stonybrook.edu>
-Date: Mon, 25 Jun 2012 14:41:30 -0400
-Subject: [PATCH] Make callers responsible for memory allocation in
- zfs_range_lock()
+commit e7deab3edf6940f13013ca147c91472577223923
+Author: Richard Yao <ryao@cs.stonybrook.edu>
+Date: Mon Jun 25 14:41:30 2012 -0400
-zfs_range_lock() is used in zvols, and previously, it could deadlock due
-to an allocation using KM_SLEEP. We avoid this by moving responsibility
-the memory allocation from zfs_range_lock() to the caller. This enables
-us to avoid such deadlocks and use stack allocations, which are more
-efficient and prevents deadlocks. The contexts in which stack
-allocations are done do not appear to be stack heavy, so we do not risk
-overflowing the stack from doing this.
-
-Signed-off-by: Richard Yao <ryao@cs.stonybrook.edu>
-
-Conflicts:
-
- module/zfs/zvol.c
----
- cmd/ztest/ztest.c | 32 +++++++++++++++++---------------
- include/sys/zfs_rlock.h | 2 +-
- module/zfs/zfs_rlock.c | 15 +++++++--------
- module/zfs/zfs_vnops.c | 30 ++++++++++++++++--------------
- module/zfs/zfs_znode.c | 30 +++++++++++++++---------------
- module/zfs/zvol.c | 24 +++++++++++++-----------
- 6 files changed, 69 insertions(+), 64 deletions(-)
+ Make callers responsible for memory allocation in zfs_range_lock()
+
+ zfs_range_lock() is used in zvols, and previously, it could deadlock due
+ to an allocation using KM_SLEEP. We avoid this by moving responsibility
+ the memory allocation from zfs_range_lock() to the caller. This enables
+ us to avoid such deadlocks and use stack allocations, which are more
+ efficient and prevents deadlocks. The contexts in which stack
+ allocations are done do not appear to be stack heavy, so we do not risk
+ overflowing the stack from doing this.
+
+ Signed-off-by: Richard Yao <ryao@cs.stonybrook.edu>
+
+ Conflicts:
+
+ module/zfs/zvol.c
diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index 72d511b..c5dd0c2 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
-@@ -973,12 +973,11 @@ enum ztest_object {
+@@ -973,12 +973,11 @@ ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
}
static rl_t *
@@ -44,7 +36,7 @@ index 72d511b..c5dd0c2 100644
rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
rl->rl_object = object;
-@@ -1389,7 +1388,7 @@ enum ztest_object {
+@@ -1389,7 +1388,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
dmu_tx_t *tx;
dmu_buf_t *db;
arc_buf_t *abuf = NULL;
@@ -53,7 +45,7 @@ index 72d511b..c5dd0c2 100644
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
-@@ -1413,7 +1412,7 @@ enum ztest_object {
+@@ -1413,7 +1412,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
bt = NULL;
ztest_object_lock(zd, lr->lr_foid, RL_READER);
@@ -62,7 +54,7 @@ index 72d511b..c5dd0c2 100644
VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
-@@ -1438,7 +1437,7 @@ enum ztest_object {
+@@ -1438,7 +1437,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
if (abuf != NULL)
dmu_return_arcbuf(abuf);
dmu_buf_rele(db, FTAG);
@@ -71,7 +63,7 @@ index 72d511b..c5dd0c2 100644
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
-@@ -1495,7 +1494,7 @@ enum ztest_object {
+@@ -1495,7 +1494,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
dmu_tx_commit(tx);
@@ -80,7 +72,7 @@ index 72d511b..c5dd0c2 100644
ztest_object_unlock(zd, lr->lr_foid);
return (0);
-@@ -1507,13 +1506,13 @@ enum ztest_object {
+@@ -1507,13 +1506,13 @@ ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
@@ -96,7 +88,7 @@ index 72d511b..c5dd0c2 100644
RL_WRITER);
tx = dmu_tx_create(os);
-@@ -1522,7 +1521,7 @@ enum ztest_object {
+@@ -1522,7 +1521,7 @@ ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
@@ -105,7 +97,7 @@ index 72d511b..c5dd0c2 100644
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
-@@ -1534,7 +1533,7 @@ enum ztest_object {
+@@ -1534,7 +1533,7 @@ ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
dmu_tx_commit(tx);
@@ -114,7 +106,7 @@ index 72d511b..c5dd0c2 100644
ztest_object_unlock(zd, lr->lr_foid);
return (0);
-@@ -1670,6 +1669,8 @@ enum ztest_object {
+@@ -1670,6 +1669,8 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
dmu_object_info_t doi;
dmu_buf_t *db;
zgd_t *zgd;
@@ -123,7 +115,7 @@ index 72d511b..c5dd0c2 100644
int error;
ztest_object_lock(zd, object, RL_READER);
-@@ -1694,9 +1695,10 @@ enum ztest_object {
+@@ -1694,9 +1695,10 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
zgd->zgd_zilog = zd->zd_zilog;
zgd->zgd_private = zd;
@@ -135,7 +127,7 @@ index 72d511b..c5dd0c2 100644
RL_READER);
error = dmu_read(os, object, offset, size, buf,
-@@ -1711,7 +1713,7 @@ enum ztest_object {
+@@ -1711,7 +1713,7 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
offset = 0;
}
@@ -144,7 +136,7 @@ index 72d511b..c5dd0c2 100644
RL_READER);
error = dmu_buf_hold(os, object, offset, zgd, &db,
-@@ -1953,12 +1955,12 @@ enum ztest_object {
+@@ -1953,12 +1955,12 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
@@ -159,7 +151,7 @@ index 72d511b..c5dd0c2 100644
tx = dmu_tx_create(os);
-@@ -1974,7 +1976,7 @@ enum ztest_object {
+@@ -1974,7 +1976,7 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
(void) dmu_free_long_range(os, object, offset, size);
}
@@ -172,7 +164,7 @@ diff --git a/include/sys/zfs_rlock.h b/include/sys/zfs_rlock.h
index da18b1f..85dc16a 100644
--- a/include/sys/zfs_rlock.h
+++ b/include/sys/zfs_rlock.h
-@@ -63,7 +63,7 @@
+@@ -63,7 +63,7 @@ typedef struct rl {
* is converted to WRITER that specified to lock from the start of the
* end of file. zfs_range_lock() returns the range lock structure.
*/
@@ -198,7 +190,7 @@ index f3ada17..eb81777 100644
*
* AVL tree
* --------
-@@ -420,13 +420,11 @@
+@@ -420,13 +420,11 @@ got_lock:
* previously locked as RL_WRITER).
*/
rl_t *
@@ -213,7 +205,7 @@ index f3ada17..eb81777 100644
new->r_zp = zp;
new->r_off = off;
if (len + off < off) /* overflow */
-@@ -531,7 +529,6 @@
+@@ -531,7 +529,6 @@ zfs_range_unlock_reader(znode_t *zp, rl_t *remove, list_t *free_list)
}
mutex_exit(&zp->z_range_lock);
@@ -221,7 +213,7 @@ index f3ada17..eb81777 100644
}
}
-@@ -572,7 +569,9 @@
+@@ -572,7 +569,9 @@ zfs_range_unlock(rl_t *rl)
while ((free_rl = list_head(&free_list)) != NULL) {
list_remove(&free_list, free_rl);
@@ -233,10 +225,10 @@ index f3ada17..eb81777 100644
list_destroy(&free_list);
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
-index 2da5fec..c8ca7c5 100644
+index 2da5fec..1ef5299 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
-@@ -370,7 +370,7 @@
+@@ -370,7 +370,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
objset_t *os;
ssize_t n, nbytes;
int error = 0;
@@ -245,7 +237,7 @@ index 2da5fec..c8ca7c5 100644
#ifdef HAVE_UIO_ZEROCOPY
xuio_t *xuio = NULL;
#endif /* HAVE_UIO_ZEROCOPY */
-@@ -418,7 +418,7 @@
+@@ -418,7 +418,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
/*
* Lock the range against changes.
*/
@@ -254,7 +246,7 @@ index 2da5fec..c8ca7c5 100644
/*
* If we are reading past end-of-file we can skip
-@@ -482,7 +482,7 @@
+@@ -482,7 +482,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
n -= nbytes;
}
out:
@@ -263,7 +255,7 @@ index 2da5fec..c8ca7c5 100644
ZFS_ACCESSTIME_STAMP(zsb, zp);
zfs_inode_update(zp);
-@@ -524,7 +524,7 @@
+@@ -524,7 +524,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
zilog_t *zilog;
offset_t woff;
ssize_t n, nbytes;
@@ -272,7 +264,7 @@ index 2da5fec..c8ca7c5 100644
int max_blksz = zsb->z_max_blksz;
int error = 0;
arc_buf_t *abuf;
-@@ -608,9 +608,9 @@
+@@ -608,9 +608,9 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
* Obtain an appending range lock to guarantee file append
* semantics. We reset the write offset once we have the lock.
*/
@@ -285,7 +277,7 @@ index 2da5fec..c8ca7c5 100644
/*
* We overlocked the file because this write will cause
* the file block size to increase.
-@@ -625,11 +625,11 @@
+@@ -625,11 +625,11 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
* this write, then this range lock will lock the entire file
* so that we can re-write the block safely.
*/
@@ -299,7 +291,7 @@ index 2da5fec..c8ca7c5 100644
ZFS_EXIT(zsb);
return (EFBIG);
}
-@@ -719,7 +719,7 @@
+@@ -719,7 +719,7 @@ again:
* on the first iteration since zfs_range_reduce() will
* shrink down r_len to the appropriate size.
*/
@@ -308,7 +300,7 @@ index 2da5fec..c8ca7c5 100644
uint64_t new_blksz;
if (zp->z_blksz > max_blksz) {
-@@ -729,7 +729,7 @@
+@@ -729,7 +729,7 @@ again:
new_blksz = MIN(end_size, max_blksz);
}
zfs_grow_blocksize(zp, new_blksz, tx);
@@ -317,7 +309,7 @@ index 2da5fec..c8ca7c5 100644
}
/*
-@@ -842,7 +842,7 @@
+@@ -842,7 +842,7 @@ again:
uio_prefaultpages(MIN(n, max_blksz), uio);
}
@@ -326,23 +318,23 @@ index 2da5fec..c8ca7c5 100644
/*
* If we're in replay mode, or we made no progress, return error.
-@@ -915,6 +915,7 @@
- blkptr_t *bp = &lr->lr_blkptr;
- dmu_buf_t *db;
- zgd_t *zgd;
-+ rl_t rl;
- int error = 0;
+@@ -893,6 +893,7 @@ zfs_get_done(zgd_t *zgd, int error)
+ if (error == 0 && zgd->zgd_bp)
+ zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
- ASSERT(zio != NULL);
-@@ -935,6 +936,7 @@
++ kmem_free(zgd->zgd_rl, sizeof (rl_t));
+ kmem_free(zgd, sizeof (zgd_t));
+ }
+
+@@ -935,6 +936,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
}
zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
-+ zgd->zgd_rl = &rl;
++ zgd->zgd_rl = (rl_t *)kmem_zalloc(sizeof (rl_t), KM_SLEEP);
zgd->zgd_zilog = zsb->z_log;
zgd->zgd_private = zp;
-@@ -946,7 +948,7 @@
+@@ -946,7 +948,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
* we don't have to write the data twice.
*/
if (buf != NULL) { /* immediate write */
@@ -351,7 +343,7 @@ index 2da5fec..c8ca7c5 100644
/* test for truncation needs to be done while range locked */
if (offset >= zp->z_size) {
error = ENOENT;
-@@ -967,7 +969,7 @@
+@@ -967,7 +969,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
size = zp->z_blksz;
blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
offset -= blkoff;
@@ -364,7 +356,7 @@ diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 3a6872f..e363839 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
-@@ -1158,20 +1158,20 @@
+@@ -1158,20 +1158,20 @@ zfs_extend(znode_t *zp, uint64_t end)
{
zfs_sb_t *zsb = ZTOZSB(zp);
dmu_tx_t *tx;
@@ -388,7 +380,7 @@ index 3a6872f..e363839 100644
return (0);
}
top:
-@@ -1202,7 +1202,7 @@
+@@ -1202,7 +1202,7 @@ top:
goto top;
}
dmu_tx_abort(tx);
@@ -397,7 +389,7 @@ index 3a6872f..e363839 100644
return (error);
}
-@@ -1214,7 +1214,7 @@
+@@ -1214,7 +1214,7 @@ top:
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
@@ -406,7 +398,7 @@ index 3a6872f..e363839 100644
dmu_tx_commit(tx);
-@@ -1235,19 +1235,19 @@
+@@ -1235,19 +1235,19 @@ static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
zfs_sb_t *zsb = ZTOZSB(zp);
@@ -429,7 +421,7 @@ index 3a6872f..e363839 100644
return (0);
}
-@@ -1256,7 +1256,7 @@
+@@ -1256,7 +1256,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);
@@ -438,7 +430,7 @@ index 3a6872f..e363839 100644
return (error);
}
-@@ -1275,7 +1275,7 @@
+@@ -1275,7 +1275,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
{
zfs_sb_t *zsb = ZTOZSB(zp);
dmu_tx_t *tx;
@@ -447,7 +439,7 @@ index 3a6872f..e363839 100644
int error;
sa_bulk_attr_t bulk[2];
int count = 0;
-@@ -1283,19 +1283,19 @@
+@@ -1283,19 +1283,19 @@ zfs_trunc(znode_t *zp, uint64_t end)
/*
* We will change zp_size, lock the whole file.
*/
@@ -470,7 +462,7 @@ index 3a6872f..e363839 100644
return (error);
}
top:
-@@ -1310,7 +1310,7 @@
+@@ -1310,7 +1310,7 @@ top:
goto top;
}
dmu_tx_abort(tx);
@@ -479,7 +471,7 @@ index 3a6872f..e363839 100644
return (error);
}
-@@ -1327,7 +1327,7 @@
+@@ -1327,7 +1327,7 @@ top:
dmu_tx_commit(tx);
@@ -492,7 +484,7 @@ diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 125d58d..bbe53d9 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
-@@ -537,7 +537,7 @@
+@@ -537,7 +537,7 @@ zvol_write(void *arg)
uint64_t size = blk_rq_bytes(req);
int error = 0;
dmu_tx_t *tx;
@@ -501,7 +493,7 @@ index 125d58d..bbe53d9 100644
if (req->cmd_flags & VDEV_REQ_FLUSH)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
-@@ -550,7 +550,7 @@
+@@ -550,7 +550,7 @@ zvol_write(void *arg)
return;
}
@@ -510,7 +502,7 @@ index 125d58d..bbe53d9 100644
tx = dmu_tx_create(zv->zv_objset);
dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);
-@@ -559,7 +559,7 @@
+@@ -559,7 +559,7 @@ zvol_write(void *arg)
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
@@ -519,7 +511,7 @@ index 125d58d..bbe53d9 100644
blk_end_request(req, -error, size);
return;
}
-@@ -570,7 +570,7 @@
+@@ -570,7 +570,7 @@ zvol_write(void *arg)
req->cmd_flags & VDEV_REQ_FUA);
dmu_tx_commit(tx);
@@ -528,7 +520,7 @@ index 125d58d..bbe53d9 100644
if ((req->cmd_flags & VDEV_REQ_FUA) ||
zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
-@@ -589,7 +589,7 @@
+@@ -589,7 +589,7 @@ zvol_discard(void *arg)
uint64_t offset = blk_rq_pos(req) << 9;
uint64_t size = blk_rq_bytes(req);
int error;
@@ -537,7 +529,7 @@ index 125d58d..bbe53d9 100644
if (offset + size > zv->zv_volsize) {
blk_end_request(req, -EIO, size);
-@@ -601,7 +601,7 @@
+@@ -601,7 +601,7 @@ zvol_discard(void *arg)
return;
}
@@ -546,7 +538,7 @@ index 125d58d..bbe53d9 100644
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);
-@@ -609,7 +609,7 @@
+@@ -609,7 +609,7 @@ zvol_discard(void *arg)
* TODO: maybe we should add the operation to the log.
*/
@@ -555,7 +547,7 @@ index 125d58d..bbe53d9 100644
blk_end_request(req, -error, size);
}
-@@ -630,18 +630,18 @@
+@@ -630,18 +630,18 @@ zvol_read(void *arg)
uint64_t offset = blk_rq_pos(req) << 9;
uint64_t size = blk_rq_bytes(req);
int error;
@@ -577,7 +569,7 @@ index 125d58d..bbe53d9 100644
/* convert checksum errors into IO errors */
if (error == ECKSUM)
-@@ -744,6 +744,7 @@
+@@ -744,6 +744,7 @@ zvol_get_done(zgd_t *zgd, int error)
if (error == 0 && zgd->zgd_bp)
zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
@@ -585,7 +577,7 @@ index 125d58d..bbe53d9 100644
kmem_free(zgd, sizeof (zgd_t));
}
-@@ -766,7 +767,8 @@
+@@ -766,7 +767,8 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_zilog = zv->zv_zilog;
@@ -595,6 +587,3 @@ index 125d58d..bbe53d9 100644
/*
* Write records come in two flavors: immediate and indirect.
---
-1.7.10
-
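
The substantive change between the -r2 and -r3 revisions of the patch above is confined to the zfs_get_data()/zfs_get_done() path in zfs_vnops.c: the range lock taken there is only released from the completion callback, after zfs_get_data() has returned, so the -r2 stack-allocated rl_t dangled. -r3 switches it to kmem_zalloc() with a matching kmem_free() in zfs_get_done(), mirroring the treatment zvol.c already receives in the same patch. Below is a minimal sketch of why heap allocation is required here, using hypothetical simplified structs and malloc/free in place of the kernel allocators.

    /*
     * Minimal userspace sketch of the lifetime problem fixed in -r3:
     * the lock pointer stored in the zgd must outlive get_data().
     */
    #include <stdlib.h>

    typedef struct rl { int r_type; } rl_t;

    typedef struct zgd {
            rl_t *zgd_rl;  /* must stay valid until the I/O completes */
    } zgd_t;

    /* Completion callback: runs long after get_data() has returned. */
    static void
    get_done(zgd_t *zgd)
    {
            free(zgd->zgd_rl);  /* only safe because the rl_t is heap memory */
            free(zgd);
    }

    static zgd_t *
    get_data(void)
    {
            zgd_t *zgd = calloc(1, sizeof (*zgd));

            /* -r2 style: "rl_t rl; zgd->zgd_rl = &rl;" dangles after return. */
            /* -r3 style: allocate, so the pointer outlives this stack frame. */
            zgd->zgd_rl = calloc(1, sizeof (rl_t));
            return (zgd);  /* the lock is dropped later, asynchronously */
    }

    int
    main(void)
    {
            get_done(get_data());  /* stand-in for the asynchronous completion */
            return (0);
    }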
diff --git a/sys-fs/zfs/zfs-0.6.0_rc9-r2.ebuild b/sys-fs/zfs/zfs-0.6.0_rc9-r3.ebuild
index 93f7e7633fee..d771ce78aab7 100644
--- a/sys-fs/zfs/zfs-0.6.0_rc9-r2.ebuild
+++ b/sys-fs/zfs/zfs-0.6.0_rc9-r3.ebuild
@@ -1,6 +1,6 @@
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/zfs-0.6.0_rc9-r2.ebuild,v 1.1 2012/06/25 20:21:55 ryao Exp $
+# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/zfs-0.6.0_rc9-r3.ebuild,v 1.1 2012/06/25 21:03:27 ryao Exp $
EAPI="4"