qubes-linux-kernel/patches.suse/xfs-dmapi-src

Date: Thu, 09 Oct 2008 17:11:31 +1100
From: Donald Douwsma <donaldd@sgi.com>
Subject: DMAPI Source
Patch-mainline: ?
References: bnc#450658
Acked-by: Jan Kara <jack@suse.cz>
---
fs/dmapi/Makefile | 53 +
fs/dmapi/Status | 128 +++
fs/dmapi/dmapi.h | 1086 ++++++++++++++++++++++++++
fs/dmapi/dmapi_attr.c | 93 ++
fs/dmapi/dmapi_bulkattr.c | 170 ++++
fs/dmapi/dmapi_config.c | 117 ++
fs/dmapi/dmapi_dmattr.c | 228 +++++
fs/dmapi/dmapi_event.c | 860 +++++++++++++++++++++
fs/dmapi/dmapi_handle.c | 119 ++
fs/dmapi/dmapi_hole.c | 119 ++
fs/dmapi/dmapi_io.c | 142 +++
fs/dmapi/dmapi_kern.h | 598 ++++++++++++++
fs/dmapi/dmapi_mountinfo.c | 527 +++++++++++++
fs/dmapi/dmapi_port.h | 138 +++
fs/dmapi/dmapi_private.h | 619 +++++++++++++++
fs/dmapi/dmapi_region.c | 91 ++
fs/dmapi/dmapi_register.c | 1638 ++++++++++++++++++++++++++++++++++++++++
fs/dmapi/dmapi_right.c | 1256 ++++++++++++++++++++++++++++++
fs/dmapi/dmapi_session.c | 1824 +++++++++++++++++++++++++++++++++++++++++++++
fs/dmapi/dmapi_sysent.c | 801 +++++++++++++++++++
fs/dmapi/sv.h | 89 ++
21 files changed, 10696 insertions(+)
--- /dev/null
+++ b/fs/dmapi/Makefile
@@ -0,0 +1,53 @@
+#
+# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+# Mountain View, CA 94043, or:
+#
+# http://www.sgi.com
+#
+# For further information regarding this notice, see:
+#
+# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+#
+
+ifeq ($(CONFIG_DMAPI_DEBUG),y)
+ EXTRA_CFLAGS += -DDEBUG
+ EXTRA_CFLAGS += -g
+endif
+
+obj-$(CONFIG_DMAPI) += dmapi.o
+
+dmapi-y += dmapi_sysent.o \
+ dmapi_attr.o \
+ dmapi_config.o \
+ dmapi_bulkattr.o \
+ dmapi_dmattr.o \
+ dmapi_event.o \
+ dmapi_handle.o \
+ dmapi_hole.o \
+ dmapi_io.o \
+ dmapi_mountinfo.o \
+ dmapi_region.o \
+ dmapi_register.o \
+ dmapi_right.o \
+ dmapi_session.o
--- /dev/null
+++ b/fs/dmapi/Status
@@ -0,0 +1,128 @@
+Jan21,04 - dm_get_bulkall is now implemented. roehrich
+
+for linux:
+
+
+68 external interfaces in libdm
+
+ 56 of those interfaces go through to dmi(), the kernel side of DMAPI
+
+
+
+Functions known to work
+----------------------------------------------
+
+dm_create_session
+dm_create_userevent
+dm_destroy_session
+dm_getall_sessions
+dm_getall_tokens
+dm_get_allocinfo
+dm_get_bulkall
+dm_get_bulkattr
+dm_get_config_events
+dm_get_dmattr
+dm_get_eventlist
+dm_get_events
+dm_get_fileattr
+dm_get_region
+dm_handle_free
+dm_init_attrloc
+dm_init_service
+dm_obj_ref_hold
+dm_obj_ref_query
+dm_obj_ref_rele
+dm_path_to_fshandle
+dm_path_to_handle
+dm_punch_hole
+dm_query_session
+dm_read_invis
+dm_remove_dmattr
+dm_respond_event
+dm_send_msg
+dm_set_disp
+dm_set_dmattr
+dm_set_eventlist
+dm_set_fileattr
+dm_set_region
+dm_sync_by_handle
+dm_write_invis
+35
+
+Functions that seem to work (would like more rigorous test case)
+------------------------------------------
+
+dm_pending
+dm_probe_hole - one test case of test_hole.c fails
+dm_request_right
+3
+
+Functions untested but probably work
+----------------------------------------------
+
+dm_find_eventmsg
+dm_handle_cmp
+dm_handle_to_fshandle
+dm_handle_to_ino
+dm_release_right
+5
+
+Functions that do not work
+-----------------------------------------
+
+dm_get_dioinfo - directio not implemented
+1
+
+Functions not supported in SGI DMAPI
+-------------------------------------------------------------
+
+dm_clear_inherit
+dm_create_by_handle
+dm_getall_inherit
+dm_mkdir_by_handle
+dm_set_inherit
+dm_symlink_by_handle
+
+
+
+
+Functions that seem to work (would like more rigorous test case)
+----------------------------------------------------------------
+
+dm_get_config
+dm_downgrade_right
+dm_get_mountinfo
+dm_set_return_on_destroy
+dm_upgrade_right
+
+
+
+Functions that do not work
+-----------------------------------------------------------------
+
+dm_fd_to_handle - Irix getf not implemented on linux
+dm_get_dirattrs - null pointer reference
+dm_handle_to_path
+dm_getall_dmattr - needs a copy_from_user in place of useracc
+
+
+Functions that are untested, but probably work
+-----------------------------------------------------------------
+
+dm_getall_disp
+dm_handle_hash
+dm_handle_is_valid
+dm_handle_to_fsid
+dm_handle_to_igen
+dm_make_fshandle
+dm_make_handle
+dm_move_event
+dm_query_right
+
+
+
+Other things not working
+----------------------------------
+
+- read/write events for memory-mapped I/O?
+
--- /dev/null
+++ b/fs/dmapi/dmapi.h
@@ -0,0 +1,1086 @@
+/*
+ * Copyright (c) 1995-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#ifndef __DMAPI_H__
+#define __DMAPI_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef __KERNEL__
+#include <sys/types.h>
+#endif
+#include <linux/types.h>
+
+#ifndef __user
+#define __user
+#endif
+
+/**************************************************************************
+ * *
+ * The SGI implementation of DMAPI is based upon the X/Open document *
+ * Systems Management: Data Storage Management (XDSM) API *
+ * dated February 1997. Not all DMAPI functions and structure fields *
+ * have been implemented. Most importantly, the DMAPI functions *
+ * dm_request_right, dm_release_right, dm_query_right, dm_upgrade_right *
+ * and dm_downgrade_right do not work as described in the specification. *
+ * *
+ * The XFS filesystem currently does not allow its locking mechanisms to *
+ * be externally accessed from user space. While the above-mentioned *
+ * dm_xxx_right functions exist and can be called by applications, they *
+ * always return successfully without actually obtaining any locks *
+ * within the filesystem. *
+ * *
+ * Applications which do not need full rights support and which only *
+ * make dm_xxx_right calls in order to satisfy the input requirements of *
+ * other DMAPI calls should be able to use these routines to avoid *
+ * having to implement special-case code for SGI platforms. Applications *
+ * which truly need the capabilities of a full implementation of rights *
+ * will unfortunately have to come up with alternate software solutions *
+ * until such time as rights can be completely implemented. *
+ * *
+ * Functions and structure fields defined within this file which are not *
+ * supported in the SGI implementation of DMAPI are indicated by comments *
+ * following their definitions such as "not supported", or "not *
+ * completely supported". Any function or field not so marked may be *
+ * assumed to work exactly according to the spec. *
+ * *
+ **************************************************************************/
+
+
+
+/* The first portion of this file contains defines and typedefs that are
+ DMAPI implementation-dependent, and could be different on other platforms.
+*/
+
+typedef __s64 dm_attrloc_t;
+typedef unsigned int dm_boolean_t;
+typedef __u64 dm_eventset_t;
+typedef __u64 dm_fsid_t;
+typedef __u64 dm_ino_t;
+typedef __u32 dm_igen_t;
+typedef __s64 dm_off_t;
+typedef unsigned int dm_sequence_t;
+typedef int dm_sessid_t;
+typedef __u64 dm_size_t;
+typedef __s64 dm_ssize_t;
+typedef int dm_token_t;
+
+/* XXX dev_t, mode_t, and nlink_t are not the same size in kernel space
+ and user space. This affects the field offsets for dm_stat_t.
+ The following solution is temporary.
+
+ user space sizes: dev_t=8 mode_t=4 nlink_t=4
+ kernel space : dev_t=2 mode_t=2 nlink_t=2
+
+*/
+typedef __s64 dm_dev_t;
+typedef int dm_mode_t;
+typedef int dm_nlink_t;
+
+
+#define DM_REGION_NOEVENT 0x0
+#define DM_REGION_READ 0x1
+#define DM_REGION_WRITE 0x2
+#define DM_REGION_TRUNCATE 0x4
+
+/* Values for the mask argument used with dm_get_fileattr, dm_get_bulkattr,
+ dm_get_dirattrs, and dm_set_fileattr.
+*/
+
+#define DM_AT_MODE 0x0001
+#define DM_AT_UID 0x0002
+#define DM_AT_GID 0x0004
+#define DM_AT_ATIME 0x0008
+#define DM_AT_MTIME 0x0010
+#define DM_AT_CTIME 0x0020
+#define DM_AT_SIZE 0x0040
+#define DM_AT_DTIME 0x0080
+#define DM_AT_HANDLE 0x0100
+#define DM_AT_EMASK 0x0200
+#define DM_AT_PMANR 0x0400
+#define DM_AT_PATTR 0x0800
+#define DM_AT_STAT 0x1000
+#define DM_AT_CFLAG 0x2000
+
+#define DM_EV_WAIT 0x1 /* used in dm_get_events() */
+
+#define DM_MOUNT_RDONLY 0x1 /* me_mode field in dm_mount_event_t */
+
+#define DM_RR_WAIT 0x1
+
+#define DM_UNMOUNT_FORCE 0x1 /* ne_mode field in dm_namesp_event_t */
+
+#define DM_WRITE_SYNC 0x1 /* used in dm_write_invis() */
+
+#define DM_SESSION_INFO_LEN 256
+#define DM_NO_SESSION 0
+#define DM_TRUE 1
+#define DM_FALSE 0
+#define DM_INVALID_TOKEN 0
+#define DM_NO_TOKEN (-1)
+#define DM_INVALID_HANP NULL
+#define DM_INVALID_HLEN 0
+#define DM_GLOBAL_HANP ((void *)(1LL))
+#define DM_GLOBAL_HLEN ((size_t)(1))
+#define DM_VER_STR_CONTENTS "SGI DMAPI (XDSM) API, Release 1.1."
+
+
+/* dm_eventset_t is 64 bits wide, so shift with an unsigned long long. */
+#define DMEV_SET(event_type, event_list) \
+	((event_list) |= (1ULL << (event_type)))
+#define DMEV_CLR(event_type, event_list) \
+	((event_list) &= ~(1ULL << (event_type)))
+#define DMEV_ISSET(event_type, event_list) \
+	(int)(((event_list) & (1ULL << (event_type))) != 0)
+#define DMEV_ZERO(event_list) \
+ (event_list) = 0
+
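A minimal sketch (illustrative only, not part of the patch) of how an application builds an event mask with these macros; the variable name "eventset" is hypothetical:

	dm_eventset_t eventset;

	DMEV_ZERO(eventset);
	DMEV_SET(DM_EVENT_READ, eventset);
	DMEV_SET(DM_EVENT_TRUNCATE, eventset);
	if (DMEV_ISSET(DM_EVENT_READ, eventset)) {
		/* read events are being watched */
	}
	DMEV_CLR(DM_EVENT_TRUNCATE, eventset);	/* stop watching truncates */

A mask built this way is what dm_set_eventlist() and dm_set_disp() consume.
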
+
+typedef struct {
+ int vd_offset; /* offset from start of containing struct */
+ unsigned int vd_length; /* length of data starting at vd_offset */
+} dm_vardata_t;
+
+#define DM_GET_VALUE(p, field, type) \
+ ((type) ((char *)(p) + (p)->field.vd_offset))
+
+#define DM_GET_LEN(p, field) \
+ ((p)->field.vd_length)
+
+#define DM_STEP_TO_NEXT(p, type) \
+ ((type) ((p)->_link ? (char *)(p) + (p)->_link : NULL))
+
+
+
+
+/* The remainder of this include file contains defines, typedefs, and
+ structures which are strictly defined by the DMAPI 2.3 specification.
+
+ (The _link field which appears in several structures is an
+ implementation-specific way to implement DM_STEP_TO_NEXT, and
+ should not be referenced directly by application code.)
+*/
+
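A hypothetical sketch of the intended iteration pattern, assuming "bufp" was filled in by a successful dm_get_events() call:

	dm_eventmsg_t *msg;

	for (msg = (dm_eventmsg_t *)bufp; msg != NULL;
	     msg = DM_STEP_TO_NEXT(msg, dm_eventmsg_t *)) {
		void *data = DM_GET_VALUE(msg, ev_data, void *);
		size_t dlen = DM_GET_LEN(msg, ev_data);
		/* dispatch on msg->ev_type; data/dlen locate the
		   event-specific payload, e.g. a dm_data_event_t */
	}
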
+
+#define DM_ATTR_NAME_SIZE 8
+
+
+struct dm_attrname {
+ unsigned char an_chars[DM_ATTR_NAME_SIZE];
+};
+typedef struct dm_attrname dm_attrname_t;
+
+
+struct dm_attrlist {
+ int _link;
+ dm_attrname_t al_name;
+ dm_vardata_t al_data;
+};
+typedef struct dm_attrlist dm_attrlist_t;
+
+
+typedef enum {
+ DM_CONFIG_INVALID,
+ DM_CONFIG_BULKALL,
+ DM_CONFIG_CREATE_BY_HANDLE,
+ DM_CONFIG_DTIME_OVERLOAD,
+ DM_CONFIG_LEGACY,
+ DM_CONFIG_LOCK_UPGRADE,
+ DM_CONFIG_MAX_ATTR_ON_DESTROY,
+ DM_CONFIG_MAX_ATTRIBUTE_SIZE,
+ DM_CONFIG_MAX_HANDLE_SIZE,
+ DM_CONFIG_MAX_MANAGED_REGIONS,
+ DM_CONFIG_MAX_MESSAGE_DATA,
+ DM_CONFIG_OBJ_REF,
+ DM_CONFIG_PENDING,
+ DM_CONFIG_PERS_ATTRIBUTES,
+ DM_CONFIG_PERS_EVENTS,
+ DM_CONFIG_PERS_INHERIT_ATTRIBS,
+ DM_CONFIG_PERS_MANAGED_REGIONS,
+ DM_CONFIG_PUNCH_HOLE,
+ DM_CONFIG_TOTAL_ATTRIBUTE_SPACE,
+ DM_CONFIG_WILL_RETRY
+} dm_config_t;
+
+
+struct dm_dioinfo { /* non-standard SGI addition */
+ unsigned int d_mem;
+ unsigned int d_miniosz;
+ unsigned int d_maxiosz;
+ dm_boolean_t d_dio_only;
+};
+typedef struct dm_dioinfo dm_dioinfo_t;
+
+
+struct dm_dispinfo {
+ int _link;
+ unsigned int di_pad1; /* reserved; do not reference */
+ dm_vardata_t di_fshandle;
+ dm_eventset_t di_eventset;
+};
+typedef struct dm_dispinfo dm_dispinfo_t;
+
+
+#ifndef HAVE_DM_EVENTTYPE_T
+#define HAVE_DM_EVENTTYPE_T
+typedef enum {
+ DM_EVENT_INVALID = -1,
+ DM_EVENT_CANCEL = 0, /* not supported */
+ DM_EVENT_MOUNT = 1,
+ DM_EVENT_PREUNMOUNT = 2,
+ DM_EVENT_UNMOUNT = 3,
+ DM_EVENT_DEBUT = 4, /* not supported */
+ DM_EVENT_CREATE = 5,
+ DM_EVENT_CLOSE = 6, /* not supported */
+ DM_EVENT_POSTCREATE = 7,
+ DM_EVENT_REMOVE = 8,
+ DM_EVENT_POSTREMOVE = 9,
+ DM_EVENT_RENAME = 10,
+ DM_EVENT_POSTRENAME = 11,
+ DM_EVENT_LINK = 12,
+ DM_EVENT_POSTLINK = 13,
+ DM_EVENT_SYMLINK = 14,
+ DM_EVENT_POSTSYMLINK = 15,
+ DM_EVENT_READ = 16,
+ DM_EVENT_WRITE = 17,
+ DM_EVENT_TRUNCATE = 18,
+ DM_EVENT_ATTRIBUTE = 19,
+ DM_EVENT_DESTROY = 20,
+ DM_EVENT_NOSPACE = 21,
+ DM_EVENT_USER = 22,
+ DM_EVENT_MAX = 23
+} dm_eventtype_t;
+#endif
+
+
+struct dm_eventmsg {
+ int _link;
+ dm_eventtype_t ev_type;
+ dm_token_t ev_token;
+ dm_sequence_t ev_sequence;
+ dm_vardata_t ev_data;
+};
+typedef struct dm_eventmsg dm_eventmsg_t;
+
+
+struct dm_cancel_event { /* not supported */
+ dm_sequence_t ce_sequence;
+ dm_token_t ce_token;
+};
+typedef struct dm_cancel_event dm_cancel_event_t;
+
+
+struct dm_data_event {
+ dm_vardata_t de_handle;
+ dm_off_t de_offset;
+ dm_size_t de_length;
+};
+typedef struct dm_data_event dm_data_event_t;
+
+struct dm_destroy_event {
+ dm_vardata_t ds_handle;
+ dm_attrname_t ds_attrname;
+ dm_vardata_t ds_attrcopy;
+};
+typedef struct dm_destroy_event dm_destroy_event_t;
+
+struct dm_mount_event {
+ dm_mode_t me_mode;
+ dm_vardata_t me_handle1;
+ dm_vardata_t me_handle2;
+ dm_vardata_t me_name1;
+ dm_vardata_t me_name2;
+ dm_vardata_t me_roothandle;
+};
+typedef struct dm_mount_event dm_mount_event_t;
+
+struct dm_namesp_event {
+ dm_mode_t ne_mode;
+ dm_vardata_t ne_handle1;
+ dm_vardata_t ne_handle2;
+ dm_vardata_t ne_name1;
+ dm_vardata_t ne_name2;
+ int ne_retcode;
+};
+typedef struct dm_namesp_event dm_namesp_event_t;
+
+
+typedef enum {
+ DM_EXTENT_INVALID,
+ DM_EXTENT_RES,
+ DM_EXTENT_HOLE
+} dm_extenttype_t;
+
+
+struct dm_extent {
+ dm_extenttype_t ex_type;
+ unsigned int ex_pad1; /* reserved; do not reference */
+ dm_off_t ex_offset;
+ dm_size_t ex_length;
+};
+typedef struct dm_extent dm_extent_t;
+
+struct dm_fileattr {
+ dm_mode_t fa_mode;
+ uid_t fa_uid;
+ gid_t fa_gid;
+ time_t fa_atime;
+ time_t fa_mtime;
+ time_t fa_ctime;
+ time_t fa_dtime;
+ unsigned int fa_pad1; /* reserved; do not reference */
+ dm_off_t fa_size;
+};
+typedef struct dm_fileattr dm_fileattr_t;
+
+
+struct dm_inherit { /* not supported */
+ dm_attrname_t ih_name;
+ dm_mode_t ih_filetype;
+};
+typedef struct dm_inherit dm_inherit_t;
+
+
+typedef enum {
+ DM_MSGTYPE_INVALID,
+ DM_MSGTYPE_SYNC,
+ DM_MSGTYPE_ASYNC
+} dm_msgtype_t;
+
+
+struct dm_region {
+ dm_off_t rg_offset;
+ dm_size_t rg_size;
+ unsigned int rg_flags;
+ unsigned int rg_pad1; /* reserved; do not reference */
+};
+typedef struct dm_region dm_region_t;
+
+
+typedef enum {
+ DM_RESP_INVALID,
+ DM_RESP_CONTINUE,
+ DM_RESP_ABORT,
+ DM_RESP_DONTCARE
+} dm_response_t;
+
+
+#ifndef HAVE_DM_RIGHT_T
+#define HAVE_DM_RIGHT_T
+typedef enum {
+ DM_RIGHT_NULL,
+ DM_RIGHT_SHARED,
+ DM_RIGHT_EXCL
+} dm_right_t;
+#endif
+
+
+struct dm_stat {
+ int _link;
+ dm_vardata_t dt_handle;
+ dm_vardata_t dt_compname;
+ int dt_nevents;
+ dm_eventset_t dt_emask;
+ int dt_pers; /* field not supported */
+ int dt_pmanreg;
+ time_t dt_dtime;
+ unsigned int dt_change; /* field not supported */
+ unsigned int dt_pad1; /* reserved; do not reference */
+ dm_dev_t dt_dev;
+ dm_ino_t dt_ino;
+ dm_mode_t dt_mode;
+ dm_nlink_t dt_nlink;
+ uid_t dt_uid;
+ gid_t dt_gid;
+ dm_dev_t dt_rdev;
+ unsigned int dt_pad2; /* reserved; do not reference */
+ dm_off_t dt_size;
+ time_t dt_atime;
+ time_t dt_mtime;
+ time_t dt_ctime;
+ unsigned int dt_blksize;
+ dm_size_t dt_blocks;
+
+ /* Non-standard filesystem-specific fields. Currently XFS is the only
+ supported filesystem type.
+ */
+
+ __u64 dt_pad3; /* reserved; do not reference */
+ int dt_fstype; /* filesystem index; see sysfs(2) */
+ union {
+ struct {
+ dm_igen_t igen;
+ unsigned int xflags;
+ unsigned int extsize;
+ unsigned int extents;
+ unsigned short aextents;
+ unsigned short dmstate;
+ } sgi_xfs;
+ } fsys_dep;
+};
+typedef struct dm_stat dm_stat_t;
+
+#define dt_xfs_igen fsys_dep.sgi_xfs.igen
+#define dt_xfs_xflags fsys_dep.sgi_xfs.xflags
+#define dt_xfs_extsize fsys_dep.sgi_xfs.extsize
+#define dt_xfs_extents fsys_dep.sgi_xfs.extents
+#define dt_xfs_aextents fsys_dep.sgi_xfs.aextents
+#define dt_xfs_dmstate fsys_dep.sgi_xfs.dmstate
+
+/* Flags for the non-standard dt_xfs_xflags field. */
+
+#define DM_XFLAG_REALTIME 0x00000001
+#define DM_XFLAG_PREALLOC 0x00000002
+#define DM_XFLAG_IMMUTABLE 0x00000008
+#define DM_XFLAG_APPEND 0x00000010
+#define DM_XFLAG_SYNC 0x00000020
+#define DM_XFLAG_NOATIME 0x00000040
+#define DM_XFLAG_NODUMP 0x00000080
+#define DM_XFLAG_HASATTR 0x80000000
+
+
+struct dm_timestruct {
+ time_t dm_tv_sec;
+ int dm_tv_nsec;
+};
+typedef struct dm_timestruct dm_timestruct_t;
+
+
+struct dm_xstat { /* not supported */
+ dm_stat_t dx_statinfo;
+ dm_vardata_t dx_attrdata;
+};
+typedef struct dm_xstat dm_xstat_t;
+
+
+#define MAXDMFSFIDSZ 46
+
+struct dm_fid {
+ __u16 dm_fid_len; /* length of remainder */
+ __u16 dm_fid_pad;
+ __u32 dm_fid_gen; /* generation number */
+ __u64 dm_fid_ino; /* 64 bits inode number */
+};
+typedef struct dm_fid dm_fid_t;
+
+
+struct dm_handle {
+ union {
+ __s64 align; /* force alignment of ha_fid */
+ dm_fsid_t _ha_fsid; /* unique file system identifier */
+ } ha_u;
+ dm_fid_t ha_fid; /* file system specific file ID */
+};
+typedef struct dm_handle dm_handle_t;
+#define ha_fsid ha_u._ha_fsid
+
+#define DM_HSIZE(handle) (((char *) &(handle).ha_fid.dm_fid_pad \
+ - (char *) &(handle)) \
+ + (handle).ha_fid.dm_fid_len)
+
+#define DM_HANDLE_CMP(h1, h2) memcmp(h1, h2, sizeof(dm_handle_t))
+
+#define DM_FSHSIZE sizeof(dm_fsid_t)
+
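For illustration (a sketch; "h" and "other" are hypothetical handles): DM_HSIZE() yields the number of bytes of a handle that are actually significant, which is what the hlen arguments throughout this API expect:

	dm_handle_t h, other;
	size_t hsize;

	/* ... h and other filled in, e.g. via dm_make_handle() ... */
	hsize = DM_HSIZE(h);	/* fsid + fid header + dm_fid_len bytes */
	if (DM_HANDLE_CMP(&h, &other) == 0) {
		/* both handles refer to the same object */
	}
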
+
+/* The following list provides the prototypes for all functions defined in
+ the DMAPI interface.
+*/
+
+extern int
+dm_clear_inherit( /* not supported */
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep);
+
+extern int
+dm_create_by_handle( /* not supported */
+ dm_sessid_t sid,
+ void __user *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen,
+ char __user *cname);
+
+extern int
+dm_create_session(
+ dm_sessid_t oldsid,
+ char __user *sessinfop,
+ dm_sessid_t __user *newsidp);
+
+extern int
+dm_create_userevent(
+ dm_sessid_t sid,
+ size_t msglen,
+ void __user *msgdatap,
+ dm_token_t __user *tokenp);
+
+extern int
+dm_destroy_session(
+ dm_sessid_t sid);
+
+extern int
+dm_downgrade_right( /* not completely supported; see caveat above */
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+extern int
+dm_fd_to_handle(
+ int fd,
+ void **hanpp,
+ size_t *hlenp);
+
+extern int
+dm_find_eventmsg(
+ dm_sessid_t sid,
+ dm_token_t token,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp);
+
+extern int
+dm_get_allocinfo(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t *offp,
+ unsigned int nelem,
+ dm_extent_t *extentp,
+ unsigned int *nelemp);
+
+extern int
+dm_get_bulkall( /* not supported */
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int mask,
+ dm_attrname_t *attrnamep,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_get_bulkattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_get_config(
+ void __user *hanp,
+ size_t hlen,
+ dm_config_t flagname,
+ dm_size_t __user *retvalp);
+
+extern int
+dm_get_config_events(
+ void __user *hanp,
+ size_t hlen,
+ unsigned int nelem,
+ dm_eventset_t __user *eventsetp,
+ unsigned int __user *nelemp);
+
+extern int
+dm_get_dirattrs(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_get_dmattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp);
+
+extern int
+dm_get_eventlist(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int nelem,
+ dm_eventset_t __user *eventsetp,
+ unsigned int __user *nelemp);
+
+extern int
+dm_get_events(
+ dm_sessid_t sid,
+ unsigned int maxmsgs,
+ unsigned int flags,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp);
+
+extern int
+dm_get_fileattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int mask,
+ dm_stat_t __user *statp);
+
+extern int
+dm_get_mountinfo(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp);
+
+extern int
+dm_get_region(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int nelem,
+ dm_region_t __user *regbufp,
+ unsigned int __user *nelemp);
+
+extern int
+dm_getall_disp(
+ dm_sessid_t sid,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp);
+
+extern int
+dm_getall_dmattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp);
+
+extern int
+dm_getall_inherit( /* not supported */
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int nelem,
+ dm_inherit_t __user *inheritbufp,
+ unsigned int __user *nelemp);
+
+extern int
+dm_getall_sessions(
+ unsigned int nelem,
+ dm_sessid_t __user *sidbufp,
+ unsigned int __user *nelemp);
+
+extern int
+dm_getall_tokens(
+ dm_sessid_t sid,
+ unsigned int nelem,
+ dm_token_t __user *tokenbufp,
+ unsigned int __user *nelemp);
+
+extern int
+dm_handle_cmp(
+ void *hanp1,
+ size_t hlen1,
+ void *hanp2,
+ size_t hlen2);
+
+extern void
+dm_handle_free(
+ void *hanp,
+ size_t hlen);
+
+extern u_int
+dm_handle_hash(
+ void *hanp,
+ size_t hlen);
+
+extern dm_boolean_t
+dm_handle_is_valid(
+ void *hanp,
+ size_t hlen);
+
+extern int
+dm_handle_to_fshandle(
+ void *hanp,
+ size_t hlen,
+ void **fshanpp,
+ size_t *fshlenp);
+
+extern int
+dm_handle_to_fsid(
+ void *hanp,
+ size_t hlen,
+ dm_fsid_t *fsidp);
+
+extern int
+dm_handle_to_igen(
+ void *hanp,
+ size_t hlen,
+ dm_igen_t *igenp);
+
+extern int
+dm_handle_to_ino(
+ void *hanp,
+ size_t hlen,
+ dm_ino_t *inop);
+
+extern int
+dm_handle_to_path(
+ void *dirhanp,
+ size_t dirhlen,
+ void *targhanp,
+ size_t targhlen,
+ size_t buflen,
+ char *pathbufp,
+ size_t *rlenp);
+
+extern int
+dm_init_attrloc(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrloc_t __user *locp);
+
+extern int
+dm_init_service(
+ char **versionstrpp);
+
+extern int
+dm_make_handle(
+ dm_fsid_t *fsidp,
+ dm_ino_t *inop,
+ dm_igen_t *igenp,
+ void **hanpp,
+ size_t *hlenp);
+
+extern int
+dm_make_fshandle(
+ dm_fsid_t *fsidp,
+ void **hanpp,
+ size_t *hlenp);
+
+extern int
+dm_mkdir_by_handle( /* not supported */
+ dm_sessid_t sid,
+ void __user *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen,
+ char __user *cname);
+
+extern int
+dm_move_event(
+ dm_sessid_t srcsid,
+ dm_token_t token,
+ dm_sessid_t targetsid,
+ dm_token_t __user *rtokenp);
+
+extern int
+dm_obj_ref_hold(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen);
+
+extern int
+dm_obj_ref_query(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen);
+
+extern int
+dm_obj_ref_rele(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen);
+
+extern int
+dm_path_to_fshandle(
+ char *path,
+ void **hanpp,
+ size_t *hlenp);
+
+extern int
+dm_path_to_handle(
+ char *path,
+ void **hanpp,
+ size_t *hlenp);
+
+extern int
+dm_pending(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_timestruct_t __user *delay);
+
+extern int
+dm_probe_hole(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len,
+ dm_off_t __user *roffp,
+ dm_size_t __user *rlenp);
+
+extern int
+dm_punch_hole(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len);
+
+extern int
+dm_query_right( /* not completely supported; see caveat above */
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_right_t __user *rightp);
+
+extern int
+dm_query_session(
+ dm_sessid_t sid,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp);
+
+extern dm_ssize_t
+dm_read_invis(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp);
+
+extern int
+dm_release_right( /* not completely supported; see caveat above */
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+extern int
+dm_remove_dmattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ int setdtime,
+ dm_attrname_t __user *attrnamep);
+
+extern int
+dm_request_right( /* not completely supported; see caveat above */
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int flags,
+ dm_right_t right);
+
+extern int
+dm_respond_event(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_response_t response,
+ int reterror,
+ size_t buflen,
+ void __user *respbufp);
+
+extern int
+dm_send_msg(
+ dm_sessid_t targetsid,
+ dm_msgtype_t msgtype,
+ size_t buflen,
+ void __user *bufp);
+
+extern int
+dm_set_disp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t __user *eventsetp,
+ unsigned int maxevent);
+
+extern int
+dm_set_dmattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep,
+ int setdtime,
+ size_t buflen,
+ void __user *bufp);
+
+extern int
+dm_set_eventlist(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t __user *eventsetp,
+ unsigned int maxevent);
+
+extern int
+dm_set_fileattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int mask,
+ dm_fileattr_t __user *attrp);
+
+extern int
+dm_set_inherit( /* not supported */
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep,
+ mode_t mode);
+
+extern int
+dm_set_region(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int nelem,
+ dm_region_t __user *regbufp,
+ dm_boolean_t __user *exactflagp);
+
+extern int
+dm_set_return_on_destroy(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep,
+ dm_boolean_t enable);
+
+extern int
+dm_symlink_by_handle( /* not supported */
+ dm_sessid_t sid,
+ void __user *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen,
+ char __user *cname,
+ char __user *path);
+
+extern int
+dm_sync_by_handle(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+extern int
+dm_upgrade_right( /* not completely supported; see caveat above */
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+extern dm_ssize_t
+dm_write_invis(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ int flags,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp);
+
+/* Non-standard SGI additions to the DMAPI interface. */
+
+int
+dm_open_by_handle(
+ void __user *hanp,
+ size_t hlen,
+ int mode);
+
+extern int
+dm_get_dioinfo(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_dioinfo_t __user *diop);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DMAPI_H__ */
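To show how the prototypes above fit together, a hypothetical user-space skeleton (error handling elided; the session-info string is arbitrary):

	char		*version;
	dm_sessid_t	sid;

	dm_init_service(&version);	/* version -> DM_VER_STR_CONTENTS */
	dm_create_session(DM_NO_SESSION, "example-hsm", &sid);
	/* declare interest with dm_set_disp()/dm_set_eventlist(), then
	   loop on dm_get_events() and answer synchronous events with
	   dm_respond_event(sid, token, DM_RESP_CONTINUE, 0, 0, NULL) */
	dm_destroy_session(sid);
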
--- /dev/null
+++ b/fs/dmapi/dmapi_attr.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+
+/* Retrieve attributes for a single file, directory or symlink. */
+
+int
+dm_get_fileattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_stat_t __user *statp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_fileattr(tdp->td_ip, tdp->td_right,
+ mask, statp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+/* Set one or more file attributes of a file, directory, or symlink. */
+
+int
+dm_set_fileattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_fileattr_t __user *attrp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->set_fileattr(tdp->td_ip, tdp->td_right,
+ mask, attrp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
--- /dev/null
+++ b/fs/dmapi/dmapi_bulkattr.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+
+int
+dm_init_attrloc(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrloc_t __user *locp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS|DM_TDT_DIR,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->init_attrloc(tdp->td_ip, tdp->td_right, locp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+/*
+ * Retrieves both standard and DM specific file attributes for the file
+ * system indicated by the handle. (The FS has to be mounted).
+ * Syscall returns 1 to indicate SUCCESS and more information is available.
+ * -1 is returned on error, and errno will be set appropriately.
+ * 0 is returned upon successful completion.
+ */
+
+int
+dm_get_bulkattr_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrloc_t __user *locp,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_bulkattr_rvp(tdp->td_ip, tdp->td_right,
+ mask, locp, buflen, bufp, rlenp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
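From user space this convention becomes an iteration loop; a sketch (sid, hanp, hlen, token, bufp and buflen assumed already established):

	dm_attrloc_t loc;
	size_t rlen;
	int ret;

	if (dm_init_attrloc(sid, hanp, hlen, token, &loc) != 0)
		return;
	do {
		ret = dm_get_bulkattr(sid, hanp, hlen, token, DM_AT_STAT,
				      &loc, buflen, bufp, &rlen);
		/* walk the dm_stat_t records in bufp with DM_STEP_TO_NEXT() */
	} while (ret == 1);	/* 1: more to fetch, 0: done, -1: error */
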
+
+/*
+ * Retrieves attributes of directory entries given a handle to that
+ * directory. Iterative.
+ * Syscall returns 1 to indicate SUCCESS and more information is available.
+ * -1 is returned on error, and errno will be set appropriately.
+ * 0 is returned upon successful completion.
+ */
+
+int
+dm_get_dirattrs_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrloc_t __user *locp,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_DIR,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_dirattrs_rvp(tdp->td_ip, tdp->td_right,
+ mask, locp, buflen, bufp, rlenp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_get_bulkall_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrname_t __user *attrnamep,
+ dm_attrloc_t __user *locp,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_bulkall_rvp(tdp->td_ip, tdp->td_right,
+ mask, attrnamep, locp, buflen, bufp, rlenp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
--- /dev/null
+++ b/fs/dmapi/dmapi_config.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <asm/uaccess.h>
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+int
+dm_get_config(
+ void __user *hanp,
+ size_t hlen,
+ dm_config_t flagname,
+ dm_size_t __user *retvalp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ dm_size_t retval;
+ int system = 1;
+ int error;
+
+ /* Trap and process configuration parameters which are system-wide. */
+
+ switch (flagname) {
+ case DM_CONFIG_LEGACY:
+ case DM_CONFIG_PENDING:
+ case DM_CONFIG_OBJ_REF:
+ retval = DM_TRUE;
+ break;
+ case DM_CONFIG_MAX_MESSAGE_DATA:
+ retval = DM_MAX_MSG_DATA;
+ break;
+ default:
+ system = 0;
+ break;
+ }
+ if (system) {
+ if (copy_to_user(retvalp, &retval, sizeof(retval)))
+ return(-EFAULT);
+ return(0);
+ }
+
+ /* Must be filesystem-specific. Convert the handle into an inode. */
+
+ if ((error = dm_get_config_tdp(hanp, hlen, &tdp)) != 0)
+ return(error);
+
+ /* Now call the filesystem-specific routine to determine the
+ value of the configuration option for that filesystem.
+ */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_config(tdp->td_ip, tdp->td_right,
+ flagname, retvalp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
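For the system-wide cases handled above, a caller sketch (hanp/hlen may refer to any handle; the variable name is hypothetical):

	dm_size_t maxmsg;

	if (dm_get_config(hanp, hlen, DM_CONFIG_MAX_MESSAGE_DATA,
			  &maxmsg) == 0) {
		/* maxmsg now holds DM_MAX_MSG_DATA, the largest message
		   payload this implementation will accept */
	}
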
+
+int
+dm_get_config_events(
+ void __user *hanp,
+ size_t hlen,
+ u_int nelem,
+ dm_eventset_t __user *eventsetp,
+ u_int __user *nelemp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ /* Convert the handle into an inode. */
+
+ if ((error = dm_get_config_tdp(hanp, hlen, &tdp)) != 0)
+ return(error);
+
+ /* Now call the filesystem-specific routine to determine the
+ events supported by that filesystem.
+ */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_config_events(tdp->td_ip, tdp->td_right,
+ nelem, eventsetp, nelemp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
--- /dev/null
+++ b/fs/dmapi/dmapi_dmattr.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+
+int
+dm_clear_inherit(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->clear_inherit(tdp->td_ip, tdp->td_right,
+ attrnamep);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_get_dmattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_dmattr(tdp->td_ip, tdp->td_right,
+ attrnamep, buflen, bufp, rlenp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_getall_dmattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->getall_dmattr(tdp->td_ip, tdp->td_right,
+ buflen, bufp, rlenp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_getall_inherit(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int nelem,
+ dm_inherit_t __user *inheritbufp,
+ u_int __user *nelemp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->getall_inherit(tdp->td_ip, tdp->td_right,
+ nelem, inheritbufp, nelemp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_remove_dmattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ int setdtime,
+ dm_attrname_t __user *attrnamep)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->remove_dmattr(tdp->td_ip, tdp->td_right,
+ setdtime, attrnamep);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_set_dmattr(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep,
+ int setdtime,
+ size_t buflen,
+ void __user *bufp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->set_dmattr(tdp->td_ip, tdp->td_right,
+ attrnamep, setdtime, buflen, bufp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_set_inherit(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep,
+ mode_t mode)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->set_inherit(tdp->td_ip, tdp->td_right,
+ attrnamep, mode);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
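A round-trip sketch of the two most common calls above (sid, hanp, hlen and token assumed established; the attribute name "hsmstate" is hypothetical):

	dm_attrname_t name;
	char value[] = "migrated";
	char buf[64];
	size_t rlen;

	memset(&name, 0, sizeof(name));
	memcpy(name.an_chars, "hsmstate", DM_ATTR_NAME_SIZE);
	dm_set_dmattr(sid, hanp, hlen, token, &name,
		      0 /* don't update dtime */, sizeof(value), value);
	if (dm_get_dmattr(sid, hanp, hlen, token, &name,
			  sizeof(buf), buf, &rlen) == 0) {
		/* buf holds "migrated", rlen == sizeof(value) */
	}
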
--- /dev/null
+++ b/fs/dmapi/dmapi_event.c
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include <asm/uaccess.h>
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+/* The "rights" portion of the DMAPI spec is not currently implemented. A
+ framework for rights is provided in the code, but turns out to be a no-op
+ in practice. The following comments are a brain dump to serve as input to
+ the poor soul that eventually has to get DMAPI rights working in IRIX.
+
+ A DMAPI right is similar but not identical to the mrlock_t mechanism
+ already used within the kernel. The similarities are that it is a
+ sleeping lock, and that a multiple-reader, single-writer protocol is used.
+ How locks are obtained and dropped are different however. With a mrlock_t,
+ a thread grabs the lock, does some stuff, then drops the lock, and all other
+ threads block in the meantime (assuming a write lock). There is a one-to-
+ one relationship between the lock and the thread which obtained the lock.
+ Not so with DMAPI right locks. A DMAPI lock is associated with a particular
+ session/token/hanp/hlen quad; since there is a dm_tokdata_t structure for
+ each such quad, you can think of it as a one-to-one relationship between the
+ lock and a dm_tokdata_t. Any application thread which presents the correct
+ quad is entitled to grab or release the lock, or to use the rights
+ associated with that lock. The thread that grabs the lock does not have to
+ be the one to use the lock, nor does it have to be the thread which drops
+ the lock. The lock can be held for very long periods of time, even across
+ multiple system calls by multiple application threads. The idea is that a
+ coordinated group of DMAPI application threads can grab the lock, issue a
+ series of inode accesses and/or updates, then drop the lock, and be assured
+ that no other thread in the system could be modifying the inode at the same
+ time. The kernel is expected to blindly trust that the application will
+ not forget to unlock inodes it has locked, and will not deadlock itself
+ against the kernel.
+
+ There are two types of DMAPI rights, file object (inode) and filesystem
+ object (superblock?). An inode right is the equivalent of the combination
+ of both the XFS ilock and iolock; if held exclusively, no data or metadata
+ within the file can be changed by non-lock-holding threads. The filesystem
+ object lock is a little fuzzier; I think that if it is held, things like
+ unmounts can be blocked, plus there is an event mask associated with the
+ filesystem which can't be updated without the lock. (By the way, that
+ event mask is supposed to be persistent in the superblock; add that to
+ your worklist :-)
+
+ All events generated by XFS currently arrive with no rights, i.e.
+ DM_RIGHT_NULL, and return to the filesystem with no rights. It would be
+ smart to leave it this way if possible, because it otherwise becomes more
+ likely that an application thread will deadlock against the kernel if the
+ one responsible for calling dm_get_events() happens to touch a file which
+ was locked at the time the event was queued. Since the thread is blocked,
+ it can't read the event in order to find and drop the lock. Catch-22. If
+ you do have events that arrive with non-null rights, then dm_enqueue() needs
+ to have code added for synchronous events which atomically switches the
+ right from being a thread-based right to a dm_tokdata_t-based right without
+ allowing the lock to drop in between. You will probably have to add a new
+ dm_fsys_vector entry point to do this. The lock can't be lost during the
+ switch, or other threads might change the inode or superblock in between.
+ Likewise, if you need to return to the filesystem holding a right, then
+ you need a DMAPI-to-thread atomic switch to occur, most likely in
+ dm_change_right(). Again, the lock must not be lost during the switch; the
+ DMAPI spec spends a couple of pages stressing this. Another dm_fsys_vector
+ entry point is probably the answer.
+
+ There are several assumptions implied in the current layout of the code.
+ First of all, if an event returns to the filesystem with a return value of
+ zero, then the filesystem can assume that any locks (rights) held at the
+ start of the event are still in effect at the end of the event. (Note that
+ the application could have temporarily dropped and reacquired the right
+ while the event was outstanding, however). If the event returns to the
+ filesystem with an errno, then the filesystem must assume that it has lost
+ any and all rights associated with any of the objects in the event. This
+ was done for a couple of reasons. First of all, since an errno is being
+ returned, most likely the filesystem is going to immediately drop all the
+ locks anyway. If the DMAPI code was required to unconditionally reobtain
+ all locks before returning to the filesystem, then dm_pending() wouldn't
+ work for NFS server threads because the process would block indefinitely
+ trying to get its thread-based rights back, because the DMAPI-rights
+ associated with the dm_tokdata_t in the outstanding event would prevent
+ the rights from being obtained. That would be a bad thing. We wouldn't
+ be able to let users Cntl-C out of read/write/truncate events either.
+
+ If a case should ever surface where the thread has lost its rights even
+ though it has a zero return status, or where the thread has rights even
+ though it is returning with an errno, then this logic will have to be
+ reworked. This could be done by changing the 'right' parameters on all
+ the event calls to (dm_right_t *), so that they could serve both as IN
+ and OUT parameters.
+
+ Some events such as DM_EVENT_DESTROY arrive without holding an inode
+ reference; if you don't have an inode reference, you can't have a right
+ on the file.
+
+ One more quirk. The DM_EVENT_UNMOUNT event is defined to be synchronous
+ when its behavior is asynchronous. If an unmount event arrives with
+ rights, the event should return with the same rights and should NOT leave
+ any rights in the dm_tokdata_t where the application could use them.
+*/
+
+
+#define GETNEXTOFF(vdat) ((vdat).vd_offset + (vdat).vd_length)
+#define HANDLE_SIZE(tdp) \
+ ((tdp)->td_type & DM_TDT_VFS ? DM_FSHSIZE : DM_HSIZE((tdp)->td_handle))
+
+
+/* Given an inode pointer in a filesystem known to support DMAPI,
+ build a tdp structure for the corresponding inode.
+*/
+
+static dm_tokdata_t *
+dm_ip_data(
+ struct inode *ip,
+ dm_right_t right,
+ int referenced) /* != 0, caller holds inode reference */
+{
+ int error;
+ dm_tokdata_t *tdp;
+ int filetype;
+
+ tdp = kmem_cache_alloc(dm_tokdata_cachep, GFP_KERNEL);
+ if (tdp == NULL) {
+ printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__);
+ return NULL;
+ }
+
+ tdp->td_next = NULL;
+ tdp->td_tevp = NULL;
+ tdp->td_app_ref = 0;
+ tdp->td_orig_right = right;
+ tdp->td_right = right;
+ tdp->td_flags = DM_TDF_ORIG;
+ if (referenced) {
+ tdp->td_flags |= DM_TDF_EVTREF;
+ }
+
+ filetype = ip->i_mode & S_IFMT;
+ if (filetype == S_IFREG) {
+ tdp->td_type = DM_TDT_REG;
+ } else if (filetype == S_IFDIR) {
+ tdp->td_type = DM_TDT_DIR;
+ } else if (filetype == S_IFLNK) {
+ tdp->td_type = DM_TDT_LNK;
+ } else {
+ tdp->td_type = DM_TDT_OTH;
+ }
+
+ if (referenced) {
+ tdp->td_ip = ip;
+ } else {
+ tdp->td_ip = NULL;
+ }
+ tdp->td_vcount = 0;
+
+ if ((error = dm_ip_to_handle(ip, &tdp->td_handle)) != 0) {
+ panic("dm_ip_data: dm_ip_to_handle failed for ip %p in "
+ "a DMAPI filesystem, errno %d\n", ip, error);
+ }
+
+ return(tdp);
+}
+
+
+/* Given a sb pointer to a filesystem known to support DMAPI, build a tdp
+ structure for that sb.
+*/
+static dm_tokdata_t *
+dm_sb_data(
+ struct super_block *sb,
+ struct inode *ip, /* will be NULL for DM_EVENT_UNMOUNT */
+ dm_right_t right)
+{
+ dm_tokdata_t *tdp;
+ struct filesystem_dmapi_operations *dops;
+ dm_fsid_t fsid;
+
+ dops = dm_fsys_ops(sb);
+ ASSERT(dops);
+ dops->get_fsid(sb, &fsid);
+
+ tdp = kmem_cache_alloc(dm_tokdata_cachep, GFP_KERNEL);
+ if (tdp == NULL) {
+ printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__);
+ return NULL;
+ }
+
+ tdp->td_next = NULL;
+ tdp->td_tevp = NULL;
+ tdp->td_app_ref = 0;
+ tdp->td_orig_right = right;
+ tdp->td_right = right;
+ tdp->td_flags = DM_TDF_ORIG;
+ if (ip) {
+ tdp->td_flags |= DM_TDF_EVTREF;
+ }
+ tdp->td_type = DM_TDT_VFS;
+ tdp->td_ip = ip;
+ tdp->td_vcount = 0;
+
+ memcpy(&tdp->td_handle.ha_fsid, &fsid, sizeof(fsid));
+ memset((char *)&tdp->td_handle.ha_fsid + sizeof(fsid), 0,
+ sizeof(tdp->td_handle) - sizeof(fsid));
+
+ return(tdp);
+}
+
+
+/* Link a tdp structure into the tevp. */
+
+static void
+dm_add_handle_to_event(
+ dm_tokevent_t *tevp,
+ dm_tokdata_t *tdp)
+{
+ tdp->td_next = tevp->te_tdp;
+ tevp->te_tdp = tdp;
+ tdp->td_tevp = tevp;
+}
+
+
+/* Generate the given data event for the inode, and wait for a reply. The
+ caller must guarantee that the inode's reference count is greater than zero
+ so that the filesystem can't disappear while the request is outstanding.
+*/
+
+int
+dm_send_data_event(
+ dm_eventtype_t event,
+ struct inode *ip,
+ dm_right_t vp_right, /* current right for ip */
+ dm_off_t offset,
+ size_t length,
+ int flags) /* 0 or DM_FLAGS_NDELAY */
+{
+ dm_data_event_t *datap;
+ dm_tokevent_t *tevp;
+ dm_tokdata_t *tdp;
+ int error;
+
+ tdp = dm_ip_data(ip, vp_right, /* reference held */ 1);
+ if (tdp == NULL)
+ return -ENOMEM;
+
+ /* Calculate the size of the event in bytes, create an event structure
+ for it, and insert the file's handle into the event.
+ */
+
+ tevp = dm_evt_create_tevp(event, HANDLE_SIZE(tdp), (void **)&datap);
+ if (tevp == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ return(-ENOMEM);
+ }
+ dm_add_handle_to_event(tevp, tdp);
+
+ /* Now fill in all the dm_data_event_t fields. */
+
+ datap->de_handle.vd_offset = sizeof(*datap);
+ datap->de_handle.vd_length = HANDLE_SIZE(tdp);
+ memcpy((char *)datap + datap->de_handle.vd_offset, &tdp->td_handle,
+ datap->de_handle.vd_length);
+ datap->de_offset = offset;
+ datap->de_length = length;
+
+ /* Queue the message and wait for the reply. */
+
+ error = dm_enqueue_normal_event(ip->i_sb, &tevp, flags);
+
+ /* If no errors occurred, we must leave with the same rights we had
+ upon entry. If errors occurred, we must leave with no rights.
+ */
+
+ dm_evt_rele_tevp(tevp, error);
+
+ return(error);
+}
+
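A hypothetical call site, to show the intended use (the real hooks live in the filesystem, e.g. the XFS read path; "nonblock" stands for the file's O_NONBLOCK state):

	/* A read overlaps a managed region: block until the DM
	   application responds, or fail immediately if non-blocking. */
	error = dm_send_data_event(DM_EVENT_READ, inode, DM_RIGHT_NULL,
				   offset, count,
				   nonblock ? DM_FLAGS_NDELAY : 0);
	if (error)
		return error;	/* event aborted or could not be queued */
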
+
+/* Generate the destroy event for the inode and wait until the request has been
+ queued. The caller does not hold an inode reference or a right on the inode,
+ but it must otherwise lock down the inode such that the filesystem can't
+ disappear while the request is waiting to be queued. While waiting to be
+ queued, the inode must not be referenceable either by path or by a call
+ to dm_handle_to_ip().
+*/
+
+int
+dm_send_destroy_event(
+ struct inode *ip,
+ dm_right_t vp_right) /* always DM_RIGHT_NULL */
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokevent_t *tevp;
+ dm_tokdata_t *tdp;
+ dm_destroy_event_t *destp;
+ dm_attrname_t attrname;
+ char *value;
+ int value_len;
+ int error;
+
+ tdp = dm_ip_data(ip, vp_right, /* no reference held */ 0);
+ if (tdp == NULL)
+ return -ENOMEM;
+
+	if ((error = dm_waitfor_destroy_attrname(ip->i_sb, &attrname)) != 0) {
+		/* don't leak the tdp allocated above */
+		kmem_cache_free(dm_tokdata_cachep, tdp);
+		return(error);
+	}
+
+ /* If a return-on-destroy attribute name exists for this filesystem,
+ see if the object being deleted has this attribute. If the object
+ doesn't have the attribute or if we encounter an error, then send
+ the event without the attribute.
+ */
+
+ value_len = -1; /* because zero is a valid attribute length */
+ if (attrname.an_chars[0] != '\0') {
+ fsys_vector = dm_fsys_vector(ip);
+ error = fsys_vector->get_destroy_dmattr(ip, vp_right, &attrname,
+ &value, &value_len);
+		if (error && error != -ENODATA) {
+			/* don't leak the tdp allocated above */
+			kmem_cache_free(dm_tokdata_cachep, tdp);
+			return error;
+		}
+ }
+
+ /* Now that we know the size of the attribute value, if any, calculate
+ the size of the event in bytes, create an event structure for it,
+ and insert the handle into the event.
+ */
+
+ tevp = dm_evt_create_tevp(DM_EVENT_DESTROY,
+ HANDLE_SIZE(tdp) + (value_len >= 0 ? value_len : 0),
+ (void **)&destp);
+ if (tevp == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ if (value_len > 0)
+ kfree(value);
+ return(-ENOMEM);
+ }
+ dm_add_handle_to_event(tevp, tdp);
+
+ /* Now fill in all the dm_destroy_event_t fields. */
+
+ destp->ds_handle.vd_offset = sizeof(*destp);
+ destp->ds_handle.vd_length = HANDLE_SIZE(tdp);
+ memcpy((char *)destp + destp->ds_handle.vd_offset, &tdp->td_handle,
+ destp->ds_handle.vd_length);
+ if (value_len >= 0) {
+ destp->ds_attrname = attrname;
+ destp->ds_attrcopy.vd_length = value_len;
+ if (value_len == 0) {
+ destp->ds_attrcopy.vd_offset = 0;
+ } else {
+ destp->ds_attrcopy.vd_offset = GETNEXTOFF(destp->ds_handle);
+ memcpy((char *)destp + destp->ds_attrcopy.vd_offset, value,
+ value_len);
+ kfree(value);
+ }
+ }
+
+ /* Queue the message asynchronously. */
+
+ error = dm_enqueue_normal_event(ip->i_sb, &tevp, 0);
+
+ /* Since we had no rights upon entry, we have none to reobtain before
+ leaving.
+ */
+
+ dm_evt_rele_tevp(tevp, 1);
+
+ return(error);
+}
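+
+/* For reference, the destroy event is packed as one contiguous buffer,
+   [dm_destroy_event_t][handle][attribute value], with the attribute section
+   present only when a return-on-destroy attribute name has been configured
+   for the filesystem and the object carries a value for it.
+*/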
+
+
+/* The dm_mount_event_t event is sent in turn to all sessions that have asked
+ for it until one either rejects it or accepts it. The filesystem is not
+ going anywhere because the mount is blocked until the event is answered.
+*/
+
+int
+dm_send_mount_event(
+ struct super_block *sb, /* filesystem being mounted */
+ dm_right_t vfsp_right,
+ struct inode *ip, /* mounted on directory */
+ dm_right_t vp_right,
+ struct inode *rootip,
+ dm_right_t rootvp_right,
+ char *name1, /* mount path */
+ char *name2) /* filesystem device name */
+{
+ int error;
+ dm_tokevent_t *tevp = NULL;
+ dm_tokdata_t *tdp1 = NULL; /* filesystem handle for event */
+ dm_tokdata_t *tdp2 = NULL; /* file handle for mounted-on dir. */
+ dm_tokdata_t *tdp3 = NULL; /* file handle for root inode */
+ dm_mount_event_t *mp;
+ size_t nextoff;
+
+ /* Convert the sb to a filesystem handle, and ip and rootip into
+ file handles. ip (the mounted-on directory) may not have a handle
+ if it is a different filesystem type which does not support DMAPI.
+ */
+
+ tdp1 = dm_sb_data(sb, rootip, vfsp_right);
+ if (tdp1 == NULL)
+ goto out_nomem;
+
+ if ((ip == NULL) || dm_check_dmapi_ip(ip)) {
+ ip = NULL; /* we are mounting on non-DMAPI FS */
+ } else {
+ tdp2 = dm_ip_data(ip, vp_right, /* reference held */ 1);
+ if (tdp2 == NULL)
+ goto out_nomem;
+ }
+
+ tdp3 = dm_ip_data(rootip, rootvp_right, /* reference held */ 1);
+ if (tdp3 == NULL)
+ goto out_nomem;
+
+ /* Calculate the size of the event in bytes, create an event structure
+ for it, and insert the handles into the event.
+ */
+
+ tevp = dm_evt_create_tevp(DM_EVENT_MOUNT,
+ HANDLE_SIZE(tdp1) + (ip ? HANDLE_SIZE(tdp2) : 0) +
+ HANDLE_SIZE(tdp3) + strlen(name1) + 1 +
+ strlen(name2) + 1, (void **)&mp);
+ if (tevp == NULL)
+ goto out_nomem;
+
+ dm_add_handle_to_event(tevp, tdp1);
+ if (ip)
+ dm_add_handle_to_event(tevp, tdp2);
+ dm_add_handle_to_event(tevp, tdp3);
+
+ /* Now fill in all the dm_mount_event_t fields. */
+
+ mp->me_handle1.vd_offset = sizeof(*mp);
+ mp->me_handle1.vd_length = HANDLE_SIZE(tdp1);
+ memcpy((char *) mp + mp->me_handle1.vd_offset, &tdp1->td_handle,
+ mp->me_handle1.vd_length);
+ nextoff = GETNEXTOFF(mp->me_handle1);
+
+ if (ip) {
+ mp->me_handle2.vd_offset = nextoff;
+ mp->me_handle2.vd_length = HANDLE_SIZE(tdp2);
+ memcpy((char *)mp + mp->me_handle2.vd_offset, &tdp2->td_handle,
+ mp->me_handle2.vd_length);
+ nextoff = GETNEXTOFF(mp->me_handle2);
+ }
+
+ mp->me_name1.vd_offset = nextoff;
+ mp->me_name1.vd_length = strlen(name1) + 1;
+ memcpy((char *)mp + mp->me_name1.vd_offset, name1, mp->me_name1.vd_length);
+ nextoff = GETNEXTOFF(mp->me_name1);
+
+ mp->me_name2.vd_offset = nextoff;
+ mp->me_name2.vd_length = strlen(name2) + 1;
+ memcpy((char *)mp + mp->me_name2.vd_offset, name2, mp->me_name2.vd_length);
+ nextoff = GETNEXTOFF(mp->me_name2);
+
+ mp->me_roothandle.vd_offset = nextoff;
+ mp->me_roothandle.vd_length = HANDLE_SIZE(tdp3);
+ memcpy((char *)mp + mp->me_roothandle.vd_offset, &tdp3->td_handle,
+ mp->me_roothandle.vd_length);
+
+ mp->me_mode = (sb->s_flags & MS_RDONLY ? DM_MOUNT_RDONLY : 0);
+
+ /* Queue the message and wait for the reply. */
+
+ error = dm_enqueue_mount_event(sb, tevp);
+
+ /* If no errors occurred, we must leave with the same rights we had
+ upon entry. If errors occurred, we must leave with no rights.
+ */
+
+ dm_evt_rele_tevp(tevp, error);
+
+ return(error);
+
+out_nomem:
+ if (tevp)
+ kfree(tevp);
+ if (tdp1)
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ if (tdp2)
+ kmem_cache_free(dm_tokdata_cachep, tdp2);
+ if (tdp3)
+ kmem_cache_free(dm_tokdata_cachep, tdp3);
+ return -ENOMEM;
+}
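+
+/* For reference, the mount event is packed as one contiguous buffer that the
+   vd_offset/vd_length pairs index into:
+
+	[dm_mount_event_t][handle1][handle2][name1][name2][roothandle]
+
+   where handle2 is omitted when the mounted-on directory is not in a DMAPI
+   filesystem.
+*/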
+
+
+/* Generate a DM_EVENT_UNMOUNT event and wait for a reply. The 'retcode'
+   field indicates whether this is a successful or unsuccessful unmount.
+   If successful, the filesystem is already unmounted, and any pending handle
+   reference to the filesystem will be failed. If the unmount was
+   unsuccessful, then the filesystem will be placed back into full service.
+
+   The DM_EVENT_UNMOUNT event should really be asynchronous, because the
+   application has no control over whether or not the unmount succeeds. (The
+   DMAPI spec nevertheless defined it as synchronous, because asynchronous
+   events aren't always guaranteed to be delivered.)
+
+ Since the filesystem is already unmounted in the successful case, the
+ DM_EVENT_UNMOUNT event can't make available any inode to be used in
+ subsequent sid/hanp/hlen/token calls by the application. The event will
+ hang around until the application does a DM_RESP_CONTINUE, but the handle
+ within the event is unusable by the application.
+*/
+
+void
+dm_send_unmount_event(
+ struct super_block *sb,
+ struct inode *ip, /* NULL if unmount successful */
+ dm_right_t vfsp_right,
+ mode_t mode,
+ int retcode, /* errno, if unmount failed */
+ int flags)
+{
+ dm_namesp_event_t *np;
+ dm_tokevent_t *tevp;
+ dm_tokdata_t *tdp1;
+
+ /* If the unmount failed, put the filesystem back into full service,
+ allowing blocked handle references to finish. If it succeeded, put
+ the filesystem into the DM_STATE_UNMOUNTED state and fail all
+ blocked DM_NO_TOKEN handle accesses.
+ */
+
+ if (retcode != 0) { /* unmount was unsuccessful */
+ dm_change_fsys_entry(sb, DM_STATE_MOUNTED);
+ } else {
+ dm_change_fsys_entry(sb, DM_STATE_UNMOUNTED);
+ }
+
+ /* If the event wasn't in the filesystem dm_eventset_t, just remove
+ the filesystem from the list of DMAPI filesystems and return.
+ */
+
+ if (flags & DM_FLAGS_UNWANTED) {
+ if (retcode == 0)
+ dm_remove_fsys_entry(sb);
+ return;
+ }
+
+ /* Calculate the size of the event in bytes and allocate zeroed memory
+ for it.
+ */
+
+ tdp1 = dm_sb_data(sb, ip, vfsp_right);
+ if (tdp1 == NULL)
+ return;
+
+ tevp = dm_evt_create_tevp(DM_EVENT_UNMOUNT, HANDLE_SIZE(tdp1),
+ (void **)&np);
+ if (tevp == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ return;
+ }
+
+ dm_add_handle_to_event(tevp, tdp1);
+
+ /* Now copy in all the dm_namesp_event_t specific fields. */
+
+ np->ne_handle1.vd_offset = sizeof(*np);
+ np->ne_handle1.vd_length = HANDLE_SIZE(tdp1);
+ memcpy((char *) np + np->ne_handle1.vd_offset, &tdp1->td_handle,
+ np->ne_handle1.vd_length);
+ np->ne_mode = mode;
+ np->ne_retcode = retcode;
+
+ /* Since DM_EVENT_UNMOUNT is effectively asynchronous, queue the
+ message and ignore any error return for DM_EVENT_UNMOUNT.
+ */
+
+ (void)dm_enqueue_normal_event(sb, &tevp, flags);
+
+ if (retcode == 0)
+ dm_remove_fsys_entry(sb);
+
+ dm_evt_rele_tevp(tevp, 0);
+}
+
+
+/* Generate the given namespace event and wait for a reply (if synchronous) or
+ until the event has been queued (asynchronous). The caller must guarantee
+ that at least one inode within the filesystem has had its reference count
+ bumped so that the filesystem can't disappear while the event is
+ outstanding.
+*/
+
+int
+dm_send_namesp_event(
+ dm_eventtype_t event,
+ struct super_block *sb, /* used by PREUNMOUNT */
+ struct inode *ip1,
+ dm_right_t vp1_right,
+ struct inode *ip2,
+ dm_right_t vp2_right,
+ const char *name1,
+ const char *name2,
+ mode_t mode,
+ int retcode,
+ int flags)
+{
+ dm_namesp_event_t *np;
+ dm_tokevent_t *tevp;
+ dm_tokdata_t *tdp1 = NULL; /* primary handle for event */
+ dm_tokdata_t *tdp2 = NULL; /* additional handle for event */
+ size_t nextoff;
+ int error;
+
+ if (sb == NULL)
+ sb = ip1->i_sb;
+
+ switch (event) {
+ case DM_EVENT_PREUNMOUNT:
+ /*
+ * PREUNMOUNT - Send the file system handle in handle1,
+ * and the handle for the root dir in the second. Otherwise
+ * it's a normal sync message; i.e. succeeds or fails
+ * depending on the app's return code.
+ * ip1 and ip2 are both the root dir of mounted FS
+ * vp1_right is the filesystem right.
+ * vp2_right is the root inode right.
+ */
+
+ if (flags & DM_FLAGS_UNWANTED) {
+ dm_change_fsys_entry(sb, DM_STATE_UNMOUNTING);
+ return(0);
+ }
+ if (ip1 == NULL) {
+ /* If preunmount happens after kill_super then
+ * it's too late; there's nothing left with which
+ * to construct an event.
+ */
+ return(0);
+ }
+ tdp1 = dm_sb_data(sb, ip1, vp1_right);
+ if (tdp1 == NULL)
+ return -ENOMEM;
+ tdp2 = dm_ip_data(ip2, vp2_right, /* reference held */ 1);
+ if (tdp2 == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ return -ENOMEM;
+ }
+ break;
+
+ case DM_EVENT_NOSPACE:
+ /* vp1_right is the filesystem right. */
+
+ tdp1 = dm_sb_data(sb, ip1, vp1_right);
+ if (tdp1 == NULL)
+ return -ENOMEM;
+ tdp2 = dm_ip_data(ip2, vp2_right, /* reference held */ 1); /* additional info - not in the spec */
+ if (tdp2 == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ return -ENOMEM;
+ }
+ break;
+
+ default:
+ /* All other events only pass in inodes and don't require any
+ special cases.
+ */
+
+ tdp1 = dm_ip_data(ip1, vp1_right, /* reference held */ 1);
+ if (tdp1 == NULL)
+ return -ENOMEM;
+ if (ip2) {
+ tdp2 = dm_ip_data(ip2, vp2_right, /* reference held */ 1);
+ if (tdp2 == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ /* Calculate the size of the event in bytes and allocate zeroed memory
+ for it.
+ */
+
+ tevp = dm_evt_create_tevp(event,
+ HANDLE_SIZE(tdp1) + (ip2 ? HANDLE_SIZE(tdp2) : 0) +
+ (name1 ? strlen(name1) + 1 : 0) +
+ (name2 ? strlen(name2) + 1 : 0), (void **)&np);
+ if (tevp == NULL) {
+ if (tdp1)
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ if (tdp2)
+ kmem_cache_free(dm_tokdata_cachep, tdp2);
+ return(-ENOMEM);
+ }
+
+ dm_add_handle_to_event(tevp, tdp1);
+ if (ip2)
+ dm_add_handle_to_event(tevp, tdp2);
+
+ /* Now copy in all the dm_namesp_event_t specific fields. */
+
+ np->ne_handle1.vd_offset = sizeof(*np);
+ np->ne_handle1.vd_length = HANDLE_SIZE(tdp1);
+ memcpy((char *) np + np->ne_handle1.vd_offset, &tdp1->td_handle,
+ np->ne_handle1.vd_length);
+ nextoff = GETNEXTOFF(np->ne_handle1);
+ if (ip2) {
+ np->ne_handle2.vd_offset = nextoff;
+ np->ne_handle2.vd_length = HANDLE_SIZE(tdp2);
+ memcpy((char *)np + np->ne_handle2.vd_offset, &tdp2->td_handle,
+ np->ne_handle2.vd_length);
+ nextoff = GETNEXTOFF(np->ne_handle2);
+ }
+ if (name1) {
+ np->ne_name1.vd_offset = nextoff;
+ np->ne_name1.vd_length = strlen(name1) + 1;
+ memcpy((char *)np + np->ne_name1.vd_offset, name1,
+ np->ne_name1.vd_length);
+ nextoff = GETNEXTOFF(np->ne_name1);
+ }
+ if (name2) {
+ np->ne_name2.vd_offset = nextoff;
+ np->ne_name2.vd_length = strlen(name2) + 1;
+ memcpy((char *)np + np->ne_name2.vd_offset, name2,
+ np->ne_name2.vd_length);
+ }
+ np->ne_mode = mode;
+ np->ne_retcode = retcode;
+
+ /* Queue the message and wait for the reply. */
+
+ error = dm_enqueue_normal_event(sb, &tevp, flags);
+
+ /* If no errors occurred, we must leave with the same rights we had
+ upon entry. If errors occurred, we must leave with no rights.
+ */
+
+ dm_evt_rele_tevp(tevp, error);
+
+ if (!error && event == DM_EVENT_PREUNMOUNT) {
+ dm_change_fsys_entry(sb, DM_STATE_UNMOUNTING);
+ }
+
+ return(error);
+}
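+
+/* Usage sketch (illustrative): a filesystem's unlink path might send the
+   remove event, assuming it is enabled in the filesystem's event list, with
+
+	error = dm_send_namesp_event(DM_EVENT_REMOVE, NULL,
+				     dir_inode, DM_RIGHT_NULL,
+				     NULL, DM_RIGHT_NULL,
+				     name, NULL, 0, 0, 0);
+
+   "dir_inode" and "name" are assumed caller-side values; sb may be passed as
+   NULL here because it is derived from ip1 for all events other than
+   DM_EVENT_PREUNMOUNT.
+*/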
+
+
+/*
+ * Send a message of type "DM_EVENT_USER". Since no inode is involved, we
+ * don't have to worry about rights here.
+ */
+
+int
+dm_send_msg(
+ dm_sessid_t targetsid,
+ dm_msgtype_t msgtype, /* SYNC or ASYNC */
+ size_t buflen,
+ void __user *bufp)
+{
+ dm_tokevent_t *tevp;
+ int sync;
+ void *msgp;
+ int error;
+
+ if (buflen > DM_MAX_MSG_DATA)
+ return(-E2BIG);
+ if (msgtype == DM_MSGTYPE_ASYNC) {
+ sync = 0;
+ } else if (msgtype == DM_MSGTYPE_SYNC) {
+ sync = 1;
+ } else {
+ return(-EINVAL);
+ }
+
+ tevp = dm_evt_create_tevp(DM_EVENT_USER, buflen, (void **)&msgp);
+ if (tevp == NULL)
+ return -ENOMEM;
+
+ if (buflen && copy_from_user(msgp, bufp, buflen)) {
+ dm_evt_rele_tevp(tevp, 0);
+ return(-EFAULT);
+ }
+
+ /* Enqueue the request and wait for the reply. */
+
+ error = dm_enqueue_sendmsg_event(targetsid, tevp, sync);
+
+ /* Destroy the tevp and return the reply. (dm_pending is not
+ supported here.)
+ */
+
+ dm_evt_rele_tevp(tevp, error);
+
+ return(error);
+}
+
+
+/*
+ * Send a message of type "DM_EVENT_USER". Since no inode is involved, we
+ * don't have to worry about rights here.
+ */
+
+int
+dm_create_userevent(
+ dm_sessid_t sid,
+ size_t msglen,
+ void __user *msgdatap,
+ dm_token_t __user *tokenp) /* return token created */
+{
+ dm_tokevent_t *tevp;
+ dm_token_t token;
+ int error;
+ void *msgp;
+
+ if (msglen > DM_MAX_MSG_DATA)
+ return(-E2BIG);
+
+ tevp = dm_evt_create_tevp(DM_EVENT_USER, msglen, (void **)&msgp);
+ if (tevp == NULL)
+ return(-ENOMEM);
+
+ if (msglen && copy_from_user(msgp, msgdatap, msglen)) {
+ dm_evt_rele_tevp(tevp, 0);
+ return(-EFAULT);
+ }
+
+ /* Queue the message. If that didn't work, free the tevp structure. */
+
+ if ((error = dm_enqueue_user_event(sid, tevp, &token)) != 0)
+ dm_evt_rele_tevp(tevp, 0);
+
+ if (!error && copy_to_user(tokenp, &token, sizeof(token)))
+ error = -EFAULT;
+
+ return(error);
+}
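+
+/* Usage sketch (illustrative, library side): dm_create_userevent() backs the
+   libdm call of the same name, e.g.
+
+	dm_token_t token;
+	dm_create_userevent(sid, strlen(msg) + 1, msg, &token);
+
+   after which the token can be presented to the session via the usual
+   token-based calls and eventually answered via dm_respond_event().
+*/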
--- /dev/null
+++ b/fs/dmapi/dmapi_handle.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+
+int
+dm_create_by_handle(
+ dm_sessid_t sid,
+ void __user *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen,
+ char __user *cname)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->create_by_handle(tdp->td_ip, tdp->td_right,
+ hanp, hlen, cname);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_mkdir_by_handle(
+ dm_sessid_t sid,
+ void __user *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen,
+ char __user *cname)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->mkdir_by_handle(tdp->td_ip, tdp->td_right,
+ hanp, hlen, cname);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_symlink_by_handle(
+ dm_sessid_t sid,
+ void __user *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen,
+ char __user *cname,
+ char __user *path)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->symlink_by_handle(tdp->td_ip, tdp->td_right,
+ hanp, hlen, cname, path);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
--- /dev/null
+++ b/fs/dmapi/dmapi_hole.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+
+int
+dm_get_allocinfo_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t __user *offp,
+ u_int nelem,
+ dm_extent_t __user *extentp,
+ u_int __user *nelemp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_allocinfo_rvp(tdp->td_ip, tdp->td_right,
+ offp, nelem, extentp, nelemp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_probe_hole(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len,
+ dm_off_t __user *roffp,
+ dm_size_t __user *rlenp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->probe_hole(tdp->td_ip, tdp->td_right,
+ off, len, roffp, rlenp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_punch_hole(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->punch_hole(tdp->td_ip, tdp->td_right, off, len);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
--- /dev/null
+++ b/fs/dmapi/dmapi_io.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+
+int
+dm_read_invis_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len,
+ void __user *bufp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->read_invis_rvp(tdp->td_ip, tdp->td_right,
+ off, len, bufp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_write_invis_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ int flags,
+ dm_off_t off,
+ dm_size_t len,
+ void __user *bufp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->write_invis_rvp(tdp->td_ip, tdp->td_right,
+ flags, off, len, bufp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_sync_by_handle(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->sync_by_handle(tdp->td_ip, tdp->td_right);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_get_dioinfo(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_dioinfo_t __user *diop)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_dioinfo(tdp->td_ip, tdp->td_right, diop);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
--- /dev/null
+++ b/fs/dmapi/dmapi_kern.h
@@ -0,0 +1,598 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#ifndef __DMAPI_KERN_H__
+#define __DMAPI_KERN_H__
+
+#include <linux/fs.h>
+
+union sys_dmapi_uarg {
+ void *p;
+ __u64 u;
+};
+typedef union sys_dmapi_uarg sys_dmapi_u;
+
+struct sys_dmapi_args {
+ sys_dmapi_u uarg1, uarg2, uarg3, uarg4, uarg5, uarg6, uarg7, uarg8,
+ uarg9, uarg10, uarg11;
+};
+typedef struct sys_dmapi_args sys_dmapi_args_t;
+
+#define DM_Uarg(uap,i) uap->uarg##i.u
+#define DM_Parg(uap,i) uap->uarg##i.p
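+
+/* For example, DM_Uarg(uap, 3) expands to uap->uarg3.u, and DM_Parg(uap, 3)
+   to uap->uarg3.p. */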
+
+#ifdef __KERNEL__
+
+struct dm_handle_t;
+
+/* The first group of definitions and prototypes define the filesystem's
+ interface into the DMAPI code.
+*/
+
+
+/* Definitions used for the flags field on dm_send_data_event(),
+ dm_send_unmount_event(), and dm_send_namesp_event() calls.
+*/
+
+#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
+#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
+
+/* Possible code levels reported by dm_code_level(). */
+
+#define DM_CLVL_INIT 0 /* DMAPI prior to X/Open compliance */
+#define DM_CLVL_XOPEN 1 /* X/Open compliant DMAPI */
+
+
+/*
+ * Filesystem operations accessed by the DMAPI core.
+ */
+struct filesystem_dmapi_operations {
+ int (*get_fsys_vector)(struct super_block *sb, void *addr);
+ int (*fh_to_inode)(struct super_block *sb, struct inode **ip,
+ dm_fid_t *fid);
+ int (*inode_to_fh)(struct inode *ip, dm_fid_t *fid,
+ dm_fsid_t *fsid );
+ void (*get_fsid)(struct super_block *sb, dm_fsid_t *fsid);
+#define HAVE_DM_QUEUE_FLUSH
+ int (*flushing)(struct inode *ip);
+};
+
+
+/* Prototypes used outside of the DMI module/directory. */
+
+int dm_send_data_event(
+ dm_eventtype_t event,
+ struct inode *ip,
+ dm_right_t vp_right,
+ dm_off_t off,
+ size_t len,
+ int flags);
+
+int dm_send_destroy_event(
+ struct inode *ip,
+ dm_right_t vp_right);
+
+int dm_send_mount_event(
+ struct super_block *sb,
+ dm_right_t vfsp_right,
+ struct inode *ip,
+ dm_right_t vp_right,
+ struct inode *rootip,
+ dm_right_t rootvp_right,
+ char *name1,
+ char *name2);
+
+int dm_send_namesp_event(
+ dm_eventtype_t event,
+ struct super_block *sb,
+ struct inode *ip1,
+ dm_right_t vp1_right,
+ struct inode *ip2,
+ dm_right_t vp2_right,
+ const char *name1,
+ const char *name2,
+ mode_t mode,
+ int retcode,
+ int flags);
+
+void dm_send_unmount_event(
+ struct super_block *sbp,
+ struct inode *ip,
+ dm_right_t sbp_right,
+ mode_t mode,
+ int retcode,
+ int flags);
+
+int dm_code_level(void);
+
+int dm_ip_to_handle (
+ struct inode *ip,
+ dm_handle_t *handlep);
+
+#define HAVE_DM_RELEASE_THREADS_ERRNO
+int dm_release_threads(
+ struct super_block *sb,
+ struct inode *inode,
+ int errno);
+
+void dmapi_register(
+ struct file_system_type *fstype,
+ struct filesystem_dmapi_operations *dmapiops);
+
+void dmapi_unregister(
+ struct file_system_type *fstype);
+
+int dmapi_registered(
+ struct file_system_type *fstype,
+ struct filesystem_dmapi_operations **dmapiops);
+
+
+/* The following prototypes and definitions are used by DMAPI as its
+ interface into the filesystem code. Communication between DMAPI and the
+ filesystem are established as follows:
+ 1. DMAPI uses the VFS_DMAPI_FSYS_VECTOR to ask for the addresses
+ of all the functions within the filesystem that it may need to call.
+ 2. The filesystem returns an array of function name/address pairs which
+ DMAPI builds into a function vector.
+ The VFS_DMAPI_FSYS_VECTOR call is only made one time for a particular
+ filesystem type. From then on, DMAPI uses its function vector to call the
+ filesystem functions directly. Functions in the array which DMAPI doesn't
+ recognize are ignored. A dummy function which returns ENOSYS is used for
+ any function that DMAPI needs but which was not provided by the filesystem.
+   If XFS doesn't recognize the VFS_DMAPI_FSYS_VECTOR, DMAPI assumes that it
+   doesn't have the X/Open support code; in this case DMAPI uses the XFS code
+   originally bundled within DMAPI.
+
+   The goal of this interface is to allow incremental changes to be made to
+   both the filesystem and to DMAPI while minimizing inter-patch dependencies,
+   and to eventually allow DMAPI to support multiple filesystem types at the
+   same time should that become necessary. (An illustrative sketch of the
+   filesystem side of this handshake follows the vector structure definitions
+   below.)
+*/
+
+typedef enum {
+ DM_FSYS_CLEAR_INHERIT = 0,
+ DM_FSYS_CREATE_BY_HANDLE = 1,
+ DM_FSYS_DOWNGRADE_RIGHT = 2,
+ DM_FSYS_GET_ALLOCINFO_RVP = 3,
+ DM_FSYS_GET_BULKALL_RVP = 4,
+ DM_FSYS_GET_BULKATTR_RVP = 5,
+ DM_FSYS_GET_CONFIG = 6,
+ DM_FSYS_GET_CONFIG_EVENTS = 7,
+ DM_FSYS_GET_DESTROY_DMATTR = 8,
+ DM_FSYS_GET_DIOINFO = 9,
+ DM_FSYS_GET_DIRATTRS_RVP = 10,
+ DM_FSYS_GET_DMATTR = 11,
+ DM_FSYS_GET_EVENTLIST = 12,
+ DM_FSYS_GET_FILEATTR = 13,
+ DM_FSYS_GET_REGION = 14,
+ DM_FSYS_GETALL_DMATTR = 15,
+ DM_FSYS_GETALL_INHERIT = 16,
+ DM_FSYS_INIT_ATTRLOC = 17,
+ DM_FSYS_MKDIR_BY_HANDLE = 18,
+ DM_FSYS_PROBE_HOLE = 19,
+ DM_FSYS_PUNCH_HOLE = 20,
+ DM_FSYS_READ_INVIS_RVP = 21,
+ DM_FSYS_RELEASE_RIGHT = 22,
+ DM_FSYS_REMOVE_DMATTR = 23,
+ DM_FSYS_REQUEST_RIGHT = 24,
+ DM_FSYS_SET_DMATTR = 25,
+ DM_FSYS_SET_EVENTLIST = 26,
+ DM_FSYS_SET_FILEATTR = 27,
+ DM_FSYS_SET_INHERIT = 28,
+ DM_FSYS_SET_REGION = 29,
+ DM_FSYS_SYMLINK_BY_HANDLE = 30,
+ DM_FSYS_SYNC_BY_HANDLE = 31,
+ DM_FSYS_UPGRADE_RIGHT = 32,
+ DM_FSYS_WRITE_INVIS_RVP = 33,
+ DM_FSYS_OBJ_REF_HOLD = 34,
+ DM_FSYS_MAX = 35
+} dm_fsys_switch_t;
+
+
+#define DM_FSYS_OBJ 0x1 /* object refers to a fsys handle */
+
+
+/*
+ * Prototypes for filesystem-specific functions.
+ */
+
+typedef int (*dm_fsys_clear_inherit_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t __user *attrnamep);
+
+typedef int (*dm_fsys_create_by_handle_t)(
+ struct inode *ip,
+ dm_right_t right,
+ void __user *hanp,
+ size_t hlen,
+ char __user *cname);
+
+typedef int (*dm_fsys_downgrade_right_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type); /* DM_FSYS_OBJ or zero */
+
+typedef int (*dm_fsys_get_allocinfo_rvp_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t __user *offp,
+ u_int nelem,
+ dm_extent_t __user *extentp,
+ u_int __user *nelemp,
+ int *rvalp);
+
+typedef int (*dm_fsys_get_bulkall_rvp_t)(
+ struct inode *ip, /* root inode */
+ dm_right_t right,
+ u_int mask,
+ dm_attrname_t __user *attrnamep,
+ dm_attrloc_t __user *locp,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp,
+ int *rvalp);
+
+typedef int (*dm_fsys_get_bulkattr_rvp_t)(
+ struct inode *ip, /* root inode */
+ dm_right_t right,
+ u_int mask,
+ dm_attrloc_t __user *locp,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp,
+ int *rvalp);
+
+typedef int (*dm_fsys_get_config_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_config_t flagname,
+ dm_size_t __user *retvalp);
+
+typedef int (*dm_fsys_get_config_events_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_eventset_t __user *eventsetp,
+ u_int __user *nelemp);
+
+typedef int (*dm_fsys_get_destroy_dmattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep,
+ char **valuepp,
+ int *vlenp);
+
+typedef int (*dm_fsys_get_dioinfo_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_dioinfo_t __user *diop);
+
+typedef int (*dm_fsys_get_dirattrs_rvp_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int mask,
+ dm_attrloc_t __user *locp,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp,
+ int *rvalp);
+
+typedef int (*dm_fsys_get_dmattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t __user *attrnamep,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp);
+
+typedef int (*dm_fsys_get_eventlist_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type,
+ u_int nelem,
+ dm_eventset_t *eventsetp, /* in kernel space! */
+ u_int *nelemp); /* in kernel space! */
+
+typedef int (*dm_fsys_get_fileattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int mask,
+ dm_stat_t __user *statp);
+
+typedef int (*dm_fsys_get_region_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_region_t __user *regbufp,
+ u_int __user *nelemp);
+
+typedef int (*dm_fsys_getall_dmattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp);
+
+typedef int (*dm_fsys_getall_inherit_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_inherit_t __user *inheritbufp,
+ u_int __user *nelemp);
+
+typedef int (*dm_fsys_init_attrloc_t)(
+ struct inode *ip, /* sometimes root inode */
+ dm_right_t right,
+ dm_attrloc_t __user *locp);
+
+typedef int (*dm_fsys_mkdir_by_handle_t)(
+ struct inode *ip,
+ dm_right_t right,
+ void __user *hanp,
+ size_t hlen,
+ char __user *cname);
+
+typedef int (*dm_fsys_probe_hole_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t off,
+ dm_size_t len,
+ dm_off_t __user *roffp,
+ dm_size_t __user *rlenp);
+
+typedef int (*dm_fsys_punch_hole_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t off,
+ dm_size_t len);
+
+typedef int (*dm_fsys_read_invis_rvp_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t off,
+ dm_size_t len,
+ void __user *bufp,
+ int *rvp);
+
+typedef int (*dm_fsys_release_right_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type);
+
+typedef int (*dm_fsys_remove_dmattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ int setdtime,
+ dm_attrname_t __user *attrnamep);
+
+typedef int (*dm_fsys_request_right_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type, /* DM_FSYS_OBJ or zero */
+ u_int flags,
+ dm_right_t newright);
+
+typedef int (*dm_fsys_set_dmattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t __user *attrnamep,
+ int setdtime,
+ size_t buflen,
+ void __user *bufp);
+
+typedef int (*dm_fsys_set_eventlist_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type,
+ dm_eventset_t *eventsetp, /* in kernel space! */
+ u_int maxevent);
+
+typedef int (*dm_fsys_set_fileattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int mask,
+ dm_fileattr_t __user *attrp);
+
+typedef int (*dm_fsys_set_inherit_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t __user *attrnamep,
+ mode_t mode);
+
+typedef int (*dm_fsys_set_region_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_region_t __user *regbufp,
+ dm_boolean_t __user *exactflagp);
+
+typedef int (*dm_fsys_symlink_by_handle_t)(
+ struct inode *ip,
+ dm_right_t right,
+ void __user *hanp,
+ size_t hlen,
+ char __user *cname,
+ char __user *path);
+
+typedef int (*dm_fsys_sync_by_handle_t)(
+ struct inode *ip,
+ dm_right_t right);
+
+typedef int (*dm_fsys_upgrade_right_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type); /* DM_FSYS_OBJ or zero */
+
+typedef int (*dm_fsys_write_invis_rvp_t)(
+ struct inode *ip,
+ dm_right_t right,
+ int flags,
+ dm_off_t off,
+ dm_size_t len,
+ void __user *bufp,
+ int *rvp);
+
+typedef void (*dm_fsys_obj_ref_hold_t)(
+ struct inode *ip);
+
+
+/* Structure definitions used by the VFS_DMAPI_FSYS_VECTOR call. */
+
+typedef struct {
+ dm_fsys_switch_t func_no; /* function number */
+ union {
+ dm_fsys_clear_inherit_t clear_inherit;
+ dm_fsys_create_by_handle_t create_by_handle;
+ dm_fsys_downgrade_right_t downgrade_right;
+ dm_fsys_get_allocinfo_rvp_t get_allocinfo_rvp;
+ dm_fsys_get_bulkall_rvp_t get_bulkall_rvp;
+ dm_fsys_get_bulkattr_rvp_t get_bulkattr_rvp;
+ dm_fsys_get_config_t get_config;
+ dm_fsys_get_config_events_t get_config_events;
+ dm_fsys_get_destroy_dmattr_t get_destroy_dmattr;
+ dm_fsys_get_dioinfo_t get_dioinfo;
+ dm_fsys_get_dirattrs_rvp_t get_dirattrs_rvp;
+ dm_fsys_get_dmattr_t get_dmattr;
+ dm_fsys_get_eventlist_t get_eventlist;
+ dm_fsys_get_fileattr_t get_fileattr;
+ dm_fsys_get_region_t get_region;
+ dm_fsys_getall_dmattr_t getall_dmattr;
+ dm_fsys_getall_inherit_t getall_inherit;
+ dm_fsys_init_attrloc_t init_attrloc;
+ dm_fsys_mkdir_by_handle_t mkdir_by_handle;
+ dm_fsys_probe_hole_t probe_hole;
+ dm_fsys_punch_hole_t punch_hole;
+ dm_fsys_read_invis_rvp_t read_invis_rvp;
+ dm_fsys_release_right_t release_right;
+ dm_fsys_remove_dmattr_t remove_dmattr;
+ dm_fsys_request_right_t request_right;
+ dm_fsys_set_dmattr_t set_dmattr;
+ dm_fsys_set_eventlist_t set_eventlist;
+ dm_fsys_set_fileattr_t set_fileattr;
+ dm_fsys_set_inherit_t set_inherit;
+ dm_fsys_set_region_t set_region;
+ dm_fsys_symlink_by_handle_t symlink_by_handle;
+ dm_fsys_sync_by_handle_t sync_by_handle;
+ dm_fsys_upgrade_right_t upgrade_right;
+ dm_fsys_write_invis_rvp_t write_invis_rvp;
+ dm_fsys_obj_ref_hold_t obj_ref_hold;
+ } u_fc;
+} fsys_function_vector_t;
+
+struct dm_fcntl_vector {
+ int code_level;
+ int count; /* Number of functions in the vector */
+ fsys_function_vector_t *vecp;
+};
+typedef struct dm_fcntl_vector dm_fcntl_vector_t;
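+
+/* Illustrative sketch (assumed names, not part of this interface): a
+   filesystem's get_fsys_vector handler might answer the request from a
+   static table of the functions it implements:
+
+	static fsys_function_vector_t myfs_vec[] = {
+		{ DM_FSYS_GET_FILEATTR, { .get_fileattr = myfs_get_fileattr } },
+		{ DM_FSYS_PUNCH_HOLE,   { .punch_hole   = myfs_punch_hole } },
+	};
+
+	static int
+	myfs_get_fsys_vector(struct super_block *sb, void *addr)
+	{
+		dm_fcntl_vector_t *vecrq = (dm_fcntl_vector_t *)addr;
+
+		vecrq->code_level = DM_CLVL_XOPEN;
+		vecrq->count = sizeof(myfs_vec) / sizeof(myfs_vec[0]);
+		vecrq->vecp = myfs_vec;
+		return 0;
+	}
+
+   Any slots the filesystem does not fill in are served by a dummy routine in
+   the DMAPI core that returns -ENOSYS.
+*/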
+
+struct dm_fcntl_mapevent {
+ size_t length; /* length of transfer */
+ dm_eventtype_t max_event; /* Maximum (WRITE or READ) event */
+ int error; /* returned error code */
+};
+typedef struct dm_fcntl_mapevent dm_fcntl_mapevent_t;
+
+#endif /* __KERNEL__ */
+
+
+/* The following definitions are needed both by the kernel and by the
+ library routines.
+*/
+
+#define DM_MAX_HANDLE_SIZE 56 /* maximum size for a file handle */
+
+
+/*
+ * Opcodes for dmapi ioctl.
+ */
+
+#define DM_CLEAR_INHERIT 1
+#define DM_CREATE_BY_HANDLE 2
+#define DM_CREATE_SESSION 3
+#define DM_CREATE_USEREVENT 4
+#define DM_DESTROY_SESSION 5
+#define DM_DOWNGRADE_RIGHT 6
+#define DM_FD_TO_HANDLE 7
+#define DM_FIND_EVENTMSG 8
+#define DM_GET_ALLOCINFO 9
+#define DM_GET_BULKALL 10
+#define DM_GET_BULKATTR 11
+#define DM_GET_CONFIG 12
+#define DM_GET_CONFIG_EVENTS 13
+#define DM_GET_DIOINFO 14
+#define DM_GET_DIRATTRS 15
+#define DM_GET_DMATTR 16
+#define DM_GET_EVENTLIST 17
+#define DM_GET_EVENTS 18
+#define DM_GET_FILEATTR 19
+#define DM_GET_MOUNTINFO 20
+#define DM_GET_REGION 21
+#define DM_GETALL_DISP 22
+#define DM_GETALL_DMATTR 23
+#define DM_GETALL_INHERIT 24
+#define DM_GETALL_SESSIONS 25
+#define DM_GETALL_TOKENS 26
+#define DM_INIT_ATTRLOC 27
+#define DM_MKDIR_BY_HANDLE 28
+#define DM_MOVE_EVENT 29
+#define DM_OBJ_REF_HOLD 30
+#define DM_OBJ_REF_QUERY 31
+#define DM_OBJ_REF_RELE 32
+#define DM_PATH_TO_FSHANDLE 33
+#define DM_PATH_TO_HANDLE 34
+#define DM_PENDING 35
+#define DM_PROBE_HOLE 36
+#define DM_PUNCH_HOLE 37
+#define DM_QUERY_RIGHT 38
+#define DM_QUERY_SESSION 39
+#define DM_READ_INVIS 40
+#define DM_RELEASE_RIGHT 41
+#define DM_REMOVE_DMATTR 42
+#define DM_REQUEST_RIGHT 43
+#define DM_RESPOND_EVENT 44
+#define DM_SEND_MSG 45
+#define DM_SET_DISP 46
+#define DM_SET_DMATTR 47
+#define DM_SET_EVENTLIST 48
+#define DM_SET_FILEATTR 49
+#define DM_SET_INHERIT 50
+#define DM_SET_REGION 51
+#define DM_SET_RETURN_ON_DESTROY 52
+#define DM_SYMLINK_BY_HANDLE 53
+#define DM_SYNC_BY_HANDLE 54
+#define DM_UPGRADE_RIGHT 55
+#define DM_WRITE_INVIS 56
+#define DM_OPEN_BY_HANDLE 57
+
+#endif /* __DMAPI_KERN_H__ */
--- /dev/null
+++ b/fs/dmapi/dmapi_mountinfo.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+static LIST_HEAD(dm_fsys_map);
+static DEFINE_SPINLOCK(dm_fsys_lock);
+
+int
+dm_code_level(void)
+{
+ return DM_CLVL_XOPEN; /* initial X/Open compliant release */
+}
+
+
+/* Dummy routine stored in each function vector slot for which the
+   filesystem provides no function of its own. If an application calls such
+   a function, it simply gets -ENOSYS.
+*/
+
+static int
+dm_enosys(void)
+{
+ return -ENOSYS; /* function not supported by filesystem */
+}
+
+
+/* dm_query_fsys_for_vector() asks a filesystem for its list of supported
+ DMAPI functions, and builds a dm_vector_map_t structure based upon the
+ reply. We ignore functions supported by the filesystem which we do not
+ know about, and we substitute the subroutine 'dm_enosys' for each function
+ we know about but the filesystem does not support.
+*/
+
+static void
+dm_query_fsys_for_vector(
+ dm_vector_map_t *map)
+{
+ struct super_block *sb = map->sb;
+ fsys_function_vector_t *vecp;
+ dm_fcntl_vector_t vecrq;
+ dm_fsys_vector_t *vptr;
+ struct filesystem_dmapi_operations *dmapiops = map->dmapiops;
+ int error;
+ int i;
+
+
+ /* Allocate a function vector and initialize all fields with a
+ dummy function that returns ENOSYS.
+ */
+
+ vptr = map->vptr = kmem_cache_alloc(dm_fsys_vptr_cachep, GFP_KERNEL);
+ if (vptr == NULL) {
+		printk("%s/%d: kmem_cache_alloc(dm_fsys_vptr_cachep) "
+		       "returned NULL\n", __FUNCTION__, __LINE__);
+ return;
+ }
+
+ vptr->code_level = 0;
+ vptr->clear_inherit = (dm_fsys_clear_inherit_t)dm_enosys;
+ vptr->create_by_handle = (dm_fsys_create_by_handle_t)dm_enosys;
+ vptr->downgrade_right = (dm_fsys_downgrade_right_t)dm_enosys;
+ vptr->get_allocinfo_rvp = (dm_fsys_get_allocinfo_rvp_t)dm_enosys;
+ vptr->get_bulkall_rvp = (dm_fsys_get_bulkall_rvp_t)dm_enosys;
+ vptr->get_bulkattr_rvp = (dm_fsys_get_bulkattr_rvp_t)dm_enosys;
+ vptr->get_config = (dm_fsys_get_config_t)dm_enosys;
+ vptr->get_config_events = (dm_fsys_get_config_events_t)dm_enosys;
+ vptr->get_destroy_dmattr = (dm_fsys_get_destroy_dmattr_t)dm_enosys;
+ vptr->get_dioinfo = (dm_fsys_get_dioinfo_t)dm_enosys;
+ vptr->get_dirattrs_rvp = (dm_fsys_get_dirattrs_rvp_t)dm_enosys;
+ vptr->get_dmattr = (dm_fsys_get_dmattr_t)dm_enosys;
+ vptr->get_eventlist = (dm_fsys_get_eventlist_t)dm_enosys;
+ vptr->get_fileattr = (dm_fsys_get_fileattr_t)dm_enosys;
+ vptr->get_region = (dm_fsys_get_region_t)dm_enosys;
+ vptr->getall_dmattr = (dm_fsys_getall_dmattr_t)dm_enosys;
+ vptr->getall_inherit = (dm_fsys_getall_inherit_t)dm_enosys;
+ vptr->init_attrloc = (dm_fsys_init_attrloc_t)dm_enosys;
+ vptr->mkdir_by_handle = (dm_fsys_mkdir_by_handle_t)dm_enosys;
+ vptr->probe_hole = (dm_fsys_probe_hole_t)dm_enosys;
+ vptr->punch_hole = (dm_fsys_punch_hole_t)dm_enosys;
+ vptr->read_invis_rvp = (dm_fsys_read_invis_rvp_t)dm_enosys;
+ vptr->release_right = (dm_fsys_release_right_t)dm_enosys;
+ vptr->request_right = (dm_fsys_request_right_t)dm_enosys;
+ vptr->remove_dmattr = (dm_fsys_remove_dmattr_t)dm_enosys;
+ vptr->set_dmattr = (dm_fsys_set_dmattr_t)dm_enosys;
+ vptr->set_eventlist = (dm_fsys_set_eventlist_t)dm_enosys;
+ vptr->set_fileattr = (dm_fsys_set_fileattr_t)dm_enosys;
+ vptr->set_inherit = (dm_fsys_set_inherit_t)dm_enosys;
+ vptr->set_region = (dm_fsys_set_region_t)dm_enosys;
+ vptr->symlink_by_handle = (dm_fsys_symlink_by_handle_t)dm_enosys;
+ vptr->sync_by_handle = (dm_fsys_sync_by_handle_t)dm_enosys;
+ vptr->upgrade_right = (dm_fsys_upgrade_right_t)dm_enosys;
+ vptr->write_invis_rvp = (dm_fsys_write_invis_rvp_t)dm_enosys;
+ vptr->obj_ref_hold = (dm_fsys_obj_ref_hold_t)dm_enosys;
+
+ /* Issue a call to the filesystem in order to obtain
+ its vector of filesystem-specific DMAPI routines.
+ */
+
+ vecrq.count = 0;
+ vecrq.vecp = NULL;
+
+ error = -ENOSYS;
+ ASSERT(dmapiops);
+ if (dmapiops->get_fsys_vector)
+ error = dmapiops->get_fsys_vector(sb, (caddr_t)&vecrq);
+
+ /* If we still have an error at this point, then the filesystem simply
+ does not support DMAPI, so we give up with all functions set to
+ ENOSYS.
+ */
+
+ if (error || vecrq.count == 0) {
+ kmem_cache_free(dm_fsys_vptr_cachep, vptr);
+ map->vptr = NULL;
+ return;
+ }
+
+ /* The request succeeded and we were given a vector which we need to
+ map to our current level. Overlay the dummy function with every
+ filesystem function we understand.
+ */
+
+ vptr->code_level = vecrq.code_level;
+ vecp = vecrq.vecp;
+ for (i = 0; i < vecrq.count; i++) {
+ switch (vecp[i].func_no) {
+ case DM_FSYS_CLEAR_INHERIT:
+ vptr->clear_inherit = vecp[i].u_fc.clear_inherit;
+ break;
+ case DM_FSYS_CREATE_BY_HANDLE:
+ vptr->create_by_handle = vecp[i].u_fc.create_by_handle;
+ break;
+ case DM_FSYS_DOWNGRADE_RIGHT:
+ vptr->downgrade_right = vecp[i].u_fc.downgrade_right;
+ break;
+ case DM_FSYS_GET_ALLOCINFO_RVP:
+ vptr->get_allocinfo_rvp = vecp[i].u_fc.get_allocinfo_rvp;
+ break;
+ case DM_FSYS_GET_BULKALL_RVP:
+ vptr->get_bulkall_rvp = vecp[i].u_fc.get_bulkall_rvp;
+ break;
+ case DM_FSYS_GET_BULKATTR_RVP:
+ vptr->get_bulkattr_rvp = vecp[i].u_fc.get_bulkattr_rvp;
+ break;
+ case DM_FSYS_GET_CONFIG:
+ vptr->get_config = vecp[i].u_fc.get_config;
+ break;
+ case DM_FSYS_GET_CONFIG_EVENTS:
+ vptr->get_config_events = vecp[i].u_fc.get_config_events;
+ break;
+ case DM_FSYS_GET_DESTROY_DMATTR:
+ vptr->get_destroy_dmattr = vecp[i].u_fc.get_destroy_dmattr;
+ break;
+ case DM_FSYS_GET_DIOINFO:
+ vptr->get_dioinfo = vecp[i].u_fc.get_dioinfo;
+ break;
+ case DM_FSYS_GET_DIRATTRS_RVP:
+ vptr->get_dirattrs_rvp = vecp[i].u_fc.get_dirattrs_rvp;
+ break;
+ case DM_FSYS_GET_DMATTR:
+ vptr->get_dmattr = vecp[i].u_fc.get_dmattr;
+ break;
+ case DM_FSYS_GET_EVENTLIST:
+ vptr->get_eventlist = vecp[i].u_fc.get_eventlist;
+ break;
+ case DM_FSYS_GET_FILEATTR:
+ vptr->get_fileattr = vecp[i].u_fc.get_fileattr;
+ break;
+ case DM_FSYS_GET_REGION:
+ vptr->get_region = vecp[i].u_fc.get_region;
+ break;
+ case DM_FSYS_GETALL_DMATTR:
+ vptr->getall_dmattr = vecp[i].u_fc.getall_dmattr;
+ break;
+ case DM_FSYS_GETALL_INHERIT:
+ vptr->getall_inherit = vecp[i].u_fc.getall_inherit;
+ break;
+ case DM_FSYS_INIT_ATTRLOC:
+ vptr->init_attrloc = vecp[i].u_fc.init_attrloc;
+ break;
+ case DM_FSYS_MKDIR_BY_HANDLE:
+ vptr->mkdir_by_handle = vecp[i].u_fc.mkdir_by_handle;
+ break;
+ case DM_FSYS_PROBE_HOLE:
+ vptr->probe_hole = vecp[i].u_fc.probe_hole;
+ break;
+ case DM_FSYS_PUNCH_HOLE:
+ vptr->punch_hole = vecp[i].u_fc.punch_hole;
+ break;
+ case DM_FSYS_READ_INVIS_RVP:
+ vptr->read_invis_rvp = vecp[i].u_fc.read_invis_rvp;
+ break;
+ case DM_FSYS_RELEASE_RIGHT:
+ vptr->release_right = vecp[i].u_fc.release_right;
+ break;
+ case DM_FSYS_REMOVE_DMATTR:
+ vptr->remove_dmattr = vecp[i].u_fc.remove_dmattr;
+ break;
+ case DM_FSYS_REQUEST_RIGHT:
+ vptr->request_right = vecp[i].u_fc.request_right;
+ break;
+ case DM_FSYS_SET_DMATTR:
+ vptr->set_dmattr = vecp[i].u_fc.set_dmattr;
+ break;
+ case DM_FSYS_SET_EVENTLIST:
+ vptr->set_eventlist = vecp[i].u_fc.set_eventlist;
+ break;
+ case DM_FSYS_SET_FILEATTR:
+ vptr->set_fileattr = vecp[i].u_fc.set_fileattr;
+ break;
+ case DM_FSYS_SET_INHERIT:
+ vptr->set_inherit = vecp[i].u_fc.set_inherit;
+ break;
+ case DM_FSYS_SET_REGION:
+ vptr->set_region = vecp[i].u_fc.set_region;
+ break;
+ case DM_FSYS_SYMLINK_BY_HANDLE:
+ vptr->symlink_by_handle = vecp[i].u_fc.symlink_by_handle;
+ break;
+ case DM_FSYS_SYNC_BY_HANDLE:
+ vptr->sync_by_handle = vecp[i].u_fc.sync_by_handle;
+ break;
+ case DM_FSYS_UPGRADE_RIGHT:
+ vptr->upgrade_right = vecp[i].u_fc.upgrade_right;
+ break;
+ case DM_FSYS_WRITE_INVIS_RVP:
+ vptr->write_invis_rvp = vecp[i].u_fc.write_invis_rvp;
+ break;
+ case DM_FSYS_OBJ_REF_HOLD:
+ vptr->obj_ref_hold = vecp[i].u_fc.obj_ref_hold;
+ break;
+ default: /* ignore ones we don't understand */
+ break;
+ }
+ }
+}
+
+
+/* Must hold dm_fsys_lock.
+ * This returns the prototype for all instances of the fstype.
+ */
+static dm_vector_map_t *
+dm_fsys_map_by_fstype(
+ struct file_system_type *fstype)
+{
+ struct list_head *p;
+ dm_vector_map_t *proto = NULL;
+ dm_vector_map_t *m;
+
+ ASSERT_ALWAYS(fstype);
+ list_for_each(p, &dm_fsys_map) {
+ m = list_entry(p, dm_vector_map_t, ftype_list);
+ if (m->f_type == fstype) {
+ proto = m;
+ break;
+ }
+ }
+ return proto;
+}
+
+
+/* Must hold dm_fsys_lock */
+static dm_vector_map_t *
+dm_fsys_map_by_sb(
+ struct super_block *sb)
+{
+ struct list_head *p;
+ dm_vector_map_t *proto;
+ dm_vector_map_t *m;
+ dm_vector_map_t *foundmap = NULL;
+
+ proto = dm_fsys_map_by_fstype(sb->s_type);
+	if (proto == NULL) {
+ return NULL;
+ }
+
+ list_for_each(p, &proto->sb_list) {
+ m = list_entry(p, dm_vector_map_t, sb_list);
+ if (m->sb == sb) {
+ foundmap = m;
+ break;
+ }
+ }
+ return foundmap;
+}
+
+
+#ifdef CONFIG_DMAPI_DEBUG
+static void
+sb_list(
+ struct super_block *sb)
+{
+ struct list_head *p;
+ dm_vector_map_t *proto;
+ dm_vector_map_t *m;
+
+ proto = dm_fsys_map_by_fstype(sb->s_type);
+ ASSERT(proto);
+
+	printk("%s/%d: Current sb_list\n", __FUNCTION__, __LINE__);
+	list_for_each(p, &proto->sb_list) {
+		m = list_entry(p, dm_vector_map_t, sb_list);
+		printk("%s/%d: map 0x%p, sb 0x%p, vptr 0x%p, dmapiops 0x%p\n",
+		       __FUNCTION__, __LINE__, m, m->sb, m->vptr, m->dmapiops);
+	}
+	printk("%s/%d: Done sb_list\n", __FUNCTION__, __LINE__);
+}
+#else
+#define sb_list(x)
+#endif
+
+#ifdef CONFIG_DMAPI_DEBUG
+static void
+ftype_list(void)
+{
+ struct list_head *p;
+ dm_vector_map_t *m;
+
+	printk("%s/%d: Current ftype_list\n", __FUNCTION__, __LINE__);
+	list_for_each(p, &dm_fsys_map) {
+		m = list_entry(p, dm_vector_map_t, ftype_list);
+		printk("%s/%d: FS 0x%p, ftype 0x%p %s\n", __FUNCTION__,
+		       __LINE__, m, m->f_type, m->f_type->name);
+	}
+	printk("%s/%d: Done ftype_list\n", __FUNCTION__, __LINE__);
+}
+#else
+#define ftype_list()
+#endif
+
+/* Ask for vptr for this filesystem instance.
+ * The caller knows this inode is on a dmapi-managed filesystem.
+ */
+dm_fsys_vector_t *
+dm_fsys_vector(
+ struct inode *ip)
+{
+ dm_vector_map_t *map;
+
+ spin_lock(&dm_fsys_lock);
+ ftype_list();
+ map = dm_fsys_map_by_sb(ip->i_sb);
+ spin_unlock(&dm_fsys_lock);
+ ASSERT(map);
+ ASSERT(map->vptr);
+ return map->vptr;
+}
+
+
+/* Ask for the dmapiops for this filesystem instance. The caller is
+ * also asking if this is a dmapi-managed filesystem.
+ */
+struct filesystem_dmapi_operations *
+dm_fsys_ops(
+ struct super_block *sb)
+{
+ dm_vector_map_t *proto = NULL;
+ dm_vector_map_t *map;
+
+ spin_lock(&dm_fsys_lock);
+ ftype_list();
+ sb_list(sb);
+ map = dm_fsys_map_by_sb(sb);
+ if (map == NULL)
+ proto = dm_fsys_map_by_fstype(sb->s_type);
+ spin_unlock(&dm_fsys_lock);
+
+ if ((map == NULL) && (proto == NULL))
+ return NULL;
+
+ if (map == NULL) {
+ /* Find out if it's dmapi-managed */
+ dm_vector_map_t *m;
+
+ ASSERT(proto);
+ m = kmem_cache_alloc(dm_fsys_map_cachep, GFP_KERNEL);
+ if (m == NULL) {
+			printk("%s/%d: kmem_cache_alloc(dm_fsys_map_cachep) "
+			       "returned NULL\n", __FUNCTION__, __LINE__);
+ return NULL;
+ }
+ memset(m, 0, sizeof(*m));
+ m->dmapiops = proto->dmapiops;
+ m->f_type = sb->s_type;
+ m->sb = sb;
+ INIT_LIST_HEAD(&m->sb_list);
+ INIT_LIST_HEAD(&m->ftype_list);
+
+ dm_query_fsys_for_vector(m);
+ if (m->vptr == NULL) {
+ /* This isn't dmapi-managed */
+ kmem_cache_free(dm_fsys_map_cachep, m);
+ return NULL;
+ }
+
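+		/* We dropped dm_fsys_lock while probing the filesystem, so
+		 * another thread may have raced us and installed a map for
+		 * this sb already.  Re-check under the lock and keep only one
+		 * copy, freeing whichever map lost the race.
+		 */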
+ spin_lock(&dm_fsys_lock);
+ if ((map = dm_fsys_map_by_sb(sb)) == NULL)
+ list_add(&m->sb_list, &proto->sb_list);
+ spin_unlock(&dm_fsys_lock);
+
+ if (map) {
+ kmem_cache_free(dm_fsys_vptr_cachep, m->vptr);
+ kmem_cache_free(dm_fsys_map_cachep, m);
+ }
+ else {
+ map = m;
+ }
+ }
+
+ return map->dmapiops;
+}
+
+
+
+/* Called when a filesystem instance is unregistered from dmapi */
+void
+dm_fsys_ops_release(
+ struct super_block *sb)
+{
+ dm_vector_map_t *map;
+
+ spin_lock(&dm_fsys_lock);
+ ASSERT(!list_empty(&dm_fsys_map));
+ map = dm_fsys_map_by_sb(sb);
+ ASSERT(map);
+ list_del(&map->sb_list);
+ spin_unlock(&dm_fsys_lock);
+
+ ASSERT(map->vptr);
+ kmem_cache_free(dm_fsys_vptr_cachep, map->vptr);
+ kmem_cache_free(dm_fsys_map_cachep, map);
+}
+
+
+/* Called by a filesystem module that is loading into the kernel.
+ * This creates a new dm_vector_map_t which serves as the prototype
+ * for instances of this fstype and also provides the list_head
+ * for instances of this fstype. The prototypes are the only ones
+ * on the fstype_list, and will never be on the sb_list.
+ */
+void
+dmapi_register(
+ struct file_system_type *fstype,
+ struct filesystem_dmapi_operations *dmapiops)
+{
+ dm_vector_map_t *proto;
+
+ proto = kmem_cache_alloc(dm_fsys_map_cachep, GFP_KERNEL);
+ if (proto == NULL) {
+		printk("%s/%d: kmem_cache_alloc(dm_fsys_map_cachep) "
+		       "returned NULL\n", __FUNCTION__, __LINE__);
+ return;
+ }
+ memset(proto, 0, sizeof(*proto));
+ proto->dmapiops = dmapiops;
+ proto->f_type = fstype;
+ INIT_LIST_HEAD(&proto->sb_list);
+ INIT_LIST_HEAD(&proto->ftype_list);
+
+ spin_lock(&dm_fsys_lock);
+ ASSERT(dm_fsys_map_by_fstype(fstype) == NULL);
+ list_add(&proto->ftype_list, &dm_fsys_map);
+ ftype_list();
+ spin_unlock(&dm_fsys_lock);
+}
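+
+/* Usage sketch (illustrative, assuming a hypothetical "myfs" module): the
+   filesystem registers its DMAPI entry points at module load and removes
+   them again at unload:
+
+	static struct filesystem_dmapi_operations myfs_dmapi_ops = {
+		.get_fsys_vector = myfs_get_fsys_vector,
+		.fh_to_inode	 = myfs_fh_to_inode,
+		.inode_to_fh	 = myfs_inode_to_fh,
+		.get_fsid	 = myfs_get_fsid,
+	};
+
+	dmapi_register(&myfs_fs_type, &myfs_dmapi_ops);	  (at module init)
+	dmapi_unregister(&myfs_fs_type);		  (at module exit)
+*/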
+
+/* Called by a filesystem module that is unloading from the kernel */
+void
+dmapi_unregister(
+ struct file_system_type *fstype)
+{
+ struct list_head *p;
+ dm_vector_map_t *proto;
+ dm_vector_map_t *m;
+
+ spin_lock(&dm_fsys_lock);
+ ASSERT(!list_empty(&dm_fsys_map));
+ proto = dm_fsys_map_by_fstype(fstype);
+ ASSERT(proto);
+ list_del(&proto->ftype_list);
+ spin_unlock(&dm_fsys_lock);
+
+ p = &proto->sb_list;
+ while (!list_empty(p)) {
+ m = list_entry(p->next, dm_vector_map_t, sb_list);
+ list_del(&m->sb_list);
+ ASSERT(m->vptr);
+ kmem_cache_free(dm_fsys_vptr_cachep, m->vptr);
+ kmem_cache_free(dm_fsys_map_cachep, m);
+ }
+ kmem_cache_free(dm_fsys_map_cachep, proto);
+}
+
+
+int
+dmapi_registered(
+ struct file_system_type *fstype,
+ struct filesystem_dmapi_operations **dmapiops)
+{
+ return 0;
+}
--- /dev/null
+++ b/fs/dmapi/dmapi_port.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef _DMAPI_PORT_H
+#define _DMAPI_PORT_H
+
+#include <asm/div64.h>
+#include "sv.h"
+
+#include <linux/sched.h> /* preempt needs this */
+#include <linux/spinlock.h>
+
+typedef spinlock_t lock_t;
+
+#define spinlock_init(lock, name) spin_lock_init(lock)
+#define spinlock_destroy(lock)
+
+#define mutex_spinlock(lock) ({ spin_lock(lock); 0; })
+#define mutex_spinunlock(lock, s) spin_unlock(lock)
+#define nested_spinlock(lock) spin_lock(lock)
+#define nested_spinunlock(lock) spin_unlock(lock)
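+
+/* These wrappers preserve the IRIX-style interface in which taking a lock
+   returned a cookie that had to be handed back on unlock. On Linux the
+   cookie carries no state (mutex_spinlock always yields 0), but callers
+   keep the idiom. A minimal sketch, assuming a lock_t named 'lock':
+
+	unsigned long lc;
+
+	lc = mutex_spinlock(&lock);
+	(critical section)
+	mutex_spinunlock(&lock, lc);
+*/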
+
+typedef signed int __int32_t;
+typedef unsigned int __uint32_t;
+typedef signed long long int __int64_t;
+typedef unsigned long long int __uint64_t;
+
+
+/* __psint_t is the same size as a pointer */
+#if (BITS_PER_LONG == 32)
+typedef __int32_t __psint_t;
+typedef __uint32_t __psunsigned_t;
+#elif (BITS_PER_LONG == 64)
+typedef __int64_t __psint_t;
+typedef __uint64_t __psunsigned_t;
+#else
+#error BITS_PER_LONG must be 32 or 64
+#endif
+
+static inline void
+assfail(char *a, char *f, int l)
+{
+ printk("DMAPI assertion failed: %s, file: %s, line: %d\n", a, f, l);
+ BUG();
+}
+
+#ifdef DEBUG
+#define doass 1
+# ifdef lint
+# define ASSERT(EX) ((void)0) /* avoid "constant in conditional" babble */
+# else
+# define ASSERT(EX) ((!doass||(EX))?((void)0):assfail(#EX, __FILE__, __LINE__))
+# endif /* lint */
+#else
+# define ASSERT(x) ((void)0)
+#endif /* DEBUG */
+
+#define ASSERT_ALWAYS(EX) ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__))
+
+
+#if defined __i386__
+
+/* Side effect free 64 bit mod operation */
+static inline __u32 dmapi_do_mod(void *a, __u32 b, int n)
+{
+ switch (n) {
+ case 4:
+ return *(__u32 *)a % b;
+ case 8:
+ {
+ unsigned long __upper, __low, __high, __mod;
+ __u64 c = *(__u64 *)a;
+ __upper = __high = c >> 32;
+ __low = c;
+ if (__high) {
+ __upper = __high % (b);
+ __high = __high / (b);
+ }
+ asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper));
+ asm("":"=A" (c):"a" (__low),"d" (__high));
+ return __mod;
+ }
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
+#else
+
+/* Side effect free 64 bit mod operation */
+static inline __u32 dmapi_do_mod(void *a, __u32 b, int n)
+{
+ switch (n) {
+ case 4:
+ return *(__u32 *)a % b;
+ case 8:
+ {
+ __u64 c = *(__u64 *)a;
+ return do_div(c, b);
+ }
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
+#endif
+
+#define do_mod(a, b) dmapi_do_mod(&(a), (b), sizeof(a))
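+
+/* Usage sketch: do_mod() evaluates 'a % b' without modifying 'a', selecting
+   the 32-bit or 64-bit path from sizeof(a). The variable names here are
+   illustrative:
+
+	__u64 offset;
+	__u32 rem;
+
+	rem = do_mod(offset, chunksize);	(offset is left unchanged)
+*/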
+
+#endif /* _DMAPI_PORT_H */
--- /dev/null
+++ b/fs/dmapi/dmapi_private.h
@@ -0,0 +1,619 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef _DMAPI_PRIVATE_H
+#define _DMAPI_PRIVATE_H
+
+#include <linux/slab.h>
+#include "dmapi_port.h"
+#include "sv.h"
+
+#ifdef CONFIG_PROC_FS
+#define DMAPI_PROCFS "orig/fs/dmapi_v2" /* DMAPI device in /proc. */
+#define DMAPI_DBG_PROCFS "orig/fs/dmapi_d" /* DMAPI debugging dir */
+#endif
+
+extern struct kmem_cache *dm_fsreg_cachep;
+extern struct kmem_cache *dm_tokdata_cachep;
+extern struct kmem_cache *dm_session_cachep;
+extern struct kmem_cache *dm_fsys_map_cachep;
+extern struct kmem_cache *dm_fsys_vptr_cachep;
+
+typedef struct dm_tokdata {
+ struct dm_tokdata *td_next;
+ struct dm_tokevent *td_tevp; /* pointer to owning tevp */
+ int td_app_ref; /* # app threads currently active */
+ dm_right_t td_orig_right; /* original right held when created */
+ dm_right_t td_right; /* current right held for this handle */
+ short td_flags;
+ short td_type; /* object type */
+ int td_vcount; /* # of current application VN_HOLDs */
+ struct inode *td_ip; /* inode pointer */
+ dm_handle_t td_handle; /* handle for ip or sb */
+} dm_tokdata_t;
+
+/* values for td_type */
+
+#define DM_TDT_NONE 0x00 /* td_handle is empty */
+#define DM_TDT_VFS 0x01 /* td_handle points to a sb */
+#define DM_TDT_REG 0x02 /* td_handle points to a file */
+#define DM_TDT_DIR 0x04 /* td_handle points to a directory */
+#define DM_TDT_LNK 0x08 /* td_handle points to a symlink */
+#define DM_TDT_OTH 0x10 /* some other object eg. pipe, socket */
+
+#define DM_TDT_VNO (DM_TDT_REG|DM_TDT_DIR|DM_TDT_LNK|DM_TDT_OTH)
+#define DM_TDT_ANY (DM_TDT_VFS|DM_TDT_REG|DM_TDT_DIR|DM_TDT_LNK|DM_TDT_OTH)
+
+/* values for td_flags */
+
+#define DM_TDF_ORIG 0x0001 /* part of the original event */
+#define DM_TDF_EVTREF 0x0002 /* event thread holds inode reference */
+#define DM_TDF_STHREAD 0x0004 /* only one app can use this handle */
+#define DM_TDF_RIGHT 0x0008 /* vcount bumped for dm_request_right */
+#define DM_TDF_HOLD 0x0010 /* vcount bumped for dm_obj_ref_hold */
+
+
+/* Because some events contain __u64 fields, we force te_msg and te_event
+ to always be 8-byte aligned. In order to send more than one message in
+ a single dm_get_events() call, we also ensure that each message is an
+ 8-byte multiple.
+*/
+
+typedef struct dm_tokevent {
+ struct dm_tokevent *te_next;
+ struct dm_tokevent *te_hashnext; /* hash chain */
+ lock_t te_lock; /* lock for all fields but te_*next.
+ * te_next and te_hashnext are
+ * protected by the session lock.
+ */
+ short te_flags;
+ short te_allocsize; /* alloc'ed size of this structure */
+ sv_t te_evt_queue; /* queue waiting for dm_respond_event */
+ sv_t te_app_queue; /* queue waiting for handle access */
+ int te_evt_ref; /* number of event procs using token */
+ int te_app_ref; /* number of app procs using token */
+ int te_app_slp; /* number of app procs sleeping */
+ int te_reply; /* return errno for sync messages */
+ dm_tokdata_t *te_tdp; /* list of handle/right pairs */
+ union {
+ __u64 align; /* force alignment of te_msg */
+ dm_eventmsg_t te_msg; /* user visible part */
+ } te_u;
+ __u64 te_event; /* start of dm_xxx_event_t message */
+} dm_tokevent_t;
+
+#define te_msg te_u.te_msg
+
+/* values for te_flags */
+
+#define DM_TEF_LOCKED 0x0001 /* event "locked" by dm_get_events() */
+#define DM_TEF_INTERMED 0x0002 /* a dm_pending reply was received */
+#define DM_TEF_FINAL 0x0004 /* dm_respond_event has been received */
+#define DM_TEF_HASHED 0x0010 /* event is on hash chain */
+#define DM_TEF_FLUSH 0x0020 /* flushing threads from queues */
+
+
+#ifdef CONFIG_DMAPI_DEBUG
+#define DM_SHASH_DEBUG
+#endif
+
+typedef struct dm_sesshash {
+ dm_tokevent_t *h_next; /* ptr to chain of tokevents */
+#ifdef DM_SHASH_DEBUG
+ int maxlength;
+ int curlength;
+ int num_adds;
+ int num_dels;
+ int dup_hits;
+#endif
+} dm_sesshash_t;
+
+
+typedef struct dm_eventq {
+ dm_tokevent_t *eq_head;
+ dm_tokevent_t *eq_tail;
+ int eq_count; /* size of queue */
+} dm_eventq_t;
+
+
+typedef struct dm_session {
+ struct dm_session *sn_next; /* sessions linkage */
+ dm_sessid_t sn_sessid; /* user-visible session number */
+ u_int sn_flags;
+ lock_t sn_qlock; /* lock for newq/delq related fields */
+ sv_t sn_readerq; /* waiting for message on sn_newq */
+ sv_t sn_writerq; /* waiting for room on sn_newq */
+ u_int sn_readercnt; /* count of waiting readers */
+	u_int		sn_writercnt;	/* count of waiting writers */
+ dm_eventq_t sn_newq; /* undelivered event queue */
+ dm_eventq_t sn_delq; /* delivered event queue */
+ dm_eventq_t sn_evt_writerq; /* events of thrds in sn_writerq */
+ dm_sesshash_t *sn_sesshash; /* buckets for tokevent hash chains */
+#ifdef DM_SHASH_DEBUG
+ int sn_buckets_in_use;
+ int sn_max_buckets_in_use;
+#endif
+ char sn_info[DM_SESSION_INFO_LEN]; /* user-supplied info */
+} dm_session_t;
+
+/* values for sn_flags */
+
+#define DM_SN_WANTMOUNT 0x0001 /* session wants to get mount events */
+
+
+typedef enum {
+ DM_STATE_MOUNTING,
+ DM_STATE_MOUNTED,
+ DM_STATE_UNMOUNTING,
+ DM_STATE_UNMOUNTED
+} dm_fsstate_t;
+
+
+typedef struct dm_fsreg {
+ struct dm_fsreg *fr_next;
+ struct super_block *fr_sb; /* filesystem pointer */
+ dm_tokevent_t *fr_tevp;
+ dm_fsid_t fr_fsid; /* filesystem ID */
+ void *fr_msg; /* dm_mount_event_t for filesystem */
+ int fr_msgsize; /* size of dm_mount_event_t */
+ dm_fsstate_t fr_state;
+ sv_t fr_dispq;
+ int fr_dispcnt;
+ dm_eventq_t fr_evt_dispq; /* events of thrds in fr_dispq */
+ sv_t fr_queue; /* queue for hdlcnt/sbcnt/unmount */
+ lock_t fr_lock;
+ int fr_hdlcnt; /* threads blocked during unmount */
+ int fr_vfscnt; /* threads in VFS_VGET or VFS_ROOT */
+ int fr_unmount; /* if non-zero, umount is sleeping */
+ dm_attrname_t fr_rattr; /* dm_set_return_on_destroy attribute */
+ dm_session_t *fr_sessp [DM_EVENT_MAX];
+} dm_fsreg_t;
+
+
+
+
+/* events valid in dm_set_disp() when called with a filesystem handle. */
+
+#define DM_VALID_DISP_EVENTS ( \
+ (1 << DM_EVENT_PREUNMOUNT) | \
+ (1 << DM_EVENT_UNMOUNT) | \
+ (1 << DM_EVENT_NOSPACE) | \
+ (1 << DM_EVENT_DEBUT) | \
+ (1 << DM_EVENT_CREATE) | \
+ (1 << DM_EVENT_POSTCREATE) | \
+ (1 << DM_EVENT_REMOVE) | \
+ (1 << DM_EVENT_POSTREMOVE) | \
+ (1 << DM_EVENT_RENAME) | \
+ (1 << DM_EVENT_POSTRENAME) | \
+ (1 << DM_EVENT_LINK) | \
+ (1 << DM_EVENT_POSTLINK) | \
+ (1 << DM_EVENT_SYMLINK) | \
+ (1 << DM_EVENT_POSTSYMLINK) | \
+ (1 << DM_EVENT_READ) | \
+ (1 << DM_EVENT_WRITE) | \
+ (1 << DM_EVENT_TRUNCATE) | \
+ (1 << DM_EVENT_ATTRIBUTE) | \
+ (1 << DM_EVENT_DESTROY) )
+
+
+/* isolate the read/write/trunc events of a dm_tokevent_t */
+
+#define DM_EVENT_RDWRTRUNC(tevp) ( \
+ ((tevp)->te_msg.ev_type == DM_EVENT_READ) || \
+ ((tevp)->te_msg.ev_type == DM_EVENT_WRITE) || \
+ ((tevp)->te_msg.ev_type == DM_EVENT_TRUNCATE) )
+
+
+/*
+ * Global handle hack isolation.
+ */
+
+#define DM_GLOBALHAN(hanp, hlen) (((hanp) == DM_GLOBAL_HANP) && \
+ ((hlen) == DM_GLOBAL_HLEN))
+
+
+#define DM_MAX_MSG_DATA 3960
+
+
+
+/* Supported filesystem function vector functions. */
+
+
+typedef struct {
+ int code_level;
+ dm_fsys_clear_inherit_t clear_inherit;
+ dm_fsys_create_by_handle_t create_by_handle;
+ dm_fsys_downgrade_right_t downgrade_right;
+ dm_fsys_get_allocinfo_rvp_t get_allocinfo_rvp;
+ dm_fsys_get_bulkall_rvp_t get_bulkall_rvp;
+ dm_fsys_get_bulkattr_rvp_t get_bulkattr_rvp;
+ dm_fsys_get_config_t get_config;
+ dm_fsys_get_config_events_t get_config_events;
+ dm_fsys_get_destroy_dmattr_t get_destroy_dmattr;
+ dm_fsys_get_dioinfo_t get_dioinfo;
+ dm_fsys_get_dirattrs_rvp_t get_dirattrs_rvp;
+ dm_fsys_get_dmattr_t get_dmattr;
+ dm_fsys_get_eventlist_t get_eventlist;
+ dm_fsys_get_fileattr_t get_fileattr;
+ dm_fsys_get_region_t get_region;
+ dm_fsys_getall_dmattr_t getall_dmattr;
+ dm_fsys_getall_inherit_t getall_inherit;
+ dm_fsys_init_attrloc_t init_attrloc;
+ dm_fsys_mkdir_by_handle_t mkdir_by_handle;
+ dm_fsys_probe_hole_t probe_hole;
+ dm_fsys_punch_hole_t punch_hole;
+ dm_fsys_read_invis_rvp_t read_invis_rvp;
+ dm_fsys_release_right_t release_right;
+ dm_fsys_remove_dmattr_t remove_dmattr;
+ dm_fsys_request_right_t request_right;
+ dm_fsys_set_dmattr_t set_dmattr;
+ dm_fsys_set_eventlist_t set_eventlist;
+ dm_fsys_set_fileattr_t set_fileattr;
+ dm_fsys_set_inherit_t set_inherit;
+ dm_fsys_set_region_t set_region;
+ dm_fsys_symlink_by_handle_t symlink_by_handle;
+ dm_fsys_sync_by_handle_t sync_by_handle;
+ dm_fsys_upgrade_right_t upgrade_right;
+ dm_fsys_write_invis_rvp_t write_invis_rvp;
+ dm_fsys_obj_ref_hold_t obj_ref_hold;
+} dm_fsys_vector_t;
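+
+/* dm_query_fsys_for_vector() obtains one of these vectors from the
+   filesystem. A filesystem-side initializer might look like the following
+   hedged sketch (the function names are illustrative; DM_CLVL_XOPEN is the
+   standard DMAPI code level):
+
+	static void my_fs_fill_vector(dm_fsys_vector_t *vecp)
+	{
+		vecp->code_level = DM_CLVL_XOPEN;
+		vecp->get_fileattr = my_fs_get_fileattr;
+		vecp->set_region = my_fs_set_region;
+		(and so on for each supported entry point)
+	}
+*/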
+
+
+typedef struct {
+ struct list_head ftype_list; /* list of fstypes */
+ struct list_head sb_list; /* list of sb's per fstype */
+ struct file_system_type *f_type;
+ struct filesystem_dmapi_operations *dmapiops;
+ dm_fsys_vector_t *vptr;
+ struct super_block *sb;
+} dm_vector_map_t;
+
+
+extern dm_session_t *dm_sessions; /* head of session list */
+extern dm_fsreg_t *dm_registers;
+extern lock_t dm_reg_lock; /* lock for registration list */
+
+/*
+ * Kernel only prototypes.
+ */
+
+int dm_find_session_and_lock(
+ dm_sessid_t sid,
+ dm_session_t **sessionpp,
+ unsigned long *lcp);
+
+int dm_find_msg_and_lock(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_tokevent_t **tevpp,
+ unsigned long *lcp);
+
+dm_tokevent_t * dm_evt_create_tevp(
+ dm_eventtype_t event,
+ int variable_size,
+ void **msgpp);
+
+int dm_app_get_tdp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ short types,
+ dm_right_t right,
+ dm_tokdata_t **tdpp);
+
+int dm_get_config_tdp(
+ void __user *hanp,
+ size_t hlen,
+ dm_tokdata_t **tdpp);
+
+void dm_app_put_tdp(
+ dm_tokdata_t *tdp);
+
+void dm_put_tevp(
+ dm_tokevent_t *tevp,
+ dm_tokdata_t *tdp);
+
+void dm_evt_rele_tevp(
+ dm_tokevent_t *tevp,
+ int droprights);
+
+int dm_enqueue_normal_event(
+ struct super_block *sbp,
+ dm_tokevent_t **tevpp,
+ int flags);
+
+int dm_enqueue_mount_event(
+ struct super_block *sbp,
+ dm_tokevent_t *tevp);
+
+int dm_enqueue_sendmsg_event(
+ dm_sessid_t targetsid,
+ dm_tokevent_t *tevp,
+ int synch);
+
+int dm_enqueue_user_event(
+ dm_sessid_t sid,
+ dm_tokevent_t *tevp,
+ dm_token_t *tokenp);
+
+int dm_obj_ref_query_rvp(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen,
+ int *rvp);
+
+int dm_read_invis_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len,
+ void __user *bufp,
+ int *rvp);
+
+int dm_write_invis_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ int flags,
+ dm_off_t off,
+ dm_size_t len,
+ void __user *bufp,
+ int *rvp);
+
+int dm_get_bulkattr_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrloc_t __user *locp,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp,
+ int *rvp);
+
+int dm_get_bulkall_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrname_t __user *attrnamep,
+ dm_attrloc_t __user *locp,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp,
+ int *rvp);
+
+int dm_get_dirattrs_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrloc_t __user *locp,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp,
+ int *rvp);
+
+int dm_get_allocinfo_rvp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t __user *offp,
+ u_int nelem,
+ dm_extent_t __user *extentp,
+ u_int __user *nelemp,
+ int *rvp);
+
+int dm_waitfor_destroy_attrname(
+ struct super_block *sb,
+ dm_attrname_t *attrnamep);
+
+void dm_clear_fsreg(
+ dm_session_t *s);
+
+int dm_add_fsys_entry(
+ struct super_block *sb,
+ dm_tokevent_t *tevp);
+
+void dm_change_fsys_entry(
+ struct super_block *sb,
+ dm_fsstate_t newstate);
+
+void dm_remove_fsys_entry(
+ struct super_block *sb);
+
+dm_fsys_vector_t *dm_fsys_vector(
+ struct inode *ip);
+
+struct filesystem_dmapi_operations *dm_fsys_ops(
+ struct super_block *sb);
+
+void dm_fsys_ops_release(
+ struct super_block *sb);
+
+int dm_waitfor_disp_session(
+ struct super_block *sb,
+ dm_tokevent_t *tevp,
+ dm_session_t **sessionpp,
+ unsigned long *lcp);
+
+struct inode * dm_handle_to_ip (
+ dm_handle_t *handlep,
+ short *typep);
+
+int dm_check_dmapi_ip(
+ struct inode *ip);
+
+dm_tokevent_t * dm_find_mount_tevp_and_lock(
+ dm_fsid_t *fsidp,
+ unsigned long *lcp);
+
+int dm_path_to_hdl(
+ char __user *path,
+ void __user *hanp,
+ size_t __user *hlenp);
+
+int dm_path_to_fshdl(
+ char __user *path,
+ void __user *hanp,
+ size_t __user *hlenp);
+
+int dm_fd_to_hdl(
+ int fd,
+ void __user *hanp,
+ size_t __user *hlenp);
+
+int dm_upgrade_right(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+int dm_downgrade_right(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+int dm_request_right(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int flags,
+ dm_right_t right);
+
+int dm_release_right(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+int dm_query_right(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_right_t __user *rightp);
+
+
+int dm_set_eventlist(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t __user *eventsetp,
+ u_int maxevent);
+
+int dm_obj_ref_hold(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen);
+
+int dm_obj_ref_rele(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen);
+
+int dm_get_eventlist(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int nelem,
+ dm_eventset_t __user *eventsetp,
+ u_int __user *nelemp);
+
+
+int dm_set_disp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t __user *eventsetp,
+ u_int maxevent);
+
+
+int dm_set_return_on_destroy(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep,
+ dm_boolean_t enable);
+
+
+int dm_get_mountinfo(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp);
+
+void dm_link_event(
+ dm_tokevent_t *tevp,
+ dm_eventq_t *queue);
+
+void dm_unlink_event(
+ dm_tokevent_t *tevp,
+ dm_eventq_t *queue);
+
+int dm_open_by_handle_rvp(
+ unsigned int fd,
+ void __user *hanp,
+ size_t hlen,
+ int mode,
+ int *rvp);
+
+int dm_copyin_handle(
+ void __user *hanp,
+ size_t hlen,
+ dm_handle_t *handlep);
+
+int dm_release_disp_threads(
+ dm_fsid_t *fsid,
+ struct inode *inode,
+ int errno);
+
+#endif /* _DMAPI_PRIVATE_H */
--- /dev/null
+++ b/fs/dmapi/dmapi_region.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+
+int
+dm_get_region(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int nelem,
+ dm_region_t __user *regbufp,
+ u_int __user *nelemp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_region(tdp->td_ip, tdp->td_right,
+ nelem, regbufp, nelemp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+
+int
+dm_set_region(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int nelem,
+ dm_region_t __user *regbufp,
+ dm_boolean_t __user *exactflagp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->set_region(tdp->td_ip, tdp->td_right,
+ nelem, regbufp, exactflagp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
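+
+/* Userspace reaches these entry points through libdm. A minimal sketch of
+   marking a file's data as a managed region (values illustrative; per the
+   XDSM spec a region size of zero is taken to extend to end of file):
+
+	dm_region_t rgn;
+	dm_boolean_t exact;
+
+	memset(&rgn, 0, sizeof(rgn));
+	rgn.rg_flags = DM_REGION_READ;
+	dm_set_region(sid, hanp, hlen, DM_NO_TOKEN, 1, &rgn, &exact);
+*/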
--- /dev/null
+++ b/fs/dmapi/dmapi_register.c
@@ -0,0 +1,1638 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/version.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <asm/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+/* LOOKUP_POSITIVE was removed in Linux 2.6 */
+#ifndef LOOKUP_POSITIVE
+#define LOOKUP_POSITIVE 0
+#endif
+
+dm_fsreg_t *dm_registers; /* head of filesystem registration list */
+int dm_fsys_cnt; /* number of filesystems on dm_registers list */
+lock_t dm_reg_lock = SPIN_LOCK_UNLOCKED;/* lock for dm_registers */
+
+
+
+#ifdef CONFIG_PROC_FS
+static int
+fsreg_read_pfs(char *buffer, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int len;
+ int i;
+ dm_fsreg_t *fsrp = (dm_fsreg_t*)data;
+ char statebuf[30];
+
+#define CHKFULL if(len >= count) break;
+#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL;
+
+ switch (fsrp->fr_state) {
+ case DM_STATE_MOUNTING: sprintf(statebuf, "mounting"); break;
+ case DM_STATE_MOUNTED: sprintf(statebuf, "mounted"); break;
+ case DM_STATE_UNMOUNTING: sprintf(statebuf, "unmounting"); break;
+ case DM_STATE_UNMOUNTED: sprintf(statebuf, "unmounted"); break;
+ default:
+ sprintf(statebuf, "unknown:%d", (int)fsrp->fr_state);
+ break;
+ }
+
+ len=0;
+ while(1){
+ ADDBUF("fsrp=0x%p\n", fsrp);
+ ADDBUF("fr_next=0x%p\n", fsrp->fr_next);
+ ADDBUF("fr_sb=0x%p\n", fsrp->fr_sb);
+ ADDBUF("fr_tevp=0x%p\n", fsrp->fr_tevp);
+ ADDBUF("fr_fsid=%c\n", '?');
+ ADDBUF("fr_msg=0x%p\n", fsrp->fr_msg);
+ ADDBUF("fr_msgsize=%d\n", fsrp->fr_msgsize);
+ ADDBUF("fr_state=%s\n", statebuf);
+ ADDBUF("fr_dispq=%c\n", '?');
+ ADDBUF("fr_dispcnt=%d\n", fsrp->fr_dispcnt);
+
+ ADDBUF("fr_evt_dispq.eq_head=0x%p\n", fsrp->fr_evt_dispq.eq_head);
+ ADDBUF("fr_evt_dispq.eq_tail=0x%p\n", fsrp->fr_evt_dispq.eq_tail);
+ ADDBUF("fr_evt_dispq.eq_count=%d\n", fsrp->fr_evt_dispq.eq_count);
+
+ ADDBUF("fr_queue=%c\n", '?');
+ ADDBUF("fr_lock=%c\n", '?');
+ ADDBUF("fr_hdlcnt=%d\n", fsrp->fr_hdlcnt);
+ ADDBUF("fr_vfscnt=%d\n", fsrp->fr_vfscnt);
+ ADDBUF("fr_unmount=%d\n", fsrp->fr_unmount);
+
+ len += sprintf(buffer + len, "fr_rattr=");
+ CHKFULL;
+	for(i = 0; i < DM_ATTR_NAME_SIZE; ++i){
+ ADDBUF("%c", fsrp->fr_rattr.an_chars[i]);
+ }
+ CHKFULL;
+ len += sprintf(buffer + len, "\n");
+ CHKFULL;
+
+ for(i = 0; i < DM_EVENT_MAX; i++){
+ if( fsrp->fr_sessp[i] != NULL ){
+ ADDBUF("fr_sessp[%d]=", i);
+ ADDBUF("0x%p\n", fsrp->fr_sessp[i]);
+ }
+ }
+ CHKFULL;
+
+ break;
+ }
+
+ if (offset >= len) {
+ *start = buffer;
+ *eof = 1;
+ return 0;
+ }
+ *start = buffer + offset;
+ if ((len -= offset) > count)
+ return count;
+ *eof = 1;
+
+ return len;
+}
+#endif
+
+
+/* Returns a pointer to the filesystem structure for the filesystem
+ referenced by fsidp. The caller is responsible for obtaining dm_reg_lock
+ before calling this routine.
+*/
+
+static dm_fsreg_t *
+dm_find_fsreg(
+ dm_fsid_t *fsidp)
+{
+ dm_fsreg_t *fsrp;
+
+ for (fsrp = dm_registers; fsrp; fsrp = fsrp->fr_next) {
+ if (!memcmp(&fsrp->fr_fsid, fsidp, sizeof(*fsidp)))
+ break;
+ }
+ return(fsrp);
+}
+
+
+/* Given a fsid_t, dm_find_fsreg_and_lock() finds the dm_fsreg_t structure
+   for that filesystem if one exists, and returns a pointer to the structure
+ after obtaining its 'fr_lock' so that the caller can safely modify the
+ dm_fsreg_t. The caller is responsible for releasing 'fr_lock'.
+*/
+
+static dm_fsreg_t *
+dm_find_fsreg_and_lock(
+ dm_fsid_t *fsidp,
+ unsigned long *lcp) /* address of returned lock cookie */
+{
+ dm_fsreg_t *fsrp;
+
+ for (;;) {
+ *lcp = mutex_spinlock(&dm_reg_lock);
+
+ if ((fsrp = dm_find_fsreg(fsidp)) == NULL) {
+ mutex_spinunlock(&dm_reg_lock, *lcp);
+ return(NULL);
+ }
+ if (spin_trylock(&fsrp->fr_lock)) {
+ nested_spinunlock(&dm_reg_lock);
+ return(fsrp); /* success */
+ }
+
+ /* If the second lock is not available, drop the first and
+ start over. This gives the CPU a chance to process any
+ interrupts, and also allows processes which want a fr_lock
+ for a different filesystem to proceed.
+ */
+
+ mutex_spinunlock(&dm_reg_lock, *lcp);
+ }
+}
+
+
+/* dm_add_fsys_entry() is called when a DM_EVENT_MOUNT event is about to be
+ sent. It creates a dm_fsreg_t structure for the filesystem and stores a
+ pointer to a copy of the mount event within that structure so that it is
+ available for subsequent dm_get_mountinfo() calls.
+*/
+
+int
+dm_add_fsys_entry(
+ struct super_block *sb,
+ dm_tokevent_t *tevp)
+{
+ dm_fsreg_t *fsrp;
+ int msgsize;
+ void *msg;
+ unsigned long lc; /* lock cookie */
+ dm_fsid_t fsid;
+ struct filesystem_dmapi_operations *dops;
+
+ dops = dm_fsys_ops(sb);
+ ASSERT(dops);
+ dops->get_fsid(sb, &fsid);
+
+ /* Allocate and initialize a dm_fsreg_t structure for the filesystem. */
+
+ msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_event);
+ msg = kmalloc(msgsize, GFP_KERNEL);
+ if (msg == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+ memcpy(msg, &tevp->te_event, msgsize);
+
+ fsrp = kmem_cache_alloc(dm_fsreg_cachep, GFP_KERNEL);
+ if (fsrp == NULL) {
+ kfree(msg);
+ printk("%s/%d: kmem_cache_alloc(dm_fsreg_cachep) returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+ memset(fsrp, 0, sizeof(*fsrp));
+
+ fsrp->fr_sb = sb;
+ fsrp->fr_tevp = tevp;
+ memcpy(&fsrp->fr_fsid, &fsid, sizeof(fsid));
+ fsrp->fr_msg = msg;
+ fsrp->fr_msgsize = msgsize;
+ fsrp->fr_state = DM_STATE_MOUNTING;
+ sv_init(&fsrp->fr_dispq, SV_DEFAULT, "fr_dispq");
+ sv_init(&fsrp->fr_queue, SV_DEFAULT, "fr_queue");
+ spinlock_init(&fsrp->fr_lock, "fr_lock");
+
+ /* If no other mounted DMAPI filesystem already has this same
+ fsid_t, then add this filesystem to the list.
+ */
+
+ lc = mutex_spinlock(&dm_reg_lock);
+
+ if (!dm_find_fsreg(&fsid)) {
+ fsrp->fr_next = dm_registers;
+ dm_registers = fsrp;
+ dm_fsys_cnt++;
+ mutex_spinunlock(&dm_reg_lock, lc);
+#ifdef CONFIG_PROC_FS
+ {
+ char buf[100];
+ struct proc_dir_entry *entry;
+
+ sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
+ entry = create_proc_read_entry(buf, 0, NULL, fsreg_read_pfs, fsrp);
+ }
+#endif
+ return(0);
+ }
+
+ /* A fsid_t collision occurred, so prevent this new filesystem from
+ mounting.
+ */
+
+ mutex_spinunlock(&dm_reg_lock, lc);
+
+ sv_destroy(&fsrp->fr_dispq);
+ sv_destroy(&fsrp->fr_queue);
+ spinlock_destroy(&fsrp->fr_lock);
+ kfree(msg);
+ kmem_cache_free(dm_fsreg_cachep, fsrp);
+ return(-EBUSY);
+}
+
+
+/* dm_change_fsys_entry() is called whenever a filesystem's mount state is
+ about to change. The state is changed to DM_STATE_MOUNTED after a
+ successful DM_EVENT_MOUNT event or after a failed unmount. It is changed
+ to DM_STATE_UNMOUNTING after a successful DM_EVENT_PREUNMOUNT event.
+ Finally, the state is changed to DM_STATE_UNMOUNTED after a successful
+ unmount. It stays in this state until the DM_EVENT_UNMOUNT event is
+ queued, at which point the filesystem entry is removed.
+*/
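+
+/* The legal transitions described above, in one place:
+
+	MOUNTING   -> MOUNTED	 (successful DM_EVENT_MOUNT)
+	MOUNTED    -> UNMOUNTING (successful DM_EVENT_PREUNMOUNT)
+	UNMOUNTING -> MOUNTED	 (failed unmount)
+	UNMOUNTING -> UNMOUNTED	 (successful unmount)
+*/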
+
+void
+dm_change_fsys_entry(
+ struct super_block *sb,
+ dm_fsstate_t newstate)
+{
+ dm_fsreg_t *fsrp;
+ int seq_error;
+ unsigned long lc; /* lock cookie */
+ dm_fsid_t fsid;
+ struct filesystem_dmapi_operations *dops;
+
+ /* Find the filesystem referenced by the sb's fsid_t. This should
+ always succeed.
+ */
+
+ dops = dm_fsys_ops(sb);
+ ASSERT(dops);
+ dops->get_fsid(sb, &fsid);
+
+ if ((fsrp = dm_find_fsreg_and_lock(&fsid, &lc)) == NULL) {
+ panic("dm_change_fsys_entry: can't find DMAPI fsrp for "
+ "sb %p\n", sb);
+ }
+
+ /* Make sure that the new state is acceptable given the current state
+ of the filesystem. Any error here is a major DMAPI/filesystem
+ screwup.
+ */
+
+ seq_error = 0;
+ switch (newstate) {
+ case DM_STATE_MOUNTED:
+ if (fsrp->fr_state != DM_STATE_MOUNTING &&
+ fsrp->fr_state != DM_STATE_UNMOUNTING) {
+ seq_error++;
+ }
+ break;
+ case DM_STATE_UNMOUNTING:
+ if (fsrp->fr_state != DM_STATE_MOUNTED)
+ seq_error++;
+ break;
+ case DM_STATE_UNMOUNTED:
+ if (fsrp->fr_state != DM_STATE_UNMOUNTING)
+ seq_error++;
+ break;
+ default:
+ seq_error++;
+ break;
+ }
+ if (seq_error) {
+ panic("dm_change_fsys_entry: DMAPI sequence error: old state "
+ "%d, new state %d, fsrp %p\n", fsrp->fr_state,
+ newstate, fsrp);
+ }
+
+ /* If the old state was DM_STATE_UNMOUNTING, then processes could be
+ sleeping in dm_handle_to_ip() waiting for their DM_NO_TOKEN handles
+ to be translated to inodes. Wake them up so that they either
+ continue (new state is DM_STATE_MOUNTED) or fail (new state is
+ DM_STATE_UNMOUNTED).
+ */
+
+ if (fsrp->fr_state == DM_STATE_UNMOUNTING) {
+ if (fsrp->fr_hdlcnt)
+ sv_broadcast(&fsrp->fr_queue);
+ }
+
+ /* Change the filesystem's mount state to its new value. */
+
+ fsrp->fr_state = newstate;
+ fsrp->fr_tevp = NULL; /* not valid after DM_STATE_MOUNTING */
+
+ /* If the new state is DM_STATE_UNMOUNTING, wait until any application
+ threads currently in the process of making VFS_VGET and VFS_ROOT
+ calls are done before we let this unmount thread continue the
+ unmount. (We want to make sure that the unmount will see these
+ inode references during its scan.)
+ */
+
+ if (newstate == DM_STATE_UNMOUNTING) {
+ while (fsrp->fr_vfscnt) {
+ fsrp->fr_unmount++;
+ sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
+ lc = mutex_spinlock(&fsrp->fr_lock);
+ fsrp->fr_unmount--;
+ }
+ }
+
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+}
+
+
+/* dm_remove_fsys_entry() gets called after a failed mount or after an
+ DM_EVENT_UNMOUNT event has been queued. (The filesystem entry must stay
+ until the DM_EVENT_UNMOUNT reply is queued so that the event can use the
+ 'fr_sessp' list to see which session to send the event to.)
+*/
+
+void
+dm_remove_fsys_entry(
+ struct super_block *sb)
+{
+ dm_fsreg_t **fsrpp;
+ dm_fsreg_t *fsrp;
+ unsigned long lc; /* lock cookie */
+ struct filesystem_dmapi_operations *dops;
+ dm_fsid_t fsid;
+
+ dops = dm_fsys_ops(sb);
+ ASSERT(dops);
+ dops->get_fsid(sb, &fsid);
+
+ /* Find the filesystem referenced by the sb's fsid_t and dequeue
+ it after verifying that the fr_state shows a filesystem that is
+ either mounting or unmounted.
+ */
+
+ lc = mutex_spinlock(&dm_reg_lock);
+
+ fsrpp = &dm_registers;
+ while ((fsrp = *fsrpp) != NULL) {
+ if (!memcmp(&fsrp->fr_fsid, &fsid, sizeof(fsrp->fr_fsid)))
+ break;
+ fsrpp = &fsrp->fr_next;
+ }
+ if (fsrp == NULL) {
+ mutex_spinunlock(&dm_reg_lock, lc);
+ panic("dm_remove_fsys_entry: can't find DMAPI fsrp for "
+ "sb %p\n", sb);
+ }
+
+ nested_spinlock(&fsrp->fr_lock);
+
+ /* Verify that it makes sense to remove this entry. */
+
+ if (fsrp->fr_state != DM_STATE_MOUNTING &&
+ fsrp->fr_state != DM_STATE_UNMOUNTED) {
+ nested_spinunlock(&fsrp->fr_lock);
+ mutex_spinunlock(&dm_reg_lock, lc);
+ panic("dm_remove_fsys_entry: DMAPI sequence error: old state "
+ "%d, fsrp %p\n", fsrp->fr_state, fsrp);
+ }
+
+ *fsrpp = fsrp->fr_next;
+ dm_fsys_cnt--;
+
+ nested_spinunlock(&dm_reg_lock);
+
+ /* Since the filesystem is about to finish unmounting, we must be sure
+ that no inodes are being referenced within the filesystem before we
+ let this event thread continue. If the filesystem is currently in
+ state DM_STATE_MOUNTING, then we know by definition that there can't
+ be any references. If the filesystem is DM_STATE_UNMOUNTED, then
+ any application threads referencing handles with DM_NO_TOKEN should
+ have already been awakened by dm_change_fsys_entry and should be
+ long gone by now. Just in case they haven't yet left, sleep here
+ until they are really gone.
+ */
+
+ while (fsrp->fr_hdlcnt) {
+ fsrp->fr_unmount++;
+ sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
+ lc = mutex_spinlock(&fsrp->fr_lock);
+ fsrp->fr_unmount--;
+ }
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+
+ /* Release all memory. */
+
+#ifdef CONFIG_PROC_FS
+ {
+ char buf[100];
+ sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
+ remove_proc_entry(buf, NULL);
+ }
+#endif
+ dm_fsys_ops_release(sb);
+ sv_destroy(&fsrp->fr_dispq);
+ sv_destroy(&fsrp->fr_queue);
+ spinlock_destroy(&fsrp->fr_lock);
+ kfree(fsrp->fr_msg);
+ kmem_cache_free(dm_fsreg_cachep, fsrp);
+}
+
+
+/* Get an inode for the object referenced by handlep. We cannot use
+ altgetvfs() because it fails if the VFS_OFFLINE bit is set, which means
+ that any call to dm_handle_to_ip() while a umount is in progress would
+ return an error, even if the umount can't possibly succeed because users
+ are in the filesystem. The requests would start to fail as soon as the
+ umount begins, even before the application receives the DM_EVENT_PREUNMOUNT
+ event.
+
+ dm_handle_to_ip() emulates the behavior of lookup() while an unmount is
+ in progress. Any call to dm_handle_to_ip() while the filesystem is in the
+ DM_STATE_UNMOUNTING state will block. If the unmount eventually succeeds,
+ the requests will wake up and fail. If the unmount fails, the requests will
+ wake up and complete normally.
+
+ While a filesystem is in state DM_STATE_MOUNTING, dm_handle_to_ip() will
+ fail all requests. Per the DMAPI spec, the only handles in the filesystem
+ which are valid during a mount event are the handles within the event
+ itself.
+*/
+
+struct inode *
+dm_handle_to_ip(
+ dm_handle_t *handlep,
+ short *typep)
+{
+ dm_fsreg_t *fsrp;
+ short type;
+ unsigned long lc; /* lock cookie */
+ int error = 0;
+ dm_fid_t *fidp;
+ struct super_block *sb;
+ struct inode *ip;
+ int filetype;
+ struct filesystem_dmapi_operations *dmapiops;
+
+ if ((fsrp = dm_find_fsreg_and_lock(&handlep->ha_fsid, &lc)) == NULL)
+ return NULL;
+
+ fidp = (dm_fid_t*)&handlep->ha_fid;
+ /* If mounting, and we are not asking for a filesystem handle,
+ * then fail the request. (dm_fid_len==0 for fshandle)
+ */
+ if ((fsrp->fr_state == DM_STATE_MOUNTING) &&
+ (fidp->dm_fid_len != 0)) {
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+ return NULL;
+ }
+
+ for (;;) {
+ if (fsrp->fr_state == DM_STATE_MOUNTING)
+ break;
+ if (fsrp->fr_state == DM_STATE_MOUNTED)
+ break;
+ if (fsrp->fr_state == DM_STATE_UNMOUNTED) {
+ if (fsrp->fr_unmount && fsrp->fr_hdlcnt == 0)
+ sv_broadcast(&fsrp->fr_queue);
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+ return NULL;
+ }
+
+ /* Must be DM_STATE_UNMOUNTING. */
+
+ fsrp->fr_hdlcnt++;
+ sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
+ lc = mutex_spinlock(&fsrp->fr_lock);
+ fsrp->fr_hdlcnt--;
+ }
+
+ fsrp->fr_vfscnt++;
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+
+ /* Now that the mutex is released, wait until we have access to the
+ inode.
+ */
+
+ sb = fsrp->fr_sb;
+ error = -ENOSYS;
+ dmapiops = dm_fsys_ops(sb);
+ ASSERT(dmapiops);
+ if (dmapiops->fh_to_inode)
+ error = dmapiops->fh_to_inode(sb, &ip, (void*)fidp);
+
+ lc = mutex_spinlock(&fsrp->fr_lock);
+
+ fsrp->fr_vfscnt--;
+ if (fsrp->fr_unmount && fsrp->fr_vfscnt == 0)
+ sv_broadcast(&fsrp->fr_queue);
+
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+ if (error || ip == NULL)
+ return NULL;
+
+ filetype = ip->i_mode & S_IFMT;
+ if (fidp->dm_fid_len == 0) {
+ type = DM_TDT_VFS;
+ } else if (filetype == S_IFREG) {
+ type = DM_TDT_REG;
+ } else if (filetype == S_IFDIR) {
+ type = DM_TDT_DIR;
+ } else if (filetype == S_IFLNK) {
+ type = DM_TDT_LNK;
+ } else {
+ type = DM_TDT_OTH;
+ }
+ *typep = type;
+ return ip;
+}
+
+
+int
+dm_ip_to_handle(
+ struct inode *ip,
+ dm_handle_t *handlep)
+{
+ int error;
+ dm_fid_t fid;
+ dm_fsid_t fsid;
+ int hsize;
+ struct filesystem_dmapi_operations *dops;
+
+ dops = dm_fsys_ops(ip->i_sb);
+ ASSERT(dops);
+
+ error = dops->inode_to_fh(ip, &fid, &fsid);
+ if (error)
+ return error;
+
+ memcpy(&handlep->ha_fsid, &fsid, sizeof(fsid));
+ memcpy(&handlep->ha_fid, &fid, fid.dm_fid_len + sizeof fid.dm_fid_len);
+ hsize = DM_HSIZE(*handlep);
+ memset((char *)handlep + hsize, 0, sizeof(*handlep) - hsize);
+ return 0;
+}
+
+
+/* Given an inode, check if that inode resides in filesystem that supports
+ DMAPI. Returns zero if the inode is in a DMAPI filesystem, otherwise
+ returns an errno.
+*/
+
+int
+dm_check_dmapi_ip(
+ struct inode *ip)
+{
+ dm_handle_t handle;
+ /* REFERENCED */
+ dm_fsreg_t *fsrp;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ if ((error = dm_ip_to_handle(ip, &handle)) != 0)
+ return(error);
+
+ if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL)
+ return(-EBADF);
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+ return(0);
+}
+
+
+/* Return a pointer to the DM_EVENT_MOUNT event while a mount is still in
+ progress. This is only called by dm_get_config and dm_get_config_events
+ which need to access the filesystem during a mount but which don't have
+ a session and token to use.
+*/
+
+dm_tokevent_t *
+dm_find_mount_tevp_and_lock(
+ dm_fsid_t *fsidp,
+ unsigned long *lcp) /* address of returned lock cookie */
+{
+ dm_fsreg_t *fsrp;
+
+ if ((fsrp = dm_find_fsreg_and_lock(fsidp, lcp)) == NULL)
+ return(NULL);
+
+ if (!fsrp->fr_tevp || fsrp->fr_state != DM_STATE_MOUNTING) {
+ mutex_spinunlock(&fsrp->fr_lock, *lcp);
+ return(NULL);
+ }
+ nested_spinlock(&fsrp->fr_tevp->te_lock);
+ nested_spinunlock(&fsrp->fr_lock);
+ return(fsrp->fr_tevp);
+}
+
+
+/* Wait interruptibly until a session registers disposition for 'event' in
+ filesystem 'sb'. Upon successful exit, both the filesystem's dm_fsreg_t
+ structure and the session's dm_session_t structure are locked. The caller
+ is responsible for unlocking both structures using the returned cookies.
+
+ Warning: The locks can be dropped in any order, but the 'lc2p' cookie MUST
+ BE USED FOR THE FIRST UNLOCK, and the lc1p cookie must be used for the
+ second unlock. If this is not done, the CPU will be interruptible while
+ holding a mutex, which could deadlock the machine!
+*/
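+
+/* For example, dm_waitfor_destroy_attrname() below drops its locks in
+   exactly this order:
+
+	mutex_spinunlock(&s->sn_qlock, lc2);	(second lock, dropped first)
+	mutex_spinunlock(&fsrp->fr_lock, lc1);
+*/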
+
+static int
+dm_waitfor_disp(
+ struct super_block *sb,
+ dm_tokevent_t *tevp,
+ dm_fsreg_t **fsrpp,
+ unsigned long *lc1p, /* addr of first returned lock cookie */
+ dm_session_t **sessionpp,
+ unsigned long *lc2p) /* addr of 2nd returned lock cookie */
+{
+ dm_eventtype_t event = tevp->te_msg.ev_type;
+ dm_session_t *s;
+ dm_fsreg_t *fsrp;
+ dm_fsid_t fsid;
+ struct filesystem_dmapi_operations *dops;
+
+ dops = dm_fsys_ops(sb);
+ ASSERT(dops);
+
+ dops->get_fsid(sb, &fsid);
+ if ((fsrp = dm_find_fsreg_and_lock(&fsid, lc1p)) == NULL)
+ return -ENOENT;
+
+ /* If no session is registered for this event in the specified
+	   filesystem, then sleep interruptibly until one registers.
+ */
+
+ for (;;) {
+ int rc = 0;
+
+ /* The dm_find_session_and_lock() call is needed because a
+ session that is in the process of being removed might still
+ be in the dm_fsreg_t structure but won't be in the
+ dm_sessions list.
+ */
+
+ if ((s = fsrp->fr_sessp[event]) != NULL &&
+ dm_find_session_and_lock(s->sn_sessid, &s, lc2p) == 0) {
+ break;
+ }
+
+		/* No one is currently registered. DM_EVENT_UNMOUNT events
+ don't wait for anyone to register because the unmount is
+ already past the point of no return.
+ */
+
+ if (event == DM_EVENT_UNMOUNT) {
+ mutex_spinunlock(&fsrp->fr_lock, *lc1p);
+ return -ENOENT;
+ }
+
+ /* Wait until a session registers for disposition of this
+ event.
+ */
+
+ fsrp->fr_dispcnt++;
+ dm_link_event(tevp, &fsrp->fr_evt_dispq);
+
+ sv_wait_sig(&fsrp->fr_dispq, 1, &fsrp->fr_lock, *lc1p);
+ rc = signal_pending(current);
+
+ *lc1p = mutex_spinlock(&fsrp->fr_lock);
+ fsrp->fr_dispcnt--;
+ dm_unlink_event(tevp, &fsrp->fr_evt_dispq);
+#ifdef HAVE_DM_QUEUE_FLUSH
+ if (tevp->te_flags & DM_TEF_FLUSH) {
+ mutex_spinunlock(&fsrp->fr_lock, *lc1p);
+ return tevp->te_reply;
+ }
+#endif /* HAVE_DM_QUEUE_FLUSH */
+ if (rc) { /* if signal was received */
+ mutex_spinunlock(&fsrp->fr_lock, *lc1p);
+ return -EINTR;
+ }
+ }
+ *sessionpp = s;
+ *fsrpp = fsrp;
+ return 0;
+}
+
+
+/* Returns the session pointer for the session registered for an event
+ in the given sb. If successful, the session is locked upon return. The
+ caller is responsible for releasing the lock. If no session is currently
+ registered for the event, dm_waitfor_disp_session() will sleep interruptibly
+ until a registration occurs.
+*/
+
+int
+dm_waitfor_disp_session(
+ struct super_block *sb,
+ dm_tokevent_t *tevp,
+ dm_session_t **sessionpp,
+ unsigned long *lcp)
+{
+ dm_fsreg_t *fsrp;
+ unsigned long lc2;
+ int error;
+
+	if (tevp->te_msg.ev_type < 0 || tevp->te_msg.ev_type >= DM_EVENT_MAX)
+ return(-EIO);
+
+ error = dm_waitfor_disp(sb, tevp, &fsrp, lcp, sessionpp, &lc2);
+ if (!error)
+ mutex_spinunlock(&fsrp->fr_lock, lc2); /* rev. cookie order*/
+ return(error);
+}
+
+
+/* Find the session registered for the DM_EVENT_DESTROY event on the specified
+ filesystem, sleeping if necessary until registration occurs. Once found,
+ copy the session's return-on-destroy attribute name, if any, back to the
+ caller.
+*/
+
+int
+dm_waitfor_destroy_attrname(
+ struct super_block *sbp,
+ dm_attrname_t *attrnamep)
+{
+ dm_tokevent_t *tevp;
+ dm_session_t *s;
+ dm_fsreg_t *fsrp;
+ int error;
+ unsigned long lc1; /* first lock cookie */
+ unsigned long lc2; /* second lock cookie */
+ void *msgp;
+
+	tevp = dm_evt_create_tevp(DM_EVENT_DESTROY, 1, (void**)&msgp);
+	if (tevp == NULL)
+		return(-ENOMEM);
+	error = dm_waitfor_disp(sbp, tevp, &fsrp, &lc1, &s, &lc2);
+ if (!error) {
+ *attrnamep = fsrp->fr_rattr; /* attribute or zeros */
+ mutex_spinunlock(&s->sn_qlock, lc2); /* rev. cookie order */
+ mutex_spinunlock(&fsrp->fr_lock, lc1);
+ }
+ dm_evt_rele_tevp(tevp,0);
+ return(error);
+}
+
+
+/* Unregisters the session for the disposition of all events on all
+ filesystems. This routine is not called until the session has been
+ dequeued from the session list and its session lock has been dropped,
+ but before the actual structure is freed, so it is safe to grab the
+ 'dm_reg_lock' here. If dm_waitfor_disp_session() happens to be called
+ by another thread, it won't find this session on the session list and
+ will wait until a new session registers.
+*/
+
+void
+dm_clear_fsreg(
+ dm_session_t *s)
+{
+ dm_fsreg_t *fsrp;
+ int event;
+ unsigned long lc; /* lock cookie */
+
+ lc = mutex_spinlock(&dm_reg_lock);
+
+ for (fsrp = dm_registers; fsrp != NULL; fsrp = fsrp->fr_next) {
+ nested_spinlock(&fsrp->fr_lock);
+ for (event = 0; event < DM_EVENT_MAX; event++) {
+ if (fsrp->fr_sessp[event] != s)
+ continue;
+ fsrp->fr_sessp[event] = NULL;
+ if (event == DM_EVENT_DESTROY)
+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
+ }
+ nested_spinunlock(&fsrp->fr_lock);
+ }
+
+ mutex_spinunlock(&dm_reg_lock, lc);
+}
+
+
+/*
+ * Return the handle for the object named by path.
+ */
+
+int
+dm_path_to_hdl(
+ char __user *path, /* any path name */
+ void __user *hanp, /* user's data buffer */
+ size_t __user *hlenp) /* set to size of data copied */
+{
+ /* REFERENCED */
+ dm_fsreg_t *fsrp;
+ dm_handle_t handle;
+ size_t hlen;
+ int error;
+ unsigned long lc; /* lock cookie */
+ struct nameidata nd;
+ struct inode *inode;
+ size_t len;
+ char *name;
+ struct filesystem_dmapi_operations *dops;
+
+ /* XXX get things straightened out so getname() works here? */
+ if (!(len = strnlen_user(path, PATH_MAX)))
+ return(-EFAULT);
+ if (len == 1)
+ return(-ENOENT);
+ if (len > PATH_MAX)
+ return(-ENAMETOOLONG);
+ name = kmalloc(len, GFP_KERNEL);
+ if (name == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return(-ENOMEM);
+ }
+ if (copy_from_user(name, path, len)) {
+ kfree(name);
+ return(-EFAULT);
+ }
+
+ error = path_lookup(name, LOOKUP_POSITIVE, &nd);
+ kfree(name);
+ if (error)
+ return error;
+
+ ASSERT(nd.path.dentry);
+ ASSERT(nd.path.dentry->d_inode);
+ inode = igrab(nd.path.dentry->d_inode);
+ path_put(&nd.path);
+
+ dops = dm_fsys_ops(inode->i_sb);
+ if (dops == NULL) {
+ /* No longer in a dmapi-capable filesystem...Toto */
+ iput(inode);
+ return -EINVAL;
+ }
+
+ /* we need the inode */
+ error = dm_ip_to_handle(inode, &handle);
+ iput(inode);
+ if (error)
+ return(error);
+
+ if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL)
+ return(-EBADF);
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+
+ hlen = DM_HSIZE(handle);
+
+ if (copy_to_user(hanp, &handle, (int)hlen))
+ return(-EFAULT);
+ if (put_user(hlen,hlenp))
+ return(-EFAULT);
+ return 0;
+}
+
+
+/*
+ * Return the handle for the file system containing the object named by path.
+ */
+
+int
+dm_path_to_fshdl(
+ char __user *path, /* any path name */
+ void __user *hanp, /* user's data buffer */
+ size_t __user *hlenp) /* set to size of data copied */
+{
+ /* REFERENCED */
+ dm_fsreg_t *fsrp;
+ dm_handle_t handle;
+ size_t hlen;
+ int error;
+ unsigned long lc; /* lock cookie */
+ struct nameidata nd;
+ struct inode *inode;
+ size_t len;
+ char *name;
+ struct filesystem_dmapi_operations *dops;
+
+ /* XXX get things straightened out so getname() works here? */
+ if(!(len = strnlen_user(path, PATH_MAX)))
+ return(-EFAULT);
+ if (len == 1)
+ return(-ENOENT);
+ if (len > PATH_MAX)
+ return(-ENAMETOOLONG);
+ name = kmalloc(len, GFP_KERNEL);
+ if (name == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return(-ENOMEM);
+ }
+ if (copy_from_user(name, path, len)) {
+ kfree(name);
+ return(-EFAULT);
+ }
+
+ error = path_lookup(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd);
+ kfree(name);
+ if (error)
+ return error;
+
+ ASSERT(nd.path.dentry);
+ ASSERT(nd.path.dentry->d_inode);
+
+ inode = igrab(nd.path.dentry->d_inode);
+ path_put(&nd.path);
+
+ dops = dm_fsys_ops(inode->i_sb);
+ if (dops == NULL) {
+ /* No longer in a dmapi-capable filesystem...Toto */
+ iput(inode);
+ return -EINVAL;
+ }
+
+ error = dm_ip_to_handle(inode, &handle);
+ iput(inode);
+
+ if (error)
+ return(error);
+
+ if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL)
+ return(-EBADF);
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+
+ hlen = DM_FSHSIZE;
+ if(copy_to_user(hanp, &handle, (int)hlen))
+ return(-EFAULT);
+ if(put_user(hlen,hlenp))
+ return(-EFAULT);
+ return 0;
+}
+
+
+int
+dm_fd_to_hdl(
+ int fd, /* any file descriptor */
+ void __user *hanp, /* user's data buffer */
+ size_t __user *hlenp) /* set to size of data copied */
+{
+ /* REFERENCED */
+ dm_fsreg_t *fsrp;
+ dm_handle_t handle;
+ size_t hlen;
+ int error;
+ unsigned long lc; /* lock cookie */
+	struct file *filep = fget(fd);
+	struct inode *ip;
+
+	if (!filep)
+		return(-EBADF);
+	ip = filep->f_dentry->d_inode;
+	if ((error = dm_ip_to_handle(ip, &handle)) != 0) {
+		fput(filep);
+		return(error);
+	}
+
+	if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL) {
+		fput(filep);
+		return(-EBADF);
+	}
+	mutex_spinunlock(&fsrp->fr_lock, lc);
+	fput(filep);
+
+	hlen = DM_HSIZE(handle);
+	if (copy_to_user(hanp, &handle, (int)hlen))
+		return(-EFAULT);
+	if (put_user(hlen, hlenp))
+		return(-EFAULT);
+	return 0;
+}
+
+
+/* Enable events on an object. */
+
+int
+dm_set_eventlist(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t __user *eventsetp,
+ u_int maxevent)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_eventset_t eventset;
+ dm_tokdata_t *tdp;
+ int error;
+
+ if (copy_from_user(&eventset, eventsetp, sizeof(eventset)))
+ return(-EFAULT);
+
+ /* Do some minor sanity checking. */
+
+ if (maxevent == 0 || maxevent > DM_EVENT_MAX)
+ return(-EINVAL);
+
+ /* Access the specified object. */
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->set_eventlist(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0),
+ &eventset, maxevent);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+/* Return the list of enabled events for an object. */
+
+int
+dm_get_eventlist(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int nelem,
+ dm_eventset_t __user *eventsetp,
+ u_int __user *nelemp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ dm_eventset_t eventset;
+ u_int elem;
+ int error;
+
+ if (nelem == 0)
+ return(-EINVAL);
+
+ /* Access the specified object. */
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ /* Get the object's event list. */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_eventlist(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0),
+ nelem, &eventset, &elem);
+
+ dm_app_put_tdp(tdp);
+
+ if (error)
+ return(error);
+
+ if (copy_to_user(eventsetp, &eventset, sizeof(eventset)))
+ return(-EFAULT);
+	if (put_user(elem, nelemp))
+ return(-EFAULT);
+ return(0);
+}
+
+
+/* Register for disposition of events. The handle must either be the
+ global handle or must be the handle of a file system. The list of events
+ is pointed to by eventsetp.
+*/
+
+int
+dm_set_disp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t __user *eventsetp,
+ u_int maxevent)
+{
+ dm_session_t *s;
+ dm_fsreg_t *fsrp;
+ dm_tokdata_t *tdp;
+ dm_eventset_t eventset;
+ int error;
+ unsigned long lc1; /* first lock cookie */
+ unsigned long lc2; /* second lock cookie */
+ u_int i;
+
+ /* Copy in and validate the event mask. Only the lower maxevent bits
+ are meaningful, so clear any bits set above maxevent.
+ */
+
+ if (maxevent == 0 || maxevent > DM_EVENT_MAX)
+ return(-EINVAL);
+ if (copy_from_user(&eventset, eventsetp, sizeof(eventset)))
+ return(-EFAULT);
+ eventset &= (1 << maxevent) - 1;
+
+ /* If the caller specified the global handle, then the only valid token
+ is DM_NO_TOKEN, and the only valid event in the event mask is
+ DM_EVENT_MOUNT. If it is set, add the session to the list of
+ sessions that want to receive mount events. If it is clear, remove
+ the session from the list. Since DM_EVENT_MOUNT events never block
+	   waiting for a session to register, there is no one to wake up if we
+ do add the session to the list.
+ */
+
+ if (DM_GLOBALHAN(hanp, hlen)) {
+ if (token != DM_NO_TOKEN)
+ return(-EINVAL);
+ if ((error = dm_find_session_and_lock(sid, &s, &lc1)) != 0)
+ return(error);
+ if (eventset == 0) {
+ s->sn_flags &= ~DM_SN_WANTMOUNT;
+ error = 0;
+ } else if (eventset == 1 << DM_EVENT_MOUNT) {
+ s->sn_flags |= DM_SN_WANTMOUNT;
+ error = 0;
+ } else {
+ error = -EINVAL;
+ }
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ return(error);
+ }
+
+ /* Since it's not the global handle, it had better be a filesystem
+ handle. Verify that the first 'maxevent' events in the event list
+ are all valid for a filesystem handle.
+ */
+
+ if (eventset & ~DM_VALID_DISP_EVENTS)
+ return(-EINVAL);
+
+ /* Verify that the session is valid, that the handle is a filesystem
+ handle, and that the filesystem is capable of sending events. (If
+ a dm_fsreg_t structure exists, then the filesystem can issue events.)
+ */
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc1);
+ if (fsrp == NULL) {
+ dm_app_put_tdp(tdp);
+ return(-EINVAL);
+ }
+
+ /* Now that we own 'fsrp->fr_lock', get the lock on the session so that
+ it can't disappear while we add it to the filesystem's event mask.
+ */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
+ mutex_spinunlock(&fsrp->fr_lock, lc1);
+ dm_app_put_tdp(tdp);
+ return(error);
+ }
+
+ /* Update the event disposition array for this filesystem, adding
+ and/or removing the session as appropriate. If this session is
+ dropping registration for DM_EVENT_DESTROY, or is overriding some
+ other session's registration for DM_EVENT_DESTROY, then clear any
+	   attr-on-destroy attribute name also.
+ */
+
+ for (i = 0; i < DM_EVENT_MAX; i++) {
+ if (DMEV_ISSET(i, eventset)) {
+ if (i == DM_EVENT_DESTROY && fsrp->fr_sessp[i] != s)
+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
+ fsrp->fr_sessp[i] = s;
+ } else if (fsrp->fr_sessp[i] == s) {
+ if (i == DM_EVENT_DESTROY)
+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
+ fsrp->fr_sessp[i] = NULL;
+ }
+ }
+ mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */
+
+ /* Wake up all processes waiting for a disposition on this filesystem
+ in case any of them happen to be waiting for an event which we just
+ added.
+ */
+
+ if (fsrp->fr_dispcnt)
+ sv_broadcast(&fsrp->fr_dispq);
+
+ mutex_spinunlock(&fsrp->fr_lock, lc1);
+
+ dm_app_put_tdp(tdp);
+ return(0);
+}
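+
+/* A session typically claims a filesystem's events with a libdm sequence
+   along these lines (userspace sketch; the handle variables are
+   illustrative):
+
+	dm_eventset_t eventset;
+
+	DMEV_ZERO(eventset);
+	DMEV_SET(DM_EVENT_READ, eventset);
+	DMEV_SET(DM_EVENT_DESTROY, eventset);
+	dm_set_disp(sid, fshanp, fshlen, DM_NO_TOKEN, &eventset,
+		    DM_EVENT_MAX);
+*/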
+
+
+/*
+ * Register a specific attribute name with a filesystem. The value of
+ * the attribute is to be returned with an asynchronous destroy event.
+ */
+
+int
+dm_set_return_on_destroy(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t __user *attrnamep,
+ dm_boolean_t enable)
+{
+ dm_attrname_t attrname;
+ dm_tokdata_t *tdp;
+ dm_fsreg_t *fsrp;
+ dm_session_t *s;
+ int error;
+ unsigned long lc1; /* first lock cookie */
+ unsigned long lc2; /* second lock cookie */
+
+ /* If a dm_attrname_t is provided, copy it in and validate it. */
+
+	if (enable && copy_from_user(&attrname, attrnamep, sizeof(attrname)))
+		return(-EFAULT);
+
+ /* Validate the filesystem handle and use it to get the filesystem's
+ disposition structure.
+ */
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc1);
+ if (fsrp == NULL) {
+ dm_app_put_tdp(tdp);
+ return(-EINVAL);
+ }
+
+ /* Now that we own 'fsrp->fr_lock', get the lock on the session so that
+ it can't disappear while we add it to the filesystem's event mask.
+ */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
+ mutex_spinunlock(&fsrp->fr_lock, lc1);
+ dm_app_put_tdp(tdp);
+ return(error);
+ }
+
+ /* A caller cannot disable return-on-destroy if he is not registered
+ for DM_EVENT_DESTROY. Enabling return-on-destroy is an implicit
+ dm_set_disp() for DM_EVENT_DESTROY; we wake up all processes
+ waiting for a disposition in case any was waiting for a
+ DM_EVENT_DESTROY event.
+ */
+
+ error = 0;
+ if (enable) {
+ fsrp->fr_sessp[DM_EVENT_DESTROY] = s;
+ fsrp->fr_rattr = attrname;
+ if (fsrp->fr_dispcnt)
+ sv_broadcast(&fsrp->fr_dispq);
+ } else if (fsrp->fr_sessp[DM_EVENT_DESTROY] != s) {
+ error = -EINVAL;
+ } else {
+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
+ }
+ mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */
+ mutex_spinunlock(&fsrp->fr_lock, lc1);
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_get_mountinfo(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp)
+{
+ dm_fsreg_t *fsrp;
+ dm_tokdata_t *tdp;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Make sure that the caller's buffer is 8-byte aligned. */
+
+ if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0)
+ return(-EFAULT);
+
+ /* Verify that the handle is a filesystem handle, and that the
+ filesystem is capable of sending events. If not, return an error.
+ */
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ /* Find the filesystem entry. This should always succeed as the
+ dm_app_get_tdp call created a filesystem reference. Once we find
+ the entry, drop the lock. The mountinfo message is never modified,
+ the filesystem entry can't disappear, and we don't want to hold a
+ spinlock while doing copyout calls.
+ */
+
+ fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc);
+ if (fsrp == NULL) {
+ dm_app_put_tdp(tdp);
+ return(-EINVAL);
+ }
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+
+ /* Copy the message into the user's buffer and update his 'rlenp'. */
+
+ if (put_user(fsrp->fr_msgsize, rlenp)) {
+ error = -EFAULT;
+ } else if (fsrp->fr_msgsize > buflen) { /* user buffer not big enough */
+ error = -E2BIG;
+ } else if (copy_to_user(bufp, fsrp->fr_msg, fsrp->fr_msgsize)) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_getall_disp(
+ dm_sessid_t sid,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp)
+{
+ dm_session_t *s; /* pointer to session given by sid */
+ unsigned long lc1; /* first lock cookie */
+ unsigned long lc2; /* second lock cookie */
+ int totalsize;
+ int msgsize;
+ int fsyscnt;
+ dm_dispinfo_t *prevmsg;
+ dm_fsreg_t *fsrp;
+ int error;
+ char *kbuf;
+
+	/* Because the structures returned by dm_getall_disp contain __u64
+	   fields, make sure that the buffer provided by the caller is
+	   aligned so that such fields can be read successfully.
+ */
+
+ if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0)
+ return(-EFAULT);
+
+ /* Compute the size of a dm_dispinfo structure, rounding up to an
+ 8-byte boundary so that any subsequent structures will also be
+ aligned.
+ */
+
+	msgsize = (sizeof(dm_dispinfo_t) + DM_FSHSIZE + sizeof(__u64) - 1) &
+		  ~(sizeof(__u64) - 1);
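+
+	/* For example, if sizeof(dm_dispinfo_t) + DM_FSHSIZE came to 52
+	   bytes (an illustrative figure, not the real size), the line
+	   above would yield (52 + 7) & ~7 == 56, the next multiple of 8.
+	*/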
+
+ /* Loop until we can get the right amount of temp space, being careful
+ not to hold a mutex during the allocation. Usually only one trip.
+ */
+
+ for (;;) {
+ if ((fsyscnt = dm_fsys_cnt) == 0) {
+			if (put_user(0, rlenp))
+ return(-EFAULT);
+ return(0);
+ }
+ kbuf = kmalloc(fsyscnt * msgsize, GFP_KERNEL);
+ if (kbuf == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ lc1 = mutex_spinlock(&dm_reg_lock);
+ if (fsyscnt == dm_fsys_cnt)
+ break;
+
+ mutex_spinunlock(&dm_reg_lock, lc1);
+ kfree(kbuf);
+ }
+
+ /* Find the indicated session and lock it. */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
+ mutex_spinunlock(&dm_reg_lock, lc1);
+ kfree(kbuf);
+ return(error);
+ }
+
+ /* Create a dm_dispinfo structure for each filesystem in which
+ this session has at least one event selected for disposition.
+ */
+
+ totalsize = 0; /* total bytes to transfer to the user */
+ prevmsg = NULL;
+
+ for (fsrp = dm_registers; fsrp; fsrp = fsrp->fr_next) {
+ dm_dispinfo_t *disp;
+ int event;
+ int found;
+
+ disp = (dm_dispinfo_t *)(kbuf + totalsize);
+
+ DMEV_ZERO(disp->di_eventset);
+
+ for (event = 0, found = 0; event < DM_EVENT_MAX; event++) {
+ if (fsrp->fr_sessp[event] != s)
+ continue;
+ DMEV_SET(event, disp->di_eventset);
+ found++;
+ }
+ if (!found)
+ continue;
+
+ disp->_link = 0;
+ disp->di_fshandle.vd_offset = sizeof(dm_dispinfo_t);
+ disp->di_fshandle.vd_length = DM_FSHSIZE;
+
+ memcpy((char *)disp + disp->di_fshandle.vd_offset,
+ &fsrp->fr_fsid, disp->di_fshandle.vd_length);
+
+ if (prevmsg)
+ prevmsg->_link = msgsize;
+
+ prevmsg = disp;
+ totalsize += msgsize;
+ }
+ mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */
+ mutex_spinunlock(&dm_reg_lock, lc1);
+
+ if (put_user(totalsize, rlenp)) {
+ error = -EFAULT;
+ } else if (totalsize > buflen) { /* no more room */
+ error = -E2BIG;
+ } else if (totalsize && copy_to_user(bufp, kbuf, totalsize)) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+
+ kfree(kbuf);
+ return(error);
+}
+
+int
+dm_open_by_handle_rvp(
+ unsigned int fd,
+ void __user *hanp,
+ size_t hlen,
+ int flags,
+ int *rvp)
+{
+ const struct cred *cred = current_cred();
+ dm_handle_t handle;
+ int error;
+ short td_type;
+ struct dentry *dentry;
+ struct inode *inodep;
+ int new_fd;
+ struct file *mfilp;
+ struct file *filp;
+
+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0) {
+ return(error);
+ }
+
+ if ((inodep = dm_handle_to_ip(&handle, &td_type)) == NULL) {
+ return(-EBADF);
+ }
+ if ((td_type == DM_TDT_VFS) || (td_type == DM_TDT_OTH)) {
+ iput(inodep);
+ return(-EBADF);
+ }
+
+	if ((new_fd = get_unused_fd()) < 0) {
+		iput(inodep);
+		return(new_fd);
+	}
+
+ dentry = d_obtain_alias(inodep);
+	if (IS_ERR(dentry)) {
+		/* d_obtain_alias() drops the inode reference on failure */
+		put_unused_fd(new_fd);
+		return(PTR_ERR(dentry));
+	}
+
+ mfilp = fget(fd);
+ if (!mfilp) {
+ dput(dentry);
+ put_unused_fd(new_fd);
+ return(-EBADF);
+ }
+
+ mntget(mfilp->f_vfsmnt);
+
+ /* Create file pointer */
+ filp = dentry_open(dentry, mfilp->f_vfsmnt, flags, cred);
+ if (IS_ERR(filp)) {
+ put_unused_fd(new_fd);
+ fput(mfilp);
+ return PTR_ERR(filp);
+ }
+
+ if (td_type == DM_TDT_REG)
+ filp->f_mode |= FMODE_NOCMTIME;
+
+ fd_install(new_fd, filp);
+ fput(mfilp);
+ *rvp = new_fd;
+ return 0;
+}
+
+
+#ifdef HAVE_DM_QUEUE_FLUSH
+/* Find the threads that have a reference to our filesystem and force
+ them to return with the specified errno.
+ We look for them in each dm_fsreg_t's fr_evt_dispq.
+*/
+
+int
+dm_release_disp_threads(
+ dm_fsid_t *fsidp,
+ struct inode *inode, /* may be null */
+ int errno)
+{
+ unsigned long lc;
+ dm_fsreg_t *fsrp;
+ dm_tokevent_t *tevp;
+ dm_tokdata_t *tdp;
+ dm_eventq_t *queue;
+ int found_events = 0;
+
+ if ((fsrp = dm_find_fsreg_and_lock(fsidp, &lc)) == NULL){
+ return 0;
+ }
+
+ queue = &fsrp->fr_evt_dispq;
+ for (tevp = queue->eq_head; tevp; tevp = tevp->te_next) {
+ nested_spinlock(&tevp->te_lock);
+ if (inode) {
+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
+ if( tdp->td_ip == inode ) {
+ tevp->te_flags |= DM_TEF_FLUSH;
+ tevp->te_reply = errno;
+ found_events = 1;
+ break;
+ }
+ }
+ }
+ else {
+ tevp->te_flags |= DM_TEF_FLUSH;
+ tevp->te_reply = errno;
+ found_events = 1;
+ }
+ nested_spinunlock(&tevp->te_lock);
+ }
+
+ if (found_events && fsrp->fr_dispcnt)
+ sv_broadcast(&fsrp->fr_dispq);
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+ return 0;
+}
+#endif /* HAVE_DM_QUEUE_FLUSH */
--- /dev/null
+++ b/fs/dmapi/dmapi_right.c
@@ -0,0 +1,1256 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include <asm/uaccess.h>
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+
+#define DM_FG_STHREAD 0x001 /* keep other threads from using tdp */
+#define DM_FG_MUSTEXIST 0x002 /* handle must exist in the event */
+#define DM_FG_DONTADD 0x004 /* don't add handle if not in event */
+
+/* Get a handle of the form (void *, size_t) from user space and convert it to
+ a handle_t. Do as much validation of the result as possible; any error
+ other than a bad address should return EBADF per the DMAPI spec.
+*/
+
+int
+dm_copyin_handle(
+ void __user *hanp, /* input, handle data */
+ size_t hlen, /* input, size of handle data */
+ dm_handle_t *handlep) /* output, copy of data */
+{
+ u_short len;
+ dm_fid_t *fidp;
+
+ fidp = (dm_fid_t*)&handlep->ha_fid;
+
+ if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
+ return -EBADF;
+
+ if (copy_from_user(handlep, hanp, hlen))
+ return -EFAULT;
+
+ if (hlen < sizeof(*handlep))
+ memset((char *)handlep + hlen, 0, sizeof(*handlep) - hlen);
+
+ if (hlen == sizeof(handlep->ha_fsid))
+ return 0; /* FS handle, nothing more to check */
+
+ len = hlen - sizeof(handlep->ha_fsid) - sizeof(fidp->dm_fid_len);
+
+ if ((fidp->dm_fid_len != len) || fidp->dm_fid_pad)
+ return -EBADF;
+ return 0;
+}
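+
+/* For illustration (the field sizes here are hypothetical): with an
+   8-byte ha_fsid and a 2-byte dm_fid_len, a caller passing hlen == 8
+   presents a pure filesystem handle and no fid checking is done, while
+   a caller passing hlen == 24 must have dm_fid_len == 24 - 8 - 2 == 14
+   and dm_fid_pad == 0, or the handle is rejected with -EBADF.
+*/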
+
+/* Allocate and initialize a tevp structure. Called from both application and
+ event threads.
+*/
+
+static dm_tokevent_t *
+dm_init_tevp(
+ int ev_size, /* size of event structure */
+ int var_size) /* size of variable-length data */
+{
+ dm_tokevent_t *tevp;
+ int msgsize;
+
+ /* Calculate the size of the event in bytes and allocate memory for it.
+ Zero all but the variable portion of the message, which will be
+ eventually overlaid by the caller with data.
+ */
+
+ msgsize = offsetof(dm_tokevent_t, te_event) + ev_size + var_size;
+ tevp = kmalloc(msgsize, GFP_KERNEL);
+ if (tevp == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return NULL;
+ }
+ memset(tevp, 0, msgsize - var_size);
+
+ /* Now initialize all the non-zero fields. */
+
+ spinlock_init(&tevp->te_lock, "te_lock");
+ sv_init(&tevp->te_evt_queue, SV_DEFAULT, "te_evt_queue");
+ sv_init(&tevp->te_app_queue, SV_DEFAULT, "te_app_queue");
+ tevp->te_allocsize = msgsize;
+ tevp->te_msg.ev_type = DM_EVENT_INVALID;
+ tevp->te_flags = 0;
+
+ return(tevp);
+}
+
+
+/* Given the event type and the number of bytes of variable length data that
+ will follow the event, dm_evt_create_tevp() creates a dm_tokevent_t
+ structure to hold the event and initializes all the common event fields.
+
+ No locking is required for this routine because the caller is an event
+ thread, and is therefore the only thread that can see the event.
+*/
+
+dm_tokevent_t *
+dm_evt_create_tevp(
+ dm_eventtype_t event,
+ int variable_size,
+ void **msgpp)
+{
+ dm_tokevent_t *tevp;
+ int evsize;
+
+ switch (event) {
+ case DM_EVENT_READ:
+ case DM_EVENT_WRITE:
+ case DM_EVENT_TRUNCATE:
+ evsize = sizeof(dm_data_event_t);
+ break;
+
+ case DM_EVENT_DESTROY:
+ evsize = sizeof(dm_destroy_event_t);
+ break;
+
+ case DM_EVENT_MOUNT:
+ evsize = sizeof(dm_mount_event_t);
+ break;
+
+ case DM_EVENT_PREUNMOUNT:
+ case DM_EVENT_UNMOUNT:
+ case DM_EVENT_NOSPACE:
+ case DM_EVENT_CREATE:
+ case DM_EVENT_REMOVE:
+ case DM_EVENT_RENAME:
+ case DM_EVENT_SYMLINK:
+ case DM_EVENT_LINK:
+ case DM_EVENT_POSTCREATE:
+ case DM_EVENT_POSTREMOVE:
+ case DM_EVENT_POSTRENAME:
+ case DM_EVENT_POSTSYMLINK:
+ case DM_EVENT_POSTLINK:
+ case DM_EVENT_ATTRIBUTE:
+ case DM_EVENT_DEBUT: /* currently not supported */
+ case DM_EVENT_CLOSE: /* currently not supported */
+ evsize = sizeof(dm_namesp_event_t);
+ break;
+
+ case DM_EVENT_CANCEL: /* currently not supported */
+ evsize = sizeof(dm_cancel_event_t);
+ break;
+
+ case DM_EVENT_USER:
+ evsize = 0;
+ break;
+
+ default:
+ panic("dm_create_tevp: called with unknown event type %d\n",
+ event);
+ }
+
+ /* Allocate and initialize an event structure of the correct size. */
+
+ tevp = dm_init_tevp(evsize, variable_size);
+ if (tevp == NULL)
+ return NULL;
+ tevp->te_evt_ref = 1;
+
+ /* Fields ev_token, ev_sequence, and _link are all filled in when the
+ event is queued onto a session. Initialize all other fields here.
+ */
+
+ tevp->te_msg.ev_type = event;
+ tevp->te_msg.ev_data.vd_offset = offsetof(dm_tokevent_t, te_event) -
+ offsetof(dm_tokevent_t, te_msg);
+ tevp->te_msg.ev_data.vd_length = evsize + variable_size;
+
+ /* Give the caller a pointer to the event-specific structure. */
+
+ *msgpp = ((char *)&tevp->te_msg + tevp->te_msg.ev_data.vd_offset);
+ return(tevp);
+}
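+
+#if 0
+/* Illustrative sketch only, not part of this patch: one way an event
+   thread could use dm_evt_create_tevp() to build a DM_EVENT_READ
+   message.  The helper name is hypothetical; dm_evt_create_tevp()
+   returns a pointer to the event-specific dm_data_event_t via msgpp,
+   exactly as set up above.
+*/
+static dm_tokevent_t *
+dm_example_build_read_event(
+	dm_off_t	offset,
+	dm_size_t	length)
+{
+	dm_tokevent_t	*tevp;
+	dm_data_event_t	*datap;
+
+	tevp = dm_evt_create_tevp(DM_EVENT_READ, 0, (void **)&datap);
+	if (tevp == NULL)
+		return(NULL);
+
+	datap->de_offset = offset;
+	datap->de_length = length;
+	return(tevp);
+}
+#endif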
+
+
+/* Given a pointer to an event (tevp) and a pointer to a handle_t, look for a
+ tdp structure within the event which contains the handle_t. Either verify
+ that the event contains the tdp, or optionally add the tdp to the
+ event. Called only from application threads.
+
+ On entry, tevp->te_lock is held; it is dropped prior to return.
+*/
+
+static int
+dm_app_lookup_tdp(
+ dm_handle_t *handlep, /* the handle we are looking for */
+ dm_tokevent_t *tevp, /* the event to search for the handle */
+ unsigned long *lcp, /* address of active lock cookie */
+ short types, /* acceptable object types */
+ dm_right_t right, /* minimum right the object must have */
+ u_int flags,
+ dm_tokdata_t **tdpp) /* if ! NULL, pointer to matching tdp */
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ struct inode *ip;
+ int error;
+
+ /* Bump the tevp application reference counter so that the event
+ can't disappear in case we have to drop the lock for a while.
+ */
+
+ tevp->te_app_ref++;
+ *tdpp = NULL; /* assume failure */
+
+ for (;;) {
+ /* Look for a matching tdp in the tevp. */
+
+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
+ if (DM_HANDLE_CMP(&tdp->td_handle, handlep) == 0)
+ break;
+ }
+
+ /* If the tdp exists, but either we need single-thread access
+ to the handle and can't get it, or some other thread already
+ has single-thread access, then sleep until we can try again.
+ */
+
+ if (tdp != NULL && tdp->td_app_ref &&
+ ((flags & DM_FG_STHREAD) ||
+ (tdp->td_flags & DM_TDF_STHREAD))) {
+ tevp->te_app_slp++;
+ sv_wait(&tevp->te_app_queue, 1,
+ &tevp->te_lock, *lcp);
+ *lcp = mutex_spinlock(&tevp->te_lock);
+ tevp->te_app_slp--;
+ continue;
+ }
+
+ if (tdp != NULL &&
+ (tdp->td_vcount > 0 || tdp->td_flags & DM_TDF_EVTREF)) {
+ /* We have an existing tdp with a non-zero inode
+ reference count. If it's the wrong type, return
+ an appropriate errno.
+ */
+
+ if (!(tdp->td_type & types)) {
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ dm_put_tevp(tevp, NULL); /* no destroy events */
+ return(-EOPNOTSUPP);
+ }
+
+ /* If the current access right isn't high enough,
+ complain.
+ */
+
+ if (tdp->td_right < right) {
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ dm_put_tevp(tevp, NULL); /* no destroy events */
+ return(-EACCES);
+ }
+
+ /* The handle is acceptable. Increment the tdp
+ application and inode references and mark the tdp
+ as single-threaded if necessary.
+ */
+
+ tdp->td_app_ref++;
+ if (flags & DM_FG_STHREAD)
+ tdp->td_flags |= DM_TDF_STHREAD;
+ tdp->td_vcount++;
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ (void)fsys_vector->obj_ref_hold(tdp->td_ip);
+
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ *tdpp = tdp;
+ return(0);
+ }
+
+ /* If the tdp is not in the tevp or does not have an inode
+ reference, check to make sure it is okay to add/update it.
+ */
+
+ if (flags & DM_FG_MUSTEXIST) {
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ dm_put_tevp(tevp, NULL); /* no destroy events */
+ return(-EACCES); /* i.e. an insufficient right */
+ }
+ if (flags & DM_FG_DONTADD) {
+ tevp->te_app_ref--;
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ return(0);
+ }
+
+ /* If a tdp structure doesn't yet exist, create one and link
+ it into the tevp. Drop the lock while we are doing this as
+ zallocs can go to sleep. Once we have the memory, make
+ sure that another thread didn't simultaneously add the same
+ handle to the same event. If so, toss ours and start over.
+ */
+
+ if (tdp == NULL) {
+ dm_tokdata_t *tmp;
+
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+
+ tdp = kmem_cache_alloc(dm_tokdata_cachep, GFP_KERNEL);
+ if (tdp == NULL){
+ printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__);
+ return(-ENOMEM);
+ }
+ memset(tdp, 0, sizeof(*tdp));
+
+ *lcp = mutex_spinlock(&tevp->te_lock);
+
+ for (tmp = tevp->te_tdp; tmp; tmp = tmp->td_next) {
+ if (DM_HANDLE_CMP(&tmp->td_handle, handlep) == 0)
+ break;
+ }
+ if (tmp) {
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ continue;
+ }
+
+ tdp->td_next = tevp->te_tdp;
+ tevp->te_tdp = tdp;
+ tdp->td_tevp = tevp;
+ tdp->td_handle = *handlep;
+ }
+
+ /* Temporarily single-thread access to the tdp so that other
+ threads don't touch it while we are filling the rest of the
+ fields in.
+ */
+
+ tdp->td_app_ref = 1;
+ tdp->td_flags |= DM_TDF_STHREAD;
+
+ /* Drop the spinlock while we access, validate, and obtain the
+ proper rights to the object. This can take a very long time
+ if the inode is not in memory, if the filesystem is
+ unmounting, or if the request_right() call should block
+ because some other tdp or kernel thread is holding a right.
+ */
+
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+
+ if ((ip = dm_handle_to_ip(handlep, &tdp->td_type)) == NULL) {
+ error = -EBADF;
+ } else {
+ tdp->td_vcount = 1;
+ tdp->td_ip = ip;
+
+ /* The handle is usable. Check that the type of the
+ object matches one of the types that the caller
+ will accept.
+ */
+
+ if (!(types & tdp->td_type)) {
+ error = -EOPNOTSUPP;
+ } else if (right > DM_RIGHT_NULL) {
+ /* Attempt to get the rights required by the
+ caller. If rights can't be obtained, return
+ an error.
+ */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->request_right(tdp->td_ip,
+ DM_RIGHT_NULL,
+ (tdp->td_type == DM_TDT_VFS ?
+ DM_FSYS_OBJ : 0),
+ DM_RR_WAIT, right);
+ if (!error) {
+ tdp->td_right = right;
+ }
+ } else {
+ error = 0;
+ }
+ }
+ if (error != 0) {
+ dm_put_tevp(tevp, tdp); /* destroy event risk, although tiny */
+ return(error);
+ }
+
+ *lcp = mutex_spinlock(&tevp->te_lock);
+
+ /* Wake up any threads which may have seen our tdp while we
+ were filling it in.
+ */
+
+ if (!(flags & DM_FG_STHREAD)) {
+ tdp->td_flags &= ~DM_TDF_STHREAD;
+ if (tevp->te_app_slp)
+ sv_broadcast(&tevp->te_app_queue);
+ }
+
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ *tdpp = tdp;
+ return(0);
+ }
+}
+
+
+/* dm_app_get_tdp_by_token() is called whenever the application request
+ contains a session ID and contains a token other than DM_NO_TOKEN.
+ Most of the callers provide a right that is either DM_RIGHT_SHARED or
+ DM_RIGHT_EXCL, but a few of the callers such as dm_obj_ref_hold() may
+ specify a right of DM_RIGHT_NULL.
+*/
+
+static int
+dm_app_get_tdp_by_token(
+ dm_sessid_t sid, /* an existing session ID */
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token, /* an existing token */
+ short types, /* acceptable object types */
+ dm_right_t right, /* minimum right the object must have */
+ u_int flags,
+ dm_tokdata_t **tdpp)
+{
+ dm_tokevent_t *tevp;
+ dm_handle_t handle;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ if (right < DM_RIGHT_NULL || right > DM_RIGHT_EXCL)
+ return(-EINVAL);
+
+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0)
+ return(error);
+
+ /* Find and lock the event which corresponds to the specified
+ session/token pair.
+ */
+
+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0)
+ return(error);
+
+ return(dm_app_lookup_tdp(&handle, tevp, &lc, types,
+ right, flags, tdpp));
+}
+
+
+/* Function dm_app_get_tdp() must ONLY be called from routines associated with
+ application calls, e.g. dm_read_invis, dm_set_disp, etc. It must not be
+ called by a thread responsible for generating an event such as
+ dm_send_data_event()!
+
+ dm_app_get_tdp() is the interface used by all application calls other than
+ dm_get_events, dm_respond_event, dm_get_config, dm_get_config_events, and by
+ the dm_obj_ref_* and dm_*_right families of requests.
+
+ dm_app_get_tdp() converts a sid/hanp/hlen/token quad into a tdp pointer,
+ increments the number of active application threads in the event, and
+ increments the number of active application threads using the tdp. The
+ 'right' parameter must be either DM_RIGHT_SHARED or DM_RIGHT_EXCL. The
+ token may either be DM_NO_TOKEN, or can be a token received in a synchronous
+ event.
+*/
+
+int
+dm_app_get_tdp(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ short types,
+ dm_right_t right, /* minimum right */
+ dm_tokdata_t **tdpp)
+{
+ dm_session_t *s;
+ dm_handle_t handle;
+ dm_tokevent_t *tevp;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ ASSERT(right >= DM_RIGHT_SHARED);
+
+ /* If a token other than DM_NO_TOKEN is specified, find the event on
+ this session which owns the token and increment its reference count.
+ */
+
+ if (token != DM_NO_TOKEN) { /* look up existing tokevent struct */
+ return(dm_app_get_tdp_by_token(sid, hanp, hlen, token, types,
+ right, DM_FG_MUSTEXIST, tdpp));
+ }
+
+ /* The token is DM_NO_TOKEN. In this case we only want to verify that
+ the session ID is valid, and do not need to continue holding the
+ session lock after we know that to be true.
+ */
+
+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0)
+ return(error);
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ /* When DM_NO_TOKEN is used, we simply block until we can obtain the
+ right that we want (since the tevp contains no tdp structures).
+ The blocking when we eventually support it will occur within
+ fsys_vector->request_right().
+ */
+
+ tevp = dm_init_tevp(0, 0);
+	if (tevp == NULL)
+		return(-ENOMEM);
+	lc = mutex_spinlock(&tevp->te_lock);
+
+ return(dm_app_lookup_tdp(&handle, tevp, &lc, types, right, 0, tdpp));
+}
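+
+#if 0
+/* Illustrative sketch only, not part of this patch: the typical calling
+   pattern for dm_app_get_tdp()/dm_app_put_tdp(), modeled on
+   dm_get_mountinfo() above.  The operation name is hypothetical.
+*/
+static int
+dm_example_fs_op(
+	dm_sessid_t	sid,
+	void __user	*hanp,
+	size_t		hlen,
+	dm_token_t	token)
+{
+	dm_tokdata_t	*tdp;
+	int		error;
+
+	error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
+			DM_RIGHT_SHARED, &tdp);
+	if (error != 0)
+		return(error);
+
+	/* ... use tdp->td_ip while the reference is held ... */
+
+	dm_app_put_tdp(tdp);
+	return(0);
+}
+#endif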
+
+
+/* dm_get_config_tdp() is only called by dm_get_config() and
+   dm_get_config_events(), neither of which has a session ID or a token.
+ Both of these calls are supposed to work even if the filesystem is in the
+ process of being mounted, as long as the caller only uses handles within
+ the mount event.
+*/
+
+int
+dm_get_config_tdp(
+ void __user *hanp,
+ size_t hlen,
+ dm_tokdata_t **tdpp)
+{
+ dm_handle_t handle;
+ dm_tokevent_t *tevp;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0)
+ return(error);
+
+ tevp = dm_init_tevp(0, 0);
+	if (tevp == NULL)
+		return(-ENOMEM);
+	lc = mutex_spinlock(&tevp->te_lock);
+
+ /* Try to use the handle provided by the caller and assume DM_NO_TOKEN.
+ This will fail if the filesystem is in the process of being mounted.
+ */
+
+ error = dm_app_lookup_tdp(&handle, tevp, &lc, DM_TDT_ANY,
+ DM_RIGHT_NULL, 0, tdpp);
+
+ if (!error) {
+ return(0);
+ }
+
+ /* Perhaps the filesystem is still mounting, in which case we need to
+ see if this is one of the handles in the DM_EVENT_MOUNT tevp.
+ */
+
+ if ((tevp = dm_find_mount_tevp_and_lock(&handle.ha_fsid, &lc)) == NULL)
+ return(-EBADF);
+
+ return(dm_app_lookup_tdp(&handle, tevp, &lc, DM_TDT_ANY,
+ DM_RIGHT_NULL, DM_FG_MUSTEXIST, tdpp));
+}
+
+
+/* dm_put_tdp() is called to release any right held on the inode, and to
+ VN_RELE() all references held on the inode. It is the caller's
+ responsibility to ensure that no other application threads are using the
+ tdp, and if necessary to unlink the tdp from the tevp before calling
+ this routine and to free the tdp afterwards.
+*/
+
+static void
+dm_put_tdp(
+ dm_tokdata_t *tdp)
+{
+ ASSERT(tdp->td_app_ref <= 1);
+
+ /* If the application thread is holding a right, or if the event
+ thread had a right but it has disappeared because of a dm_pending
+ or Cntl-C, then we need to release it here.
+ */
+
+ if (tdp->td_right != DM_RIGHT_NULL) {
+ dm_fsys_vector_t *fsys_vector;
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ (void)fsys_vector->release_right(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0));
+ tdp->td_right = DM_RIGHT_NULL;
+ }
+
+ /* Given that we wouldn't be here if there was still an event thread,
+ this VN_RELE loop has the potential of generating a DM_EVENT_DESTROY
+ event if some other thread has unlinked the file.
+ */
+
+ while (tdp->td_vcount > 0) {
+ iput(tdp->td_ip);
+ tdp->td_vcount--;
+ }
+
+ tdp->td_flags &= ~(DM_TDF_HOLD|DM_TDF_RIGHT);
+ tdp->td_ip = NULL;
+}
+
+
+/* Function dm_put_tevp() must ONLY be called from routines associated with
+ application threads, e.g. dm_read_invis, dm_get_events, etc. It must not be
+ called by a thread responsible for generating an event, such as
+ dm_send_data_event.
+
+ PLEASE NOTE: It is possible for this routine to generate DM_EVENT_DESTROY
+ events, because its calls to dm_put_tdp drop inode references, and another
+ thread may have already unlinked a file whose inode we are de-referencing.
+ This sets the stage for various types of deadlock if the thread calling
+ dm_put_tevp is the same thread that calls dm_respond_event! In particular,
+   the dm_send_destroy_event routine needs to obtain the dm_reg_lock,
+ dm_session_lock, and sn_qlock in order to queue the destroy event. No
+ caller of dm_put_tevp can hold any of these locks!
+
+ Other possible deadlocks are that dm_send_destroy_event could block waiting
+ for a thread to register for the event using dm_set_disp() and/or
+ dm_set_return_on_destroy, or it could block because the session's sn_newq
+ is at the dm_max_queued_msgs event limit. The only safe solution
+ (unimplemented) is to have a separate kernel thread for each filesystem
+ whose only job is to do the inode-dereferencing. That way dm_respond_event
+ will not block, so the application can keep calling dm_get_events to read
+ events even if the filesystem thread should block. (If the filesystem
+ thread blocks, so will all subsequent destroy events for the same
+ filesystem.)
+*/
+
+void
+dm_put_tevp(
+ dm_tokevent_t *tevp,
+ dm_tokdata_t *tdp)
+{
+ int free_tdp = 0;
+ unsigned long lc; /* lock cookie */
+
+ lc = mutex_spinlock(&tevp->te_lock);
+
+ if (tdp != NULL) {
+ if (tdp->td_vcount > 1 || (tdp->td_flags & DM_TDF_EVTREF)) {
+ ASSERT(tdp->td_app_ref > 0);
+
+ iput(tdp->td_ip);
+ tdp->td_vcount--;
+ } else {
+ ASSERT(tdp->td_app_ref == 1);
+
+ /* The inode reference count is either already at
+ zero (e.g. a failed dm_handle_to_ip() call in
+ dm_app_lookup_tdp()) or is going to zero. We can't
+ hold the lock while we decrement the count because
+ we could potentially end up being busy for a long
+ time in VOP_INACTIVATE. Use single-threading to
+ lock others out while we clean house.
+ */
+
+ tdp->td_flags |= DM_TDF_STHREAD;
+
+ /* WARNING - A destroy event is possible here if we are
+ giving up the last reference on an inode which has
+ been previously unlinked by some other thread!
+ */
+
+ mutex_spinunlock(&tevp->te_lock, lc);
+ dm_put_tdp(tdp);
+ lc = mutex_spinlock(&tevp->te_lock);
+
+ /* If this tdp is not one of the original tdps in the
+ event, then remove it from the tevp.
+ */
+
+ if (!(tdp->td_flags & DM_TDF_ORIG)) {
+ dm_tokdata_t **tdpp = &tevp->te_tdp;
+
+ while (*tdpp && *tdpp != tdp) {
+ tdpp = &(*tdpp)->td_next;
+ }
+ if (*tdpp == NULL) {
+ panic("dm_remove_tdp_from_tevp: tdp "
+ "%p not in tevp %p\n", tdp,
+ tevp);
+ }
+ *tdpp = tdp->td_next;
+ free_tdp++;
+ }
+ }
+
+ /* If this is the last app thread actively using the tdp, clear
+ any single-threading and wake up any other app threads who
+ might be waiting to use this tdp, single-threaded or
+ otherwise.
+ */
+
+ if (--tdp->td_app_ref == 0) {
+ if (tdp->td_flags & DM_TDF_STHREAD) {
+ tdp->td_flags &= ~DM_TDF_STHREAD;
+ if (tevp->te_app_slp)
+ sv_broadcast(&tevp->te_app_queue);
+ }
+ }
+
+ if (free_tdp) {
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ }
+ }
+
+ /* If other application threads are using this token/event, they will
+ do the cleanup.
+ */
+
+ if (--tevp->te_app_ref > 0) {
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return;
+ }
+
+ /* If event generation threads are waiting for this thread to go away,
+ wake them up and let them do the cleanup.
+ */
+
+ if (tevp->te_evt_ref > 0) {
+ sv_broadcast(&tevp->te_evt_queue);
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return;
+ }
+
+ /* This thread is the last active thread using the token/event. No
+ lock can be held while we disassemble the tevp because we could
+ potentially end up being busy for a long time in VOP_INACTIVATE.
+ */
+
+ mutex_spinunlock(&tevp->te_lock, lc);
+
+ /* WARNING - One or more destroy events are possible here if we are
+ giving up references on inodes which have been previously unlinked
+ by other kernel threads!
+ */
+
+ while ((tdp = tevp->te_tdp) != NULL) {
+ tevp->te_tdp = tdp->td_next;
+ dm_put_tdp(tdp);
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ }
+ spinlock_destroy(&tevp->te_lock);
+ sv_destroy(&tevp->te_evt_queue);
+ sv_destroy(&tevp->te_app_queue);
+ kfree(tevp);
+}
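+
+#if 0
+/* Illustrative sketch only, not part of this patch: one possible shape
+   for the unimplemented inode-dereferencing helper suggested in the
+   comment above dm_put_tevp().  A workqueue item defers the iput so
+   that the thread calling dm_respond_event() never blocks on an
+   induced destroy event.  All names here are hypothetical, and a real
+   implementation would want one queue per filesystem.
+*/
+struct dm_deferred_iput {
+	struct work_struct	di_work;
+	struct inode		*di_ip;
+};
+
+static void
+dm_deferred_iput_worker(
+	struct work_struct	*work)
+{
+	struct dm_deferred_iput	*di;
+
+	di = container_of(work, struct dm_deferred_iput, di_work);
+	iput(di->di_ip);	/* may generate a DM_EVENT_DESTROY event */
+	kfree(di);
+}
+
+static void
+dm_defer_iput(
+	struct inode	*ip)
+{
+	struct dm_deferred_iput	*di;
+
+	di = kmalloc(sizeof(*di), GFP_KERNEL);
+	if (di == NULL) {
+		iput(ip);	/* fall back to a direct, possibly blocking iput */
+		return;
+	}
+	di->di_ip = ip;
+	INIT_WORK(&di->di_work, dm_deferred_iput_worker);
+	schedule_work(&di->di_work);
+}
+#endif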
+
+
+/* No caller of dm_app_put_tdp can hold any of the locks dm_reg_lock,
+ dm_session_lock, or any sn_qlock! (See dm_put_tevp for details.)
+*/
+
+void
+dm_app_put_tdp(
+ dm_tokdata_t *tdp)
+{
+ dm_put_tevp(tdp->td_tevp, tdp);
+}
+
+
+/* dm_change_right is only called if the event thread is the one doing the
+ cleanup on a completed event. It looks at the current rights of a tdp
+ and compares that with the rights it had on the tdp when the event was
+   created.  If different, it reacquires the original rights, then transfers
+ the rights back to being thread-based.
+*/
+
+static void
+dm_change_right(
+ dm_tokdata_t *tdp)
+{
+#ifdef HAVE_DMAPI_RIGHTS
+ dm_fsys_vector_t *fsys_vector;
+ int error;
+ u_int type;
+#endif
+
+ /* If the event doesn't have an inode reference, if the original right
+ was DM_RIGHT_NULL, or if the rights were never switched from being
+ thread-based to tdp-based, then there is nothing to do.
+ */
+
+ if (!(tdp->td_flags & DM_TDF_EVTREF))
+ return;
+
+ if (tdp->td_orig_right == DM_RIGHT_NULL)
+ return;
+
+ /* DEBUG - Need a check here for event-based rights. */
+
+#ifdef HAVE_DMAPI_RIGHTS
+ /* The "rights" vectors are stubs now anyway. When they are
+ * implemented then bhv locking will have to be sorted out.
+ */
+
+ /* If the current right is not the same as it was when the event was
+ created, first get back the original right.
+ */
+
+ if (tdp->td_right != tdp->td_orig_right) {
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ type = (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0);
+
+ switch (tdp->td_orig_right) {
+ case DM_RIGHT_SHARED:
+ if (tdp->td_right == DM_RIGHT_EXCL) {
+ error = fsys_vector->downgrade_right(
+ tdp->td_ip, tdp->td_right, type);
+ if (!error)
+ break;
+ (void)fsys_vector->release_right(tdp->td_ip,
+ tdp->td_right, type);
+ }
+ (void)fsys_vector->request_right(tdp->td_ip,
+ tdp->td_right, type, DM_RR_WAIT,
+ tdp->td_orig_right);
+ break;
+
+ case DM_RIGHT_EXCL:
+ if (tdp->td_right == DM_RIGHT_SHARED) {
+ error = fsys_vector->upgrade_right(tdp->td_ip,
+ tdp->td_right, type);
+ if (!error)
+ break;
+ (void)fsys_vector->release_right(tdp->td_ip,
+ tdp->td_right, type);
+ }
+ (void)fsys_vector->request_right(tdp->td_ip,
+ tdp->td_right, type, DM_RR_WAIT,
+ tdp->td_orig_right);
+ break;
+ case DM_RIGHT_NULL:
+ break;
+ }
+ }
+#endif
+
+ /* We now have back the same level of rights as we had when the event
+ was generated. Now transfer the rights from being tdp-based back
+ to thread-based.
+ */
+
+ /* DEBUG - Add a call here to transfer rights back to thread-based. */
+
+ /* Finally, update the tdp so that we don't mess with the rights when
+ we eventually call dm_put_tdp.
+ */
+
+ tdp->td_right = DM_RIGHT_NULL;
+}
+
+
+/* This routine is only called by event threads. The calls to dm_put_tdp
+ are not a deadlock risk here because this is an event thread, and it is
+ okay for such a thread to block on an induced destroy event. Okay, maybe
+ there is a slight risk; say that the event contains three inodes all of
+ which have DM_RIGHT_EXCL, and say that we are at the dm_max_queued_msgs
+ limit, and that the first inode is already unlinked. In that case the
+ destroy event will block waiting to be queued, and the application thread
+ could happen to reference one of the other locked inodes. Deadlock.
+*/
+
+void
+dm_evt_rele_tevp(
+ dm_tokevent_t *tevp,
+ int droprights) /* non-zero, evt thread loses rights */
+{
+ dm_tokdata_t *tdp;
+ unsigned long lc; /* lock cookie */
+
+ lc = mutex_spinlock(&tevp->te_lock);
+
+ /* If we are here without DM_TEF_FINAL set and with at least one
+ application reference still remaining, then one of several
+ possibilities is true:
+ 1. This is an asynchronous event which has been queued but has not
+ yet been delivered, or which is in the process of being delivered.
+ 2. This is an unmount event (pseudo-asynchronous) yet to be
+ delivered or in the process of being delivered.
+ 3. This event had DM_FLAGS_NDELAY specified, and the application
+ has sent a dm_pending() reply for the event.
+ 4. This is a DM_EVENT_READ, DM_EVENT_WRITE, or DM_EVENT_TRUNCATE
+ event and the user typed a Cntl-C.
+ In all of these cases, the correct behavior is to leave the
+ responsibility of releasing any rights to the application threads
+ when they are done.
+ */
+
+ if (tevp->te_app_ref > 0 && !(tevp->te_flags & DM_TEF_FINAL)) {
+ tevp->te_evt_ref--;
+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
+ if (tdp->td_flags & DM_TDF_EVTREF) {
+ tdp->td_flags &= ~DM_TDF_EVTREF;
+ if (tdp->td_vcount == 0) {
+ tdp->td_ip = NULL;
+ }
+ }
+ }
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return; /* not the last thread */
+ }
+
+ /* If the application reference count is non-zero here, that can only
+ mean that dm_respond_event() has been called, but the application
+ still has one or more threads in the kernel that haven't let go of
+ the tevp. In these cases, the event thread must wait until all
+ application threads have given up their references, and their
+ rights to handles within the event.
+ */
+
+ while (tevp->te_app_ref) {
+ sv_wait(&tevp->te_evt_queue, 1, &tevp->te_lock, lc);
+ lc = mutex_spinlock(&tevp->te_lock);
+ }
+
+ /* This thread is the last active thread using the token/event. Reset
+ the rights of any inode that was part of the original event back
+ to their initial values before returning to the filesystem. The
+ exception is if the event failed (droprights is non-zero), in which
+ case we chose to return to the filesystem with all rights released.
+ Release the rights on any inode that was not part of the original
+ event. Give up all remaining application inode references
+ regardless of whether or not the inode was part of the original
+ event.
+ */
+
+ mutex_spinunlock(&tevp->te_lock, lc);
+
+ while ((tdp = tevp->te_tdp) != NULL) {
+ tevp->te_tdp = tdp->td_next;
+ if ((tdp->td_flags & DM_TDF_ORIG) &&
+ (tdp->td_flags & DM_TDF_EVTREF) &&
+ (!droprights)) {
+ dm_change_right(tdp);
+ }
+ dm_put_tdp(tdp);
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ }
+ spinlock_destroy(&tevp->te_lock);
+ sv_destroy(&tevp->te_evt_queue);
+ sv_destroy(&tevp->te_app_queue);
+ kfree(tevp);
+}
+
+
+/* dm_obj_ref_hold() is just a fancy way to get an inode reference on an object
+ to hold it in kernel memory.
+*/
+
+int
+dm_obj_ref_hold(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_VNO,
+ DM_RIGHT_NULL, DM_FG_STHREAD, &tdp);
+
+ /* The tdp is single-threaded, so no mutex lock needed for update. */
+
+ if (error == 0) {
+ if (tdp->td_flags & DM_TDF_HOLD) { /* if already held */
+ error = -EBUSY;
+ } else {
+ tdp->td_flags |= DM_TDF_HOLD;
+ tdp->td_vcount++;
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ (void)fsys_vector->obj_ref_hold(tdp->td_ip);
+ }
+ dm_app_put_tdp(tdp);
+ }
+ return(error);
+}
+
+
+int
+dm_obj_ref_rele(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen)
+{
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_VNO,
+ DM_RIGHT_NULL, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
+
+ /* The tdp is single-threaded, so no mutex lock needed for update. */
+
+ if (error == 0) {
+ if (!(tdp->td_flags & DM_TDF_HOLD)) { /* if not held */
+ error = -EACCES; /* use the DM_FG_MUSTEXIST errno */
+ } else {
+ tdp->td_flags &= ~DM_TDF_HOLD;
+ iput(tdp->td_ip);
+ tdp->td_vcount--;
+ }
+ dm_app_put_tdp(tdp);
+ }
+ return(error);
+}
+
+
+int
+dm_obj_ref_query_rvp(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void __user *hanp,
+ size_t hlen,
+ int *rvp)
+{
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_VNO,
+ DM_RIGHT_NULL, DM_FG_DONTADD|DM_FG_STHREAD, &tdp);
+ if (error != 0)
+ return(error);
+
+ /* If the request is valid but the handle just isn't present in the
+ event or the hold flag isn't set, return zero, else return one.
+ */
+
+ if (tdp) {
+ if (tdp->td_flags & DM_TDF_HOLD) { /* if held */
+ *rvp = 1;
+ } else {
+ *rvp = 0;
+ }
+ dm_app_put_tdp(tdp);
+ } else {
+ *rvp = 0;
+ }
+ return(0);
+}
+
+
+int
+dm_downgrade_right(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_EXCL, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
+ if (error != 0)
+ return(error);
+
+ /* Attempt the downgrade. Filesystems which support rights but not
+ the downgrading of rights will return ENOSYS.
+ */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->downgrade_right(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0));
+
+ /* The tdp is single-threaded, so no mutex lock needed for update. */
+
+ if (error == 0)
+ tdp->td_right = DM_RIGHT_SHARED;
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_query_right(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_right_t __user *rightp)
+{
+ dm_tokdata_t *tdp;
+ dm_right_t right;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_NULL, DM_FG_DONTADD|DM_FG_STHREAD, &tdp);
+ if (error != 0)
+ return(error);
+
+ /* Get the current right and copy it to the caller. The tdp is
+ single-threaded, so no mutex lock is needed. If the tdp is not in
+ the event we are supposed to return DM_RIGHT_NULL in order to be
+ compatible with Veritas.
+ */
+
+ if (tdp) {
+ right = tdp->td_right;
+ dm_app_put_tdp(tdp);
+ } else {
+ right = DM_RIGHT_NULL;
+ }
+ if (copy_to_user(rightp, &right, sizeof(right)))
+ return(-EFAULT);
+ return(0);
+}
+
+
+int
+dm_release_right(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_SHARED, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->release_right(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0));
+
+ /* The tdp is single-threaded, so no mutex lock needed for update. */
+
+ if (error == 0) {
+ tdp->td_right = DM_RIGHT_NULL;
+ if (tdp->td_flags & DM_TDF_RIGHT) {
+ tdp->td_flags &= ~DM_TDF_RIGHT;
+ iput(tdp->td_ip);
+ tdp->td_vcount--;
+ }
+ }
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_request_right(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int flags,
+ dm_right_t right)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_NULL, DM_FG_STHREAD, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->request_right(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0), flags, right);
+
+ /* The tdp is single-threaded, so no mutex lock is needed for update.
+
+ If this is the first dm_request_right call for this inode, then we
+ need to bump the inode reference count for two reasons. First of
+ all, it is supposed to be impossible for the file to disappear or
+ for the filesystem to be unmounted while a right is held on a file;
+ bumping the file's inode reference count ensures this. Second, if
+ rights are ever actually implemented, it will most likely be done
+ without changes to the on-disk inode, which means that we can't let
+ the inode become unreferenced while a right on it is held.
+ */
+
+ if (error == 0) {
+ if (!(tdp->td_flags & DM_TDF_RIGHT)) { /* if first call */
+ tdp->td_flags |= DM_TDF_RIGHT;
+ tdp->td_vcount++;
+ (void)fsys_vector->obj_ref_hold(tdp->td_ip);
+ }
+ tdp->td_right = right;
+ }
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_upgrade_right(
+ dm_sessid_t sid,
+ void __user *hanp,
+ size_t hlen,
+ dm_token_t token)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_SHARED, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
+ if (error != 0)
+ return(error);
+
+ /* If the object already has the DM_RIGHT_EXCL right, no need to
+ attempt an upgrade.
+ */
+
+ if (tdp->td_right == DM_RIGHT_EXCL) {
+ dm_app_put_tdp(tdp);
+ return(0);
+ }
+
+ /* Attempt the upgrade. Filesystems which support rights but not
+ the upgrading of rights will return ENOSYS.
+ */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->upgrade_right(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0));
+
+ /* The tdp is single-threaded, so no mutex lock needed for update. */
+
+ if (error == 0)
+ tdp->td_right = DM_RIGHT_EXCL;
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
--- /dev/null
+++ b/fs/dmapi/dmapi_session.c
@@ -0,0 +1,1824 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/module.h>
+#endif
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+dm_session_t *dm_sessions = NULL; /* head of session list */
+u_int dm_sessions_active = 0; /* # sessions currently active */
+dm_sessid_t dm_next_sessid = 1; /* next session ID to use */
+lock_t dm_session_lock = SPIN_LOCK_UNLOCKED;/* lock for session list */
+
+dm_token_t dm_next_token = 1; /* next token ID to use */
+dm_sequence_t dm_next_sequence = 1; /* next sequence number to use */
+lock_t dm_token_lock = SPIN_LOCK_UNLOCKED;/* dm_next_token/dm_next_sequence lock */
+
+int dm_max_queued_msgs = 2048; /* max # undelivered msgs/session */
+
+int dm_hash_buckets = 1009; /* prime -- number of buckets */
+
+#define DM_SHASH(sess,inodenum) \
+ ((sess)->sn_sesshash + do_mod((inodenum), dm_hash_buckets))
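+
+/* For example, with dm_hash_buckets == 1009, an event for inode number
+   12345 hashes to bucket do_mod(12345, 1009) == 237, i.e. the chain
+   rooted at sn_sesshash + 237.
+*/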
+
+
+#ifdef CONFIG_PROC_FS
+static int
+sessions_read_pfs(char *buffer, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int len;
+ dm_session_t *sessp = (dm_session_t*)data;
+
+#define CHKFULL if(len >= count) break;
+#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL;
+
+ len=0;
+ while(1){
+ ADDBUF("sessp=0x%p\n", sessp);
+ ADDBUF("sn_next=0x%p\n", sessp->sn_next);
+ ADDBUF("sn_sessid=%d\n", sessp->sn_sessid);
+ ADDBUF("sn_flags=%x\n", sessp->sn_flags);
+ ADDBUF("sn_qlock=%c\n", '?');
+ ADDBUF("sn_readerq=%c\n", '?');
+ ADDBUF("sn_writerq=%c\n", '?');
+ ADDBUF("sn_readercnt=%u\n", sessp->sn_readercnt);
+ ADDBUF("sn_writercnt=%u\n", sessp->sn_writercnt);
+
+ ADDBUF("sn_newq.eq_head=0x%p\n", sessp->sn_newq.eq_head);
+ ADDBUF("sn_newq.eq_tail=0x%p\n", sessp->sn_newq.eq_tail);
+ ADDBUF("sn_newq.eq_count=%d\n", sessp->sn_newq.eq_count);
+
+ ADDBUF("sn_delq.eq_head=0x%p\n", sessp->sn_delq.eq_head);
+ ADDBUF("sn_delq.eq_tail=0x%p\n", sessp->sn_delq.eq_tail);
+ ADDBUF("sn_delq.eq_count=%d\n", sessp->sn_delq.eq_count);
+
+ ADDBUF("sn_evt_writerq.eq_head=0x%p\n", sessp->sn_evt_writerq.eq_head);
+ ADDBUF("sn_evt_writerq.eq_tail=0x%p\n", sessp->sn_evt_writerq.eq_tail);
+ ADDBUF("sn_evt_writerq.eq_count=%d\n", sessp->sn_evt_writerq.eq_count);
+
+ ADDBUF("sn_info=\"%s\"\n", sessp->sn_info);
+
+ break;
+ }
+
+ if (offset >= len) {
+ *start = buffer;
+ *eof = 1;
+ return 0;
+ }
+ *start = buffer + offset;
+ if ((len -= offset) > count)
+ return count;
+ *eof = 1;
+
+ return len;
+}
+#endif
+
+
+/* Link a session to the end of the session list. New sessions are always
+ added at the end of the list so that dm_enqueue_mount_event() doesn't
+ miss a session. The caller must have obtained dm_session_lock before
+ calling this routine.
+*/
+
+static void
+link_session(
+ dm_session_t *s)
+{
+ dm_session_t *tmp;
+
+ if ((tmp = dm_sessions) == NULL) {
+ dm_sessions = s;
+ } else {
+ while (tmp->sn_next != NULL)
+ tmp = tmp->sn_next;
+ tmp->sn_next = s;
+ }
+ s->sn_next = NULL;
+ dm_sessions_active++;
+}
+
+
+/* Remove a session from the session list. The caller must have obtained
+ dm_session_lock before calling this routine. unlink_session() should only
+ be used in situations where the session is known to be on the dm_sessions
+ list; otherwise it panics.
+*/
+
+static void
+unlink_session(
+ dm_session_t *s)
+{
+ dm_session_t *tmp;
+
+ if (dm_sessions == s) {
+ dm_sessions = dm_sessions->sn_next;
+ } else {
+ for (tmp = dm_sessions; tmp; tmp = tmp->sn_next) {
+ if (tmp->sn_next == s)
+ break;
+ }
+ if (tmp == NULL) {
+ panic("unlink_session: corrupt DMAPI session list, "
+ "dm_sessions %p, session %p\n",
+ dm_sessions, s);
+ }
+ tmp->sn_next = s->sn_next;
+ }
+ s->sn_next = NULL;
+ dm_sessions_active--;
+}
+
+
+/* Link an event to the end of an event queue. The caller must have obtained
+ the session's sn_qlock before calling this routine.
+*/
+
+void
+dm_link_event(
+ dm_tokevent_t *tevp,
+ dm_eventq_t *queue)
+{
+ if (queue->eq_tail) {
+ queue->eq_tail->te_next = tevp;
+ queue->eq_tail = tevp;
+ } else {
+ queue->eq_head = queue->eq_tail = tevp;
+ }
+ tevp->te_next = NULL;
+ queue->eq_count++;
+}
+
+
+/* Remove an event from an event queue. The caller must have obtained the
+ session's sn_qlock before calling this routine. dm_unlink_event() should
+ only be used in situations where the event is known to be on the queue;
+ otherwise it panics.
+*/
+
+void
+dm_unlink_event(
+ dm_tokevent_t *tevp,
+ dm_eventq_t *queue)
+{
+ dm_tokevent_t *tmp;
+
+ if (queue->eq_head == tevp) {
+ queue->eq_head = tevp->te_next;
+ if (queue->eq_head == NULL)
+ queue->eq_tail = NULL;
+ } else {
+ tmp = queue->eq_head;
+ while (tmp && tmp->te_next != tevp)
+ tmp = tmp->te_next;
+ if (tmp == NULL) {
+ panic("dm_unlink_event: corrupt DMAPI queue %p, "
+ "tevp %p\n", queue, tevp);
+ }
+ tmp->te_next = tevp->te_next;
+ if (tmp->te_next == NULL)
+ queue->eq_tail = tmp;
+ }
+ tevp->te_next = NULL;
+ queue->eq_count--;
+}
+
+/* Link a regular file event to a hash bucket. The caller must have obtained
+ the session's sn_qlock before calling this routine.
+ The tokevent must be for a regular file object--DM_TDT_REG.
+*/
+
+static void
+hash_event(
+ dm_session_t *s,
+ dm_tokevent_t *tevp)
+{
+ dm_sesshash_t *sh;
+ dm_ino_t ino;
+
+ if (s->sn_sesshash == NULL) {
+ s->sn_sesshash = kmalloc(dm_hash_buckets * sizeof(dm_sesshash_t), GFP_KERNEL);
+ if (s->sn_sesshash == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return;
+ }
+ memset(s->sn_sesshash, 0, dm_hash_buckets * sizeof(dm_sesshash_t));
+ }
+
+ ino = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino;
+ sh = DM_SHASH(s, ino);
+
+#ifdef DM_SHASH_DEBUG
+ if (sh->h_next == NULL) {
+ s->sn_buckets_in_use++;
+ if (s->sn_buckets_in_use > s->sn_max_buckets_in_use)
+ s->sn_max_buckets_in_use++;
+ }
+ sh->maxlength++;
+ sh->curlength++;
+ sh->num_adds++;
+#endif
+
+ tevp->te_flags |= DM_TEF_HASHED;
+ tevp->te_hashnext = sh->h_next;
+ sh->h_next = tevp;
+}
+
+
+/* Remove a regular file event from a hash bucket. The caller must have
+ obtained the session's sn_qlock before calling this routine.
+ The tokevent must be for a regular file object--DM_TDT_REG.
+*/
+
+static void
+unhash_event(
+ dm_session_t *s,
+ dm_tokevent_t *tevp)
+{
+ dm_sesshash_t *sh;
+ dm_tokevent_t *tmp;
+ dm_ino_t ino;
+
+ if (s->sn_sesshash == NULL)
+ return;
+
+ ino = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino;
+ sh = DM_SHASH(s, ino);
+
+ if (sh->h_next == tevp) {
+ sh->h_next = tevp->te_hashnext; /* leap frog */
+ } else {
+ tmp = sh->h_next;
+ while (tmp->te_hashnext != tevp) {
+ tmp = tmp->te_hashnext;
+ }
+ tmp->te_hashnext = tevp->te_hashnext; /* leap frog */
+ }
+ tevp->te_hashnext = NULL;
+ tevp->te_flags &= ~DM_TEF_HASHED;
+
+#ifdef DM_SHASH_DEBUG
+ if (sh->h_next == NULL)
+ s->sn_buckets_in_use--;
+ sh->curlength--;
+ sh->num_dels++;
+#endif
+}
+
+
+/* Determine if this is a repeat event. The caller MUST be holding
+ the session lock.
+ The tokevent must be for a regular file object--DM_TDT_REG.
+ Returns:
+ 0 == match not found
+ 1 == match found
+*/
+
+static int
+repeated_event(
+ dm_session_t *s,
+ dm_tokevent_t *tevp)
+{
+ dm_sesshash_t *sh;
+ dm_data_event_t *d_event1;
+ dm_data_event_t *d_event2;
+ dm_tokevent_t *tevph;
+ dm_ino_t ino1;
+ dm_ino_t ino2;
+
+ if ((!s->sn_newq.eq_tail) && (!s->sn_delq.eq_tail)) {
+ return(0);
+ }
+ if (s->sn_sesshash == NULL) {
+ return(0);
+ }
+
+ ino1 = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino;
+ sh = DM_SHASH(s, ino1);
+
+ if (sh->h_next == NULL) {
+ /* bucket is empty, no match here */
+ return(0);
+ }
+
+ d_event1 = (dm_data_event_t *)((char *)&tevp->te_msg + tevp->te_msg.ev_data.vd_offset);
+ tevph = sh->h_next;
+ while (tevph) {
+ /* find something with the same event type and handle type */
+ if ((tevph->te_msg.ev_type == tevp->te_msg.ev_type) &&
+ (tevph->te_tdp->td_type == tevp->te_tdp->td_type)) {
+
+			ino2 = (&tevph->te_tdp->td_handle.ha_fid)->dm_fid_ino;
+ d_event2 = (dm_data_event_t *)((char *)&tevph->te_msg + tevph->te_msg.ev_data.vd_offset);
+
+ /* If the two events are operating on the same file,
+ and the same part of that file, then we have a
+ match.
+ */
+ if ((ino1 == ino2) &&
+ (d_event2->de_offset == d_event1->de_offset) &&
+ (d_event2->de_length == d_event1->de_length)) {
+ /* found a match */
+#ifdef DM_SHASH_DEBUG
+ sh->dup_hits++;
+#endif
+ return(1);
+ }
+ }
+ tevph = tevph->te_hashnext;
+ }
+
+ /* No match found */
+ return(0);
+}
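+
+/* For example, two DM_EVENT_READ events posted against the same regular
+   file with identical de_offset and de_length values land in the same
+   hash bucket and compare equal above, so the caller can avoid queueing
+   the second one.
+*/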
+
+
+/* Return a pointer to a session given its session ID, or EINVAL if no session
+ has the session ID (per the DMAPI spec). The caller must have obtained
+ dm_session_lock before calling this routine.
+*/
+
+static int
+dm_find_session(
+ dm_sessid_t sid,
+ dm_session_t **sessionpp)
+{
+ dm_session_t *s;
+
+ for (s = dm_sessions; s; s = s->sn_next) {
+ if (s->sn_sessid == sid) {
+ *sessionpp = s;
+ return(0);
+ }
+ }
+ return(-EINVAL);
+}
+
+
+/* Return a pointer to a locked session given its session ID. '*lcp' is
+ used to obtain the session's sn_qlock. Caller is responsible for eventually
+ unlocking it.
+*/
+
+int
+dm_find_session_and_lock(
+ dm_sessid_t sid,
+ dm_session_t **sessionpp,
+ unsigned long *lcp) /* addr of returned lock cookie */
+{
+ int error;
+
+ for (;;) {
+ *lcp = mutex_spinlock(&dm_session_lock);
+
+ if ((error = dm_find_session(sid, sessionpp)) != 0) {
+ mutex_spinunlock(&dm_session_lock, *lcp);
+ return(error);
+ }
+ if (spin_trylock(&(*sessionpp)->sn_qlock)) {
+ nested_spinunlock(&dm_session_lock);
+ return(0); /* success */
+ }
+
+ /* If the second lock is not available, drop the first and
+ start over. This gives the CPU a chance to process any
+ interrupts, and also allows processes which want a sn_qlock
+ for a different session to proceed.
+ */
+
+ mutex_spinunlock(&dm_session_lock, *lcp);
+ }
+}
+
+
+/* Return a pointer to the event on the specified session's sn_delq which
+ contains the given token. The caller must have obtained the session's
+ sn_qlock before calling this routine.
+*/
+
+static int
+dm_find_msg(
+ dm_session_t *s,
+ dm_token_t token,
+ dm_tokevent_t **tevpp)
+{
+ dm_tokevent_t *tevp;
+
+ if (token <= DM_INVALID_TOKEN)
+ return(-EINVAL);
+
+ for (tevp = s->sn_delq.eq_head; tevp; tevp = tevp->te_next) {
+ if (tevp->te_msg.ev_token == token) {
+ *tevpp = tevp;
+ return(0);
+ }
+ }
+ return(-ESRCH);
+}
+
+
+/* Given a session ID and token, find the tevp on the specified session's
+ sn_delq which corresponds to that session ID/token pair. If a match is
+ found, lock the tevp's te_lock and return a pointer to the tevp.
+ '*lcp' is used to obtain the tevp's te_lock. The caller is responsible
+ for eventually unlocking it.
+*/
+
+int
+dm_find_msg_and_lock(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_tokevent_t **tevpp,
+ unsigned long *lcp) /* address of returned lock cookie */
+{
+ dm_session_t *s;
+ int error;
+
+ if ((error = dm_find_session_and_lock(sid, &s, lcp)) != 0)
+ return(error);
+
+ if ((error = dm_find_msg(s, token, tevpp)) != 0) {
+ mutex_spinunlock(&s->sn_qlock, *lcp);
+ return(error);
+ }
+ nested_spinlock(&(*tevpp)->te_lock);
+ nested_spinunlock(&s->sn_qlock);
+ return(0);
+}
+
+
+/* Create a new session, or resume an old session if one is given. */
+
+int
+dm_create_session(
+ dm_sessid_t old,
+ char __user *info,
+ dm_sessid_t __user *new)
+{
+ dm_session_t *s;
+ dm_sessid_t sid;
+ char sessinfo[DM_SESSION_INFO_LEN];
+ size_t len;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ len = strnlen_user(info, DM_SESSION_INFO_LEN-1);
+	if (len == 0 || copy_from_user(sessinfo, info, len))
+ return(-EFAULT);
+ lc = mutex_spinlock(&dm_session_lock);
+ sid = dm_next_sessid++;
+ mutex_spinunlock(&dm_session_lock, lc);
+ if (copy_to_user(new, &sid, sizeof(sid)))
+ return(-EFAULT);
+
+ if (old == DM_NO_SESSION) {
+ s = kmem_cache_alloc(dm_session_cachep, GFP_KERNEL);
+ if (s == NULL) {
+ printk("%s/%d: kmem_cache_alloc(dm_session_cachep) returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+ memset(s, 0, sizeof(*s));
+
+ sv_init(&s->sn_readerq, SV_DEFAULT, "dmreadq");
+ sv_init(&s->sn_writerq, SV_DEFAULT, "dmwritq");
+ spinlock_init(&s->sn_qlock, "sn_qlock");
+ } else {
+ lc = mutex_spinlock(&dm_session_lock);
+ if ((error = dm_find_session(old, &s)) != 0) {
+ mutex_spinunlock(&dm_session_lock, lc);
+ return(error);
+ }
+ unlink_session(s);
+ mutex_spinunlock(&dm_session_lock, lc);
+#ifdef CONFIG_PROC_FS
+ {
+ char buf[100];
+ sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s);
+ remove_proc_entry(buf, NULL);
+ }
+#endif
+ }
+ memcpy(s->sn_info, sessinfo, len);
+	s->sn_info[len-1] = 0;	/* guarantee NUL termination */
+ s->sn_sessid = sid;
+ lc = mutex_spinlock(&dm_session_lock);
+ link_session(s);
+ mutex_spinunlock(&dm_session_lock, lc);
+#ifdef CONFIG_PROC_FS
+ {
+ char buf[100];
+ struct proc_dir_entry *entry;
+
+ sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s);
+ entry = create_proc_read_entry(buf, 0, NULL, sessions_read_pfs, s);
+ }
+#endif
+ return(0);
+}
+
+
+int
+dm_destroy_session(
+ dm_sessid_t sid)
+{
+ dm_session_t *s;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* The dm_session_lock must be held until the session is unlinked. */
+
+ lc = mutex_spinlock(&dm_session_lock);
+
+ if ((error = dm_find_session(sid, &s)) != 0) {
+ mutex_spinunlock(&dm_session_lock, lc);
+ return(error);
+ }
+ nested_spinlock(&s->sn_qlock);
+
+ /* The session exists. Check to see if it is still in use. If any
+ messages still exist on the sn_newq or sn_delq, or if any processes
+ are waiting for messages to arrive on the session, then the session
+ must not be destroyed.
+ */
+
+ if (s->sn_newq.eq_head || s->sn_readercnt || s->sn_delq.eq_head) {
+ nested_spinunlock(&s->sn_qlock);
+ mutex_spinunlock(&dm_session_lock, lc);
+ return(-EBUSY);
+ }
+
+ /* The session is not in use. Dequeue it from the session chain. */
+
+ unlink_session(s);
+ nested_spinunlock(&s->sn_qlock);
+ mutex_spinunlock(&dm_session_lock, lc);
+
+#ifdef CONFIG_PROC_FS
+ {
+ char buf[100];
+ sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s);
+ remove_proc_entry(buf, NULL);
+ }
+#endif
+
+ /* Now clear the sessions's disposition registration, and then destroy
+ the session structure.
+ */
+
+ dm_clear_fsreg(s);
+
+ spinlock_destroy(&s->sn_qlock);
+ sv_destroy(&s->sn_readerq);
+ sv_destroy(&s->sn_writerq);
+ if (s->sn_sesshash)
+ kfree(s->sn_sesshash);
+ kmem_cache_free(dm_session_cachep, s);
+ return(0);
+}
+
+
+/*
+ * Return a list of all active sessions.
+ */
+
+int
+dm_getall_sessions(
+ u_int nelem,
+ dm_sessid_t __user *sidp,
+ u_int __user *nelemp)
+{
+ dm_session_t *s;
+ u_int sesscnt;
+ dm_sessid_t *sesslist;
+ unsigned long lc; /* lock cookie */
+ int error;
+ int i;
+
+ /* Loop until we can get the right amount of temp space, being careful
+ not to hold a mutex during the allocation. Usually only one trip.
+ */
+
+ for (;;) {
+ if ((sesscnt = dm_sessions_active) == 0) {
+ if (put_user(0, nelemp))
+ return(-EFAULT);
+ return(0);
+ }
+ sesslist = kmalloc(sesscnt * sizeof(*sidp), GFP_KERNEL);
+ if (sesslist == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ lc = mutex_spinlock(&dm_session_lock);
+ if (sesscnt == dm_sessions_active)
+ break;
+
+ mutex_spinunlock(&dm_session_lock, lc);
+ kfree(sesslist);
+ }
+
+ /* Make a temp copy of the data, then release the mutex. */
+
+ for (i = 0, s = dm_sessions; i < sesscnt; i++, s = s->sn_next)
+ sesslist[i] = s->sn_sessid;
+
+ mutex_spinunlock(&dm_session_lock, lc);
+
+ /* Now copy the data to the user. */
+
+ if(put_user(sesscnt, nelemp)) {
+ error = -EFAULT;
+ } else if (sesscnt > nelem) {
+ error = -E2BIG;
+ } else if (copy_to_user(sidp, sesslist, sesscnt * sizeof(*sidp))) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+ kfree(sesslist);
+ return(error);
+}
+
+
+/*
+ * Return the descriptive string associated with a session.
+ */
+
+int
+dm_query_session(
+ dm_sessid_t sid,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp)
+{
+ dm_session_t *s; /* pointer to session given by sid */
+ int len; /* length of session info string */
+ int error;
+ char sessinfo[DM_SESSION_INFO_LEN];
+ unsigned long lc; /* lock cookie */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+
+ len = strlen(s->sn_info) + 1; /* NULL terminated when created */
+ memcpy(sessinfo, s->sn_info, len);
+
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ /* Now that the mutex is released, copy the sessinfo to the user. */
+
+ if (put_user(len, rlenp)) {
+ error = -EFAULT;
+ } else if (len > buflen) {
+ error = -E2BIG;
+ } else if (copy_to_user(bufp, sessinfo, len)) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+ return(error);
+}
+
+
+/*
+ * Return all of the previously delivered tokens (that is, their IDs)
+ * for the given session.
+ */
+
+int
+dm_getall_tokens(
+ dm_sessid_t sid, /* session obtaining tokens from */
+ u_int nelem, /* size of tokenbufp */
+ dm_token_t __user *tokenbufp,/* buffer to copy token IDs to */
+ u_int __user *nelemp) /* return number copied to tokenbufp */
+{
+ dm_session_t *s; /* pointer to session given by sid */
+ dm_tokevent_t *tevp; /* event message queue traversal */
+ unsigned long lc; /* lock cookie */
+ int tokcnt;
+ dm_token_t *toklist;
+ int error;
+ int i;
+
+ /* Loop until we can get the right amount of temp space, being careful
+ not to hold a mutex during the allocation. Usually only one trip.
+ */
+
+ for (;;) {
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+ tokcnt = s->sn_delq.eq_count;
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ if (tokcnt == 0) {
+ if (put_user(0, nelemp))
+ return(-EFAULT);
+ return(0);
+ }
+ toklist = kmalloc(tokcnt * sizeof(*tokenbufp), GFP_KERNEL);
+ if (toklist == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0) {
+ kfree(toklist);
+ return(error);
+ }
+
+ if (tokcnt == s->sn_delq.eq_count)
+ break;
+
+ mutex_spinunlock(&s->sn_qlock, lc);
+ kfree(toklist);
+ }
+
+ /* Make a temp copy of the data, then release the mutex. */
+
+ tevp = s->sn_delq.eq_head;
+ for (i = 0; i < tokcnt; i++, tevp = tevp->te_next)
+ toklist[i] = tevp->te_msg.ev_token;
+
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ /* Now copy the data to the user. */
+
+ if (put_user(tokcnt, nelemp)) {
+ error = -EFAULT;
+ } else if (tokcnt > nelem) {
+ error = -E2BIG;
+ } else if (copy_to_user(tokenbufp,toklist,tokcnt*sizeof(*tokenbufp))) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+ kfree(toklist);
+ return(error);
+}
+
+
+/*
+ * Return the message identified by token.
+ */
+
+int
+dm_find_eventmsg(
+ dm_sessid_t sid,
+ dm_token_t token,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp)
+{
+ dm_tokevent_t *tevp; /* message identified by token */
+ int msgsize; /* size of message to copy out */
+ void *msg;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Because some of the events (dm_data_event_t in particular) contain
+ __u64 fields, we need to make sure that the buffer provided by the
+ caller is aligned such that he can read those fields successfully.
+ */
+
+ if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0)
+ return(-EFAULT);
+
+ /* Allocate the right amount of temp space, being careful not to hold
+ a mutex during the allocation.
+ */
+
+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0)
+ return(error);
+ msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_msg);
+ mutex_spinunlock(&tevp->te_lock, lc);
+
+ msg = kmalloc(msgsize, GFP_KERNEL);
+ if (msg == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0) {
+ kfree(msg);
+ return(error);
+ }
+
+ /* Make a temp copy of the data, then release the mutex. */
+
+ memcpy(msg, &tevp->te_msg, msgsize);
+ mutex_spinunlock(&tevp->te_lock, lc);
+
+ /* Now copy the data to the user. */
+
+ if (put_user(msgsize,rlenp)) {
+ error = -EFAULT;
+ } else if (msgsize > buflen) { /* user buffer not big enough */
+ error = -E2BIG;
+ } else if (copy_to_user( bufp, msg, msgsize )) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+ kfree(msg);
+ return(error);
+}
+
+
+int
+dm_move_event(
+ dm_sessid_t srcsid,
+ dm_token_t token,
+ dm_sessid_t targetsid,
+ dm_token_t __user *rtokenp)
+{
+ dm_session_t *s1;
+ dm_session_t *s2;
+ dm_tokevent_t *tevp;
+ int error;
+ unsigned long lc; /* lock cookie */
+ int hash_it = 0;
+
+ lc = mutex_spinlock(&dm_session_lock);
+
+ if ((error = dm_find_session(srcsid, &s1)) != 0 ||
+ (error = dm_find_session(targetsid, &s2)) != 0 ||
+ (error = dm_find_msg(s1, token, &tevp)) != 0) {
+ mutex_spinunlock(&dm_session_lock, lc);
+ return(error);
+ }
+ dm_unlink_event(tevp, &s1->sn_delq);
+ if (tevp->te_flags & DM_TEF_HASHED) {
+ unhash_event(s1, tevp);
+ hash_it = 1;
+ }
+ dm_link_event(tevp, &s2->sn_delq);
+ if (hash_it)
+ hash_event(s2, tevp);
+ mutex_spinunlock(&dm_session_lock, lc);
+
+ if (copy_to_user(rtokenp, &token, sizeof(token)))
+ return(-EFAULT);
+ return(0);
+}
+
+
+/* ARGSUSED */
+int
+dm_pending(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_timestruct_t __user *delay) /* unused */
+{
+ dm_tokevent_t *tevp;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0)
+ return(error);
+
+ tevp->te_flags |= DM_TEF_INTERMED;
+ if (tevp->te_evt_ref > 0) /* if event generation threads exist */
+ sv_broadcast(&tevp->te_evt_queue);
+
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return(0);
+}
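+
+/* Illustrative sketch of the daemon-side protocol around dm_pending()
+   (simplified user-space call sequence, not code from this file):
+
+	dm_get_events(sid, ...)			receive the event and token
+	dm_pending(sid, token, &delay)		tell blocked threads "working"
+	... stage the file data back in ...
+	dm_respond_event(sid, token, DM_RESP_CONTINUE, 0, 0, NULL)
+*/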
+
+
+int
+dm_get_events(
+ dm_sessid_t sid,
+ u_int maxmsgs,
+ u_int flags,
+ size_t buflen,
+ void __user *bufp,
+ size_t __user *rlenp)
+{
+ dm_session_t *s; /* pointer to session given by sid */
+ dm_tokevent_t *tevp; /* next event message on queue */
+ int error;
+ unsigned long lc1; /* first lock cookie */
+ unsigned long lc2 = 0; /* second lock cookie */
+ int totalsize;
+ int msgsize;
+ dm_eventmsg_t __user *prevmsg;
+ int prev_msgsize = 0;
+ u_int msgcnt;
+
+ /* Because some of the events (dm_data_event_t in particular) contain
+ __u64 fields, we need to make sure that the buffer provided by the
+ caller is aligned such that he can read those fields successfully.
+ */
+
+ if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0)
+ return(-EFAULT);
+
+ /* Find the indicated session and lock it. */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc1)) != 0)
+ return(error);
+
+	/* Check for messages on sn_newq. If there aren't any that haven't
+	   already been grabbed by another process, and if we are supposed
+	   to wait until one shows up, then go to sleep interruptibly on the
+	   sn_readerq semaphore. The session can't disappear out from under
+	   us as long as sn_readercnt is non-zero.
+	*/
+
+ for (;;) {
+ int rc;
+
+ for (tevp = s->sn_newq.eq_head; tevp; tevp = tevp->te_next) {
+ lc2 = mutex_spinlock(&tevp->te_lock);
+ if (!(tevp->te_flags & DM_TEF_LOCKED))
+ break;
+ mutex_spinunlock(&tevp->te_lock, lc2);
+ }
+ if (tevp)
+ break; /* got one! */
+
+ if (!(flags & DM_EV_WAIT)) {
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ return(-EAGAIN);
+ }
+ s->sn_readercnt++;
+
+ sv_wait_sig(&s->sn_readerq, 1, &s->sn_qlock, lc1);
+ rc = signal_pending(current);
+
+ lc1 = mutex_spinlock(&s->sn_qlock);
+ s->sn_readercnt--;
+ if (rc) { /* if signal was received */
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ return(-EINTR);
+ }
+ }
+
+ /* At least one message is available for delivery, and we have both the
+ session lock and event lock. Mark the event so that it is not
+	   grabbed by other daemons, then drop both locks prior to copying the
+ data to the caller's buffer. Leaving the event on the queue in a
+ marked state prevents both the session and the event from
+ disappearing out from under us while we don't have the locks.
+ */
+
+ tevp->te_flags |= DM_TEF_LOCKED;
+ mutex_spinunlock(&tevp->te_lock, lc2); /* reverse cookie order */
+ mutex_spinunlock(&s->sn_qlock, lc1);
+
+ /* Continue to deliver messages until there are no more, the
+ user's buffer becomes full, or we hit his maxmsgs limit.
+ */
+
+ totalsize = 0; /* total bytes transferred to the user */
+ prevmsg = NULL;
+ msgcnt = 0;
+
+ while (tevp) {
+ /* Compute the number of bytes to be moved, rounding up to an
+ 8-byte boundary so that any subsequent messages will also be
+ aligned.
+ */
+
+ msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_msg);
+ msgsize = (msgsize + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1);
+ totalsize += msgsize;
+
+ /* If it fits, copy the message into the user's buffer and
+ update his 'rlenp'. Update the _link pointer for any
+ previous message.
+ */
+
+ if (totalsize > buflen) { /* no more room */
+ error = -E2BIG;
+ } else if (put_user(totalsize, rlenp)) {
+ error = -EFAULT;
+ } else if (copy_to_user(bufp, &tevp->te_msg, msgsize)) {
+ error = -EFAULT;
+ } else if (prevmsg && put_user(prev_msgsize, &prevmsg->_link)) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+
+ /* If an error occurred, just unmark the event and leave it on
+ the queue for someone else. Note that other daemons may
+ have gone to sleep because this event was marked, so wake
+ them up. Also, if at least one message has already been
+ delivered, then an error here is not really an error.
+ */
+
+ lc1 = mutex_spinlock(&s->sn_qlock);
+ lc2 = mutex_spinlock(&tevp->te_lock);
+ tevp->te_flags &= ~DM_TEF_LOCKED; /* drop the mark */
+
+ if (error) {
+ if (s->sn_readercnt)
+ sv_signal(&s->sn_readerq);
+
+ mutex_spinunlock(&tevp->te_lock, lc2); /* rev. order */
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ if (prevmsg)
+ return(0);
+ if (error == -E2BIG && put_user(totalsize,rlenp))
+ error = -EFAULT;
+ return(error);
+ }
+
+ /* The message was successfully delivered. Unqueue it. */
+
+ dm_unlink_event(tevp, &s->sn_newq);
+
+ /* Wake up the first of any processes waiting for room on the
+ sn_newq.
+ */
+
+ if (s->sn_writercnt)
+ sv_signal(&s->sn_writerq);
+
+ /* If the message is synchronous, add it to the sn_delq while
+ still holding the lock. If it is asynchronous, free it.
+ */
+
+ if (tevp->te_msg.ev_token != DM_INVALID_TOKEN) { /* synch */
+ dm_link_event(tevp, &s->sn_delq);
+ mutex_spinunlock(&tevp->te_lock, lc2);
+ } else {
+ tevp->te_flags |= DM_TEF_FINAL;
+ if (tevp->te_flags & DM_TEF_HASHED)
+ unhash_event(s, tevp);
+ mutex_spinunlock(&tevp->te_lock, lc2);
+ dm_put_tevp(tevp, NULL);/* can't cause destroy events */
+ }
+
+ /* Update our notion of where we are in the user's buffer. If
+ he doesn't want any more messages, then stop.
+ */
+
+ prevmsg = (dm_eventmsg_t __user *)bufp;
+ prev_msgsize = msgsize;
+ bufp = (char __user *)bufp + msgsize;
+
+ msgcnt++;
+ if (maxmsgs && msgcnt >= maxmsgs) {
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ break;
+ }
+
+ /* While still holding the sn_qlock, see if any additional
+ messages are available for delivery.
+ */
+
+ for (tevp = s->sn_newq.eq_head; tevp; tevp = tevp->te_next) {
+ lc2 = mutex_spinlock(&tevp->te_lock);
+ if (!(tevp->te_flags & DM_TEF_LOCKED)) {
+ tevp->te_flags |= DM_TEF_LOCKED;
+ mutex_spinunlock(&tevp->te_lock, lc2);
+ break;
+ }
+ mutex_spinunlock(&tevp->te_lock, lc2);
+ }
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ }
+ return(0);
+}
+
+
+/*
+ * Remove an event message from the delivered queue, set the returned
+ * error where the event generator wants it, and wake up the generator.
+ * Also currently have the user side release any locks it holds...
+ */
+
+/* ARGSUSED */
+int
+dm_respond_event(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_response_t response,
+ int reterror,
+ size_t buflen, /* unused */
+ void __user *respbufp) /* unused */
+{
+ dm_session_t *s; /* pointer to session given by sid */
+ dm_tokevent_t *tevp; /* event message queue traversal */
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Sanity check the input parameters. */
+
+ switch (response) {
+ case DM_RESP_CONTINUE: /* continue must have reterror == 0 */
+ if (reterror != 0)
+ return(-EINVAL);
+ break;
+ case DM_RESP_ABORT: /* abort must have errno set */
+ if (reterror <= 0)
+ return(-EINVAL);
+ break;
+ case DM_RESP_DONTCARE:
+ reterror = -1; /* to distinguish DM_RESP_DONTCARE */
+ break;
+ default:
+ return(-EINVAL);
+ }
+
+ /* Hold session lock until the event is unqueued. */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+
+ if ((error = dm_find_msg(s, token, &tevp)) != 0) {
+ mutex_spinunlock(&s->sn_qlock, lc);
+ return(error);
+ }
+ nested_spinlock(&tevp->te_lock);
+
+ if ((response == DM_RESP_DONTCARE) &&
+ (tevp->te_msg.ev_type != DM_EVENT_MOUNT)) {
+ error = -EINVAL;
+ nested_spinunlock(&tevp->te_lock);
+ mutex_spinunlock(&s->sn_qlock, lc);
+ } else {
+ dm_unlink_event(tevp, &s->sn_delq);
+ if (tevp->te_flags & DM_TEF_HASHED)
+ unhash_event(s, tevp);
+ tevp->te_reply = -reterror; /* linux wants negative errno */
+ tevp->te_flags |= DM_TEF_FINAL;
+ if (tevp->te_evt_ref)
+ sv_broadcast(&tevp->te_evt_queue);
+ nested_spinunlock(&tevp->te_lock);
+ mutex_spinunlock(&s->sn_qlock, lc);
+ error = 0;
+
+ /* Absolutely no locks can be held when calling dm_put_tevp! */
+
+ dm_put_tevp(tevp, NULL); /* this can generate destroy events */
+ }
+ return(error);
+}
+
+/* The caller must hold sn_qlock.
+ This will return the tokevent locked.
+ */
+static dm_tokevent_t *
+__find_match_event_no_waiters_locked(
+ dm_tokevent_t *tevp1,
+ dm_eventq_t *queue)
+{
+ dm_tokevent_t *tevp2, *next_tevp;
+ dm_tokdata_t *tdp1 = tevp1->te_tdp;
+ dm_tokdata_t *tdp2;
+ dm_data_event_t *d_event1;
+ dm_data_event_t *d_event2;
+
+ d_event1 = (dm_data_event_t *)((char *)&tevp1->te_msg + tevp1->te_msg.ev_data.vd_offset);
+
+ for(tevp2 = queue->eq_head; tevp2; tevp2 = next_tevp) {
+ nested_spinlock(&tevp2->te_lock);
+ next_tevp = tevp2->te_next;
+
+ /* Just compare the first tdp's in each--there should
+ be just one, if it's the match we want.
+ */
+		tdp2 = tevp2->te_tdp;
+		if ((tevp2->te_msg.ev_type == tevp1->te_msg.ev_type) &&
+		    (tdp2 != NULL) && (tdp2->td_next == NULL) &&
+		    (tdp2->td_type == tdp1->td_type) &&
+		    (tevp2->te_evt_ref == 0) &&
+		    (memcmp(&tdp1->td_handle, &tdp2->td_handle,
+			sizeof(dm_handle_t)) == 0)) {
+
+ d_event2 = (dm_data_event_t *)((char *)&tevp2->te_msg + tevp2->te_msg.ev_data.vd_offset);
+
+ if ((d_event2->de_offset == d_event1->de_offset) &&
+ (d_event2->de_length == d_event1->de_length)) {
+ /* Match -- return it locked */
+ return tevp2;
+ }
+ }
+ nested_spinunlock(&tevp2->te_lock);
+ }
+ return NULL;
+}
+
+/* The caller must hold the sn_qlock.
+ The returned tokevent will be locked with nested_spinlock.
+ */
+static dm_tokevent_t *
+find_match_event_no_waiters_locked(
+ dm_session_t *s,
+ dm_tokevent_t *tevp)
+{
+ dm_tokevent_t *tevp2;
+
+ if ((!s->sn_newq.eq_tail) && (!s->sn_delq.eq_tail))
+ return NULL;
+ if (!tevp->te_tdp)
+ return NULL;
+ if (tevp->te_tdp->td_next) {
+ /* If it has multiple tdp's then don't bother trying to
+ find a match.
+ */
+ return NULL;
+ }
+ tevp2 = __find_match_event_no_waiters_locked(tevp, &s->sn_newq);
+ if (tevp2 == NULL)
+ tevp2 = __find_match_event_no_waiters_locked(tevp, &s->sn_delq);
+ /* returns a locked tokevent */
+ return tevp2;
+}
+
+
+
+/* Queue the filled in event message pointed to by tevp on the session s, and
+ (if a synchronous event) wait for the reply from the DMAPI application.
+ The caller MUST be holding the session lock before calling this routine!
+ The session lock is always released upon exit.
+   Returns:
+	> 0 == don't care (the session replied DM_RESP_DONTCARE)
+	  0 == success (or async event)
+	< 0 == negative errno describing reason for failure
+*/
+
+static int
+dm_enqueue(
+ dm_session_t *s,
+ unsigned long lc, /* input lock cookie */
+ dm_tokevent_t **tevpp, /* in/out parameter */
+ int sync,
+ int flags,
+ int interruptable)
+{
+ int is_unmount = 0;
+ int is_hashable = 0;
+ int reply;
+ dm_tokevent_t *tevp = *tevpp;
+
+ /* If the caller isn't planning to stick around for the result
+ and this request is identical to one that is already on the
+ queues then just give the caller an EAGAIN. Release the
+ session lock before returning.
+
+ We look only at NDELAY requests with an event type of READ,
+ WRITE, or TRUNCATE on objects that are regular files.
+ */
+
+ if ((flags & DM_FLAGS_NDELAY) && DM_EVENT_RDWRTRUNC(tevp) &&
+ (tevp->te_tdp->td_type == DM_TDT_REG)) {
+ if (repeated_event(s, tevp)) {
+ mutex_spinunlock(&s->sn_qlock, lc);
+ return -EAGAIN;
+ }
+ is_hashable = 1;
+ }
+
+ /* If the caller is a sync event then look for a matching sync
+ event. If there is a match and it doesn't currently have
+ event threads waiting on it, then we will drop our own
+ tokevent and jump on the matching event.
+ */
+ if (((flags & DM_FLAGS_NDELAY) == 0) && DM_EVENT_RDWRTRUNC(tevp) &&
+ (tevp->te_tdp->td_type == DM_TDT_REG)) {
+ dm_tokevent_t *tevp2;
+ if ((tevp2 = find_match_event_no_waiters_locked(s, tevp))) {
+ ASSERT(tevp2->te_evt_ref == 0);
+ tevp2->te_evt_ref++;
+ nested_spinunlock(&tevp2->te_lock);
+ nested_spinlock(&tevp->te_lock);
+ tevp->te_evt_ref--;
+ nested_spinunlock(&tevp->te_lock);
+ mutex_spinunlock(&s->sn_qlock, lc);
+ /* All locks have been released */
+ dm_evt_rele_tevp(tevp, 1);
+ *tevpp = tevp = tevp2;
+ goto wait_on_tevp;
+ }
+ }
+
+ if (tevp->te_msg.ev_type == DM_EVENT_UNMOUNT)
+ is_unmount = 1;
+
+ /* Check for room on sn_newq. If there is no room for new messages,
+ then go to sleep on the sn_writerq semaphore. The
+ session cannot disappear out from under us as long as sn_writercnt
+ is non-zero.
+ */
+
+ while (s->sn_newq.eq_count >= dm_max_queued_msgs) { /* no room */
+ s->sn_writercnt++;
+ dm_link_event(tevp, &s->sn_evt_writerq);
+		if (interruptable) {
+			sv_wait_sig(&s->sn_writerq, 1, &s->sn_qlock, lc);
+			if (signal_pending(current)) {
+				/* retake sn_qlock so the writer count and
+				   queue stay consistent before bailing */
+				lc = mutex_spinlock(&s->sn_qlock);
+				s->sn_writercnt--;
+				dm_unlink_event(tevp, &s->sn_evt_writerq);
+				mutex_spinunlock(&s->sn_qlock, lc);
+				return -EINTR;
+			}
+ } else {
+ sv_wait(&s->sn_writerq, 1, &s->sn_qlock, lc);
+ }
+ lc = mutex_spinlock(&s->sn_qlock);
+ s->sn_writercnt--;
+ dm_unlink_event(tevp, &s->sn_evt_writerq);
+#ifdef HAVE_DM_QUEUE_FLUSH
+ /* We hold the sn_qlock, from here to after we get into
+ * the sn_newq. Any thread going through
+ * dm_release_threads() looking for us is already past us
+ * and has set the DM_TEF_FLUSH flag for us or is blocked on
+ * sn_qlock and will find us in sn_newq after we release
+ * the sn_qlock.
+ * We check for dop->flushing anyway, in case the
+ * dm_release_threads() already completed before we
+ * could enter dmapi.
+ */
+ if (!sync) {
+ /* async events are forced into the newq */
+ break;
+ }
+ if (tevp->te_flags & DM_TEF_FLUSH) {
+ mutex_spinunlock(&s->sn_qlock, lc);
+ return tevp->te_reply;
+ }
+ else {
+ struct filesystem_dmapi_operations *dops;
+ dm_tokdata_t *tdp;
+ int errno = 0;
+
+ nested_spinlock(&tevp->te_lock);
+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
+ if (tdp->td_ip) {
+ dops = dm_fsys_ops(tdp->td_ip->i_sb);
+ ASSERT(dops);
+ if (dops->flushing)
+ errno = dops->flushing(tdp->td_ip);
+ if (errno) {
+ nested_spinunlock(&tevp->te_lock);
+ mutex_spinunlock(&s->sn_qlock, lc);
+ return errno;
+ }
+ }
+ }
+ nested_spinunlock(&tevp->te_lock);
+ }
+#endif /* HAVE_DM_QUEUE_FLUSH */
+ }
+
+ /* Assign a sequence number and token to the event and bump the
+ application reference count by one. We don't need 'te_lock' here
+ because this thread is still the only thread that can see the event.
+ */
+
+ nested_spinlock(&dm_token_lock);
+ tevp->te_msg.ev_sequence = dm_next_sequence++;
+ if (sync) {
+ tevp->te_msg.ev_token = dm_next_token++;
+ } else {
+ tevp->te_msg.ev_token = DM_INVALID_TOKEN;
+ }
+ nested_spinunlock(&dm_token_lock);
+
+ tevp->te_app_ref++;
+
+ /* Room exists on the sn_newq queue, so add this request. If the
+ queue was previously empty, wake up the first of any processes
+ that are waiting for an event.
+ */
+
+ dm_link_event(tevp, &s->sn_newq);
+ if (is_hashable)
+ hash_event(s, tevp);
+
+ if (s->sn_readercnt)
+ sv_signal(&s->sn_readerq);
+
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ /* Now that the message is queued, processes issuing asynchronous
+ events or DM_EVENT_UNMOUNT events are ready to continue.
+ */
+
+ if (!sync || is_unmount)
+ return 0;
+
+ /* Synchronous requests wait until a final reply is received. If the
+ caller supplied the DM_FLAGS_NDELAY flag, the process will return
+	   EAGAIN if dm_pending() sets DM_TEF_INTERMED. We also let users
+	   Ctrl-C out of read, write, and truncate requests.
+ */
+
+wait_on_tevp:
+ lc = mutex_spinlock(&tevp->te_lock);
+
+ while (!(tevp->te_flags & DM_TEF_FINAL)) {
+ if ((tevp->te_flags & DM_TEF_INTERMED) &&
+ (flags & DM_FLAGS_NDELAY)) {
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return -EAGAIN;
+ }
+ if (tevp->te_msg.ev_type == DM_EVENT_READ ||
+ tevp->te_msg.ev_type == DM_EVENT_WRITE ||
+ tevp->te_msg.ev_type == DM_EVENT_TRUNCATE) {
+ sv_wait_sig(&tevp->te_evt_queue, 1, &tevp->te_lock, lc);
+ if (signal_pending(current)){
+ return -EINTR;
+ }
+ } else {
+ sv_wait(&tevp->te_evt_queue, 1, &tevp->te_lock, lc);
+ }
+ lc = mutex_spinlock(&tevp->te_lock);
+#ifdef HAVE_DM_QUEUE_FLUSH
+ /* Did we pop out because of queue flushing? */
+ if (tevp->te_flags & DM_TEF_FLUSH) {
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return tevp->te_reply;
+ }
+#endif /* HAVE_DM_QUEUE_FLUSH */
+ }
+
+	/* Return the reply that dm_respond_event() stored in the tevp.
+	   The tevp has already been removed from the reply queue by
+	   dm_respond_event() by this point.
+	*/
+
+ reply = tevp->te_reply;
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return reply;
+}
+
+
+/* The filesystem is guaranteed to stay mounted while this event is
+ outstanding.
+*/
+
+int
+dm_enqueue_normal_event(
+ struct super_block *sb,
+ dm_tokevent_t **tevpp,
+ int flags)
+{
+ dm_session_t *s;
+ int error;
+ int sync;
+ unsigned long lc; /* lock cookie */
+
+ switch ((*tevpp)->te_msg.ev_type) {
+ case DM_EVENT_READ:
+ case DM_EVENT_WRITE:
+ case DM_EVENT_TRUNCATE:
+ case DM_EVENT_PREUNMOUNT:
+ case DM_EVENT_UNMOUNT:
+ case DM_EVENT_NOSPACE:
+ case DM_EVENT_CREATE:
+ case DM_EVENT_REMOVE:
+ case DM_EVENT_RENAME:
+ case DM_EVENT_SYMLINK:
+ case DM_EVENT_LINK:
+ case DM_EVENT_DEBUT: /* not currently supported */
+ sync = 1;
+ break;
+
+ case DM_EVENT_DESTROY:
+ case DM_EVENT_POSTCREATE:
+ case DM_EVENT_POSTREMOVE:
+ case DM_EVENT_POSTRENAME:
+ case DM_EVENT_POSTSYMLINK:
+ case DM_EVENT_POSTLINK:
+ case DM_EVENT_ATTRIBUTE:
+ case DM_EVENT_CLOSE: /* not currently supported */
+ case DM_EVENT_CANCEL: /* not currently supported */
+ sync = 0;
+ break;
+
+ default:
+ return(-EIO); /* garbage event number */
+ }
+
+ /* Wait until a session selects disposition for the event. The session
+ is locked upon return from dm_waitfor_disp_session().
+ */
+
+ if ((error = dm_waitfor_disp_session(sb, *tevpp, &s, &lc)) != 0)
+ return(error);
+
+ return(dm_enqueue(s, lc, tevpp, sync, flags, 0));
+}
+
+
+/* Traverse the session list checking for sessions with the WANTMOUNT flag
+ set. When one is found, send it the message. Possible responses to the
+ message are one of DONTCARE, CONTINUE, or ABORT. The action taken in each
+ case is:
+	DONTCARE (> 0) - Send the event to the next session with WANTMOUNT set
+	CONTINUE (== 0) - Proceed with the mount, errno zero.
+	ABORT	 (< 0) - Fail the mount, returning the negative errno.
+
+ The mount request is sent to sessions in ascending session ID order.
+ Since the session list can change dramatically while this process is
+ sleeping in dm_enqueue(), this routine must use session IDs rather than
+ session pointers when keeping track of where it is in the list. Since
+ new sessions are always added at the end of the queue, and have increasing
+ session ID values, we don't have to worry about missing any session.
+*/
+
+int
+dm_enqueue_mount_event(
+ struct super_block *sb,
+ dm_tokevent_t *tevp)
+{
+ dm_session_t *s;
+ dm_sessid_t sid;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Make the mounting filesystem visible to other DMAPI calls. */
+
+ if ((error = dm_add_fsys_entry(sb, tevp)) != 0){
+ return(error);
+ }
+
+ /* Walk through the session list presenting the mount event to each
+ session that is interested until a session accepts or rejects it,
+ or until all sessions ignore it.
+ */
+
+ for (sid = DM_NO_SESSION, error = 1; error > 0; sid = s->sn_sessid) {
+
+ lc = mutex_spinlock(&dm_session_lock);
+ for (s = dm_sessions; s; s = s->sn_next) {
+ if (s->sn_sessid > sid && s->sn_flags & DM_SN_WANTMOUNT) {
+ nested_spinlock(&s->sn_qlock);
+ nested_spinunlock(&dm_session_lock);
+ break;
+ }
+ }
+ if (s == NULL) {
+ mutex_spinunlock(&dm_session_lock, lc);
+			break;	/* no one wants it; proceed with mount */
+ }
+ error = dm_enqueue(s, lc, &tevp, 1, 0, 0);
+ }
+
+ /* If the mount will be allowed to complete, then update the fsrp entry
+ accordingly. If the mount is to be aborted, remove the fsrp entry.
+ */
+
+ if (error >= 0) {
+ dm_change_fsys_entry(sb, DM_STATE_MOUNTED);
+ error = 0;
+ } else {
+ dm_remove_fsys_entry(sb);
+ }
+ return(error);
+}
+
+int
+dm_enqueue_sendmsg_event(
+ dm_sessid_t targetsid,
+ dm_tokevent_t *tevp,
+ int sync)
+{
+ dm_session_t *s;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ if ((error = dm_find_session_and_lock(targetsid, &s, &lc)) != 0)
+ return(error);
+
+ return(dm_enqueue(s, lc, &tevp, sync, 0, 1));
+}
+
+
+int
+dm_enqueue_user_event(
+ dm_sessid_t sid,
+ dm_tokevent_t *tevp,
+ dm_token_t *tokenp)
+{
+ dm_session_t *s;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Atomically find and lock the session whose session id is 'sid'. */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+
+ /* Assign a sequence number and token to the event, bump the
+ application reference count by one, and decrement the event
+ count because the caller gives up all ownership of the event.
+ We don't need 'te_lock' here because this thread is still the
+ only thread that can see the event.
+ */
+
+ nested_spinlock(&dm_token_lock);
+ tevp->te_msg.ev_sequence = dm_next_sequence++;
+ *tokenp = tevp->te_msg.ev_token = dm_next_token++;
+ nested_spinunlock(&dm_token_lock);
+
+ tevp->te_flags &= ~(DM_TEF_INTERMED|DM_TEF_FINAL);
+ tevp->te_app_ref++;
+ tevp->te_evt_ref--;
+
+ /* Add the request to the tail of the sn_delq. Now it's visible. */
+
+ dm_link_event(tevp, &s->sn_delq);
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ return(0);
+}
+
+#ifdef HAVE_DM_QUEUE_FLUSH
+/* If inode is non-null, find any tdp referencing that inode and flush the
+ * thread waiting on that inode and set DM_TEF_FLUSH for that tokevent.
+ * Otherwise, if inode is null, find any tdp referencing the specified fsid
+ * and flush that thread and set DM_TEF_FLUSH for that tokevent.
+ */
+static int
+dm_flush_events(
+ dm_session_t *s,
+ dm_fsid_t *fsidp,
+ struct inode *inode, /* may be null */
+ dm_eventq_t *queue,
+ int is_writerq,
+ int errno)
+{
+ dm_tokevent_t *tevp, *next_tevp;
+ dm_tokdata_t *tdp;
+ int found_events = 0;
+
+ ASSERT(fsidp);
+ for (tevp = queue->eq_head; tevp; tevp = next_tevp) {
+ nested_spinlock(&tevp->te_lock);
+ next_tevp = tevp->te_next;
+
+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
+ if( inode ) {
+ if( tdp->td_ip == inode ) {
+ break;
+ }
+ }
+ else if(memcmp(fsidp, &tdp->td_handle.ha_fsid, sizeof(*fsidp)) == 0) {
+ break;
+ }
+ }
+
+ if (tdp != NULL) {
+ /* found a handle reference in this event */
+ ++found_events;
+ tevp->te_flags |= DM_TEF_FLUSH;
+
+ /* Set the reply value, unless dm_get_events is
+ already on this one.
+ */
+ if (! (tevp->te_flags & DM_TEF_LOCKED))
+ tevp->te_reply = errno;
+
+ /* If it is on the sn_evt_writerq or is being
+ used by dm_get_events then we're done with it.
+ */
+ if (is_writerq || (tevp->te_flags & DM_TEF_LOCKED)) {
+ nested_spinunlock(&tevp->te_lock);
+ continue;
+ }
+
+ /* If there is a thread waiting on a synchronous
+ event then be like dm_respond_event.
+ */
+
+ if ((tevp->te_evt_ref) &&
+ (tevp->te_msg.ev_token != DM_INVALID_TOKEN)) {
+
+ tevp->te_flags |= DM_TEF_FINAL;
+ dm_unlink_event(tevp, queue);
+ if (tevp->te_flags & DM_TEF_HASHED)
+ unhash_event(s, tevp);
+ sv_broadcast(&tevp->te_evt_queue);
+ nested_spinunlock(&tevp->te_lock);
+ dm_put_tevp(tevp, NULL);
+ continue;
+ }
+ }
+ nested_spinunlock(&tevp->te_lock);
+ }
+
+ return(found_events);
+}
+
+
+/* If inode is non-null then find any threads that have a reference to that
+ * inode and flush them with the specified errno.
+ * Otherwise, if inode is null, then find any threads that have a reference
+ * to that sb and flush them with the specified errno.
+ * We look for these threads in each session's sn_evt_writerq, sn_newq,
+ * and sn_delq.
+ */
+int
+dm_release_threads(
+ struct super_block *sb,
+ struct inode *inode, /* may be null */
+ int errno)
+{
+ dm_sessid_t sid;
+ dm_session_t *s;
+ unsigned long lc;
+ u_int sesscnt;
+ dm_sessid_t *sidlist;
+ int i;
+ int found_events = 0;
+ dm_fsid_t fsid;
+ struct filesystem_dmapi_operations *dops;
+
+ ASSERT(sb);
+ dops = dm_fsys_ops(sb);
+ ASSERT(dops);
+ dops->get_fsid(sb, &fsid);
+ dm_release_disp_threads(&fsid, inode, errno);
+
+ /* Loop until we can get the right amount of temp space, being careful
+ not to hold a mutex during the allocation. Usually only one trip.
+ */
+
+ for (;;) {
+ lc = mutex_spinlock(&dm_session_lock);
+ sesscnt = dm_sessions_active;
+ mutex_spinunlock(&dm_session_lock, lc);
+
+ if (sesscnt == 0)
+ return 0;
+
+		sidlist = kmalloc(sesscnt * sizeof(sid), GFP_KERNEL);
+		if (sidlist == NULL) {
+			printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+			return -ENOMEM;
+		}
+
+ lc = mutex_spinlock(&dm_session_lock);
+ if (sesscnt == dm_sessions_active)
+ break;
+
+ mutex_spinunlock(&dm_session_lock, lc);
+ kfree(sidlist);
+ }
+
+ for (i = 0, s = dm_sessions; i < sesscnt; i++, s = s->sn_next)
+ sidlist[i] = s->sn_sessid;
+
+ mutex_spinunlock(&dm_session_lock, lc);
+
+ for (i = 0; i < sesscnt; i++) {
+ sid = sidlist[i];
+ if( dm_find_session_and_lock( sid, &s, &lc ) == 0 ){
+ found_events = dm_flush_events( s, &fsid, inode,
+ &s->sn_evt_writerq, 1,
+ errno );
+ if (found_events)
+ sv_broadcast(&s->sn_writerq);
+
+ dm_flush_events(s, &fsid, inode, &s->sn_newq, 0, errno);
+ dm_flush_events(s, &fsid, inode, &s->sn_delq, 0, errno);
+
+ mutex_spinunlock( &s->sn_qlock, lc );
+ }
+ }
+ kfree(sidlist);
+
+ return 0;
+}
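+
+/* Illustrative sketch of the expected callers (assumed, not defined in
+   this file): a filesystem built with HAVE_DM_QUEUE_FLUSH would call
+
+	dm_release_threads(sb, NULL, -EIO);	flush every event for this fs
+	dm_release_threads(sb, ip, -EIO);	flush events for one inode
+
+   on forced unmount or inode invalidation, waking any threads blocked
+   on those events with the given negative errno.
+*/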
+#endif /* HAVE_DM_QUEUE_FLUSH */
--- /dev/null
+++ b/fs/dmapi/dmapi_sysent.c
@@ -0,0 +1,801 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/* Data Migration API (DMAPI)
+ */
+
+
+/* We're using MISC_MAJOR / MISC_DYNAMIC_MINOR. */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+
+#include <asm/uaccess.h>
+
+#include "dmapi.h"
+#include "dmapi_kern.h"
+#include "dmapi_private.h"
+
+struct kmem_cache *dm_fsreg_cachep = NULL;
+struct kmem_cache *dm_tokdata_cachep = NULL;
+struct kmem_cache *dm_session_cachep = NULL;
+struct kmem_cache *dm_fsys_map_cachep = NULL;
+struct kmem_cache *dm_fsys_vptr_cachep = NULL;
+
+static int
+dmapi_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ sys_dmapi_args_t kargs;
+ sys_dmapi_args_t *uap = &kargs;
+ int error = 0;
+ int rvp = -ENOSYS;
+ int use_rvp = 0;
+
+ if (!capable(CAP_MKNOD))
+ return -EPERM;
+
+ if( copy_from_user( &kargs, (sys_dmapi_args_t __user *)arg,
+ sizeof(sys_dmapi_args_t) ) )
+ return -EFAULT;
+
+ unlock_kernel();
+
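+	/* Every DMAPI call funnels through this ioctl. The argument block
+	   is packed by user space (the DMAPI library); DM_Uarg(uap,n)
+	   extracts argument n as a scalar and DM_Parg(uap,n) extracts it
+	   as a user-space pointer. The *_rvp() variants additionally hand
+	   a result back through 'rvp', which is returned to the caller on
+	   success.
+	*/
+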
+ switch (cmd) {
+ case DM_CLEAR_INHERIT:
+ error = dm_clear_inherit(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrname_t __user *) DM_Parg(uap,5));/* attrnamep */
+ break;
+ case DM_CREATE_BY_HANDLE:
+ error = dm_create_by_handle(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* dirhanp */
+ (size_t) DM_Uarg(uap,3), /* dirhlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (void __user *) DM_Parg(uap,5), /* hanp */
+ (size_t) DM_Uarg(uap,6), /* hlen */
+ (char __user *) DM_Parg(uap,7));/* cname */
+ break;
+ case DM_CREATE_SESSION:
+ error = dm_create_session(
+ (dm_sessid_t) DM_Uarg(uap,1), /* oldsid */
+ (char __user *) DM_Parg(uap,2), /* sessinfop */
+ (dm_sessid_t __user *) DM_Parg(uap,3));/* newsidp */
+ break;
+ case DM_CREATE_USEREVENT:
+ error = dm_create_userevent(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (size_t) DM_Uarg(uap,2), /* msglen */
+ (void __user *) DM_Parg(uap,3), /* msgdatap */
+ (dm_token_t __user *) DM_Parg(uap,4));/* tokenp */
+ break;
+ case DM_DESTROY_SESSION:
+ error = dm_destroy_session(
+ (dm_sessid_t) DM_Uarg(uap,1));/* sid */
+ break;
+ case DM_DOWNGRADE_RIGHT:
+ error = dm_downgrade_right(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4));/* token */
+ break;
+ case DM_FD_TO_HANDLE:
+ error = dm_fd_to_hdl(
+ (int) DM_Uarg(uap,1), /* fd */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t __user *) DM_Parg(uap,3));/* hlenp */
+ break;
+ case DM_FIND_EVENTMSG:
+ error = dm_find_eventmsg(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (size_t) DM_Uarg(uap,3), /* buflen */
+ (void __user *) DM_Parg(uap,4), /* bufp */
+ (size_t __user *) DM_Parg(uap,5));/* rlenp */
+ break;
+ case DM_GET_ALLOCINFO:
+ use_rvp = 1;
+ error = dm_get_allocinfo_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_off_t __user *) DM_Parg(uap,5), /* offp */
+ (u_int) DM_Uarg(uap,6), /* nelem */
+ (dm_extent_t __user *) DM_Parg(uap,7), /* extentp */
+ (u_int __user *) DM_Parg(uap,8), /* nelemp */
+ &rvp);
+ break;
+ case DM_GET_BULKALL:
+ use_rvp = 1;
+ error = dm_get_bulkall_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* mask */
+ (dm_attrname_t __user *) DM_Parg(uap,6),/* attrnamep */
+ (dm_attrloc_t __user *) DM_Parg(uap,7),/* locp */
+ (size_t) DM_Uarg(uap,8), /* buflen */
+ (void __user *) DM_Parg(uap,9), /* bufp */
+ (size_t __user *) DM_Parg(uap,10),/* rlenp */
+ &rvp);
+ break;
+ case DM_GET_BULKATTR:
+ use_rvp = 1;
+ error = dm_get_bulkattr_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* mask */
+ (dm_attrloc_t __user *)DM_Parg(uap,6), /* locp */
+ (size_t) DM_Uarg(uap,7), /* buflen */
+ (void __user *) DM_Parg(uap,8), /* bufp */
+ (size_t __user *) DM_Parg(uap,9), /* rlenp */
+ &rvp);
+ break;
+ case DM_GET_CONFIG:
+ error = dm_get_config(
+ (void __user *) DM_Parg(uap,1), /* hanp */
+ (size_t) DM_Uarg(uap,2), /* hlen */
+ (dm_config_t) DM_Uarg(uap,3), /* flagname */
+ (dm_size_t __user *)DM_Parg(uap,4));/* retvalp */
+ break;
+ case DM_GET_CONFIG_EVENTS:
+ error = dm_get_config_events(
+ (void __user *) DM_Parg(uap,1), /* hanp */
+ (size_t) DM_Uarg(uap,2), /* hlen */
+ (u_int) DM_Uarg(uap,3), /* nelem */
+ (dm_eventset_t __user *) DM_Parg(uap,4),/* eventsetp */
+ (u_int __user *) DM_Parg(uap,5));/* nelemp */
+ break;
+ case DM_GET_DIOINFO:
+ error = dm_get_dioinfo(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_dioinfo_t __user *)DM_Parg(uap,5));/* diop */
+ break;
+ case DM_GET_DIRATTRS:
+ use_rvp = 1;
+ error = dm_get_dirattrs_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* mask */
+ (dm_attrloc_t __user *)DM_Parg(uap,6), /* locp */
+ (size_t) DM_Uarg(uap,7), /* buflen */
+ (void __user *) DM_Parg(uap,8), /* bufp */
+ (size_t __user *) DM_Parg(uap,9), /* rlenp */
+ &rvp);
+ break;
+ case DM_GET_DMATTR:
+ error = dm_get_dmattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrname_t __user *) DM_Parg(uap,5),/* attrnamep */
+ (size_t) DM_Uarg(uap,6), /* buflen */
+ (void __user *) DM_Parg(uap,7), /* bufp */
+ (size_t __user *) DM_Parg(uap,8));/* rlenp */
+
+ break;
+ case DM_GET_EVENTLIST:
+ error = dm_get_eventlist(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* nelem */
+ (dm_eventset_t __user *) DM_Parg(uap,6),/* eventsetp */
+ (u_int __user *) DM_Parg(uap,7));/* nelemp */
+ break;
+ case DM_GET_EVENTS:
+ error = dm_get_events(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (u_int) DM_Uarg(uap,2), /* maxmsgs */
+ (u_int) DM_Uarg(uap,3), /* flags */
+ (size_t) DM_Uarg(uap,4), /* buflen */
+ (void __user *) DM_Parg(uap,5), /* bufp */
+ (size_t __user *) DM_Parg(uap,6));/* rlenp */
+ break;
+ case DM_GET_FILEATTR:
+ error = dm_get_fileattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* mask */
+ (dm_stat_t __user *) DM_Parg(uap,6));/* statp */
+ break;
+ case DM_GET_MOUNTINFO:
+ error = dm_get_mountinfo(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (size_t) DM_Uarg(uap,5), /* buflen */
+ (void __user *) DM_Parg(uap,6), /* bufp */
+ (size_t __user *) DM_Parg(uap,7));/* rlenp */
+ break;
+ case DM_GET_REGION:
+ error = dm_get_region(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* nelem */
+ (dm_region_t __user *) DM_Parg(uap,6), /* regbufp */
+ (u_int __user *) DM_Parg(uap,7));/* nelemp */
+ break;
+ case DM_GETALL_DISP:
+ error = dm_getall_disp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (size_t) DM_Uarg(uap,2), /* buflen */
+ (void __user *) DM_Parg(uap,3), /* bufp */
+ (size_t __user *) DM_Parg(uap,4));/* rlenp */
+ break;
+ case DM_GETALL_DMATTR:
+ error = dm_getall_dmattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (size_t) DM_Uarg(uap,5), /* buflen */
+ (void __user *) DM_Parg(uap,6), /* bufp */
+ (size_t __user *) DM_Parg(uap,7));/* rlenp */
+ break;
+ case DM_GETALL_INHERIT:
+ error = dm_getall_inherit(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* nelem */
+ (dm_inherit_t __user *)DM_Parg(uap,6), /* inheritbufp*/
+ (u_int __user *) DM_Parg(uap,7));/* nelemp */
+ break;
+ case DM_GETALL_SESSIONS:
+ error = dm_getall_sessions(
+ (u_int) DM_Uarg(uap,1), /* nelem */
+ (dm_sessid_t __user *) DM_Parg(uap,2), /* sidbufp */
+ (u_int __user *) DM_Parg(uap,3));/* nelemp */
+ break;
+ case DM_GETALL_TOKENS:
+ error = dm_getall_tokens(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (u_int) DM_Uarg(uap,2), /* nelem */
+ (dm_token_t __user *) DM_Parg(uap,3), /* tokenbufp */
+ (u_int __user *) DM_Parg(uap,4));/* nelemp */
+ break;
+ case DM_INIT_ATTRLOC:
+ error = dm_init_attrloc(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrloc_t __user *) DM_Parg(uap,5));/* locp */
+ break;
+ case DM_MKDIR_BY_HANDLE:
+ error = dm_mkdir_by_handle(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* dirhanp */
+ (size_t) DM_Uarg(uap,3), /* dirhlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (void __user *) DM_Parg(uap,5), /* hanp */
+ (size_t) DM_Uarg(uap,6), /* hlen */
+ (char __user *) DM_Parg(uap,7));/* cname */
+ break;
+ case DM_MOVE_EVENT:
+ error = dm_move_event(
+ (dm_sessid_t) DM_Uarg(uap,1), /* srcsid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (dm_sessid_t) DM_Uarg(uap,3), /* targetsid */
+ (dm_token_t __user *) DM_Parg(uap,4));/* rtokenp */
+ break;
+ case DM_OBJ_REF_HOLD:
+ error = dm_obj_ref_hold(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (void __user *) DM_Parg(uap,3), /* hanp */
+ (size_t) DM_Uarg(uap,4));/* hlen */
+ break;
+ case DM_OBJ_REF_QUERY:
+ use_rvp = 1;
+ error = dm_obj_ref_query_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (void __user *) DM_Parg(uap,3), /* hanp */
+ (size_t) DM_Uarg(uap,4), /* hlen */
+ &rvp);
+ break;
+ case DM_OBJ_REF_RELE:
+ error = dm_obj_ref_rele(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (void __user *) DM_Parg(uap,3), /* hanp */
+ (size_t) DM_Uarg(uap,4));/* hlen */
+ break;
+ case DM_PATH_TO_FSHANDLE:
+ error = dm_path_to_fshdl(
+ (char __user *) DM_Parg(uap,1), /* path */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t __user *) DM_Parg(uap,3));/* hlenp */
+ break;
+ case DM_PATH_TO_HANDLE:
+ error = dm_path_to_hdl(
+ (char __user *) DM_Parg(uap,1), /* path */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t __user *) DM_Parg(uap,3));/* hlenp */
+ break;
+ case DM_PENDING:
+ error = dm_pending(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (dm_timestruct_t __user *) DM_Parg(uap,3));/* delay */
+ break;
+ case DM_PROBE_HOLE:
+ error = dm_probe_hole(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_off_t) DM_Uarg(uap,5), /* off */
+ (dm_size_t) DM_Uarg(uap,6), /* len */
+ (dm_off_t __user *) DM_Parg(uap,7), /* roffp */
+ (dm_size_t __user *) DM_Parg(uap,8));/* rlenp */
+ break;
+ case DM_PUNCH_HOLE:
+ error = dm_punch_hole(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_off_t) DM_Uarg(uap,5), /* off */
+ (dm_size_t) DM_Uarg(uap,6));/* len */
+ break;
+ case DM_QUERY_RIGHT:
+ error = dm_query_right(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_right_t __user *) DM_Parg(uap,5));/* rightp */
+ break;
+ case DM_QUERY_SESSION:
+ error = dm_query_session(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (size_t) DM_Uarg(uap,2), /* buflen */
+ (void __user *) DM_Parg(uap,3), /* bufp */
+ (size_t __user *) DM_Parg(uap,4));/* rlenp */
+ break;
+ case DM_READ_INVIS:
+ use_rvp = 1;
+ error = dm_read_invis_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_off_t) DM_Uarg(uap,5), /* off */
+ (dm_size_t) DM_Uarg(uap,6), /* len */
+ (void __user *) DM_Parg(uap,7), /* bufp */
+ &rvp);
+ break;
+ case DM_RELEASE_RIGHT:
+ error = dm_release_right(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4));/* token */
+ break;
+ case DM_REMOVE_DMATTR:
+ error = dm_remove_dmattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (int) DM_Uarg(uap,5), /* setdtime */
+ (dm_attrname_t __user *) DM_Parg(uap,6));/* attrnamep */
+ break;
+ case DM_REQUEST_RIGHT:
+ error = dm_request_right(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* flags */
+ (dm_right_t) DM_Uarg(uap,6));/* right */
+ break;
+ case DM_RESPOND_EVENT:
+ error = dm_respond_event(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (dm_response_t) DM_Uarg(uap,3), /* response */
+ (int) DM_Uarg(uap,4), /* reterror */
+ (size_t) DM_Uarg(uap,5), /* buflen */
+ (void __user *) DM_Parg(uap,6));/* respbufp */
+ break;
+ case DM_SEND_MSG:
+ error = dm_send_msg(
+ (dm_sessid_t) DM_Uarg(uap,1), /* targetsid */
+ (dm_msgtype_t) DM_Uarg(uap,2), /* msgtype */
+ (size_t) DM_Uarg(uap,3), /* buflen */
+ (void __user *) DM_Parg(uap,4));/* bufp */
+ break;
+ case DM_SET_DISP:
+ error = dm_set_disp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_eventset_t __user *) DM_Parg(uap,5),/* eventsetp */
+ (u_int) DM_Uarg(uap,6));/* maxevent */
+ break;
+ case DM_SET_DMATTR:
+ error = dm_set_dmattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrname_t __user *) DM_Parg(uap,5),/* attrnamep */
+ (int) DM_Uarg(uap,6), /* setdtime */
+ (size_t) DM_Uarg(uap,7), /* buflen */
+ (void __user *) DM_Parg(uap,8));/* bufp */
+ break;
+ case DM_SET_EVENTLIST:
+ error = dm_set_eventlist(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_eventset_t __user *) DM_Parg(uap,5),/* eventsetp */
+ (u_int) DM_Uarg(uap,6));/* maxevent */
+ break;
+ case DM_SET_FILEATTR:
+ error = dm_set_fileattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* mask */
+ (dm_fileattr_t __user *)DM_Parg(uap,6));/* attrp */
+ break;
+ case DM_SET_INHERIT:
+ error = dm_set_inherit(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrname_t __user *)DM_Parg(uap,5),/* attrnamep */
+ (mode_t) DM_Uarg(uap,6));/* mode */
+ break;
+ case DM_SET_REGION:
+ error = dm_set_region(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* nelem */
+ (dm_region_t __user *) DM_Parg(uap,6), /* regbufp */
+ (dm_boolean_t __user *) DM_Parg(uap,7));/* exactflagp */
+ break;
+ case DM_SET_RETURN_ON_DESTROY:
+ error = dm_set_return_on_destroy(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrname_t __user *) DM_Parg(uap,5),/* attrnamep */
+ (dm_boolean_t) DM_Uarg(uap,6));/* enable */
+ break;
+ case DM_SYMLINK_BY_HANDLE:
+ error = dm_symlink_by_handle(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* dirhanp */
+ (size_t) DM_Uarg(uap,3), /* dirhlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (void __user *) DM_Parg(uap,5), /* hanp */
+ (size_t) DM_Uarg(uap,6), /* hlen */
+ (char __user *) DM_Parg(uap,7), /* cname */
+ (char __user *) DM_Parg(uap,8));/* path */
+ break;
+ case DM_SYNC_BY_HANDLE:
+ error = dm_sync_by_handle(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4));/* token */
+ break;
+ case DM_UPGRADE_RIGHT:
+ error = dm_upgrade_right(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4));/* token */
+ break;
+ case DM_WRITE_INVIS:
+ use_rvp = 1;
+ error = dm_write_invis_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (int) DM_Uarg(uap,5), /* flags */
+ (dm_off_t) DM_Uarg(uap,6), /* off */
+ (dm_size_t) DM_Uarg(uap,7), /* len */
+ (void __user *) DM_Parg(uap,8), /* bufp */
+ &rvp);
+ break;
+ case DM_OPEN_BY_HANDLE:
+ use_rvp = 1;
+ error = dm_open_by_handle_rvp(
+ (unsigned int) DM_Uarg(uap,1), /* fd */
+ (void __user *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (int) DM_Uarg(uap,4), /* flags */
+ &rvp);
+ break;
+ default:
+ error = -ENOSYS;
+ break;
+ }
+
+ lock_kernel();
+
+	/* For the *_rvp() calls, success (error == 0) means the result
+	   to hand back to the caller is the value left in 'rvp'.
+	*/
+ if( use_rvp && (error == 0) )
+ return rvp;
+ else
+ return error;
+}
+
+
+
+static int
+dmapi_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+
+static int
+dmapi_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+
+/* say hello, and let me know the device is hooked up */
+static ssize_t
+dmapi_dump(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ char tmp[50];
+ int len;
+ if( *ppos == 0 ){
+ len = sprintf( tmp, "# " DM_VER_STR_CONTENTS "\n" );
+ if( copy_to_user(buf, tmp, len) )
+ return -EFAULT;
+ *ppos += 1;
+ return len;
+ }
+ return 0;
+}
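+
+/* The read side doubles as a liveness probe: the first read returns a
+   single "# <version>" banner line and later reads return EOF, so e.g.
+   "cat /dev/dmapi" (illustrative) prints one line and exits.
+*/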
+
+static struct file_operations dmapi_fops = {
+ .open = dmapi_open,
+ .ioctl = dmapi_ioctl,
+ .read = dmapi_dump,
+ .release = dmapi_release
+};
+
+static struct miscdevice dmapi_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dmapi",
+ .fops = &dmapi_fops
+};
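+
+/* Registering as a misc device allocates a dynamic minor under
+   MISC_MAJOR; the node conventionally appears as /dev/dmapi (created by
+   udev from the name above -- the path itself is not enforced here).
+*/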
+
+
+
+#ifdef CONFIG_PROC_FS
+static int
+dmapi_summary(char *buffer, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int len;
+
+ extern u_int dm_sessions_active;
+ extern dm_sessid_t dm_next_sessid;
+ extern dm_token_t dm_next_token;
+ extern dm_sequence_t dm_next_sequence;
+ extern int dm_fsys_cnt;
+
+#define CHKFULL if(len >= count) break;
+#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL;
+
+ len=0;
+ while(1){
+ ADDBUF("dm_sessions_active=%u\n", dm_sessions_active);
+ ADDBUF("dm_next_sessid=%d\n", (int)dm_next_sessid);
+ ADDBUF("dm_next_token=%d\n", (int)dm_next_token);
+ ADDBUF("dm_next_sequence=%u\n", (u_int)dm_next_sequence);
+ ADDBUF("dm_fsys_cnt=%d\n", dm_fsys_cnt);
+
+ break;
+ }
+
+ if (offset >= len) {
+ *start = buffer;
+ *eof = 1;
+ return 0;
+ }
+ *start = buffer + offset;
+ if ((len -= offset) > count)
+ return count;
+ *eof = 1;
+
+ return len;
+}
+#endif
+
+
+static void __init
+dmapi_init_procfs(int dmapi_minor)
+{
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *entry;
+
+ if ((entry = proc_mkdir( DMAPI_DBG_PROCFS, NULL)) == NULL )
+ return;
+ entry->mode = S_IFDIR | S_IRUSR | S_IXUSR;
+
+ if ((entry = proc_mkdir( DMAPI_DBG_PROCFS "/fsreg", NULL)) == NULL )
+ return;
+
+ if ((entry = proc_mkdir( DMAPI_DBG_PROCFS "/sessions", NULL)) == NULL )
+ return;
+
+ entry = create_proc_read_entry( DMAPI_DBG_PROCFS "/summary",
+ 0, NULL, dmapi_summary, NULL);
+#endif
+}
+
+#if 0
+static void __exit
+dmapi_cleanup_procfs(void)
+{
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry( DMAPI_DBG_PROCFS "/summary", NULL);
+ remove_proc_entry( DMAPI_DBG_PROCFS "/fsreg", NULL);
+ remove_proc_entry( DMAPI_DBG_PROCFS "/sessions", NULL);
+ remove_proc_entry( DMAPI_DBG_PROCFS, NULL);
+#endif
+}
+#endif
+
+int __init dmapi_init(void)
+{
+ int ret;
+
+ dm_tokdata_cachep = kmem_cache_create("dm_tokdata",
+ sizeof(struct dm_tokdata), 0, 0, NULL);
+ if (dm_tokdata_cachep == NULL)
+ goto out;
+
+ dm_fsreg_cachep = kmem_cache_create("dm_fsreg",
+ sizeof(struct dm_fsreg), 0, 0, NULL);
+ if (dm_fsreg_cachep == NULL)
+ goto out_free_tokdata_cachep;
+
+ dm_session_cachep = kmem_cache_create("dm_session",
+ sizeof(struct dm_session), 0, 0, NULL);
+ if (dm_session_cachep == NULL)
+ goto out_free_fsreg_cachep;
+
+ dm_fsys_map_cachep = kmem_cache_create("dm_fsys_map",
+ sizeof(dm_vector_map_t), 0, 0, NULL);
+ if (dm_fsys_map_cachep == NULL)
+ goto out_free_session_cachep;
+ dm_fsys_vptr_cachep = kmem_cache_create("dm_fsys_vptr",
+ sizeof(dm_fsys_vector_t), 0, 0, NULL);
+ if (dm_fsys_vptr_cachep == NULL)
+ goto out_free_fsys_map_cachep;
+
+ ret = misc_register(&dmapi_dev);
+ if (ret) {
+ printk(KERN_ERR "dmapi_init: misc_register returned %d\n", ret);
+ goto out_free_fsys_vptr_cachep;
+ }
+
+ dmapi_init_procfs(dmapi_dev.minor);
+ return 0;
+
+ out_free_fsys_vptr_cachep:
+ kmem_cache_destroy(dm_fsys_vptr_cachep);
+ out_free_fsys_map_cachep:
+ kmem_cache_destroy(dm_fsys_map_cachep);
+ out_free_session_cachep:
+ kmem_cache_destroy(dm_session_cachep);
+ out_free_fsreg_cachep:
+ kmem_cache_destroy(dm_fsreg_cachep);
+ out_free_tokdata_cachep:
+ kmem_cache_destroy(dm_tokdata_cachep);
+ out:
+ return -ENOMEM;
+}
+
+#if 0
+void __exit dmapi_uninit(void)
+{
+ misc_deregister(&dmapi_dev);
+ dmapi_cleanup_procfs();
+ kmem_cache_destroy(dm_tokdata_cachep);
+ kmem_cache_destroy(dm_fsreg_cachep);
+ kmem_cache_destroy(dm_session_cachep);
+ kmem_cache_destroy(dm_fsys_map_cachep);
+ kmem_cache_destroy(dm_fsys_vptr_cachep);
+}
+#endif
+
+module_init(dmapi_init);
+/*module_exit(dmapi_uninit);*/ /* Some other day */
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION("SGI Data Migration Subsystem");
+MODULE_LICENSE("GPL");
+
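+/* Entry points for filesystem-side DMAPI support (e.g. XFS). */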
+EXPORT_SYMBOL(dm_send_mount_event);
+EXPORT_SYMBOL(dm_send_namesp_event);
+EXPORT_SYMBOL(dm_send_unmount_event);
+EXPORT_SYMBOL(dm_send_data_event);
+EXPORT_SYMBOL(dm_send_destroy_event);
+EXPORT_SYMBOL(dm_ip_to_handle);
+EXPORT_SYMBOL(dmapi_register);
+EXPORT_SYMBOL(dmapi_unregister);
+EXPORT_SYMBOL(dmapi_registered);
+EXPORT_SYMBOL(dm_release_threads);
--- /dev/null
+++ b/fs/dmapi/sv.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __DMAPI_SV_H__
+#define __DMAPI_SV_H__
+
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+/*
+ * Synchronisation variables, emulating the IRIX sv_t interface on
+ * top of Linux wait queues.  The "pri", "svf" and "rts" parameters
+ * of the original interface are accepted but not implemented.
+ */
+
+typedef struct sv_s {
+ wait_queue_head_t waiters;
+} sv_t;
+
+#define SV_FIFO 0x0 /* sv_t is FIFO type */
+#define SV_LIFO 0x2 /* sv_t is LIFO type */
+#define SV_PRIO 0x4 /* sv_t is PRIO type */
+#define SV_KEYED 0x6 /* sv_t is KEYED type */
+#define SV_DEFAULT SV_FIFO
+
+
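+/*
+ * Enqueue the caller exclusively on the sv, drop "lock" and sleep for
+ * at most "timeout" jiffies.  "lock" is NOT re-acquired on return;
+ * callers must re-take it before re-checking their wakeup condition.
+ */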
+static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
+ unsigned long timeout)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue_exclusive(&sv->waiters, &wait);
+ __set_current_state(state);
+ spin_unlock(lock);
+
+ schedule_timeout(timeout);
+
+ remove_wait_queue(&sv->waiters, &wait);
+}
+
+#define init_sv(sv,type,name,flag) \
+ init_waitqueue_head(&(sv)->waiters)
+#define sv_init(sv,flag,name) \
+ init_waitqueue_head(&(sv)->waiters)
+#define sv_destroy(sv) \
+ /*NOTHING*/
+#define sv_wait(sv, pri, lock, s) \
+ _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
+#define sv_wait_sig(sv, pri, lock, s) \
+ _sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
+#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \
+ _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts))
+#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \
+ _sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts))
+#define sv_signal(sv) \
+ wake_up(&(sv)->waiters)
+#define sv_broadcast(sv) \
+ wake_up_all(&(sv)->waiters)
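+
+/*
+ * Typical usage, sketched with illustrative names ("lk", "sv" and
+ * "done" are hypothetical caller-side variables):
+ *
+ *	spin_lock(&lk);
+ *	while (!done) {
+ *		sv_wait(&sv, 0, &lk, 0);	-- returns with lk dropped
+ *		spin_lock(&lk);			-- re-take before re-checking
+ *	}
+ *	spin_unlock(&lk);
+ *
+ * A waker holds the same lock, sets the condition, then calls
+ * sv_signal() or sv_broadcast().
+ */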
+
+#endif /* __DMAPI_SV_H__ */