From c74209a6da34b983b165949f454da68b4740aadd Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 24 May 2012 02:07:04 +0200 Subject: [PATCH 01/56] linux 3.4 base files Checksums, forward-port of netfront-in-dom0 patch, adopted config --- config-pvops | 526 +++++++++++------- linux-3.4.tar.bz2.sha1sum | 1 + linux-3.4.tar.sign | 11 + .../pvops-3.4-enable-netfront-in-dom0.patch | 24 + rel-pvops | 2 +- series-pvops.conf | 13 +- version-pvops | 2 +- 7 files changed, 373 insertions(+), 206 deletions(-) create mode 100644 linux-3.4.tar.bz2.sha1sum create mode 100644 linux-3.4.tar.sign create mode 100644 patches.xen/pvops-3.4-enable-netfront-in-dom0.patch diff --git a/config-pvops b/config-pvops index 566f0a8..dc71d06 100644 --- a/config-pvops +++ b/config-pvops @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86_64 3.2.7 Kernel Configuration +# Linux/x86_64 3.4.0 Kernel Configuration # CONFIG_64BIT=y # CONFIG_X86_32 is not set @@ -18,11 +18,9 @@ CONFIG_LOCKDEP_SUPPORT=y CONFIG_STACKTRACE_SUPPORT=y CONFIG_HAVE_LATENCYTOP_SUPPORT=y CONFIG_MMU=y -CONFIG_ZONE_DMA=y CONFIG_NEED_DMA_MAP_STATE=y CONFIG_NEED_SG_DMA_LENGTH=y CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_IOMAP=y CONFIG_GENERIC_BUG=y CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y CONFIG_GENERIC_HWEIGHT=y @@ -36,13 +34,13 @@ CONFIG_GENERIC_TIME_VSYSCALL=y CONFIG_ARCH_HAS_CPU_RELAX=y CONFIG_ARCH_HAS_DEFAULT_IDLE=y CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CPU_AUTOPROBE=y CONFIG_HAVE_SETUP_PER_CPU_AREA=y CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_ZONE_DMA32=y -CONFIG_ARCH_POPULATES_NODE_MAP=y CONFIG_AUDIT_ARCH=y CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y @@ -62,7 +60,7 @@ CONFIG_IRQ_WORK=y CONFIG_EXPERIMENTAL=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_CROSS_COMPILE="" -CONFIG_LOCALVERSION="-3.pvops.qubes.x86_64" 
+CONFIG_LOCALVERSION="-1.pvops.qubes.x86_64" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_HAVE_KERNEL_GZIP=y CONFIG_HAVE_KERNEL_BZIP2=y @@ -91,13 +89,13 @@ CONFIG_AUDIT=y CONFIG_AUDITSYSCALL=y CONFIG_AUDIT_WATCH=y CONFIG_AUDIT_TREE=y +CONFIG_AUDIT_LOGINUID_IMMUTABLE=y CONFIG_HAVE_GENERIC_HARDIRQS=y # # IRQ subsystem # CONFIG_GENERIC_HARDIRQS=y -CONFIG_HAVE_SPARSE_IRQ=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_IRQ_SHOW=y CONFIG_GENERIC_PENDING_IRQ=y @@ -110,7 +108,6 @@ CONFIG_SPARSE_IRQ=y # CONFIG_TREE_RCU=y # CONFIG_PREEMPT_RCU is not set -# CONFIG_RCU_TRACE is not set CONFIG_RCU_FANOUT=64 # CONFIG_RCU_FANOUT_EXACT is not set CONFIG_RCU_FAST_NO_HZ=y @@ -129,6 +126,7 @@ CONFIG_CGROUP_CPUACCT=y CONFIG_RESOURCE_COUNTERS=y CONFIG_CGROUP_MEM_RES_CTLR=y # CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not set +CONFIG_CGROUP_MEM_RES_CTLR_KMEM=y # CONFIG_CGROUP_PERF is not set CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y @@ -136,6 +134,7 @@ CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y CONFIG_BLK_CGROUP=y # CONFIG_DEBUG_BLK_CGROUP is not set +# CONFIG_CHECKPOINT_RESTORE is not set CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y @@ -189,11 +188,13 @@ CONFIG_PCI_QUIRKS=y # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y # CONFIG_SLUB is not set +# CONFIG_SLOB is not set CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y CONFIG_OPROFILE=m # CONFIG_OPROFILE_EVENT_MULTIPLEX is not set CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y CONFIG_KPROBES=y # CONFIG_JUMP_LABEL is not set CONFIG_OPTPROBES=y @@ -214,6 +215,9 @@ CONFIG_HAVE_USER_RETURN_NOTIFIER=y CONFIG_HAVE_PERF_EVENTS_NMI=y CONFIG_HAVE_ARCH_JUMP_LABEL=y CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y # # GCOV-based kernel profiling @@ -235,6 +239,29 @@ CONFIG_BLK_DEV_BSG=y CONFIG_BLK_DEV_BSGLIB=y CONFIG_BLK_DEV_INTEGRITY=y # CONFIG_BLK_DEV_THROTTLING is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set 
+CONFIG_OSF_PARTITION=y +# CONFIG_AMIGA_PARTITION is not set +CONFIG_ATARI_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +CONFIG_SYSV68_PARTITION=y CONFIG_BLOCK_COMPAT=y # @@ -255,7 +282,6 @@ CONFIG_PADATA=y # CONFIG_INLINE_SPIN_LOCK_BH is not set # CONFIG_INLINE_SPIN_LOCK_IRQ is not set # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set -CONFIG_INLINE_SPIN_UNLOCK=y # CONFIG_INLINE_SPIN_UNLOCK_BH is not set CONFIG_INLINE_SPIN_UNLOCK_IRQ=y # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set @@ -283,6 +309,7 @@ CONFIG_FREEZER=y # # Processor type and features # +CONFIG_ZONE_DMA=y CONFIG_TICK_ONESHOT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y @@ -301,7 +328,7 @@ CONFIG_XEN=y CONFIG_XEN_DOM0=y CONFIG_XEN_PRIVILEGED_GUEST=y CONFIG_XEN_PVHVM=y -CONFIG_XEN_MAX_DOMAIN_MEMORY=128 +CONFIG_XEN_MAX_DOMAIN_MEMORY=500 CONFIG_XEN_SAVE_RESTORE=y # CONFIG_XEN_DEBUG_FS is not set # CONFIG_KVM_CLOCK is not set @@ -319,8 +346,6 @@ CONFIG_NO_BOOTMEM=y CONFIG_GENERIC_CPU=y CONFIG_X86_INTERNODE_CACHE_SHIFT=6 CONFIG_X86_CMPXCHG=y -CONFIG_CMPXCHG_LOCAL=y -CONFIG_CMPXCHG_DOUBLE=y CONFIG_X86_L1_CACHE_SHIFT=6 CONFIG_X86_XADD=y CONFIG_X86_WP_WORKS_OK=y @@ -359,7 +384,7 @@ CONFIG_X86_THERMAL_VECTOR=y CONFIG_I8K=m CONFIG_MICROCODE=m CONFIG_MICROCODE_INTEL=y -# CONFIG_MICROCODE_AMD is not set +CONFIG_MICROCODE_AMD=y CONFIG_MICROCODE_OLD_INTERFACE=y CONFIG_X86_MSR=y CONFIG_X86_CPUID=m @@ -382,6 +407,8 @@ CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y CONFIG_SPARSEMEM_VMEMMAP=y CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_SPARSE=y CONFIG_MEMORY_HOTREMOVE=y @@ -461,8 +488,8 @@ 
CONFIG_ACPI_BUTTON=m CONFIG_ACPI_VIDEO=m CONFIG_ACPI_FAN=m CONFIG_ACPI_DOCK=y -CONFIG_ACPI_PROCESSOR=m -# CONFIG_ACPI_IPMI is not set +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_PROCESSOR_AGGREGATOR=m CONFIG_ACPI_THERMAL=m @@ -473,11 +500,12 @@ CONFIG_ACPI_DEBUG=y # CONFIG_ACPI_DEBUG_FUNC_TRACE is not set CONFIG_ACPI_PCI_SLOT=m CONFIG_X86_PM_TIMER=y -CONFIG_ACPI_CONTAINER=m +CONFIG_ACPI_CONTAINER=y CONFIG_ACPI_HOTPLUG_MEMORY=m CONFIG_ACPI_SBS=m # CONFIG_ACPI_HED is not set # CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_BGRT=m # CONFIG_ACPI_APEI is not set CONFIG_SFI=y @@ -495,8 +523,8 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=m CONFIG_CPU_FREQ_GOV_USERSPACE=m -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m # # x86 CPU frequency scaling drivers @@ -538,10 +566,14 @@ CONFIG_PCIEAER=y CONFIG_PCIEAER_INJECT=m CONFIG_PCIEASPM=y # CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set CONFIG_PCIE_PME=y CONFIG_ARCH_SUPPORTS_MSI=y CONFIG_PCI_MSI=y # CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set CONFIG_PCI_STUB=y CONFIG_XEN_PCIDEV_FRONTEND=y CONFIG_HT_IRQ=y @@ -585,11 +617,13 @@ CONFIG_HOTPLUG_PCI_SHPC=m # CONFIG_BINFMT_ELF=y CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y # CONFIG_HAVE_AOUT is not set CONFIG_BINFMT_MISC=m CONFIG_IA32_EMULATION=y CONFIG_IA32_AOUT=m +# CONFIG_X86_X32 is not set CONFIG_COMPAT=y CONFIG_COMPAT_FOR_U64_ALIGNMENT=y CONFIG_SYSVIPC_COMPAT=y @@ -603,6 +637,7 @@ CONFIG_COMPAT_NETLINK_MESSAGES=y # CONFIG_PACKET=m CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m CONFIG_XFRM=y CONFIG_XFRM_USER=m CONFIG_XFRM_SUB_POLICY=y @@ -642,6 +677,7 @@ CONFIG_INET_XFRM_MODE_BEET=m CONFIG_INET_LRO=y CONFIG_INET_DIAG=m 
CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m CONFIG_TCP_CONG_ADVANCED=y CONFIG_TCP_CONG_BIC=m CONFIG_TCP_CONG_CUBIC=y @@ -692,13 +728,16 @@ CONFIG_BRIDGE_NETFILTER=y # Core Netfilter Configuration # CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_ACCT=m CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NETFILTER_NETLINK_LOG=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_MARK=y CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y # CONFIG_NF_CONNTRACK_TIMESTAMP is not set CONFIG_NF_CT_PROTO_DCCP=m CONFIG_NF_CT_PROTO_GRE=m @@ -716,6 +755,7 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m CONFIG_NETFILTER_TPROXY=m CONFIG_NETFILTER_XTABLES=m @@ -739,6 +779,7 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m CONFIG_NETFILTER_XT_TARGET_HL=m # CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m @@ -765,6 +806,7 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_DCCP=m CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m CONFIG_NETFILTER_XT_MATCH_HELPER=m @@ -776,6 +818,7 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m @@ -835,6 +878,11 @@ CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + # # IPVS application helper # @@ -852,10 +900,10 @@ CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m 
CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT=m CONFIG_NF_NAT_NEEDED=y @@ -897,9 +945,9 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m @@ -975,11 +1023,6 @@ CONFIG_NET_DSA=y CONFIG_NET_DSA_TAG_DSA=y CONFIG_NET_DSA_TAG_EDSA=y CONFIG_NET_DSA_TAG_TRAILER=y -CONFIG_NET_DSA_MV88E6XXX=y -CONFIG_NET_DSA_MV88E6060=y -CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y -CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123_61_65=y CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_DECNET=m @@ -1024,6 +1067,7 @@ CONFIG_NET_SCH_MQPRIO=m CONFIG_NET_SCH_CHOKE=m CONFIG_NET_SCH_QFQ=m CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m # # Classification @@ -1064,9 +1108,12 @@ CONFIG_DCB=y CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DEBUG=y +CONFIG_OPENVSWITCH=m CONFIG_RPS=y CONFIG_RFS_ACCEL=y CONFIG_XPS=y +CONFIG_NETPRIO_CGROUP=m +CONFIG_BQL=y CONFIG_HAVE_BPF_JIT=y CONFIG_BPF_JIT=y @@ -1112,19 +1159,26 @@ CONFIG_CAN_MCP251X=m CONFIG_CAN_JANZ_ICAN3=m # CONFIG_PCH_CAN is not set CONFIG_CAN_SJA1000=m +CONFIG_CAN_SJA1000_ISA=m CONFIG_CAN_SJA1000_PLATFORM=m CONFIG_CAN_EMS_PCMCIA=m CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_PEAK_PCMCIA=m CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y CONFIG_CAN_KVASER_PCI=m CONFIG_CAN_PLX_PCI=m # CONFIG_CAN_C_CAN is not set +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_ISA=m +CONFIG_CAN_CC770_PLATFORM=m # # CAN USB interfaces # CONFIG_CAN_EMS_USB=m # CONFIG_CAN_ESD_USB2 is not set +CONFIG_CAN_PEAK_USB=m # CONFIG_CAN_SOFTING is not set # CONFIG_CAN_DEBUG_DEVICES is not set CONFIG_IRDA=m @@ -1184,8 +1238,6 @@ CONFIG_VLSI_FIR=m CONFIG_VIA_FIR=m CONFIG_MCS_FIR=m CONFIG_BT=m 
-CONFIG_BT_L2CAP=y -CONFIG_BT_SCO=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m @@ -1266,6 +1318,7 @@ CONFIG_CEPH_LIB=m # CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set CONFIG_NFC=m CONFIG_NFC_NCI=m +# CONFIG_NFC_LLCP is not set # # Near Field Communication (NFC) devices @@ -1292,8 +1345,12 @@ CONFIG_EXTRA_FIRMWARE="" # CONFIG_DEBUG_DRIVER is not set # CONFIG_DEBUG_DEVRES is not set CONFIG_SYS_HYPERVISOR=y +# CONFIG_GENERIC_CPU_DEVICES is not set CONFIG_REGMAP=y CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_IRQ=y +CONFIG_DMA_SHARED_BUFFER=y CONFIG_CONNECTOR=y CONFIG_PROC_EVENTS=y CONFIG_MTD=m @@ -1405,6 +1462,8 @@ CONFIG_MTD_DOC2000=m CONFIG_MTD_DOC2001=m CONFIG_MTD_DOC2001PLUS=m CONFIG_MTD_DOCG3=m +CONFIG_BCH_CONST_M=14 +CONFIG_BCH_CONST_T=4 CONFIG_MTD_DOCPROBE=m CONFIG_MTD_DOCECC=m CONFIG_MTD_DOCPROBE_ADVANCED=y @@ -1426,6 +1485,7 @@ CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0 CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH=y CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y +CONFIG_MTD_NAND_DOCG4=m CONFIG_MTD_NAND_CAFE=m CONFIG_MTD_NAND_NANDSIM=m CONFIG_MTD_NAND_PLATFORM=m @@ -1495,6 +1555,7 @@ CONFIG_PARIDE_KBIC=m CONFIG_PARIDE_KTTI=m CONFIG_PARIDE_ON20=m CONFIG_PARIDE_ON26=m +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m CONFIG_BLK_CPQ_DA=m CONFIG_BLK_CPQ_CISS_DA=m CONFIG_CISS_SCSI_TAPE=y @@ -1507,6 +1568,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m # CONFIG_DRBD_FAULT_INJECTION is not set CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_NVME=m CONFIG_BLK_DEV_OSD=m CONFIG_BLK_DEV_SX8=m # CONFIG_BLK_DEV_UB is not set @@ -1523,8 +1585,11 @@ CONFIG_XEN_BLKDEV_BACKEND=m CONFIG_VIRTIO_BLK=m # CONFIG_BLK_DEV_HD is not set # CONFIG_BLK_DEV_RBD is not set + +# +# Misc devices +# CONFIG_SENSORS_LIS3LV02D=m -CONFIG_MISC_DEVICES=y CONFIG_AD525X_DPOT=m # CONFIG_AD525X_DPOT_I2C is not set # CONFIG_AD525X_DPOT_SPI is not set @@ -1671,6 +1736,7 @@ CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT2SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 # 
CONFIG_SCSI_MPT2SAS_LOGGING is not set +CONFIG_SCSI_UFSHCD=m CONFIG_SCSI_HPTIOP=m CONFIG_SCSI_BUSLOGIC=m # CONFIG_VMWARE_PVSCSI is not set @@ -1714,6 +1780,7 @@ CONFIG_SCSI_PMCRAID=m CONFIG_SCSI_PM8001=m CONFIG_SCSI_SRP=m CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_VIRTIO=m CONFIG_SCSI_LOWLEVEL_PCMCIA=y CONFIG_PCMCIA_AHA152X=m CONFIG_PCMCIA_FDOMAIN=m @@ -1855,6 +1922,7 @@ CONFIG_DM_MULTIPATH_ST=m CONFIG_DM_DELAY=m CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m # CONFIG_TARGET_CORE is not set CONFIG_FUSION=y CONFIG_FUSION_SPI=m @@ -1870,7 +1938,6 @@ CONFIG_FUSION_LAN=m # CONFIG_FIREWIRE=m CONFIG_FIREWIRE_OHCI=m -CONFIG_FIREWIRE_OHCI_DEBUG=y CONFIG_FIREWIRE_SBP2=m CONFIG_FIREWIRE_NET=m # CONFIG_FIREWIRE_NOSY is not set @@ -1884,7 +1951,8 @@ CONFIG_I2O_BUS=m CONFIG_I2O_BLOCK=m CONFIG_I2O_SCSI=m CONFIG_I2O_PROC=m -# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MAC_EMUMOUSEBTN=m CONFIG_NETDEVICES=y CONFIG_NET_CORE=y CONFIG_BONDING=m @@ -1895,6 +1963,9 @@ CONFIG_MII=y CONFIG_IEEE802154_DRIVERS=m CONFIG_IEEE802154_FAKEHARD=m CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_NETCONSOLE=m @@ -1957,6 +2028,15 @@ CONFIG_ATM_SOLOS=m # # CAIF transport drivers # + +# +# Distributed Switch Architecture drivers +# +CONFIG_NET_DSA_MV88E6XXX=y +CONFIG_NET_DSA_MV88E6060=y +CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y +CONFIG_NET_DSA_MV88E6131=y +CONFIG_NET_DSA_MV88E6123_61_65=y CONFIG_ETHERNET=y CONFIG_MDIO=m CONFIG_NET_VENDOR_3COM=y @@ -1989,6 +2069,7 @@ CONFIG_TIGON3=m CONFIG_BNX2X=m CONFIG_NET_VENDOR_BROCADE=y CONFIG_BNA=m +CONFIG_NET_CALXEDA_XGMAC=m CONFIG_NET_VENDOR_CHELSIO=y CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T1_1G=y @@ -2104,12 +2185,16 @@ CONFIG_SIS900=m CONFIG_SIS190=m CONFIG_SFC=m CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_SRIOV=y CONFIG_NET_VENDOR_SMSC=y CONFIG_PCMCIA_SMC91C92=m CONFIG_EPIC100=m CONFIG_SMSC9420=m CONFIG_NET_VENDOR_STMICRO=y 
CONFIG_STMMAC_ETH=m +CONFIG_STMMAC_PLATFORM=m +CONFIG_STMMAC_PCI=m # CONFIG_STMMAC_DEBUG_FS is not set # CONFIG_STMMAC_DA is not set CONFIG_STMMAC_RING=y @@ -2142,6 +2227,7 @@ CONFIG_PHYLIB=y # # MII PHY device drivers # +CONFIG_AMD_PHY=m CONFIG_MARVELL_PHY=m CONFIG_DAVICOM_PHY=m CONFIG_QSEMI_PHY=m @@ -2159,6 +2245,7 @@ CONFIG_MICREL_PHY=m CONFIG_FIXED_PHY=y CONFIG_MDIO_BITBANG=m CONFIG_MDIO_GPIO=m +CONFIG_MICREL_KS8995MA=m CONFIG_PLIP=m CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -2207,6 +2294,7 @@ CONFIG_USB_KC2190=y CONFIG_USB_NET_ZAURUS=m CONFIG_USB_NET_CX82310_ETH=m CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m CONFIG_USB_HSO=m CONFIG_USB_NET_INT51X1=m CONFIG_USB_CDC_PHONET=m @@ -2242,6 +2330,7 @@ CONFIG_ATH5K=m CONFIG_ATH5K_PCI=y CONFIG_ATH9K_HW=m CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_BTCOEX_SUPPORT=y CONFIG_ATH9K=m CONFIG_ATH9K_PCI=y CONFIG_ATH9K_AHB=y @@ -2255,6 +2344,8 @@ CONFIG_CARL9170_LEDS=y CONFIG_CARL9170_WPC=y CONFIG_CARL9170_HWRNG=y CONFIG_ATH6KL=m +CONFIG_ATH6KL_SDIO=m +CONFIG_ATH6KL_USB=m # CONFIG_ATH6KL_DEBUG is not set CONFIG_B43=m CONFIG_B43_SSB=y @@ -2280,10 +2371,7 @@ CONFIG_B43LEGACY_PIO=y CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y # CONFIG_B43LEGACY_DMA_MODE is not set # CONFIG_B43LEGACY_PIO_MODE is not set -CONFIG_BRCMUTIL=m -CONFIG_BRCMSMAC=m # CONFIG_BRCMFMAC is not set -# CONFIG_BRCMDBG is not set CONFIG_HOSTAP=m CONFIG_HOSTAP_FIRMWARE=y CONFIG_HOSTAP_FIRMWARE_NVRAM=y @@ -2310,17 +2398,18 @@ CONFIG_IWLWIFI_DEBUG=y CONFIG_IWLWIFI_DEBUGFS=y # CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE is not set # CONFIG_IWLWIFI_DEVICE_TRACING is not set -CONFIG_IWLWIFI_DEVICE_SVTOOL=y -CONFIG_IWLWIFI_LEGACY=m - -# -# Debugging Options -# -# CONFIG_IWLWIFI_LEGACY_DEBUG is not set -# CONFIG_IWLWIFI_LEGACY_DEBUGFS is not set -# CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING is not set +CONFIG_IWLWIFI_DEVICE_TESTMODE=y +# CONFIG_IWLWIFI_P2P is not set +# CONFIG_IWLWIFI_EXPERIMENTAL_MFP is not set +CONFIG_IWLEGACY=m CONFIG_IWL4965=m CONFIG_IWL3945=m + +# +# iwl3945 / iwl4965 
Debugging Options +# +# CONFIG_IWLEGACY_DEBUG is not set +# CONFIG_IWLEGACY_DEBUGFS is not set CONFIG_IWM=m # CONFIG_IWM_DEBUG is not set # CONFIG_IWM_TRACING is not set @@ -2372,6 +2461,7 @@ CONFIG_RTL8192SE=m CONFIG_RTL8192DE=m CONFIG_RTL8192CU=m CONFIG_RTLWIFI=m +# CONFIG_RTLWIFI_DEBUG is not set CONFIG_RTL8192C_COMMON=m CONFIG_WL1251=m CONFIG_WL1251_SPI=m @@ -2552,9 +2642,6 @@ CONFIG_MISDN_NETJET=m CONFIG_MISDN_IPAC=m CONFIG_MISDN_ISAR=m CONFIG_ISDN_HDLC=m -CONFIG_PHONE=m -CONFIG_PHONE_IXJ=m -CONFIG_PHONE_IXJ_PCMCIA=m # # Input device support @@ -2588,6 +2675,7 @@ CONFIG_KEYBOARD_QT2160=m CONFIG_KEYBOARD_GPIO=m CONFIG_KEYBOARD_GPIO_POLLED=m CONFIG_KEYBOARD_TCA6416=m +CONFIG_KEYBOARD_TCA8418=m CONFIG_KEYBOARD_MATRIX=m CONFIG_KEYBOARD_LM8323=m CONFIG_KEYBOARD_MAX7359=m @@ -2597,6 +2685,8 @@ CONFIG_KEYBOARD_NEWTON=m CONFIG_KEYBOARD_OPENCORES=m # CONFIG_KEYBOARD_STOWAWAY is not set CONFIG_KEYBOARD_SUNKBD=m +CONFIG_KEYBOARD_STMPE=m +CONFIG_KEYBOARD_OMAP4=m CONFIG_KEYBOARD_XTKBD=m CONFIG_INPUT_MOUSE=y CONFIG_MOUSE_PS2=y @@ -2614,6 +2704,7 @@ CONFIG_MOUSE_BCM5974=m CONFIG_MOUSE_VSXXXAA=m CONFIG_MOUSE_GPIO=m CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m CONFIG_INPUT_JOYSTICK=y CONFIG_JOYSTICK_ANALOG=m CONFIG_JOYSTICK_A3D=m @@ -2659,12 +2750,18 @@ CONFIG_TOUCHSCREEN_AD7879=m CONFIG_TOUCHSCREEN_AD7879_I2C=m CONFIG_TOUCHSCREEN_AD7879_SPI=m CONFIG_TOUCHSCREEN_ATMEL_MXT=m +CONFIG_TOUCHSCREEN_AUO_PIXCIR=m CONFIG_TOUCHSCREEN_BU21013=m CONFIG_TOUCHSCREEN_CY8CTMG110=m +CONFIG_TOUCHSCREEN_CYTTSP_CORE=m +CONFIG_TOUCHSCREEN_CYTTSP_I2C=m +CONFIG_TOUCHSCREEN_CYTTSP_SPI=m CONFIG_TOUCHSCREEN_DYNAPRO=m CONFIG_TOUCHSCREEN_HAMPSHIRE=m CONFIG_TOUCHSCREEN_EETI=m +CONFIG_TOUCHSCREEN_EGALAX=m CONFIG_TOUCHSCREEN_FUJITSU=m +CONFIG_TOUCHSCREEN_ILI210X=m CONFIG_TOUCHSCREEN_GUNZE=m CONFIG_TOUCHSCREEN_ELO=m CONFIG_TOUCHSCREEN_WACOM_W8001=m @@ -2677,6 +2774,7 @@ CONFIG_TOUCHSCREEN_PENMOUNT=m CONFIG_TOUCHSCREEN_TOUCHRIGHT=m CONFIG_TOUCHSCREEN_TOUCHWIN=m CONFIG_TOUCHSCREEN_UCB1400=m 
+CONFIG_TOUCHSCREEN_PIXCIR=m CONFIG_TOUCHSCREEN_WM97XX=m CONFIG_TOUCHSCREEN_WM9705=y CONFIG_TOUCHSCREEN_WM9712=y @@ -2695,16 +2793,19 @@ CONFIG_TOUCHSCREEN_USB_IDEALTEK=y CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y CONFIG_TOUCHSCREEN_USB_GOTOP=y CONFIG_TOUCHSCREEN_USB_JASTEC=y +CONFIG_TOUCHSCREEN_USB_ELO=y CONFIG_TOUCHSCREEN_USB_E2I=y CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y CONFIG_TOUCHSCREEN_USB_NEXIO=y +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y CONFIG_TOUCHSCREEN_TOUCHIT213=m CONFIG_TOUCHSCREEN_TSC_SERIO=m CONFIG_TOUCHSCREEN_TSC2005=m CONFIG_TOUCHSCREEN_TSC2007=m CONFIG_TOUCHSCREEN_PCAP=m CONFIG_TOUCHSCREEN_ST1232=m +CONFIG_TOUCHSCREEN_STMPE=m CONFIG_TOUCHSCREEN_TPS6507X=m CONFIG_INPUT_MISC=y CONFIG_INPUT_AD714X=m @@ -2716,6 +2817,8 @@ CONFIG_INPUT_MC13783_PWRBUTTON=m CONFIG_INPUT_MMA8450=m CONFIG_INPUT_MPU3050=m CONFIG_INPUT_APANEL=m +CONFIG_INPUT_GP2A=m +CONFIG_INPUT_GPIO_TILT_POLLED=m CONFIG_INPUT_ATLAS_BTNS=m CONFIG_INPUT_ATI_REMOTE2=m CONFIG_INPUT_KEYSPAN_REMOTE=m @@ -2727,13 +2830,14 @@ CONFIG_INPUT_CM109=m CONFIG_INPUT_UINPUT=m CONFIG_INPUT_PCF8574=m CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_INPUT_DA9052_ONKEY=m CONFIG_INPUT_PCAP=m CONFIG_INPUT_ADXL34X=m CONFIG_INPUT_ADXL34X_I2C=m CONFIG_INPUT_ADXL34X_SPI=m CONFIG_INPUT_CMA3000=m CONFIG_INPUT_CMA3000_I2C=m -CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y # # Hardware I/O ports @@ -2820,6 +2924,7 @@ CONFIG_PPDEV=m CONFIG_HVC_DRIVER=y CONFIG_HVC_IRQ=y CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y CONFIG_VIRTIO_CONSOLE=m CONFIG_IPMI_HANDLER=m CONFIG_IPMI_PANIC_EVENT=y @@ -2908,6 +3013,7 @@ CONFIG_I2C_SCMI=m # I2C system bus drivers (mostly embedded / system-on-chip) # CONFIG_I2C_DESIGNWARE_PCI=m +CONFIG_I2C_EG20T=m CONFIG_I2C_GPIO=m CONFIG_I2C_INTEL_MID=m CONFIG_I2C_OCORES=m @@ -2915,7 +3021,6 @@ CONFIG_I2C_PCA_PLATFORM=m # CONFIG_I2C_PXA_PCI is not set # CONFIG_I2C_SIMTEC is not set # CONFIG_I2C_XILINX is not set -CONFIG_I2C_EG20T=m # # External I2C/SMBus adapter 
drivers @@ -2958,6 +3063,13 @@ CONFIG_SPI_DW_PCI=m # CONFIG_SPI_SPIDEV=m CONFIG_SPI_TLE62X0=m +CONFIG_HSI=m +CONFIG_HSI_BOARDINFO=y + +# +# HSI clients +# +CONFIG_HSI_CHAR=m # # PPS support @@ -3003,6 +3115,7 @@ CONFIG_GPIO_MAX7300=m CONFIG_GPIO_MAX732X=m CONFIG_GPIO_PCA953X=m CONFIG_GPIO_PCF857X=m +CONFIG_GPIO_STMPE=y CONFIG_GPIO_TPS65912=m CONFIG_GPIO_ADP5588=m @@ -3057,6 +3170,7 @@ CONFIG_W1_SLAVE_DS2433=m CONFIG_W1_SLAVE_DS2433_CRC=y CONFIG_W1_SLAVE_DS2760=m # CONFIG_W1_SLAVE_DS2780 is not set +CONFIG_W1_SLAVE_DS2781=m CONFIG_W1_SLAVE_BQ27000=m CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set @@ -3064,16 +3178,19 @@ CONFIG_PDA_POWER=m # CONFIG_TEST_POWER is not set CONFIG_BATTERY_DS2760=m # CONFIG_BATTERY_DS2780 is not set +CONFIG_BATTERY_DS2781=m CONFIG_BATTERY_DS2782=m -CONFIG_BATTERY_BQ20Z75=m +CONFIG_BATTERY_SBS=m CONFIG_BATTERY_BQ27x00=m CONFIG_BATTERY_BQ27X00_I2C=y CONFIG_BATTERY_BQ27X00_PLATFORM=y CONFIG_BATTERY_MAX17040=m CONFIG_BATTERY_MAX17042=m -CONFIG_CHARGER_ISP1704=m CONFIG_CHARGER_MAX8903=m +CONFIG_CHARGER_LP8727=m CONFIG_CHARGER_GPIO=m +CONFIG_CHARGER_MANAGER=y +CONFIG_CHARGER_SMB347=m CONFIG_HWMON=m CONFIG_HWMON_VID=m # CONFIG_HWMON_DEBUG_CHIP is not set @@ -3146,6 +3263,7 @@ CONFIG_SENSORS_MAX1668=m CONFIG_SENSORS_MAX6639=m CONFIG_SENSORS_MAX6642=m CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MCP3021=m CONFIG_SENSORS_NTC_THERMISTOR=m CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m @@ -3205,8 +3323,7 @@ CONFIG_SENSORS_MC13783_ADC=m # CONFIG_SENSORS_ACPI_POWER=m CONFIG_SENSORS_ATK0110=m -CONFIG_THERMAL=m -CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL=y CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -3243,6 +3360,7 @@ CONFIG_SBC8360_WDT=m CONFIG_CPU5_WDT=m CONFIG_SMSC_SCH311X_WDT=m CONFIG_SMSC37B787_WDT=m +CONFIG_VIA_WDT=m CONFIG_W83627HF_WDT=m CONFIG_W83697HF_WDT=m CONFIG_W83697UG_WDT=m @@ -3298,9 +3416,18 @@ CONFIG_UCB1400_CORE=m CONFIG_TPS6105X=m CONFIG_TPS65010=m CONFIG_TPS6507X=m +CONFIG_MFD_TPS65217=m 
CONFIG_MFD_TPS65912=y CONFIG_MFD_TPS65912_SPI=y +CONFIG_MFD_STMPE=y + +# +# STMPE Interface Drivers +# +CONFIG_STMPE_SPI=y # CONFIG_MFD_TMIO is not set +CONFIG_PMIC_DA9052=y +CONFIG_MFD_DA9052_SPI=y CONFIG_MFD_WM8400=m # CONFIG_MFD_WM831X_SPI is not set # CONFIG_MFD_PCF50633 is not set @@ -3322,25 +3449,27 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=m CONFIG_REGULATOR_VIRTUAL_CONSUMER=m CONFIG_REGULATOR_USERSPACE_CONSUMER=m CONFIG_REGULATOR_GPIO=m -CONFIG_REGULATOR_BQ24022=m +CONFIG_REGULATOR_AD5398=m +CONFIG_REGULATOR_DA9052=m +CONFIG_REGULATOR_MC13XXX_CORE=m +CONFIG_REGULATOR_MC13783=m +CONFIG_REGULATOR_MC13892=m +CONFIG_REGULATOR_ISL6271A=m CONFIG_REGULATOR_MAX1586=m CONFIG_REGULATOR_MAX8649=m CONFIG_REGULATOR_MAX8660=m CONFIG_REGULATOR_MAX8952=m -CONFIG_REGULATOR_WM8400=m +CONFIG_REGULATOR_PCAP=m CONFIG_REGULATOR_LP3971=m CONFIG_REGULATOR_LP3972=m -CONFIG_REGULATOR_PCAP=m -CONFIG_REGULATOR_MC13XXX_CORE=m -CONFIG_REGULATOR_MC13783=m -CONFIG_REGULATOR_MC13892=m CONFIG_REGULATOR_TPS6105X=m +CONFIG_REGULATOR_TPS62360=m CONFIG_REGULATOR_TPS65023=m CONFIG_REGULATOR_TPS6507X=m -CONFIG_REGULATOR_TPS65912=m -CONFIG_REGULATOR_ISL6271A=m -CONFIG_REGULATOR_AD5398=m +CONFIG_REGULATOR_TPS65217=m CONFIG_REGULATOR_TPS6524X=m +CONFIG_REGULATOR_TPS65912=m +CONFIG_REGULATOR_WM8400=m CONFIG_MEDIA_SUPPORT=m # @@ -3367,6 +3496,7 @@ CONFIG_IR_RC6_DECODER=m CONFIG_IR_JVC_DECODER=m CONFIG_IR_SONY_DECODER=m CONFIG_IR_RC5_SZ_DECODER=m +CONFIG_IR_SANYO_DECODER=m CONFIG_IR_MCE_KBD_DECODER=m CONFIG_IR_LIRC_CODEC=m CONFIG_RC_ATI_REMOTE=m @@ -3380,6 +3510,7 @@ CONFIG_IR_REDRAT3=m CONFIG_IR_STREAMZAP=m CONFIG_IR_WINBOND_CIR=m CONFIG_RC_LOOPBACK=m +CONFIG_IR_GPIO_CIR=m CONFIG_MEDIA_ATTACH=y CONFIG_MEDIA_TUNER=m # CONFIG_MEDIA_TUNER_CUSTOMISE is not set @@ -3392,6 +3523,7 @@ CONFIG_MEDIA_TUNER_TEA5761=m CONFIG_MEDIA_TUNER_TEA5767=m CONFIG_MEDIA_TUNER_MT20XX=m CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m CONFIG_MEDIA_TUNER_MT2266=m CONFIG_MEDIA_TUNER_MT2131=m CONFIG_MEDIA_TUNER_QT1010=m @@ 
-3408,7 +3540,6 @@ CONFIG_VIDEO_V4L2=m CONFIG_VIDEOBUF_GEN=m CONFIG_VIDEOBUF_DMA_SG=m CONFIG_VIDEOBUF_VMALLOC=m -CONFIG_VIDEOBUF_DMA_CONTIG=m CONFIG_VIDEOBUF_DVB=m CONFIG_VIDEO_BTCX=m CONFIG_VIDEO_TVEEPROM=m @@ -3446,7 +3577,6 @@ CONFIG_VIDEO_SAA6588=m # # Video decoders # -CONFIG_VIDEO_ADV7180=m CONFIG_VIDEO_BT819=m CONFIG_VIDEO_BT856=m CONFIG_VIDEO_BT866=m @@ -3478,7 +3608,6 @@ CONFIG_VIDEO_ADV7175=m # # Camera sensor devices # -CONFIG_VIDEO_OV7670=m CONFIG_VIDEO_MT9V011=m # @@ -3496,63 +3625,6 @@ CONFIG_VIDEO_UPD64083=m # CONFIG_VIDEO_M52790=m CONFIG_VIDEO_VIVI=m -CONFIG_VIDEO_BT848=m -CONFIG_VIDEO_BT848_DVB=y -CONFIG_VIDEO_BWQCAM=m -CONFIG_VIDEO_CQCAM=m -CONFIG_VIDEO_W9966=m -CONFIG_VIDEO_CPIA2=m -CONFIG_VIDEO_ZORAN=m -CONFIG_VIDEO_ZORAN_DC30=m -CONFIG_VIDEO_ZORAN_ZR36060=m -CONFIG_VIDEO_ZORAN_BUZ=m -CONFIG_VIDEO_ZORAN_DC10=m -CONFIG_VIDEO_ZORAN_LML33=m -CONFIG_VIDEO_ZORAN_LML33R10=m -CONFIG_VIDEO_ZORAN_AVS6EYES=m -CONFIG_VIDEO_MEYE=m -CONFIG_VIDEO_SAA7134=m -CONFIG_VIDEO_SAA7134_ALSA=m -CONFIG_VIDEO_SAA7134_RC=y -CONFIG_VIDEO_SAA7134_DVB=m -CONFIG_VIDEO_MXB=m -CONFIG_VIDEO_HEXIUM_ORION=m -CONFIG_VIDEO_HEXIUM_GEMINI=m -CONFIG_VIDEO_TIMBERDALE=m -CONFIG_VIDEO_CX88=m -CONFIG_VIDEO_CX88_ALSA=m -CONFIG_VIDEO_CX88_BLACKBIRD=m -CONFIG_VIDEO_CX88_DVB=m -CONFIG_VIDEO_CX88_MPEG=m -CONFIG_VIDEO_CX88_VP3054=m -CONFIG_VIDEO_CX23885=m -CONFIG_MEDIA_ALTERA_CI=m -CONFIG_VIDEO_CX25821=m -CONFIG_VIDEO_CX25821_ALSA=m -CONFIG_VIDEO_AU0828=m -CONFIG_VIDEO_IVTV=m -CONFIG_VIDEO_FB_IVTV=m -CONFIG_VIDEO_CX18=m -CONFIG_VIDEO_CX18_ALSA=m -CONFIG_VIDEO_SAA7164=m -CONFIG_VIDEO_CAFE_CCIC=m -CONFIG_VIDEO_VIA_CAMERA=m -CONFIG_SOC_CAMERA=m -CONFIG_SOC_CAMERA_IMX074=m -CONFIG_SOC_CAMERA_MT9M001=m -CONFIG_SOC_CAMERA_MT9M111=m -CONFIG_SOC_CAMERA_MT9T031=m -CONFIG_SOC_CAMERA_MT9T112=m -CONFIG_SOC_CAMERA_MT9V022=m -CONFIG_SOC_CAMERA_RJ54N1=m -CONFIG_SOC_CAMERA_TW9910=m -CONFIG_SOC_CAMERA_PLATFORM=m -CONFIG_SOC_CAMERA_OV2640=m -CONFIG_SOC_CAMERA_OV5642=m -CONFIG_SOC_CAMERA_OV6650=m 
-CONFIG_SOC_CAMERA_OV772X=m -CONFIG_SOC_CAMERA_OV9640=m -CONFIG_SOC_CAMERA_OV9740=m CONFIG_V4L_USB_DRIVERS=y CONFIG_USB_VIDEO_CLASS=m CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y @@ -3566,6 +3638,7 @@ CONFIG_USB_GSPCA_CPIA1=m CONFIG_USB_GSPCA_ETOMS=m CONFIG_USB_GSPCA_FINEPIX=m CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m CONFIG_USB_GSPCA_KINECT=m CONFIG_USB_GSPCA_KONICA=m CONFIG_USB_GSPCA_MARS=m @@ -3625,19 +3698,58 @@ CONFIG_USB_SN9C102=m CONFIG_USB_PWC=m # CONFIG_USB_PWC_DEBUG is not set CONFIG_USB_PWC_INPUT_EVDEV=y +CONFIG_VIDEO_CPIA2=m CONFIG_USB_ZR364XX=m CONFIG_USB_STKWEBCAM=m CONFIG_USB_S2255=m +CONFIG_V4L_PCI_DRIVERS=y +CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_BT848=m +CONFIG_VIDEO_BT848_DVB=y +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX18_ALSA=m +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +CONFIG_VIDEO_CX25821=m +CONFIG_VIDEO_CX25821_ALSA=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +CONFIG_VIDEO_CX88_VP3054=m +CONFIG_VIDEO_CX88_MPEG=m +CONFIG_VIDEO_HEXIUM_GEMINI=m +CONFIG_VIDEO_HEXIUM_ORION=m +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_FB_IVTV=m +CONFIG_VIDEO_MEYE=m +CONFIG_VIDEO_MXB=m +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m +CONFIG_VIDEO_ZORAN=m +CONFIG_VIDEO_ZORAN_DC30=m +CONFIG_VIDEO_ZORAN_ZR36060=m +CONFIG_VIDEO_ZORAN_BUZ=m +CONFIG_VIDEO_ZORAN_DC10=m +CONFIG_VIDEO_ZORAN_LML33=m +CONFIG_VIDEO_ZORAN_LML33R10=m +CONFIG_VIDEO_ZORAN_AVS6EYES=m +# CONFIG_V4L_ISA_PARPORT_DRIVERS is not set +# CONFIG_V4L_PLATFORM_DRIVERS is not set # CONFIG_V4L_MEM2MEM_DRIVERS is not set CONFIG_RADIO_ADAPTERS=y -CONFIG_RADIO_MAXIRADIO=m -CONFIG_I2C_SI4713=m -CONFIG_RADIO_SI4713=m -CONFIG_USB_DSBR=m CONFIG_RADIO_SI470X=y CONFIG_USB_SI470X=m CONFIG_I2C_SI470X=m CONFIG_USB_MR800=m +CONFIG_USB_DSBR=m +CONFIG_RADIO_MAXIRADIO=m +CONFIG_I2C_SI4713=m +CONFIG_RADIO_SI4713=m +CONFIG_USB_KEENE=m CONFIG_RADIO_TEA5764=m CONFIG_RADIO_SAA7706H=m 
CONFIG_RADIO_TEF6862=m @@ -3698,11 +3810,13 @@ CONFIG_DVB_USB_AF9015=m CONFIG_DVB_USB_CE6230=m CONFIG_DVB_USB_FRIIO=m CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_AZ6007=m CONFIG_DVB_USB_AZ6027=m CONFIG_DVB_USB_LME2510=m CONFIG_DVB_USB_TECHNISAT_USB2=m CONFIG_DVB_USB_IT913X=m CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB_RTL28XXU=m CONFIG_DVB_TTUSB_BUDGET=m CONFIG_DVB_TTUSB_DEC=m CONFIG_SMS_SIANO_MDTV=m @@ -3833,6 +3947,7 @@ CONFIG_DVB_AF9013=m CONFIG_DVB_EC100=m CONFIG_DVB_STV0367=m CONFIG_DVB_CXD2820R=m +CONFIG_DVB_RTL2830=m # # DVB-C (cable) frontends @@ -3883,6 +3998,7 @@ CONFIG_DVB_ATBM8830=m CONFIG_DVB_TDA665x=m CONFIG_DVB_IX2505V=m CONFIG_DVB_IT913X_FE=m +CONFIG_DVB_M88RS2000=m # # Tools to develop new frontends @@ -3901,12 +4017,23 @@ CONFIG_VGA_ARB=y CONFIG_VGA_ARB_MAX_GPUS=16 CONFIG_VGA_SWITCHEROO=y CONFIG_DRM=m +CONFIG_DRM_USB=m CONFIG_DRM_KMS_HELPER=m +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set CONFIG_DRM_TTM=m CONFIG_DRM_TDFX=m CONFIG_DRM_R128=m CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_KMS=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_NOUVEAU_DEBUG is not set + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m CONFIG_DRM_I810=m CONFIG_DRM_I915=m CONFIG_DRM_I915_KMS=y @@ -3915,6 +4042,10 @@ CONFIG_DRM_SIS=m CONFIG_DRM_VIA=m CONFIG_DRM_SAVAGE=m # CONFIG_DRM_VMWGFX is not set +CONFIG_DRM_GMA500=m +CONFIG_DRM_GMA600=y +CONFIG_DRM_GMA3600=y +CONFIG_DRM_UDL=m CONFIG_STUB_POULSBO=m CONFIG_VGASTATE=m CONFIG_VIDEO_OUTPUT_CONTROL=m @@ -3965,6 +4096,7 @@ CONFIG_FB_RIVA=m CONFIG_FB_RIVA_I2C=y # CONFIG_FB_RIVA_DEBUG is not set CONFIG_FB_RIVA_BACKLIGHT=y +CONFIG_FB_I740=m CONFIG_FB_LE80578=m CONFIG_FB_CARILLO_RANCH=m CONFIG_FB_MATROX=m @@ -4024,6 +4156,7 @@ CONFIG_FB_MB862XX=m CONFIG_FB_MB862XX_PCI_GDC=y CONFIG_FB_MB862XX_I2C=y CONFIG_FB_BROADSHEET=m +# CONFIG_EXYNOS_VIDEO is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=m CONFIG_LCD_L4F00242T03=m @@ -4040,19 +4173,13 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y 
CONFIG_BACKLIGHT_GENERIC=m CONFIG_BACKLIGHT_PROGEAR=m CONFIG_BACKLIGHT_CARILLO_RANCH=m +CONFIG_BACKLIGHT_DA9052=m CONFIG_BACKLIGHT_APPLE=m CONFIG_BACKLIGHT_SAHARA=m CONFIG_BACKLIGHT_ADP8860=m CONFIG_BACKLIGHT_ADP8870=m - -# -# Display device support -# -CONFIG_DISPLAY_SUPPORT=m - -# -# Display hardware drivers -# +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_OT200=m # # Console display driver support @@ -4094,6 +4221,7 @@ CONFIG_SND_DEBUG=y # CONFIG_SND_DEBUG_VERBOSE is not set CONFIG_SND_PCM_XRUN_DEBUG=y CONFIG_SND_VMASTER=y +CONFIG_SND_KCTL_JACK=y CONFIG_SND_DMA_SGBUF=y CONFIG_SND_RAWMIDI_SEQ=m CONFIG_SND_OPL3_LIB_SEQ=m @@ -4313,14 +4441,8 @@ CONFIG_HID_PICOLCD_BACKLIGHT=y CONFIG_HID_PICOLCD_LCD=y CONFIG_HID_PICOLCD_LEDS=y CONFIG_HID_PRIMAX=m -CONFIG_HID_QUANTA=m CONFIG_HID_ROCCAT=m -CONFIG_HID_ROCCAT_COMMON=m -CONFIG_HID_ROCCAT_ARVO=m -CONFIG_HID_ROCCAT_KONE=m -CONFIG_HID_ROCCAT_KONEPLUS=m -CONFIG_HID_ROCCAT_KOVAPLUS=m -CONFIG_HID_ROCCAT_PYRA=m +CONFIG_HID_SAITEK=m CONFIG_HID_SAMSUNG=m CONFIG_HID_SONY=m CONFIG_HID_SPEEDLINK=m @@ -4329,21 +4451,23 @@ CONFIG_HID_GREENASIA=m CONFIG_GREENASIA_FF=y CONFIG_HID_SMARTJOYPLUS=m CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=m CONFIG_HID_TOPSEED=m CONFIG_HID_THRUSTMASTER=m CONFIG_THRUSTMASTER_FF=y CONFIG_HID_WACOM=m CONFIG_HID_WACOM_POWER_SUPPLY=y CONFIG_HID_WIIMOTE=m +CONFIG_HID_WIIMOTE_EXT=y CONFIG_HID_ZEROPLUS=m CONFIG_ZEROPLUS_FF=y CONFIG_HID_ZYDACRON=m -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=m -CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y CONFIG_USB_ARCH_HAS_EHCI=y CONFIG_USB_ARCH_HAS_XHCI=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=m +CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB=m # CONFIG_USB_DEBUG is not set CONFIG_USB_ANNOUNCE_NEW_DEVICES=y @@ -4356,8 +4480,6 @@ CONFIG_USB_DEVICEFS=y # CONFIG_USB_DYNAMIC_MINORS is not set CONFIG_USB_SUSPEND=y # CONFIG_USB_OTG is not set -CONFIG_USB_DWC3=m -# CONFIG_USB_DWC3_DEBUG is not set CONFIG_USB_MON=m CONFIG_USB_WUSB=m CONFIG_USB_WUSB_CBAF=m @@ -4378,6 +4500,8 @@ 
CONFIG_USB_ISP1760_HCD=m CONFIG_USB_ISP1362_HCD=m CONFIG_USB_OHCI_HCD=m # CONFIG_USB_OHCI_HCD_SSB is not set +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set CONFIG_USB_OHCI_LITTLE_ENDIAN=y @@ -4453,6 +4577,7 @@ CONFIG_USB_SERIAL_IPAQ=m CONFIG_USB_SERIAL_IR=m CONFIG_USB_SERIAL_EDGEPORT=m CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F81232=m CONFIG_USB_SERIAL_GARMIN=m CONFIG_USB_SERIAL_IPW=m CONFIG_USB_SERIAL_IUU=m @@ -4461,6 +4586,7 @@ CONFIG_USB_SERIAL_KEYSPAN=m CONFIG_USB_SERIAL_KLSI=m CONFIG_USB_SERIAL_KOBIL_SCT=m CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_METRO=m CONFIG_USB_SERIAL_MOS7720=m CONFIG_USB_SERIAL_MOS7715_PARPORT=y CONFIG_USB_SERIAL_MOS7840=m @@ -4523,7 +4649,6 @@ CONFIG_USB_XUSBATM=m # # OTG and related infrastructure # -CONFIG_USB_OTG_UTILS=y # CONFIG_USB_GPIO_VBUS is not set # CONFIG_NOP_USB_XCEIV is not set CONFIG_UWB=m @@ -4581,7 +4706,6 @@ CONFIG_LEDS_CLASS=y # LED drivers # CONFIG_LEDS_LM3530=m -CONFIG_LEDS_NET5501=m CONFIG_LEDS_PCA9532=m # CONFIG_LEDS_PCA9532_GPIO is not set CONFIG_LEDS_GPIO=m @@ -4590,6 +4714,7 @@ CONFIG_LEDS_LP5521=m CONFIG_LEDS_LP5523=m CONFIG_LEDS_CLEVO_MAIL=m CONFIG_LEDS_PCA955X=m +CONFIG_LEDS_PCA9633=m CONFIG_LEDS_DAC124S085=m CONFIG_LEDS_REGULATOR=m CONFIG_LEDS_BD2802=m @@ -4597,6 +4722,8 @@ CONFIG_LEDS_INTEL_SS4200=m CONFIG_LEDS_LT3593=m CONFIG_LEDS_DELL_NETBOOKS=m CONFIG_LEDS_MC13783=m +CONFIG_LEDS_TCA6507=m +CONFIG_LEDS_OT200=m CONFIG_LEDS_TRIGGERS=y # @@ -4717,6 +4844,7 @@ CONFIG_RTC_DRV_DS1286=m CONFIG_RTC_DRV_DS1511=m CONFIG_RTC_DRV_DS1553=m CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DA9052=m CONFIG_RTC_DRV_STK17TA8=m CONFIG_RTC_DRV_M48T86=m CONFIG_RTC_DRV_M48T35=m @@ -4774,6 +4902,11 @@ CONFIG_VIRTIO_RING=m # CONFIG_VIRTIO_BALLOON is not set CONFIG_VIRTIO_MMIO=m +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set + # # Xen driver support # @@ -4793,7 +4926,7 @@ 
CONFIG_SWIOTLB_XEN=y CONFIG_XEN_TMEM=y CONFIG_XEN_PCIDEV_BACKEND=m CONFIG_XEN_PRIVCMD=y -CONFIG_CPU_FREQ_GOV_XEN=m +CONFIG_XEN_ACPI_PROCESSOR=y CONFIG_STAGING=y CONFIG_ET131X=m CONFIG_SLICOSS=m @@ -4812,16 +4945,17 @@ CONFIG_PANEL_PROFILE=5 # CONFIG_PANEL_CHANGE_MESSAGE is not set CONFIG_R8187SE=m CONFIG_RTL8192U=m +CONFIG_RTLLIB=m +CONFIG_RTLLIB_CRYPTO_CCMP=m +CONFIG_RTLLIB_CRYPTO_TKIP=m +CONFIG_RTLLIB_CRYPTO_WEP=m CONFIG_RTL8192E=m CONFIG_R8712U=m -CONFIG_R8712_AP=y CONFIG_RTS_PSTOR=m # CONFIG_RTS_PSTOR_DEBUG is not set CONFIG_RTS5139=m # CONFIG_RTS5139_DEBUG is not set CONFIG_TRANZPORT=m -CONFIG_POHMELFS=m -# CONFIG_POHMELFS_DEBUG is not set CONFIG_IDE_PHISON=m CONFIG_LINE6_USB=m # CONFIG_LINE6_USB_DEBUG is not set @@ -4830,15 +4964,6 @@ CONFIG_LINE6_USB=m # CONFIG_LINE6_USB_DUMP_PCM is not set # CONFIG_LINE6_USB_RAW is not set # CONFIG_LINE6_USB_IMPULSE_RESPONSE is not set -CONFIG_DRM_NOUVEAU=m -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -# CONFIG_DRM_NOUVEAU_DEBUG is not set - -# -# I2C encoder or helper chips -# -CONFIG_DRM_I2C_CH7006=m -CONFIG_DRM_I2C_SIL164=m CONFIG_USB_SERIAL_QUATECH2=m CONFIG_USB_SERIAL_QUATECH_USB2=m CONFIG_VT6655=m @@ -4855,6 +4980,7 @@ CONFIG_VME_TSI148=m # VME Device Drivers # CONFIG_VME_USER=m +CONFIG_VME_PIO2=m # # VME Board Drivers @@ -4862,6 +4988,7 @@ CONFIG_VME_USER=m CONFIG_VMIVME_7805=m CONFIG_DX_SEP=m CONFIG_IIO=m +CONFIG_IIO_ST_HWMON=m CONFIG_IIO_BUFFER=y CONFIG_IIO_SW_RING=m CONFIG_IIO_KFIFO_BUF=m @@ -4924,9 +5051,12 @@ CONFIG_AD7746=m # CONFIG_AD5064=m CONFIG_AD5360=m +CONFIG_AD5380=m +CONFIG_AD5421=m CONFIG_AD5624R_SPI=m CONFIG_AD5446=m CONFIG_AD5504=m +CONFIG_AD5764=m CONFIG_AD5791=m CONFIG_AD5686=m CONFIG_MAX517=m @@ -5001,10 +5131,10 @@ CONFIG_IIO_SYSFS_TRIGGER=m CONFIG_IIO_SIMPLE_DUMMY=m # CONFIG_IIO_SIMPLE_DUMMY_EVENTS is not set # CONFIG_IIO_SIMPLE_DUMMY_BUFFER is not set -CONFIG_XVMALLOC=y CONFIG_ZRAM=m # CONFIG_ZRAM_DEBUG is not set # CONFIG_ZCACHE is not set +CONFIG_ZSMALLOC=m CONFIG_WLAGS49_H2=m CONFIG_WLAGS49_H25=m 
CONFIG_FB_SM7XX=m @@ -5026,10 +5156,6 @@ CONFIG_FT1000_PCMCIA=m # CONFIG_SPEAKUP is not set CONFIG_TOUCHSCREEN_CLEARPAD_TM1217=m CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=m -CONFIG_DRM_PSB=m -CONFIG_DRM_PSB_MRST=y -CONFIG_DRM_PSB_MFLD=y -CONFIG_DRM_PSB_CDV=y CONFIG_INTEL_MEI=m CONFIG_STAGING_MEDIA=y CONFIG_DVB_AS102=m @@ -5052,6 +5178,15 @@ CONFIG_LIRC_SERIAL_TRANSMITTER=y CONFIG_LIRC_SIR=m CONFIG_LIRC_TTUSBIR=m CONFIG_LIRC_ZILOG=m + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_PHONE=m +CONFIG_PHONE_IXJ=m +CONFIG_PHONE_IXJ_PCMCIA=m +CONFIG_USB_WPAN_HCD=m CONFIG_X86_PLATFORM_DEVICES=y CONFIG_ACER_WMI=m CONFIG_ACERHDF=m @@ -5061,6 +5196,8 @@ CONFIG_DELL_WMI=m CONFIG_DELL_WMI_AIO=m CONFIG_FUJITSU_LAPTOP=m # CONFIG_FUJITSU_LAPTOP_DEBUG is not set +CONFIG_FUJITSU_TABLET=m +CONFIG_AMILO_RFKILL=m CONFIG_HP_ACCEL=m CONFIG_HP_WMI=m CONFIG_MSI_LAPTOP=m @@ -5084,7 +5221,6 @@ CONFIG_ASUS_NB_WMI=m CONFIG_EEEPC_WMI=m CONFIG_ACPI_WMI=m CONFIG_MSI_WMI=m -# CONFIG_ACPI_ASUS is not set CONFIG_TOPSTAR_LAPTOP=m CONFIG_ACPI_TOSHIBA=m CONFIG_TOSHIBA_BT_RFKILL=m @@ -5096,6 +5232,7 @@ CONFIG_SAMSUNG_LAPTOP=m CONFIG_MXM_WMI=m CONFIG_INTEL_OAKTRAIL=m CONFIG_SAMSUNG_Q10=m +CONFIG_APPLE_GMUX=m # # Hardware Spinlock drivers @@ -5107,13 +5244,21 @@ CONFIG_IOMMU_API=y CONFIG_IOMMU_SUPPORT=y CONFIG_AMD_IOMMU=y # CONFIG_AMD_IOMMU_STATS is not set +CONFIG_AMD_IOMMU_V2=m CONFIG_DMAR_TABLE=y CONFIG_INTEL_IOMMU=y CONFIG_INTEL_IOMMU_DEFAULT_ON=y CONFIG_INTEL_IOMMU_FLOPPY_WA=y CONFIG_IRQ_REMAP=y + +# +# Remoteproc drivers (EXPERIMENTAL) +# + +# +# Rpmsg drivers (EXPERIMENTAL) +# CONFIG_VIRT_DRIVERS=y -# CONFIG_HYPERV is not set CONFIG_PM_DEVFREQ=y # @@ -5140,12 +5285,12 @@ CONFIG_DMIID=y CONFIG_DMI_SYSFS=m CONFIG_ISCSI_IBFT_FIND=y CONFIG_ISCSI_IBFT=m -CONFIG_SIGMA=m # CONFIG_GOOGLE_FIRMWARE is not set # # File systems # +CONFIG_DCACHE_WORD_ACCESS=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -5192,6 +5337,7 @@ CONFIG_OCFS2_FS_STATS=y # CONFIG_OCFS2_DEBUG_FS is not set 
CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set CONFIG_NILFS2_FS=m CONFIG_FS_POSIX_ACL=y CONFIG_EXPORTFS=m @@ -5308,6 +5454,8 @@ CONFIG_MINIX_FS=m CONFIG_OMFS_FS=m CONFIG_HPFS_FS=m CONFIG_QNX4FS_FS=m +CONFIG_QNX6FS_FS=m +# CONFIG_QNX6FS_DEBUG is not set CONFIG_ROMFS_FS=m # CONFIG_ROMFS_BACKED_BY_BLOCK is not set # CONFIG_ROMFS_BACKED_BY_MTD is not set @@ -5331,12 +5479,12 @@ CONFIG_NFS_V4=y CONFIG_NFS_FSCACHE=y # CONFIG_NFS_USE_LEGACY_DNS is not set CONFIG_NFS_USE_KERNEL_DNS=y -# CONFIG_NFS_USE_NEW_IDMAPPER is not set CONFIG_NFSD=m CONFIG_NFSD_V2_ACL=y CONFIG_NFSD_V3=y CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y +# CONFIG_NFSD_FAULT_INJECTION is not set CONFIG_LOCKD=m CONFIG_LOCKD_V4=y CONFIG_NFS_ACL_SUPPORT=m @@ -5345,6 +5493,7 @@ CONFIG_SUNRPC=m CONFIG_SUNRPC_GSS=m CONFIG_SUNRPC_XPRT_RDMA=m CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DEBUG is not set CONFIG_CEPH_FS=m CONFIG_CIFS=m CONFIG_CIFS_STATS=y @@ -5373,29 +5522,6 @@ CONFIG_AFS_FSCACHE=y CONFIG_9P_FS=m # CONFIG_9P_FSCACHE is not set CONFIG_9P_FS_POSIX_ACL=y - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -CONFIG_OSF_PARTITION=y -# CONFIG_AMIGA_PARTITION is not set -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -# CONFIG_MINIX_SUBPARTITION is not set -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set -CONFIG_SGI_PARTITION=y -CONFIG_ULTRIX_PARTITION=y -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_SYSV68_PARTITION=y CONFIG_NLS=y CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=m @@ -5467,6 +5593,7 @@ CONFIG_SCHEDSTATS=y CONFIG_TIMER_STATS=y # CONFIG_DEBUG_OBJECTS is not set # CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set # CONFIG_DEBUG_RT_MUTEXES is not set # CONFIG_RT_MUTEX_TESTER is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -5496,6 +5623,8 @@ 
CONFIG_FRAME_POINTER=y # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_CPU_STALL_INFO is not set +# CONFIG_RCU_TRACE is not set # CONFIG_KPROBES_SANITY_TEST is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set @@ -5505,7 +5634,6 @@ CONFIG_LKDTM=m # CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set # CONFIG_FAULT_INJECTION is not set CONFIG_LATENCYTOP=y -CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_DEBUG_PAGEALLOC is not set CONFIG_USER_STACKTRACE_SUPPORT=y CONFIG_NOP_TRACER=y @@ -5579,6 +5707,7 @@ CONFIG_DEFAULT_IO_DELAY_TYPE=0 # CONFIG_CPA_DEBUG is not set CONFIG_OPTIMIZE_INLINING=y # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set # # Security options @@ -5688,6 +5817,7 @@ CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_BLOWFISH_COMMON=m CONFIG_CRYPTO_BLOWFISH_X86_64=m CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m @@ -5697,6 +5827,7 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SALSA20_X86_64=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m CONFIG_CRYPTO_TEA=m # CONFIG_CRYPTO_TWOFISH is not set CONFIG_CRYPTO_TWOFISH_COMMON=m @@ -5732,11 +5863,19 @@ CONFIG_BINARY_PRINTF=y CONFIG_RAID6_PQ=m CONFIG_BITREVERSE=y CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_GENERIC_IO=y CONFIG_CRC_CCITT=m CONFIG_CRC16=m CONFIG_CRC_T10DIF=y CONFIG_CRC_ITU_T=m CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set CONFIG_CRC7=m CONFIG_LIBCRC32C=m CONFIG_CRC8=m @@ -5761,6 +5900,8 @@ CONFIG_DECOMPRESS_LZO=y CONFIG_GENERIC_ALLOCATOR=y CONFIG_REED_SOLOMON=m CONFIG_REED_SOLOMON_DEC16=y +CONFIG_BCH=m +CONFIG_BCH_CONST_PARAMS=y CONFIG_TEXTSEARCH=y CONFIG_TEXTSEARCH_KMP=m CONFIG_TEXTSEARCH_BM=m 
@@ -5771,6 +5912,7 @@ CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y CONFIG_CHECK_SIGNATURE=y CONFIG_CPU_RMAP=y +CONFIG_DQL=y CONFIG_NLATTR=y CONFIG_LRU_CACHE=m CONFIG_AVERAGE=y diff --git a/linux-3.4.tar.bz2.sha1sum b/linux-3.4.tar.bz2.sha1sum new file mode 100644 index 0000000..322566d --- /dev/null +++ b/linux-3.4.tar.bz2.sha1sum @@ -0,0 +1 @@ +dfc54e7fbee81f77aba85d3d8ff2d992e1e49573 linux-3.4.tar.bz2 diff --git a/linux-3.4.tar.sign b/linux-3.4.tar.sign new file mode 100644 index 0000000..9ee8205 --- /dev/null +++ b/linux-3.4.tar.sign @@ -0,0 +1,11 @@ +-----BEGIN PGP SIGNATURE----- +Version: GnuPG v2.0.18 (GNU/Linux) + +iQEcBAABAgAGBQJPuYK+AAoJEHm+PkMAQRiGNjwIAJeq/rH2kbn0W/JECbp30GFw +h2nPpLgYWDIgGxyfLooJS20alz3Z+FL5i6+qQlgns9s0LwYtHjPexcPVoqhl2bko +aYBQZ097kSs0j+FYNrPm/I6if+oKPUzkJvx71ZJ3ESG7lJPSODrequZ2PR2BfwdL +7UXAbv7e86KJFp+Z9X4eZY9lVRvE0m9Hq6u1f+gpgOL2gUu425t6jq/z0rC2HCh2 +7E4adlzAzMqHsrYLB6924WgK+7UhYlfGp/cm/CDuKYYzFfM+ujJgnfSUvUV23kQ9 +PTjm9UzibGmH81FoUcMtEHqrc5a/Lv0ugRzp8891DSCGq4L0CVVz25FwMZeraXE= +=dt4b +-----END PGP SIGNATURE----- diff --git a/patches.xen/pvops-3.4-enable-netfront-in-dom0.patch b/patches.xen/pvops-3.4-enable-netfront-in-dom0.patch new file mode 100644 index 0000000..d3ce38e --- /dev/null +++ b/patches.xen/pvops-3.4-enable-netfront-in-dom0.patch @@ -0,0 +1,24 @@ +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index 698b905..e31ebff 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -1953,9 +1953,6 @@ static int __init netif_init(void) + if (!xen_domain()) + return -ENODEV; + +- if (xen_initial_domain()) +- return 0; +- + if (xen_hvm_domain() && !xen_platform_pci_unplug) + return -ENODEV; + +@@ -1965,9 +1962,6 @@ module_init(netif_init); + + static void __exit netif_exit(void) + { +- if (xen_initial_domain()) +- return; +- + xenbus_unregister_driver(&netfront_driver); + } + module_exit(netif_exit); diff --git a/rel-pvops b/rel-pvops index 0cfbf08..d00491f 100644 --- a/rel-pvops +++ b/rel-pvops 
@@ -1 +1 @@ -2 +1 diff --git a/series-pvops.conf b/series-pvops.conf index 61beb0a..bde4589 100644 --- a/series-pvops.conf +++ b/series-pvops.conf @@ -1,13 +1,2 @@ -patches.xen/pvops-0001-xen-Add-privcmd-device-driver.patch -patches.xen/pvops-0002-x86-acpi-tboot-Have-a-ACPI-os-prepare-sleep-instead-.patch -patches.xen/pvops-0003-tboot-Add-return-values-for-tboot_sleep.patch -patches.xen/pvops-0004-x86-acpi-sleep-Provide-registration-for-acpi_suspend.patch -patches.xen/pvops-0005-xen-acpi-sleep-Enable-ACPI-sleep-via-the-__acpi_os_p.patch -patches.xen/pvops-0006-xen-acpi-sleep-Register-to-the-acpi_suspend_lowlevel.patch -patches.xen/pvops-0007-xen-Utilize-the-restore_msi_irqs-hook.patch -patches.xen/pvops-0008-xen-setup-pm-acpi-Remove-the-call-to-boot_option_idl.patch -patches.xen/pvops-0009-xen-enlighten-Expose-MWAIT-and-MWAIT_LEAF-if-hypervi.patch -patches.xen/pvops-0010-CPUFREQ-xen-governor-for-Xen-hypervisor-frequency-sc.patch -patches.xen/pvops-0011-x86-PCI-Expand-the-x86_msi_ops-to-have-a-restore-MSI.patch -patches.xen/pvops-enable-netfront-in-dom0.patch +patches.xen/pvops-3.4-enable-netfront-in-dom0.patch patches.xen/pvops-netback-calculate-correctly-the-SKB-slots.patch diff --git a/version-pvops b/version-pvops index 406ebcb..2f4b607 100644 --- a/version-pvops +++ b/version-pvops @@ -1 +1 @@ -3.2.7 +3.4 From da6296fbd1a2b90d0af98b52df8f86a8b055b123 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 24 May 2012 02:08:29 +0200 Subject: [PATCH 02/56] xen PVUSB driver --- config-pvops | 2 + .../pvops-3.4-0100-usb-xen-pvusb-driver.patch | 4252 +++++++++++++++++ series-pvops.conf | 1 + 3 files changed, 4255 insertions(+) create mode 100644 patches.xen/pvops-3.4-0100-usb-xen-pvusb-driver.patch diff --git a/config-pvops b/config-pvops index dc71d06..bb17523 100644 --- a/config-pvops +++ b/config-pvops @@ -4513,6 +4513,8 @@ CONFIG_USB_SL811_CS=m CONFIG_USB_R8A66597_HCD=m CONFIG_USB_WHCI_HCD=m CONFIG_USB_HWA_HCD=m +CONFIG_XEN_USBDEV_FRONTEND=m 
+CONFIG_XEN_USBDEV_BACKEND=m # # USB Device Class drivers diff --git a/patches.xen/pvops-3.4-0100-usb-xen-pvusb-driver.patch b/patches.xen/pvops-3.4-0100-usb-xen-pvusb-driver.patch new file mode 100644 index 0000000..42a4cec --- /dev/null +++ b/patches.xen/pvops-3.4-0100-usb-xen-pvusb-driver.patch @@ -0,0 +1,4252 @@ +From 10b675fc21702ff5a9b94fc13e2b504ca09073fd Mon Sep 17 00:00:00 2001 +From: Nathanael Rensen +Date: Tue, 7 Feb 2012 13:50:24 +0800 +Subject: [PATCH] usb: xen pvusb driver + +Port the original Xen PV USB drivers developed by Noboru Iwamatsu + to the Linux pvops kernel. The backend driver +resides in dom0 with access to the physical USB device. The frontend driver +resides in a domU to provide paravirtualised access to physical USB devices. + +For usage, see http://wiki.xensource.com/xenwiki/XenUSBPassthrough. + +Signed-off-by: Nathanael Rensen . +Signed-off-by: Konrad Rzeszutek Wilk +--- + drivers/usb/host/Kconfig | 23 + + drivers/usb/host/Makefile | 2 + + drivers/usb/host/xen-usbback/Makefile | 3 + + drivers/usb/host/xen-usbback/common.h | 170 ++++ + drivers/usb/host/xen-usbback/usbback.c | 1272 +++++++++++++++++++++++ + drivers/usb/host/xen-usbback/usbdev.c | 319 ++++++ + drivers/usb/host/xen-usbback/xenbus.c | 482 +++++++++ + drivers/usb/host/xen-usbfront.c | 1739 ++++++++++++++++++++++++++++++++ + include/xen/interface/io/usbif.h | 150 +++ + 9 files changed, 4160 insertions(+), 0 deletions(-) + create mode 100644 drivers/usb/host/xen-usbback/Makefile + create mode 100644 drivers/usb/host/xen-usbback/common.h + create mode 100644 drivers/usb/host/xen-usbback/usbback.c + create mode 100644 drivers/usb/host/xen-usbback/usbdev.c + create mode 100644 drivers/usb/host/xen-usbback/xenbus.c + create mode 100644 drivers/usb/host/xen-usbfront.c + create mode 100644 include/xen/interface/io/usbif.h + +diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig +index f788eb8..cbb0961 100644 +--- a/drivers/usb/host/Kconfig ++++ b/drivers/usb/host/Kconfig 
+@@ -638,3 +638,26 @@ config USB_OCTEON_OHCI + config USB_OCTEON2_COMMON + bool + default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI ++ ++config XEN_USBDEV_FRONTEND ++ tristate "Xen pvusb device frontend driver" ++ depends on XEN && USB ++ select XEN_XENBUS_FRONTEND ++ default m ++ help ++ The pvusb device frontend driver allows the kernel to ++ access usb devices exported exported by a virtual ++ machine containing a physical usb device driver. The ++ frontend driver is intended for unprivileged guest domains; ++ if you are compiling a kernel for a Xen guest, you almost ++ certainly want to enable this. ++ ++config XEN_USBDEV_BACKEND ++ tristate "PVUSB device backend driver" ++ depends on XEN_BACKEND && USB ++ default m ++ help ++ The pvusb backend driver allows the kernel to export its usb ++ devices to other guests via a high-performance shared-memory ++ interface. This requires the guest to have the pvusb frontend ++ available. +diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile +index 0982bcc..d62fe38 100644 +--- a/drivers/usb/host/Makefile ++++ b/drivers/usb/host/Makefile +@@ -40,4 +40,6 @@ obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o + obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o + obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o + obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o ++obj-$(CONFIG_XEN_USBDEV_FRONTEND) += xen-usbfront.o ++obj-$(CONFIG_XEN_USBDEV_BACKEND) += xen-usbback/ + obj-$(CONFIG_MIPS_ALCHEMY) += alchemy-common.o +diff --git a/drivers/usb/host/xen-usbback/Makefile b/drivers/usb/host/xen-usbback/Makefile +new file mode 100644 +index 0000000..9f3628c +--- /dev/null ++++ b/drivers/usb/host/xen-usbback/Makefile +@@ -0,0 +1,3 @@ ++obj-$(CONFIG_XEN_USBDEV_BACKEND) := xen-usbback.o ++ ++xen-usbback-y := usbdev.o xenbus.o usbback.o +diff --git a/drivers/usb/host/xen-usbback/common.h b/drivers/usb/host/xen-usbback/common.h +new file mode 100644 +index 0000000..d9671ec +--- /dev/null ++++ b/drivers/usb/host/xen-usbback/common.h +@@ -0,0 
+1,170 @@ ++/* ++ * This file is part of Xen USB backend driver. ++ * ++ * Copyright (C) 2009, FUJITSU LABORATORIES LTD. ++ * Author: Noboru Iwamatsu ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, see . ++ * ++ * or, by your choice, ++ * ++ * When distributed separately from the Linux kernel or incorporated into ++ * other software packages, subject to the following license: ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to ++ * deal in the Software without restriction, including without limitation the ++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __XEN_USBBACK__COMMON_H__ ++#define __XEN_USBBACK__COMMON_H__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRV_PFX "xen-usbback:" ++ ++struct xen_usbdev; ++ ++#ifndef BUS_ID_SIZE ++#define XEN_USB_BUS_ID_SIZE 20 ++#else ++#define XEN_USB_BUS_ID_SIZE BUS_ID_SIZE ++#endif ++ ++#define XEN_USB_DEV_ADDR_SIZE 128 ++ ++struct xen_usbif { ++ domid_t domid; ++ unsigned int handle; ++ int num_ports; ++ enum usb_spec_version usb_ver; ++ ++ struct list_head usbif_list; ++ ++ struct xenbus_device *xbdev; ++ ++ unsigned int irq; ++ ++ void *urb_sring; ++ void *conn_sring; ++ struct usbif_urb_back_ring urb_ring; ++ struct usbif_conn_back_ring conn_ring; ++ ++ spinlock_t urb_ring_lock; ++ spinlock_t conn_ring_lock; ++ atomic_t refcnt; ++ ++ struct xenbus_watch backend_watch; ++ ++ /* device address lookup table */ ++ struct xen_usbdev *addr_table[XEN_USB_DEV_ADDR_SIZE]; ++ spinlock_t addr_lock; ++ ++ /* connected device list */ ++ struct list_head dev_list; ++ spinlock_t dev_lock; ++ ++ /* request schedule */ ++ struct task_struct *xenusbd; ++ unsigned int waiting_reqs; ++ wait_queue_head_t waiting_to_free; ++ wait_queue_head_t wq; ++}; ++ ++struct xen_usbport { ++ struct list_head port_list; ++ ++ char phys_bus[XEN_USB_BUS_ID_SIZE]; ++ domid_t domid; ++ unsigned int handle; ++ int portnum; ++ unsigned is_connected:1; ++}; ++ ++struct xen_usbdev { ++ struct kref kref; ++ struct list_head dev_list; ++ ++ struct xen_usbport *port; ++ struct usb_device *udev; ++ struct xen_usbif *usbif; ++ int addr; ++ ++ struct list_head 
submitting_list; ++ spinlock_t submitting_lock; ++}; ++ ++#define usbif_get(_b) (atomic_inc(&(_b)->refcnt)) ++#define usbif_put(_b) \ ++ do { \ ++ if (atomic_dec_and_test(&(_b)->refcnt)) \ ++ wake_up(&(_b)->waiting_to_free); \ ++ } while (0) ++ ++int xen_usbif_xenbus_init(void); ++void xen_usbif_xenbus_exit(void); ++struct xen_usbif *xen_usbif_find(domid_t domid, unsigned int handle); ++ ++int xen_usbdev_init(void); ++void xen_usbdev_exit(void); ++ ++void xen_usbif_attach_device(struct xen_usbif *usbif, struct xen_usbdev *dev); ++void xen_usbif_detach_device(struct xen_usbif *usbif, struct xen_usbdev *dev); ++void xen_usbif_detach_device_without_lock(struct xen_usbif *usbif, ++ struct xen_usbdev *dev); ++void xen_usbif_hotplug_notify(struct xen_usbif *usbif, int portnum, int speed); ++struct xen_usbdev *xen_usbif_find_attached_device(struct xen_usbif *usbif, ++ int port); ++irqreturn_t xen_usbif_be_int(int irq, void *dev_id); ++int xen_usbif_schedule(void *arg); ++void xen_usbif_unlink_urbs(struct xen_usbdev *dev); ++ ++struct xen_usbport *xen_usbport_find_by_busid(const char *busid); ++struct xen_usbport *xen_usbport_find(const domid_t domid, ++ const unsigned int handle, const int portnum); ++int xen_usbport_add(const char *busid, const domid_t domid, ++ const unsigned int handle, const int portnum); ++int xen_usbport_remove(const domid_t domid, const unsigned int handle, ++ const int portnum); ++#endif /* __XEN_USBBACK__COMMON_H__ */ +diff --git a/drivers/usb/host/xen-usbback/usbback.c b/drivers/usb/host/xen-usbback/usbback.c +new file mode 100644 +index 0000000..df1afa9 +--- /dev/null ++++ b/drivers/usb/host/xen-usbback/usbback.c +@@ -0,0 +1,1272 @@ ++/* ++ * Xen USB backend driver ++ * ++ * Copyright (C) 2009, FUJITSU LABORATORIES LTD. 
++ * Author: Noboru Iwamatsu ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, see . ++ * ++ * or, by your choice, ++ * ++ * When distributed separately from the Linux kernel or incorporated into ++ * other software packages, subject to the following license: ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to ++ * deal in the Software without restriction, including without limitation the ++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include ++#include "common.h" ++ ++static int xen_usbif_reqs = USBIF_BACK_MAX_PENDING_REQS; ++module_param_named(reqs, xen_usbif_reqs, int, 0); ++MODULE_PARM_DESC(reqs, "Number of usbback requests to allocate"); ++ ++struct pending_req_segment { ++ uint16_t offset; ++ uint16_t length; ++}; ++ ++struct pending_req { ++ struct xen_usbif *usbif; ++ ++ uint16_t id; /* request id */ ++ ++ struct xen_usbdev *dev; ++ struct list_head urb_list; ++ ++ /* urb */ ++ struct urb *urb; ++ void *buffer; ++ dma_addr_t transfer_dma; ++ struct usb_ctrlrequest *setup; ++ dma_addr_t setup_dma; ++ ++ /* request segments */ ++ uint16_t nr_buffer_segs; ++ /* number of urb->transfer_buffer segments */ ++ uint16_t nr_extra_segs; ++ /* number of iso_frame_desc segments (ISO) */ ++ struct pending_req_segment *seg; ++ ++ struct list_head free_list; ++}; ++ ++#define USBBACK_INVALID_HANDLE (~0) ++ ++struct xen_usbbk { ++ struct pending_req *pending_reqs; ++ struct list_head pending_free; ++ spinlock_t pending_free_lock; ++ wait_queue_head_t pending_free_wq; ++ struct list_head urb_free; ++ spinlock_t urb_free_lock; ++ struct page **pending_pages; ++ grant_handle_t *pending_grant_handles; ++}; ++ ++static struct xen_usbbk *usbbk; ++ ++static inline int vaddr_pagenr(struct pending_req *req, int seg) ++{ ++ return (req - usbbk->pending_reqs) * ++ USBIF_MAX_SEGMENTS_PER_REQUEST + seg; ++} ++ ++#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)] ++ ++static inline unsigned long vaddr(struct pending_req *req, int seg) ++{ ++ unsigned long pfn = page_to_pfn(usbbk->pending_page(req, seg)); ++ return (unsigned long)pfn_to_kaddr(pfn); ++} ++ ++#define pending_handle(_req, _seg) \ ++ (usbbk->pending_grant_handles[vaddr_pagenr(_req, _seg)]) ++ ++static struct pending_req *alloc_req(void) ++{ ++ struct pending_req *req = NULL; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&usbbk->pending_free_lock, flags); ++ if (!list_empty(&usbbk->pending_free)) { ++ req = 
list_entry(usbbk->pending_free.next, struct pending_req, ++ free_list); ++ list_del(&req->free_list); ++ } ++ spin_unlock_irqrestore(&usbbk->pending_free_lock, flags); ++ return req; ++} ++ ++static void free_req(struct pending_req *req) ++{ ++ unsigned long flags; ++ int was_empty; ++ ++ spin_lock_irqsave(&usbbk->pending_free_lock, flags); ++ was_empty = list_empty(&usbbk->pending_free); ++ list_add(&req->free_list, &usbbk->pending_free); ++ spin_unlock_irqrestore(&usbbk->pending_free_lock, flags); ++ if (was_empty) ++ wake_up(&usbbk->pending_free_wq); ++} ++ ++static inline void add_req_to_submitting_list(struct xen_usbdev *dev, ++ struct pending_req *pending_req) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dev->submitting_lock, flags); ++ list_add_tail(&pending_req->urb_list, &dev->submitting_list); ++ spin_unlock_irqrestore(&dev->submitting_lock, flags); ++} ++ ++static inline void remove_req_from_submitting_list(struct xen_usbdev *dev, ++ struct pending_req *pending_req) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dev->submitting_lock, flags); ++ list_del_init(&pending_req->urb_list); ++ spin_unlock_irqrestore(&dev->submitting_lock, flags); ++} ++ ++void xen_usbif_unlink_urbs(struct xen_usbdev *dev) ++{ ++ struct pending_req *req, *tmp; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dev->submitting_lock, flags); ++ list_for_each_entry_safe(req, tmp, &dev->submitting_list, urb_list) { ++ usb_unlink_urb(req->urb); ++ } ++ spin_unlock_irqrestore(&dev->submitting_lock, flags); ++} ++ ++static void copy_buff_to_pages(void *buff, struct pending_req *pending_req, ++ int start, int nr_pages) ++{ ++ unsigned long copied = 0; ++ int i; ++ ++ for (i = start; i < start + nr_pages; i++) { ++ memcpy((void *) vaddr(pending_req, i) + ++ pending_req->seg[i].offset, ++ buff + copied, pending_req->seg[i].length); ++ copied += pending_req->seg[i].length; ++ } ++} ++ ++static void copy_pages_to_buff(void *buff, struct pending_req *pending_req, ++ int start, int 
nr_pages) ++{ ++ unsigned long copied = 0; ++ int i; ++ ++ for (i = start; i < start + nr_pages; i++) { ++ void *src = (void *) vaddr(pending_req, i) + ++ pending_req->seg[i].offset; ++ memcpy(buff + copied, src, pending_req->seg[i].length); ++ copied += pending_req->seg[i].length; ++ } ++} ++ ++static int usbbk_alloc_urb(struct usbif_urb_request *req, ++ struct pending_req *pending_req) ++{ ++ int ret; ++ ++ if (usb_pipeisoc(req->pipe)) ++ pending_req->urb = usb_alloc_urb(req->u.isoc.number_of_packets, ++ GFP_KERNEL); ++ else ++ pending_req->urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!pending_req->urb) { ++ pr_alert(DRV_PFX "can't alloc urb\n"); ++ ret = -ENOMEM; ++ goto fail; ++ } ++ ++ if (req->buffer_length) { ++ pending_req->buffer = ++ usb_alloc_coherent(pending_req->dev->udev, ++ req->buffer_length, GFP_KERNEL, ++ &pending_req->transfer_dma); ++ if (!pending_req->buffer) { ++ pr_alert(DRV_PFX "can't alloc urb buffer\n"); ++ ret = -ENOMEM; ++ goto fail_free_urb; ++ } ++ } ++ ++ if (usb_pipecontrol(req->pipe)) { ++ pending_req->setup = usb_alloc_coherent(pending_req->dev->udev, ++ sizeof(struct usb_ctrlrequest), ++ GFP_KERNEL, &pending_req->setup_dma); ++ if (!pending_req->setup) { ++ pr_alert(DRV_PFX "can't alloc usb_ctrlrequest\n"); ++ ret = -ENOMEM; ++ goto fail_free_buffer; ++ } ++ } ++ ++ return 0; ++ ++fail_free_buffer: ++ if (req->buffer_length) ++ usb_free_coherent(pending_req->dev->udev, req->buffer_length, ++ pending_req->buffer, pending_req->transfer_dma); ++fail_free_urb: ++ usb_free_urb(pending_req->urb); ++fail: ++ return ret; ++} ++ ++static void usbbk_release_urb(struct urb *urb) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&usbbk->urb_free_lock, flags); ++ list_add(&urb->urb_list, &usbbk->urb_free); ++ spin_unlock_irqrestore(&usbbk->urb_free_lock, flags); ++} ++ ++static void usbbk_free_urb(struct urb *urb) ++{ ++ if (usb_pipecontrol(urb->pipe)) ++ usb_free_coherent(urb->dev, sizeof(struct usb_ctrlrequest), ++ urb->setup_packet, 
urb->setup_dma); ++ if (urb->transfer_buffer_length) ++ usb_free_coherent(urb->dev, urb->transfer_buffer_length, ++ urb->transfer_buffer, urb->transfer_dma); ++ barrier(); ++ usb_free_urb(urb); ++} ++ ++static void usbbk_free_urbs(void) ++{ ++ unsigned long flags; ++ struct list_head tmp_list; ++ ++ if (list_empty(&usbbk->urb_free)) ++ return; ++ ++ INIT_LIST_HEAD(&tmp_list); ++ ++ spin_lock_irqsave(&usbbk->urb_free_lock, flags); ++ list_splice_init(&usbbk->urb_free, &tmp_list); ++ spin_unlock_irqrestore(&usbbk->urb_free_lock, flags); ++ ++ while (!list_empty(&tmp_list)) { ++ struct urb *next_urb = ++ list_first_entry(&tmp_list, struct urb, urb_list); ++ list_del(&next_urb->urb_list); ++ usbbk_free_urb(next_urb); ++ } ++} ++ ++static void usbif_notify_work(struct xen_usbif *usbif) ++{ ++ usbif->waiting_reqs = 1; ++ wake_up(&usbif->wq); ++} ++ ++irqreturn_t xen_usbif_be_int(int irq, void *dev_id) ++{ ++ usbif_notify_work(dev_id); ++ return IRQ_HANDLED; ++} ++ ++static void xen_usbbk_unmap(struct pending_req *req) ++{ ++ struct gnttab_unmap_grant_ref unmap[USBIF_MAX_SEGMENTS_PER_REQUEST]; ++ unsigned int i, nr_segs, invcount = 0; ++ grant_handle_t handle; ++ int ret; ++ ++ nr_segs = req->nr_buffer_segs + req->nr_extra_segs; ++ ++ if (nr_segs == 0) ++ return; ++ ++ for (i = 0; i < nr_segs; i++) { ++ handle = pending_handle(req, i); ++ if (handle == USBBACK_INVALID_HANDLE) ++ continue; ++ gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), ++ GNTMAP_host_map, handle); ++ pending_handle(req, i) = USBBACK_INVALID_HANDLE; ++ invcount++; ++ } ++ ++ ret = HYPERVISOR_grant_table_op( ++ GNTTABOP_unmap_grant_ref, unmap, invcount); ++ BUG_ON(ret); ++ /* ++ * Note, we use invcount, not nr_segs, so we can't index ++ * using vaddr(req, i). 
++ */ ++ for (i = 0; i < invcount; i++) { ++ ret = m2p_remove_override( ++ virt_to_page(unmap[i].host_addr), false); ++ if (ret) { ++ pr_alert(DRV_PFX "Failed to remove M2P override for " ++ "%lx\n", (unsigned long)unmap[i].host_addr); ++ continue; ++ } ++ } ++ ++ kfree(req->seg); ++} ++ ++static int xen_usbbk_map(struct xen_usbif *usbif, ++ struct usbif_urb_request *req, ++ struct pending_req *pending_req) ++{ ++ int i, ret; ++ unsigned int nr_segs; ++ uint32_t flags; ++ struct gnttab_map_grant_ref map[USBIF_MAX_SEGMENTS_PER_REQUEST]; ++ ++ nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs; ++ ++ if (nr_segs == 0) ++ return 0; ++ ++ if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) { ++ pr_alert(DRV_PFX "Bad number of segments in request\n"); ++ ret = -EINVAL; ++ goto fail; ++ } ++ ++ pending_req->seg = kmalloc(sizeof(struct pending_req_segment) * ++ nr_segs, GFP_KERNEL); ++ if (!pending_req->seg) { ++ ret = -ENOMEM; ++ goto fail; ++ } ++ ++ flags = GNTMAP_host_map; ++ if (usb_pipeout(req->pipe)) ++ flags |= GNTMAP_readonly; ++ for (i = 0; i < pending_req->nr_buffer_segs; i++) { ++ gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags, ++ req->seg[i].gref, usbif->domid); ++ } ++ ++ flags = GNTMAP_host_map; ++ for (i = pending_req->nr_buffer_segs; i < nr_segs; i++) { ++ gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags, ++ req->seg[i].gref, usbif->domid); ++ } ++ ++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segs); ++ BUG_ON(ret); ++ ++ for (i = 0; i < nr_segs; i++) { ++ if (unlikely(map[i].status != 0)) { ++ pr_alert(DRV_PFX "invalid buffer " ++ "-- could not remap it (error %d)\n", ++ map[i].status); ++ map[i].handle = USBBACK_INVALID_HANDLE; ++ ret |= 1; ++ } ++ ++ pending_handle(pending_req, i) = map[i].handle; ++ ++ if (ret) ++ continue; ++ ++ ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr), ++ usbbk->pending_page(pending_req, i), NULL); ++ if (ret) { ++ pr_alert(DRV_PFX "Failed to install M2P override for " ++ 
"%lx (ret: %d)\n", ++ (unsigned long)map[i].dev_bus_addr, ret); ++ /* We could switch over to GNTTABOP_copy */ ++ continue; ++ } ++ ++ pending_req->seg[i].offset = req->seg[i].offset; ++ pending_req->seg[i].length = req->seg[i].length; ++ ++ barrier(); ++ ++ if (pending_req->seg[i].offset >= PAGE_SIZE || ++ pending_req->seg[i].length > PAGE_SIZE || ++ pending_req->seg[i].offset + ++ pending_req->seg[i].length > PAGE_SIZE) ++ ret |= 1; ++ } ++ ++ if (ret) ++ goto fail_flush; ++ ++ return 0; ++ ++fail_flush: ++ xen_usbbk_unmap(pending_req); ++ ret = -ENOMEM; ++ ++fail: ++ return ret; ++} ++ ++static void usbbk_do_response(struct pending_req *pending_req, int32_t status, ++ int32_t actual_length, int32_t error_count, ++ uint16_t start_frame) ++{ ++ struct xen_usbif *usbif = pending_req->usbif; ++ struct usbif_urb_response *res; ++ unsigned long flags; ++ int notify; ++ ++ spin_lock_irqsave(&usbif->urb_ring_lock, flags); ++ res = RING_GET_RESPONSE(&usbif->urb_ring, usbif->urb_ring.rsp_prod_pvt); ++ res->id = pending_req->id; ++ res->status = status; ++ res->actual_length = actual_length; ++ res->error_count = error_count; ++ res->start_frame = start_frame; ++ usbif->urb_ring.rsp_prod_pvt++; ++ barrier(); ++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&usbif->urb_ring, notify); ++ spin_unlock_irqrestore(&usbif->urb_ring_lock, flags); ++ ++ if (notify) ++ notify_remote_via_irq(usbif->irq); ++} ++ ++static void usbbk_urb_complete(struct urb *urb) ++{ ++ struct pending_req *pending_req = (struct pending_req *)urb->context; ++ ++ if (usb_pipein(urb->pipe) && urb->status == 0 && urb->actual_length > 0) ++ copy_buff_to_pages(pending_req->buffer, pending_req, 0, ++ pending_req->nr_buffer_segs); ++ ++ if (usb_pipeisoc(urb->pipe)) ++ copy_buff_to_pages(&urb->iso_frame_desc[0], pending_req, ++ pending_req->nr_buffer_segs, ++ pending_req->nr_extra_segs); ++ ++ barrier(); ++ ++ xen_usbbk_unmap(pending_req); ++ ++ usbbk_do_response(pending_req, urb->status, urb->actual_length, ++ 
urb->error_count, urb->start_frame); ++ ++ remove_req_from_submitting_list(pending_req->dev, pending_req); ++ ++ barrier(); ++ usbbk_release_urb(urb); ++ usbif_put(pending_req->usbif); ++ free_req(pending_req); ++} ++ ++static void usbbk_init_urb(struct usbif_urb_request *req, ++ struct pending_req *pending_req) ++{ ++ unsigned int pipe; ++ struct usb_device *udev = pending_req->dev->udev; ++ struct urb *urb = pending_req->urb; ++ ++ switch (usb_pipetype(req->pipe)) { ++ case PIPE_ISOCHRONOUS: ++ if (usb_pipein(req->pipe)) ++ pipe = usb_rcvisocpipe(udev, ++ usb_pipeendpoint(req->pipe)); ++ else ++ pipe = usb_sndisocpipe(udev, ++ usb_pipeendpoint(req->pipe)); ++ ++ urb->dev = udev; ++ urb->pipe = pipe; ++ urb->transfer_flags = req->transfer_flags; ++ urb->transfer_flags |= URB_ISO_ASAP; ++ urb->transfer_buffer = pending_req->buffer; ++ urb->transfer_buffer_length = req->buffer_length; ++ urb->complete = usbbk_urb_complete; ++ urb->context = pending_req; ++ urb->interval = req->u.isoc.interval; ++ urb->start_frame = req->u.isoc.start_frame; ++ urb->number_of_packets = req->u.isoc.number_of_packets; ++ ++ break; ++ case PIPE_INTERRUPT: ++ if (usb_pipein(req->pipe)) ++ pipe = usb_rcvintpipe(udev, ++ usb_pipeendpoint(req->pipe)); ++ else ++ pipe = usb_sndintpipe(udev, ++ usb_pipeendpoint(req->pipe)); ++ ++ usb_fill_int_urb(urb, udev, pipe, ++ pending_req->buffer, req->buffer_length, ++ usbbk_urb_complete, ++ pending_req, req->u.intr.interval); ++ /* ++ * high speed interrupt endpoints use a logarithmic encoding of ++ * the endpoint interval, and usb_fill_int_urb() initializes a ++ * interrupt urb with the encoded interval value. ++ * ++ * req->u.intr.interval is the interval value that already ++ * encoded in the frontend part, and the above ++ * usb_fill_int_urb() initializes the urb->interval with double ++ * encoded value. ++ * ++ * so, simply overwrite the urb->interval with original value. 
++ */ ++ urb->interval = req->u.intr.interval; ++ urb->transfer_flags = req->transfer_flags; ++ ++ break; ++ case PIPE_CONTROL: ++ if (usb_pipein(req->pipe)) ++ pipe = usb_rcvctrlpipe(udev, 0); ++ else ++ pipe = usb_sndctrlpipe(udev, 0); ++ ++ usb_fill_control_urb(urb, udev, pipe, ++ (unsigned char *) pending_req->setup, ++ pending_req->buffer, req->buffer_length, ++ usbbk_urb_complete, pending_req); ++ memcpy(pending_req->setup, req->u.ctrl, 8); ++ urb->setup_dma = pending_req->setup_dma; ++ urb->transfer_flags = req->transfer_flags; ++ ++ break; ++ case PIPE_BULK: ++ if (usb_pipein(req->pipe)) ++ pipe = usb_rcvbulkpipe(udev, ++ usb_pipeendpoint(req->pipe)); ++ else ++ pipe = usb_sndbulkpipe(udev, ++ usb_pipeendpoint(req->pipe)); ++ ++ usb_fill_bulk_urb(urb, udev, pipe, pending_req->buffer, ++ req->buffer_length, usbbk_urb_complete, ++ pending_req); ++ urb->transfer_flags = req->transfer_flags; ++ ++ break; ++ default: ++ break; ++ } ++ ++ if (req->buffer_length) { ++ urb->transfer_dma = pending_req->transfer_dma; ++ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ++ } ++} ++ ++struct set_interface_request { ++ struct pending_req *pending_req; ++ int interface; ++ int alternate; ++ struct work_struct work; ++}; ++ ++static void usbbk_set_interface_work(struct work_struct *arg) ++{ ++ struct set_interface_request *req ++ = container_of(arg, struct set_interface_request, work); ++ struct pending_req *pending_req = req->pending_req; ++ struct usb_device *udev = req->pending_req->dev->udev; ++ ++ int ret; ++ ++ usb_lock_device(udev); ++ ret = usb_set_interface(udev, req->interface, req->alternate); ++ usb_unlock_device(udev); ++ usb_put_dev(udev); ++ ++ usbbk_do_response(pending_req, ret, 0, 0, 0); ++ usbif_put(pending_req->usbif); ++ free_req(pending_req); ++ kfree(req); ++} ++ ++static int usbbk_set_interface(struct pending_req *pending_req, int interface, ++ int alternate) ++{ ++ struct set_interface_request *req; ++ struct usb_device *udev = 
pending_req->dev->udev; ++ ++ req = kmalloc(sizeof(*req), GFP_KERNEL); ++ if (!req) ++ return -ENOMEM; ++ req->pending_req = pending_req; ++ req->interface = interface; ++ req->alternate = alternate; ++ INIT_WORK(&req->work, usbbk_set_interface_work); ++ usb_get_dev(udev); ++ schedule_work(&req->work); ++ return 0; ++} ++ ++struct clear_halt_request { ++ struct pending_req *pending_req; ++ int pipe; ++ struct work_struct work; ++}; ++ ++static void usbbk_clear_halt_work(struct work_struct *arg) ++{ ++ struct clear_halt_request *req = container_of(arg, ++ struct clear_halt_request, work); ++ struct pending_req *pending_req = req->pending_req; ++ struct usb_device *udev = req->pending_req->dev->udev; ++ int ret; ++ ++ usb_lock_device(udev); ++ ret = usb_clear_halt(req->pending_req->dev->udev, req->pipe); ++ usb_unlock_device(udev); ++ usb_put_dev(udev); ++ ++ usbbk_do_response(pending_req, ret, 0, 0, 0); ++ usbif_put(pending_req->usbif); ++ free_req(pending_req); ++ kfree(req); ++} ++ ++static int usbbk_clear_halt(struct pending_req *pending_req, int pipe) ++{ ++ struct clear_halt_request *req; ++ struct usb_device *udev = pending_req->dev->udev; ++ ++ req = kmalloc(sizeof(*req), GFP_KERNEL); ++ if (!req) ++ return -ENOMEM; ++ req->pending_req = pending_req; ++ req->pipe = pipe; ++ INIT_WORK(&req->work, usbbk_clear_halt_work); ++ ++ usb_get_dev(udev); ++ schedule_work(&req->work); ++ return 0; ++} ++ ++#if 0 ++struct port_reset_request { ++ struct pending_req *pending_req; ++ struct work_struct work; ++}; ++ ++static void usbbk_port_reset_work(struct work_struct *arg) ++{ ++ struct port_reset_request *req = container_of(arg, ++ struct port_reset_request, work); ++ struct pending_req *pending_req = req->pending_req; ++ struct usb_device *udev = pending_req->dev->udev; ++ int ret, ret_lock; ++ ++ ret = ret_lock = usb_lock_device_for_reset(udev, NULL); ++ if (ret_lock >= 0) { ++ ret = usb_reset_device(udev); ++ if (ret_lock) ++ usb_unlock_device(udev); ++ } ++ 
usb_put_dev(udev); ++ ++ usbbk_do_response(pending_req, ret, 0, 0, 0); ++ usbif_put(pending_req->usbif); ++ free_req(pending_req); ++ kfree(req); ++} ++ ++static int usbbk_port_reset(struct pending_req *pending_req) ++{ ++ struct port_reset_request *req; ++ struct usb_device *udev = pending_req->dev->udev; ++ ++ req = kmalloc(sizeof(*req), GFP_KERNEL); ++ if (!req) ++ return -ENOMEM; ++ ++ req->pending_req = pending_req; ++ INIT_WORK(&req->work, usbbk_port_reset_work); ++ ++ usb_get_dev(udev); ++ schedule_work(&req->work); ++ return 0; ++} ++#endif ++ ++static void usbbk_set_address(struct xen_usbif *usbif, struct xen_usbdev *dev, ++ int cur_addr, int new_addr) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&usbif->addr_lock, flags); ++ if (cur_addr) ++ usbif->addr_table[cur_addr] = NULL; ++ if (new_addr) ++ usbif->addr_table[new_addr] = dev; ++ dev->addr = new_addr; ++ spin_unlock_irqrestore(&usbif->addr_lock, flags); ++} ++ ++static void process_unlink_req(struct xen_usbif *usbif, ++ struct usbif_urb_request *req, ++ struct pending_req *pending_req) ++{ ++ struct pending_req *unlink_req = NULL; ++ int devnum; ++ int ret = 0; ++ unsigned long flags; ++ ++ devnum = usb_pipedevice(req->pipe); ++ if (unlikely(devnum == 0)) { ++ pending_req->dev = xen_usbif_find_attached_device(usbif, ++ usbif_pipeportnum(req->pipe)); ++ if (unlikely(!pending_req->dev)) { ++ ret = -ENODEV; ++ goto fail_response; ++ } ++ } else { ++ if (unlikely(!usbif->addr_table[devnum])) { ++ ret = -ENODEV; ++ goto fail_response; ++ } ++ pending_req->dev = usbif->addr_table[devnum]; ++ } ++ ++ spin_lock_irqsave(&pending_req->dev->submitting_lock, flags); ++ list_for_each_entry(unlink_req, &pending_req->dev->submitting_list, ++ urb_list) { ++ if (unlink_req->id == req->u.unlink.unlink_id) { ++ ret = usb_unlink_urb(unlink_req->urb); ++ break; ++ } ++ } ++ spin_unlock_irqrestore(&pending_req->dev->submitting_lock, flags); ++ ++fail_response: ++ usbbk_do_response(pending_req, ret, 0, 0, 0); ++ 
usbif_put(usbif); ++ free_req(pending_req); ++ return; ++} ++ ++static int check_and_submit_special_ctrlreq(struct xen_usbif *usbif, ++ struct usbif_urb_request *req, ++ struct pending_req *pending_req) ++{ ++ int devnum; ++ struct xen_usbdev *dev = NULL; ++ struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *) req->u.ctrl; ++ int ret; ++ int done = 0; ++ ++ devnum = usb_pipedevice(req->pipe); ++ ++ /* ++ * When the device is first connected or reseted, USB device has no ++ * address. In this initial state, following requests are send to ++ * device address (#0), ++ * ++ * 1. GET_DESCRIPTOR (with Descriptor Type is "DEVICE") is send, and ++ * OS knows what device is connected to. ++ * ++ * 2. SET_ADDRESS is send, and then, device has its address. ++ * ++ * In the next step, SET_CONFIGURATION is send to addressed device, and ++ * then, the device is finally ready to use. ++ */ ++ if (unlikely(devnum == 0)) { ++ dev = xen_usbif_find_attached_device(usbif, ++ usbif_pipeportnum(req->pipe)); ++ if (unlikely(!dev)) { ++ ret = -ENODEV; ++ goto fail_response; ++ } ++ ++ switch (ctrl->bRequest) { ++ case USB_REQ_GET_DESCRIPTOR: ++ /* ++ * GET_DESCRIPTOR request to device #0. ++ * through to normal urb transfer. ++ */ ++ pending_req->dev = dev; ++ return 0; ++ break; ++ case USB_REQ_SET_ADDRESS: ++ /* ++ * SET_ADDRESS request to device #0. ++ * add attached device to addr_table. ++ */ ++ { ++ __u16 addr = le16_to_cpu(ctrl->wValue); ++ usbbk_set_address(usbif, dev, 0, addr); ++ } ++ ret = 0; ++ goto fail_response; ++ break; ++ default: ++ ret = -EINVAL; ++ goto fail_response; ++ } ++ } else { ++ if (unlikely(!usbif->addr_table[devnum])) { ++ ret = -ENODEV; ++ goto fail_response; ++ } ++ pending_req->dev = usbif->addr_table[devnum]; ++ } ++ ++ /* ++ * Check special request ++ */ ++ switch (ctrl->bRequest) { ++ case USB_REQ_SET_ADDRESS: ++ /* ++ * SET_ADDRESS request to addressed device. ++ * change addr or remove from addr_table. 
++ */ ++ { ++ __u16 addr = le16_to_cpu(ctrl->wValue); ++ usbbk_set_address(usbif, dev, devnum, addr); ++ } ++ ret = 0; ++ goto fail_response; ++ break; ++#if 0 ++ case USB_REQ_SET_CONFIGURATION: ++ /* ++ * linux 2.6.27 or later version only! ++ */ ++ if (ctrl->RequestType == USB_RECIP_DEVICE) { ++ __u16 config = le16_to_cpu(ctrl->wValue); ++ usb_driver_set_configuration(pending_req->dev->udev, ++ config); ++ done = 1; ++ } ++ break; ++#endif ++ case USB_REQ_SET_INTERFACE: ++ if (ctrl->bRequestType == USB_RECIP_INTERFACE) { ++ __u16 alt = le16_to_cpu(ctrl->wValue); ++ __u16 intf = le16_to_cpu(ctrl->wIndex); ++ usbbk_set_interface(pending_req, intf, alt); ++ done = 1; ++ } ++ break; ++ case USB_REQ_CLEAR_FEATURE: ++ if (ctrl->bRequestType == USB_RECIP_ENDPOINT ++ && ctrl->wValue == USB_ENDPOINT_HALT) { ++ int pipe; ++ int ep = le16_to_cpu(ctrl->wIndex) & 0x0f; ++ int dir = le16_to_cpu(ctrl->wIndex) & USB_DIR_IN; ++ if (dir) ++ pipe = usb_rcvctrlpipe(pending_req->dev->udev, ++ ep); ++ else ++ pipe = usb_sndctrlpipe(pending_req->dev->udev, ++ ep); ++ usbbk_clear_halt(pending_req, pipe); ++ done = 1; ++ } ++ break; ++#if 0 /* not tested yet */ ++ case USB_REQ_SET_FEATURE: ++ if (ctrl->bRequestType == USB_RT_PORT) { ++ __u16 feat = le16_to_cpu(ctrl->wValue); ++ if (feat == USB_PORT_FEAT_RESET) { ++ usbbk_port_reset(pending_req); ++ done = 1; ++ } ++ } ++ break; ++#endif ++ default: ++ break; ++ } ++ ++ return done; ++ ++fail_response: ++ usbbk_do_response(pending_req, ret, 0, 0, 0); ++ usbif_put(usbif); ++ free_req(pending_req); ++ return 1; ++} ++ ++static void dispatch_request_to_pending_reqs(struct xen_usbif *usbif, ++ struct usbif_urb_request *req, ++ struct pending_req *pending_req) ++{ ++ int ret; ++ ++ pending_req->id = req->id; ++ pending_req->usbif = usbif; ++ ++ barrier(); ++ ++ usbif_get(usbif); ++ ++ /* unlink request */ ++ if (unlikely(usbif_pipeunlink(req->pipe))) { ++ process_unlink_req(usbif, req, pending_req); ++ return; ++ } ++ ++ if 
(usb_pipecontrol(req->pipe)) { ++ if (check_and_submit_special_ctrlreq(usbif, req, pending_req)) ++ return; ++ } else { ++ int devnum = usb_pipedevice(req->pipe); ++ if (unlikely(!usbif->addr_table[devnum])) { ++ ret = -ENODEV; ++ goto fail_response; ++ } ++ pending_req->dev = usbif->addr_table[devnum]; ++ } ++ ++ barrier(); ++ ++ ret = usbbk_alloc_urb(req, pending_req); ++ if (ret) { ++ ret = -ESHUTDOWN; ++ goto fail_response; ++ } ++ ++ add_req_to_submitting_list(pending_req->dev, pending_req); ++ ++ barrier(); ++ ++ usbbk_init_urb(req, pending_req); ++ ++ barrier(); ++ ++ pending_req->nr_buffer_segs = req->nr_buffer_segs; ++ if (usb_pipeisoc(req->pipe)) ++ pending_req->nr_extra_segs = req->u.isoc.nr_frame_desc_segs; ++ else ++ pending_req->nr_extra_segs = 0; ++ ++ barrier(); ++ ++ ret = xen_usbbk_map(usbif, req, pending_req); ++ if (ret) { ++ pr_alert(DRV_PFX "invalid buffer\n"); ++ ret = -ESHUTDOWN; ++ goto fail_free_urb; ++ } ++ ++ barrier(); ++ ++ if (usb_pipeout(req->pipe) && req->buffer_length) ++ copy_pages_to_buff(pending_req->buffer, pending_req, 0, ++ pending_req->nr_buffer_segs); ++ if (usb_pipeisoc(req->pipe)) { ++ copy_pages_to_buff(&pending_req->urb->iso_frame_desc[0], ++ pending_req, pending_req->nr_buffer_segs, ++ pending_req->nr_extra_segs); ++ } ++ ++ barrier(); ++ ++ ret = usb_submit_urb(pending_req->urb, GFP_KERNEL); ++ if (ret) { ++ pr_alert(DRV_PFX "failed submitting urb, error %d\n", ret); ++ ret = -ESHUTDOWN; ++ goto fail_flush_area; ++ } ++ return; ++ ++fail_flush_area: ++ xen_usbbk_unmap(pending_req); ++fail_free_urb: ++ remove_req_from_submitting_list(pending_req->dev, pending_req); ++ barrier(); ++ usbbk_release_urb(pending_req->urb); ++fail_response: ++ usbbk_do_response(pending_req, ret, 0, 0, 0); ++ usbif_put(usbif); ++ free_req(pending_req); ++} ++ ++static int usbbk_start_submit_urb(struct xen_usbif *usbif) ++{ ++ struct usbif_urb_back_ring *urb_ring = &usbif->urb_ring; ++ struct usbif_urb_request *req; ++ struct pending_req 
*pending_req; ++ RING_IDX rc, rp; ++ int more_to_do = 0; ++ ++ rc = urb_ring->req_cons; ++ rp = urb_ring->sring->req_prod; ++ rmb(); ++ ++ while (rc != rp) { ++ if (RING_REQUEST_CONS_OVERFLOW(urb_ring, rc)) { ++ pr_warn(DRV_PFX "RING_REQUEST_CONS_OVERFLOW\n"); ++ break; ++ } ++ ++ pending_req = alloc_req(); ++ if (NULL == pending_req) { ++ more_to_do = 1; ++ break; ++ } ++ ++ req = RING_GET_REQUEST(urb_ring, rc); ++ urb_ring->req_cons = ++rc; ++ ++ dispatch_request_to_pending_reqs(usbif, req, pending_req); ++ } ++ ++ RING_FINAL_CHECK_FOR_REQUESTS(&usbif->urb_ring, more_to_do); ++ ++ cond_resched(); ++ ++ return more_to_do; ++} ++ ++void xen_usbif_hotplug_notify(struct xen_usbif *usbif, int portnum, int speed) ++{ ++ struct usbif_conn_back_ring *ring = &usbif->conn_ring; ++ struct usbif_conn_request *req; ++ struct usbif_conn_response *res; ++ unsigned long flags; ++ u16 id; ++ int notify; ++ ++ spin_lock_irqsave(&usbif->conn_ring_lock, flags); ++ ++ req = RING_GET_REQUEST(ring, ring->req_cons); ++ id = req->id; ++ ring->req_cons++; ++ ring->sring->req_event = ring->req_cons + 1; ++ ++ res = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); ++ res->id = id; ++ res->portnum = portnum; ++ res->speed = speed; ++ ring->rsp_prod_pvt++; ++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify); ++ ++ spin_unlock_irqrestore(&usbif->conn_ring_lock, flags); ++ ++ if (notify) ++ notify_remote_via_irq(usbif->irq); ++} ++ ++int xen_usbif_schedule(void *arg) ++{ ++ struct xen_usbif *usbif = (struct xen_usbif *) arg; ++ ++ usbif_get(usbif); ++ ++ while (!kthread_should_stop()) { ++ wait_event_interruptible(usbif->wq, ++ usbif->waiting_reqs || kthread_should_stop()); ++ wait_event_interruptible(usbbk->pending_free_wq, ++ !list_empty(&usbbk->pending_free) || kthread_should_stop()); ++ usbif->waiting_reqs = 0; ++ smp_mb(); ++ ++ if (usbbk_start_submit_urb(usbif)) ++ usbif->waiting_reqs = 1; ++ ++ usbbk_free_urbs(); ++ } ++ ++ usbbk_free_urbs(); ++ usbif->xenusbd = NULL; ++ usbif_put(usbif); 
++ ++ return 0; ++} ++ ++/* ++ * attach xen_usbdev device to usbif. ++ */ ++void xen_usbif_attach_device(struct xen_usbif *usbif, struct xen_usbdev *dev) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&usbif->dev_lock, flags); ++ list_add(&dev->dev_list, &usbif->dev_list); ++ spin_unlock_irqrestore(&usbif->dev_lock, flags); ++ dev->usbif = usbif; ++} ++ ++/* ++ * detach usbdev device from usbif. ++ */ ++void xen_usbif_detach_device(struct xen_usbif *usbif, struct xen_usbdev *dev) ++{ ++ unsigned long flags; ++ ++ if (dev->addr) ++ usbbk_set_address(usbif, dev, dev->addr, 0); ++ spin_lock_irqsave(&usbif->dev_lock, flags); ++ list_del(&dev->dev_list); ++ spin_unlock_irqrestore(&usbif->dev_lock, flags); ++ dev->usbif = NULL; ++} ++ ++void xen_usbif_detach_device_without_lock(struct xen_usbif *usbif, ++ struct xen_usbdev *dev) ++{ ++ if (dev->addr) ++ usbbk_set_address(usbif, dev, dev->addr, 0); ++ list_del(&dev->dev_list); ++ dev->usbif = NULL; ++} ++ ++static int __init xen_usbif_init(void) ++{ ++ int i, mmap_pages; ++ int rc = 0; ++ ++ if (!xen_pv_domain()) ++ return -ENODEV; ++ ++ usbbk = kzalloc(sizeof(struct xen_usbbk), GFP_KERNEL); ++ if (!usbbk) { ++ pr_alert(DRV_PFX "%s: out of memory!\n", __func__); ++ return -ENOMEM; ++ } ++ ++ mmap_pages = xen_usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST; ++ usbbk->pending_reqs = ++ kzalloc(sizeof(usbbk->pending_reqs[0]) * xen_usbif_reqs, ++ GFP_KERNEL); ++ usbbk->pending_grant_handles = ++ kmalloc(sizeof(usbbk->pending_grant_handles[0]) * mmap_pages, ++ GFP_KERNEL); ++ usbbk->pending_pages = ++ kzalloc(sizeof(usbbk->pending_pages[0]) * mmap_pages, ++ GFP_KERNEL); ++ ++ if (!usbbk->pending_reqs || !usbbk->pending_grant_handles || ++ !usbbk->pending_pages) { ++ rc = -ENOMEM; ++ pr_alert(DRV_PFX "%s: out of memory\n", __func__); ++ goto failed_init; ++ } ++ ++ for (i = 0; i < mmap_pages; i++) { ++ usbbk->pending_grant_handles[i] = USBBACK_INVALID_HANDLE; ++ usbbk->pending_pages[i] = alloc_page(GFP_KERNEL); ++ if 
(usbbk->pending_pages[i] == NULL) { ++ rc = -ENOMEM; ++ pr_alert(DRV_PFX "%s: out of memory\n", __func__); ++ goto failed_init; ++ } ++ } ++ ++ INIT_LIST_HEAD(&usbbk->pending_free); ++ spin_lock_init(&usbbk->pending_free_lock); ++ init_waitqueue_head(&usbbk->pending_free_wq); ++ ++ INIT_LIST_HEAD(&usbbk->urb_free); ++ spin_lock_init(&usbbk->urb_free_lock); ++ ++ for (i = 0; i < xen_usbif_reqs; i++) ++ list_add_tail(&usbbk->pending_reqs[i].free_list, ++ &usbbk->pending_free); ++ ++ rc = xen_usbdev_init(); ++ if (rc) ++ goto failed_init; ++ ++ rc = xen_usbif_xenbus_init(); ++ if (rc) ++ goto usb_exit; ++ ++ return 0; ++ ++ usb_exit: ++ xen_usbdev_exit(); ++ failed_init: ++ kfree(usbbk->pending_reqs); ++ kfree(usbbk->pending_grant_handles); ++ if (usbbk->pending_pages) { ++ for (i = 0; i < mmap_pages; i++) { ++ if (usbbk->pending_pages[i]) ++ __free_page(usbbk->pending_pages[i]); ++ } ++ kfree(usbbk->pending_pages); ++ } ++ kfree(usbbk); ++ usbbk = NULL; ++ return rc; ++} ++ ++struct xen_usbdev *xen_usbif_find_attached_device(struct xen_usbif *usbif, ++ int portnum) ++{ ++ struct xen_usbdev *dev; ++ int found = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&usbif->dev_lock, flags); ++ list_for_each_entry(dev, &usbif->dev_list, dev_list) { ++ if (dev->port->portnum == portnum) { ++ found = 1; ++ break; ++ } ++ } ++ spin_unlock_irqrestore(&usbif->dev_lock, flags); ++ ++ if (found) ++ return dev; ++ ++ return NULL; ++} ++ ++static void __exit xen_usbif_exit(void) ++{ ++ int i; ++ int mmap_pages = xen_usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST; ++ ++ xen_usbif_xenbus_exit(); ++ xen_usbdev_exit(); ++ kfree(usbbk->pending_reqs); ++ kfree(usbbk->pending_grant_handles); ++ for (i = 0; i < mmap_pages; i++) { ++ if (usbbk->pending_pages[i]) ++ __free_page(usbbk->pending_pages[i]); ++ } ++ kfree(usbbk->pending_pages); ++ usbbk = NULL; ++} ++ ++module_init(xen_usbif_init); ++module_exit(xen_usbif_exit); ++ ++MODULE_AUTHOR(""); ++MODULE_DESCRIPTION("Xen USB backend driver 
(xen_usbback)"); ++MODULE_LICENSE("Dual BSD/GPL"); +diff --git a/drivers/usb/host/xen-usbback/usbdev.c b/drivers/usb/host/xen-usbback/usbdev.c +new file mode 100644 +index 0000000..53a14b4 +--- /dev/null ++++ b/drivers/usb/host/xen-usbback/usbdev.c +@@ -0,0 +1,319 @@ ++/* ++ * USB stub device driver - grabbing and managing USB devices. ++ * ++ * Copyright (C) 2009, FUJITSU LABORATORIES LTD. ++ * Author: Noboru Iwamatsu ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, see . ++ * ++ * or, by your choice, ++ * ++ * When distributed separately from the Linux kernel or incorporated into ++ * other software packages, subject to the following license: ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to ++ * deal in the Software without restriction, including without limitation the ++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "common.h" ++ ++static LIST_HEAD(port_list); ++static DEFINE_SPINLOCK(port_list_lock); ++ ++struct xen_usbport *xen_usbport_find_by_busid(const char *busid) ++{ ++ struct xen_usbport *port; ++ int found = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&port_list_lock, flags); ++ list_for_each_entry(port, &port_list, port_list) { ++ if (!(strncmp(port->phys_bus, busid, XEN_USB_BUS_ID_SIZE))) { ++ found = 1; ++ break; ++ } ++ } ++ spin_unlock_irqrestore(&port_list_lock, flags); ++ ++ if (found) ++ return port; ++ ++ return NULL; ++} ++ ++struct xen_usbport *xen_usbport_find(const domid_t domid, ++ const unsigned int handle, const int portnum) ++{ ++ struct xen_usbport *port; ++ int found = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&port_list_lock, flags); ++ list_for_each_entry(port, &port_list, port_list) { ++ if ((port->domid == domid) && ++ (port->handle == handle) && ++ (port->portnum == portnum)) { ++ found = 1; ++ break; ++ } ++ } ++ spin_unlock_irqrestore(&port_list_lock, flags); ++ ++ if (found) ++ return port; ++ ++ return NULL; ++} ++ ++int xen_usbport_add(const char *busid, const domid_t domid, ++ const unsigned int handle, const int portnum) ++{ ++ struct xen_usbport *port; ++ unsigned long flags; ++ ++ port = kzalloc(sizeof(*port), GFP_KERNEL); ++ if (!port) ++ return -ENOMEM; ++ ++ port->domid = domid; ++ port->handle = handle; ++ port->portnum = portnum; ++ ++ strncpy(port->phys_bus, busid, XEN_USB_BUS_ID_SIZE); ++ ++ 
spin_lock_irqsave(&port_list_lock, flags); ++ list_add(&port->port_list, &port_list); ++ spin_unlock_irqrestore(&port_list_lock, flags); ++ ++ return 0; ++} ++ ++int xen_usbport_remove(const domid_t domid, const unsigned int handle, ++ const int portnum) ++{ ++ struct xen_usbport *port, *tmp; ++ int err = -ENOENT; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&port_list_lock, flags); ++ list_for_each_entry_safe(port, tmp, &port_list, port_list) { ++ if (port->domid == domid && ++ port->handle == handle && ++ port->portnum == portnum) { ++ list_del(&port->port_list); ++ kfree(port); ++ ++ err = 0; ++ } ++ } ++ spin_unlock_irqrestore(&port_list_lock, flags); ++ ++ return err; ++} ++ ++static struct xen_usbdev *xen_usbdev_alloc(struct usb_device *udev, ++ struct xen_usbport *port) ++{ ++ struct xen_usbdev *dev; ++ ++ dev = kzalloc(sizeof(*dev), GFP_KERNEL); ++ if (!dev) { ++ pr_alert(DRV_PFX "no memory for alloc xen_usbdev\n"); ++ return NULL; ++ } ++ kref_init(&dev->kref); ++ dev->udev = usb_get_dev(udev); ++ dev->port = port; ++ spin_lock_init(&dev->submitting_lock); ++ INIT_LIST_HEAD(&dev->submitting_list); ++ ++ return dev; ++} ++ ++static void usbdev_release(struct kref *kref) ++{ ++ struct xen_usbdev *dev; ++ ++ dev = container_of(kref, struct xen_usbdev, kref); ++ ++ usb_put_dev(dev->udev); ++ dev->udev = NULL; ++ dev->port = NULL; ++ kfree(dev); ++} ++ ++static inline void usbdev_get(struct xen_usbdev *dev) ++{ ++ kref_get(&dev->kref); ++} ++ ++static inline void usbdev_put(struct xen_usbdev *dev) ++{ ++ kref_put(&dev->kref, usbdev_release); ++} ++ ++static int usbdev_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *udev = interface_to_usbdev(intf); ++ const char *busid = dev_name(intf->dev.parent); ++ struct xen_usbport *port = NULL; ++ struct xen_usbdev *dev = NULL; ++ struct xen_usbif *usbif = NULL; ++ int retval = -ENODEV; ++ ++ /* hub currently not supported, so skip. 
*/ ++ if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) ++ goto out; ++ ++ port = xen_usbport_find_by_busid(busid); ++ if (!port) ++ goto out; ++ ++ usbif = xen_usbif_find(port->domid, port->handle); ++ if (!usbif) ++ goto out; ++ ++ switch (udev->speed) { ++ case USB_SPEED_LOW: ++ case USB_SPEED_FULL: ++ break; ++ case USB_SPEED_HIGH: ++ if (usbif->usb_ver >= USB_VER_USB20) ++ break; ++ /* fall through */ ++ default: ++ goto out; ++ } ++ ++ dev = xen_usbif_find_attached_device(usbif, port->portnum); ++ if (!dev) { ++ /* new connection */ ++ dev = xen_usbdev_alloc(udev, port); ++ if (!dev) ++ return -ENOMEM; ++ xen_usbif_attach_device(usbif, dev); ++ xen_usbif_hotplug_notify(usbif, port->portnum, udev->speed); ++ } else { ++ /* maybe already called and connected by other intf */ ++ if (strncmp(dev->port->phys_bus, busid, XEN_USB_BUS_ID_SIZE)) ++ goto out; /* invalid call */ ++ } ++ ++ usbdev_get(dev); ++ usb_set_intfdata(intf, dev); ++ retval = 0; ++ ++out: ++ return retval; ++} ++ ++static void usbdev_disconnect(struct usb_interface *intf) ++{ ++ struct xen_usbdev *dev ++ = (struct xen_usbdev *) usb_get_intfdata(intf); ++ ++ usb_set_intfdata(intf, NULL); ++ ++ if (!dev) ++ return; ++ ++ if (dev->usbif) { ++ xen_usbif_hotplug_notify(dev->usbif, dev->port->portnum, 0); ++ xen_usbif_detach_device(dev->usbif, dev); ++ } ++ xen_usbif_unlink_urbs(dev); ++ usbdev_put(dev); ++} ++ ++static ssize_t usbdev_show_ports(struct device_driver *driver, char *buf) ++{ ++ struct xen_usbport *port; ++ size_t count = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&port_list_lock, flags); ++ list_for_each_entry(port, &port_list, port_list) { ++ if (count >= PAGE_SIZE) ++ break; ++ count += scnprintf((char *)buf + count, PAGE_SIZE - count, ++ "%s:%d:%d:%d\n", ++ &port->phys_bus[0], ++ port->domid, ++ port->handle, ++ port->portnum); ++ } ++ spin_unlock_irqrestore(&port_list_lock, flags); ++ ++ return count; ++} ++ ++DRIVER_ATTR(port_ids, S_IRUSR, usbdev_show_ports, NULL); ++ ++/* 
table of devices that matches any usbdevice */ ++static const struct usb_device_id usbdev_table[] = { ++ { .driver_info = 1 }, /* wildcard, see usb_match_id() */ ++ { } /* Terminating entry */ ++}; ++MODULE_DEVICE_TABLE(usb, usbdev_table); ++ ++static struct usb_driver xen_usbdev_driver = { ++ .name = "usbback", ++ .probe = usbdev_probe, ++ .disconnect = usbdev_disconnect, ++ .id_table = usbdev_table, ++ .no_dynamic_id = 1, ++}; ++ ++int __init xen_usbdev_init(void) ++{ ++ int err; ++ ++ err = usb_register(&xen_usbdev_driver); ++ if (err < 0) { ++ pr_alert(DRV_PFX "usb_register failed (error %d)\n", ++ err); ++ goto out; ++ } ++ ++ err = driver_create_file(&xen_usbdev_driver.drvwrap.driver, ++ &driver_attr_port_ids); ++ if (err) ++ usb_deregister(&xen_usbdev_driver); ++ ++out: ++ return err; ++} ++ ++void xen_usbdev_exit(void) ++{ ++ driver_remove_file(&xen_usbdev_driver.drvwrap.driver, ++ &driver_attr_port_ids); ++ usb_deregister(&xen_usbdev_driver); ++} +diff --git a/drivers/usb/host/xen-usbback/xenbus.c b/drivers/usb/host/xen-usbback/xenbus.c +new file mode 100644 +index 0000000..5eae4ec +--- /dev/null ++++ b/drivers/usb/host/xen-usbback/xenbus.c +@@ -0,0 +1,482 @@ ++/* ++ * Xenbus interface for USB backend driver. ++ * ++ * Copyright (C) 2009, FUJITSU LABORATORIES LTD. ++ * Author: Noboru Iwamatsu ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, see . 
++ * ++ * or, by your choice, ++ * ++ * When distributed separately from the Linux kernel or incorporated into ++ * other software packages, subject to the following license: ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to ++ * deal in the Software without restriction, including without limitation the ++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include ++#include "common.h" ++ ++static LIST_HEAD(usbif_list); ++static DEFINE_SPINLOCK(usbif_list_lock); ++ ++struct xen_usbif *xen_usbif_find(domid_t domid, unsigned int handle) ++{ ++ struct xen_usbif *usbif; ++ int found = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&usbif_list_lock, flags); ++ list_for_each_entry(usbif, &usbif_list, usbif_list) { ++ if (usbif->domid == domid && usbif->handle == handle) { ++ found = 1; ++ break; ++ } ++ } ++ spin_unlock_irqrestore(&usbif_list_lock, flags); ++ ++ if (found) ++ return usbif; ++ ++ return NULL; ++} ++ ++struct xen_usbif *xen_usbif_alloc(domid_t domid, unsigned int handle) ++{ ++ struct xen_usbif *usbif; ++ unsigned long flags; ++ int i; ++ ++ usbif = kzalloc(sizeof(struct xen_usbif), GFP_KERNEL); ++ if (!usbif) ++ return NULL; ++ ++ usbif->domid = domid; ++ usbif->handle = handle; ++ INIT_LIST_HEAD(&usbif->usbif_list); ++ spin_lock_init(&usbif->urb_ring_lock); ++ spin_lock_init(&usbif->conn_ring_lock); ++ atomic_set(&usbif->refcnt, 0); ++ init_waitqueue_head(&usbif->wq); ++ init_waitqueue_head(&usbif->waiting_to_free); ++ spin_lock_init(&usbif->dev_lock); ++ INIT_LIST_HEAD(&usbif->dev_list); ++ spin_lock_init(&usbif->addr_lock); ++ for (i = 0; i < XEN_USB_DEV_ADDR_SIZE; i++) ++ usbif->addr_table[i] = NULL; ++ ++ spin_lock_irqsave(&usbif_list_lock, flags); ++ list_add(&usbif->usbif_list, &usbif_list); ++ spin_unlock_irqrestore(&usbif_list_lock, flags); ++ ++ return usbif; ++} ++ ++static int xen_usbif_map(struct xen_usbif *usbif, unsigned long urb_ring_ref, ++ unsigned long conn_ring_ref, unsigned int evtchn) ++{ ++ int err = -ENOMEM; ++ ++ if (usbif->irq) ++ return 0; ++ ++ err = xenbus_map_ring_valloc(usbif->xbdev, urb_ring_ref, ++ &usbif->urb_sring); ++ if (err < 0) ++ return err; ++ ++ err = xenbus_map_ring_valloc(usbif->xbdev, conn_ring_ref, ++ &usbif->conn_sring); ++ if (err < 0) ++ goto fail_alloc; ++ ++ err = bind_interdomain_evtchn_to_irqhandler(usbif->domid, evtchn, ++ 
xen_usbif_be_int, 0, "usbif-backend", usbif); ++ if (err < 0) ++ goto fail_evtchn; ++ usbif->irq = err; ++ ++ BACK_RING_INIT(&usbif->urb_ring, ++ (struct usbif_urb_sring *)usbif->urb_sring, PAGE_SIZE); ++ BACK_RING_INIT(&usbif->conn_ring, ++ (struct usbif_conn_sring *)usbif->conn_sring, PAGE_SIZE); ++ ++ return 0; ++ ++fail_evtchn: ++ xenbus_unmap_ring_vfree(usbif->xbdev, usbif->conn_sring); ++fail_alloc: ++ xenbus_unmap_ring_vfree(usbif->xbdev, usbif->urb_sring); ++ ++ return err; ++} ++ ++static void xen_usbif_disconnect(struct xen_usbif *usbif) ++{ ++ struct xen_usbdev *dev, *tmp; ++ unsigned long flags; ++ ++ if (usbif->xenusbd) { ++ kthread_stop(usbif->xenusbd); ++ usbif->xenusbd = NULL; ++ } ++ ++ spin_lock_irqsave(&usbif->dev_lock, flags); ++ list_for_each_entry_safe(dev, tmp, &usbif->dev_list, dev_list) { ++ xen_usbif_unlink_urbs(dev); ++ xen_usbif_detach_device_without_lock(usbif, dev); ++ } ++ spin_unlock_irqrestore(&usbif->dev_lock, flags); ++ ++ wait_event(usbif->waiting_to_free, atomic_read(&usbif->refcnt) == 0); ++ ++ if (usbif->irq) { ++ unbind_from_irqhandler(usbif->irq, usbif); ++ usbif->irq = 0; ++ } ++ ++ if (usbif->urb_ring.sring) { ++ xenbus_unmap_ring_vfree(usbif->xbdev, usbif->urb_sring); ++ usbif->urb_ring.sring = NULL; ++ xenbus_unmap_ring_vfree(usbif->xbdev, usbif->conn_sring); ++ usbif->conn_ring.sring = NULL; ++ } ++} ++ ++static void xen_usbif_free(struct xen_usbif *usbif) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&usbif_list_lock, flags); ++ list_del(&usbif->usbif_list); ++ spin_unlock_irqrestore(&usbif_list_lock, flags); ++ kfree(usbif); ++} ++ ++static void usbbk_changed(struct xenbus_watch *watch, const char **vec, ++ unsigned int len) ++{ ++ struct xenbus_transaction xbt; ++ int err; ++ int i; ++ char node[8]; ++ char *busid; ++ struct xen_usbport *port = NULL; ++ ++ struct xen_usbif *usbif = container_of(watch, struct xen_usbif, ++ backend_watch); ++ struct xenbus_device *dev = usbif->xbdev; ++ ++again: ++ err = 
xenbus_transaction_start(&xbt); ++ if (err) { ++ xenbus_dev_fatal(dev, err, "starting transaction"); ++ return; ++ } ++ ++ for (i = 1; i <= usbif->num_ports; i++) { ++ sprintf(node, "port/%d", i); ++ busid = xenbus_read(xbt, dev->nodename, node, NULL); ++ if (IS_ERR(busid)) { ++ err = PTR_ERR(busid); ++ xenbus_dev_fatal(dev, err, "reading port/%d", i); ++ goto abort; ++ } ++ ++ /* ++ * remove port, if the port is not connected, ++ */ ++ if (strlen(busid) == 0) { ++ port = xen_usbport_find(usbif->domid, usbif->handle, i); ++ if (port) { ++ if (port->is_connected) ++ xenbus_dev_fatal(dev, err, ++ "can't remove port/%d, " ++ "unbind first", i); ++ else ++ xen_usbport_remove(usbif->domid, ++ usbif->handle, i); ++ } ++ continue; /* never configured, ignore */ ++ } ++ ++ /* ++ * add port, ++ * if the port is not configured and not used from other usbif. ++ */ ++ port = xen_usbport_find(usbif->domid, usbif->handle, i); ++ if (port) { ++ if ((strncmp(port->phys_bus, busid, ++ XEN_USB_BUS_ID_SIZE))) ++ xenbus_dev_fatal(dev, err, "can't add port/%d, " ++ "remove first", i); ++ else ++ continue; /* already configured, ignore */ ++ } else { ++ if (xen_usbport_find_by_busid(busid)) ++ xenbus_dev_fatal(dev, err, "can't add port/%d, " ++ "busid already used", i); ++ else ++ xen_usbport_add(busid, usbif->domid, ++ usbif->handle, i); ++ } ++ } ++ ++ err = xenbus_transaction_end(xbt, 0); ++ if (err == -EAGAIN) ++ goto again; ++ if (err) ++ xenbus_dev_fatal(dev, err, "completing transaction"); ++ ++ return; ++ ++abort: ++ xenbus_transaction_end(xbt, 1); ++ ++ return; ++} ++ ++static int usbbk_remove(struct xenbus_device *dev) ++{ ++ struct xen_usbif *usbif = dev_get_drvdata(&dev->dev); ++ int i; ++ ++ if (usbif->backend_watch.node) { ++ unregister_xenbus_watch(&usbif->backend_watch); ++ kfree(usbif->backend_watch.node); ++ usbif->backend_watch.node = NULL; ++ } ++ ++ if (usbif) { ++ /* remove all ports */ ++ for (i = 1; i <= usbif->num_ports; i++) ++ xen_usbport_remove(usbif->domid, 
usbif->handle, i); ++ xen_usbif_disconnect(usbif); ++ xen_usbif_free(usbif); ++ } ++ dev_set_drvdata(&dev->dev, NULL); ++ ++ return 0; ++} ++ ++static int usbbk_probe(struct xenbus_device *dev, ++ const struct xenbus_device_id *id) ++{ ++ struct xen_usbif *usbif; ++ unsigned long handle; ++ int num_ports; ++ int usb_ver; ++ int err; ++ ++ if (usb_disabled()) ++ return -ENODEV; ++ ++ if (kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle)) ++ return -ENOENT; ++ ++ usbif = xen_usbif_alloc(dev->otherend_id, handle); ++ if (!usbif) { ++ xenbus_dev_fatal(dev, -ENOMEM, "allocating backend interface"); ++ return -ENOMEM; ++ } ++ usbif->xbdev = dev; ++ dev_set_drvdata(&dev->dev, usbif); ++ ++ err = xenbus_scanf(XBT_NIL, dev->nodename, "num-ports", ++ "%d", &num_ports); ++ if (err != 1) { ++ xenbus_dev_fatal(dev, err, "reading num-ports"); ++ goto fail; ++ } ++ if (num_ports < 1 || num_ports > USB_MAXCHILDREN) { ++ xenbus_dev_fatal(dev, err, "invalid num-ports"); ++ goto fail; ++ } ++ usbif->num_ports = num_ports; ++ ++ err = xenbus_scanf(XBT_NIL, dev->nodename, "usb-ver", "%d", &usb_ver); ++ if (err != 1) { ++ xenbus_dev_fatal(dev, err, "reading usb-ver"); ++ goto fail; ++ } ++ switch (usb_ver) { ++ case USB_VER_USB11: ++ case USB_VER_USB20: ++ usbif->usb_ver = usb_ver; ++ break; ++ default: ++ xenbus_dev_fatal(dev, err, "invalid usb-ver"); ++ goto fail; ++ } ++ ++ err = xenbus_switch_state(dev, XenbusStateInitWait); ++ if (err) ++ goto fail; ++ ++ return 0; ++ ++fail: ++ usbbk_remove(dev); ++ return err; ++} ++ ++static int connect_rings(struct xen_usbif *usbif) ++{ ++ struct xenbus_device *dev = usbif->xbdev; ++ unsigned long urb_ring_ref; ++ unsigned long conn_ring_ref; ++ unsigned int evtchn; ++ int err; ++ ++ err = xenbus_gather(XBT_NIL, dev->otherend, ++ "urb-ring-ref", "%lu", &urb_ring_ref, ++ "conn-ring-ref", "%lu", &conn_ring_ref, ++ "event-channel", "%u", &evtchn, NULL); ++ if (err) { ++ xenbus_dev_fatal(dev, err, ++ "reading %s/ring-ref and event-channel", ++ 
dev->otherend); ++ return err; ++ } ++ ++ pr_info(DRV_PFX "urb-ring-ref %ld, conn-ring-ref %ld, " ++ "event-channel %d\n", urb_ring_ref, conn_ring_ref, evtchn); ++ ++ err = xen_usbif_map(usbif, urb_ring_ref, conn_ring_ref, evtchn); ++ if (err) { ++ xenbus_dev_fatal(dev, err, "mapping urb-ring-ref %lu " ++ "conn-ring-ref %lu port %u", ++ urb_ring_ref, conn_ring_ref, evtchn); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int start_xenusbd(struct xen_usbif *usbif) ++{ ++ int err = 0; ++ char name[TASK_COMM_LEN]; ++ ++ snprintf(name, TASK_COMM_LEN, "usbback.%d.%d", usbif->domid, ++ usbif->handle); ++ usbif->xenusbd = kthread_run(xen_usbif_schedule, usbif, name); ++ if (IS_ERR(usbif->xenusbd)) { ++ err = PTR_ERR(usbif->xenusbd); ++ usbif->xenusbd = NULL; ++ xenbus_dev_error(usbif->xbdev, err, "start xenusbd"); ++ } ++ ++ return err; ++} ++ ++static void frontend_changed(struct xenbus_device *dev, ++ enum xenbus_state frontend_state) ++{ ++ struct xen_usbif *usbif = dev_get_drvdata(&dev->dev); ++ int err; ++ ++ switch (frontend_state) { ++ case XenbusStateReconfiguring: ++ case XenbusStateReconfigured: ++ break; ++ ++ case XenbusStateInitialising: ++ if (dev->state == XenbusStateClosed) { ++ pr_info(DRV_PFX "%s: %s: prepare for reconnect\n", ++ __func__, dev->nodename); ++ xenbus_switch_state(dev, XenbusStateInitWait); ++ } ++ break; ++ ++ case XenbusStateInitialised: ++ case XenbusStateConnected: ++ if (dev->state == XenbusStateConnected) ++ break; ++ ++ xen_usbif_disconnect(usbif); ++ ++ err = connect_rings(usbif); ++ if (err) ++ break; ++ err = start_xenusbd(usbif); ++ if (err) ++ break; ++ err = xenbus_watch_pathfmt(dev, &usbif->backend_watch, ++ usbbk_changed, "%s/%s", dev->nodename, "port"); ++ if (err) ++ break; ++ xenbus_switch_state(dev, XenbusStateConnected); ++ break; ++ ++ case XenbusStateClosing: ++ xenbus_switch_state(dev, XenbusStateClosing); ++ break; ++ ++ case XenbusStateClosed: ++ xen_usbif_disconnect(usbif); ++ xenbus_switch_state(dev, 
XenbusStateClosed); ++ if (xenbus_dev_is_online(dev)) ++ break; ++ /* fall through if not online */ ++ case XenbusStateUnknown: ++ device_unregister(&dev->dev); ++ break; ++ ++ default: ++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend", ++ frontend_state); ++ break; ++ } ++} ++ ++ ++/* ** Driver Registration ** */ ++ ++static const struct xenbus_device_id usbback_ids[] = { ++ { "vusb" }, ++ { "" }, ++}; ++ ++static DEFINE_XENBUS_DRIVER(usbback, , ++ .probe = usbbk_probe, ++ .remove = usbbk_remove, ++ .otherend_changed = frontend_changed, ++); ++ ++int __init xen_usbif_xenbus_init(void) ++{ ++ return xenbus_register_backend(&usbback_driver); ++} ++ ++void __exit xen_usbif_xenbus_exit(void) ++{ ++ xenbus_unregister_driver(&usbback_driver); ++} +diff --git a/drivers/usb/host/xen-usbfront.c b/drivers/usb/host/xen-usbfront.c +new file mode 100644 +index 0000000..e632de3 +--- /dev/null ++++ b/drivers/usb/host/xen-usbfront.c +@@ -0,0 +1,1739 @@ ++/* ++ * xen-usbfront.c ++ * ++ * This file is part of Xen USB Virtual Host Controller driver. ++ * ++ * Copyright (C) 2009, FUJITSU LABORATORIES LTD. ++ * Author: Noboru Iwamatsu ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, see . 
++ * ++ * or, by your choice, ++ * ++ * When distributed separately from the Linux kernel or incorporated into ++ * other software packages, subject to the following license: ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to ++ * deal in the Software without restriction, including without limitation the ++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static inline struct usbfront_info *hcd_to_info(struct usb_hcd *hcd) ++{ ++ return (struct usbfront_info *) (hcd->hcd_priv); ++} ++ ++static inline struct usb_hcd *info_to_hcd(struct usbfront_info *info) ++{ ++ return container_of((void *) info, struct usb_hcd, hcd_priv); ++} ++ ++/* Private per-URB data */ ++struct urb_priv { ++ struct list_head list; ++ struct urb *urb; ++ int req_id; /* RING_REQUEST id for submitting */ ++ int unlink_req_id; /* RING_REQUEST id for unlinking */ ++ int status; ++ unsigned unlinked:1; /* dequeued marker */ ++}; ++ ++/* virtual roothub port status */ ++struct rhport_status { ++ u32 status; ++ unsigned resuming:1; /* in resuming */ ++ unsigned c_connection:1; /* connection changed */ ++ unsigned long timeout; ++}; ++ ++/* status of attached device */ ++struct vdevice_status { ++ int devnum; ++ enum usb_device_state status; ++ enum usb_device_speed speed; ++}; ++ ++/* RING request shadow */ ++struct usb_shadow { ++ struct usbif_urb_request req; ++ struct urb *urb; ++}; ++ ++/* statistics for tuning, monitoring, ... 
*/ ++struct xenhcd_stats { ++ unsigned long ring_full; /* RING_FULL conditions */ ++ unsigned long complete; /* normal giveback urbs */ ++ unsigned long unlink; /* unlinked urbs */ ++}; ++ ++struct usbfront_info { ++ /* Virtual Host Controller has 4 urb queues */ ++ struct list_head pending_submit_list; ++ struct list_head pending_unlink_list; ++ struct list_head in_progress_list; ++ struct list_head giveback_waiting_list; ++ ++ spinlock_t lock; ++ ++ /* timer that kick pending and giveback waiting urbs */ ++ struct timer_list watchdog; ++ unsigned long actions; ++ ++ /* virtual root hub */ ++ int rh_numports; ++ struct rhport_status ports[USB_MAXCHILDREN]; ++ struct vdevice_status devices[USB_MAXCHILDREN]; ++ ++ /* Xen related staff */ ++ struct xenbus_device *xbdev; ++ int urb_ring_ref; ++ int conn_ring_ref; ++ struct usbif_urb_front_ring urb_ring; ++ struct usbif_conn_front_ring conn_ring; ++ ++ unsigned int evtchn, irq; /* event channel */ ++ struct usb_shadow shadow[USB_URB_RING_SIZE]; ++ unsigned long shadow_free; ++ ++ /* RING_RESPONSE thread */ ++ struct task_struct *kthread; ++ wait_queue_head_t wq; ++ unsigned int waiting_resp; ++ ++ /* xmit statistics */ ++#ifdef XENHCD_STATS ++ struct xenhcd_stats stats; ++#define COUNT(x) do { (x)++; } while (0) ++#else ++#define COUNT(x) do {} while (0) ++#endif ++}; ++ ++#define XENHCD_RING_JIFFIES (HZ/200) ++#define XENHCD_SCAN_JIFFIES 1 ++ ++enum xenhcd_timer_action { ++ TIMER_RING_WATCHDOG, ++ TIMER_SCAN_PENDING_URBS, ++}; ++ ++static inline void ++timer_action_done(struct usbfront_info *info, enum xenhcd_timer_action action) ++{ ++ clear_bit(action, &info->actions); ++} ++ ++static inline void ++timer_action(struct usbfront_info *info, enum xenhcd_timer_action action) ++{ ++ if (timer_pending(&info->watchdog) && ++ test_bit(TIMER_SCAN_PENDING_URBS, &info->actions)) ++ return; ++ ++ if (!test_and_set_bit(action, &info->actions)) { ++ unsigned long t; ++ ++ switch (action) { ++ case TIMER_RING_WATCHDOG: ++ t = 
XENHCD_RING_JIFFIES; ++ break; ++ default: ++ t = XENHCD_SCAN_JIFFIES; ++ break; ++ } ++ mod_timer(&info->watchdog, t + jiffies); ++ } ++} ++ ++struct kmem_cache *xenhcd_urbp_cachep; ++struct hc_driver xen_usb20_hc_driver; ++struct hc_driver xen_usb11_hc_driver; ++ ++static ssize_t show_statistics(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct usb_hcd *hcd; ++ struct usbfront_info *info; ++ unsigned long flags; ++ unsigned temp, size; ++ char *next; ++ ++ hcd = dev_get_drvdata(dev); ++ info = hcd_to_info(hcd); ++ next = buf; ++ size = PAGE_SIZE; ++ ++ spin_lock_irqsave(&info->lock, flags); ++ ++ temp = scnprintf(next, size, ++ "bus %s, device %s\n" ++ "%s\n" ++ "xenhcd, hcd state %d\n", ++ hcd->self.controller->bus->name, ++ dev_name(hcd->self.controller), ++ hcd->product_desc, ++ hcd->state); ++ size -= temp; ++ next += temp; ++ ++#ifdef XENHCD_STATS ++ temp = scnprintf(next, size, ++ "complete %ld unlink %ld ring_full %ld\n", ++ info->stats.complete, info->stats.unlink, ++ info->stats.ring_full); ++ size -= temp; ++ next += temp; ++#endif ++ ++ spin_unlock_irqrestore(&info->lock, flags); ++ ++ return PAGE_SIZE - size; ++} ++ ++static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); ++ ++static inline void create_debug_file(struct usbfront_info *info) ++{ ++ struct device *dev = info_to_hcd(info)->self.controller; ++ if (device_create_file(dev, &dev_attr_statistics)) ++ printk(KERN_WARNING "statistics file not created for %s\n", ++ info_to_hcd(info)->self.bus_name); ++} ++ ++static inline void remove_debug_file(struct usbfront_info *info) ++{ ++ struct device *dev = info_to_hcd(info)->self.controller; ++ device_remove_file(dev, &dev_attr_statistics); ++} ++ ++/* ++ * set virtual port connection status ++ */ ++void set_connect_state(struct usbfront_info *info, int portnum) ++{ ++ int port; ++ ++ port = portnum - 1; ++ if (info->ports[port].status & USB_PORT_STAT_POWER) { ++ switch (info->devices[port].speed) { ++ case 
USB_SPEED_UNKNOWN: ++ info->ports[port].status &= ++ ~(USB_PORT_STAT_CONNECTION | ++ USB_PORT_STAT_ENABLE | ++ USB_PORT_STAT_LOW_SPEED | ++ USB_PORT_STAT_HIGH_SPEED | ++ USB_PORT_STAT_SUSPEND); ++ break; ++ case USB_SPEED_LOW: ++ info->ports[port].status |= USB_PORT_STAT_CONNECTION; ++ info->ports[port].status |= USB_PORT_STAT_LOW_SPEED; ++ break; ++ case USB_SPEED_FULL: ++ info->ports[port].status |= USB_PORT_STAT_CONNECTION; ++ break; ++ case USB_SPEED_HIGH: ++ info->ports[port].status |= USB_PORT_STAT_CONNECTION; ++ info->ports[port].status |= USB_PORT_STAT_HIGH_SPEED; ++ break; ++ default: /* error */ ++ return; ++ } ++ info->ports[port].status |= (USB_PORT_STAT_C_CONNECTION << 16); ++ } ++} ++ ++/* ++ * set virtual device connection status ++ */ ++void rhport_connect(struct usbfront_info *info, int portnum, ++ enum usb_device_speed speed) ++{ ++ int port; ++ ++ if (portnum < 1 || portnum > info->rh_numports) ++ return; /* invalid port number */ ++ ++ port = portnum - 1; ++ if (info->devices[port].speed != speed) { ++ switch (speed) { ++ case USB_SPEED_UNKNOWN: /* disconnect */ ++ info->devices[port].status = USB_STATE_NOTATTACHED; ++ break; ++ case USB_SPEED_LOW: ++ case USB_SPEED_FULL: ++ case USB_SPEED_HIGH: ++ info->devices[port].status = USB_STATE_ATTACHED; ++ break; ++ default: /* error */ ++ return; ++ } ++ info->devices[port].speed = speed; ++ info->ports[port].c_connection = 1; ++ ++ set_connect_state(info, portnum); ++ } ++} ++ ++/* ++ * SetPortFeature(PORT_SUSPENDED) ++ */ ++void rhport_suspend(struct usbfront_info *info, int portnum) ++{ ++ int port; ++ ++ port = portnum - 1; ++ info->ports[port].status |= USB_PORT_STAT_SUSPEND; ++ info->devices[port].status = USB_STATE_SUSPENDED; ++} ++ ++/* ++ * ClearPortFeature(PORT_SUSPENDED) ++ */ ++void rhport_resume(struct usbfront_info *info, int portnum) ++{ ++ int port; ++ ++ port = portnum - 1; ++ if (info->ports[port].status & USB_PORT_STAT_SUSPEND) { ++ info->ports[port].resuming = 1; ++ 
info->ports[port].timeout = jiffies + msecs_to_jiffies(20); ++ } ++} ++ ++/* ++ * SetPortFeature(PORT_POWER) ++ */ ++void rhport_power_on(struct usbfront_info *info, int portnum) ++{ ++ int port; ++ ++ port = portnum - 1; ++ if ((info->ports[port].status & USB_PORT_STAT_POWER) == 0) { ++ info->ports[port].status |= USB_PORT_STAT_POWER; ++ if (info->devices[port].status != USB_STATE_NOTATTACHED) ++ info->devices[port].status = USB_STATE_POWERED; ++ if (info->ports[port].c_connection) ++ set_connect_state(info, portnum); ++ } ++} ++ ++/* ++ * ClearPortFeature(PORT_POWER) ++ * SetConfiguration(non-zero) ++ * Power_Source_Off ++ * Over-current ++ */ ++void rhport_power_off(struct usbfront_info *info, int portnum) ++{ ++ int port; ++ ++ port = portnum - 1; ++ if (info->ports[port].status & USB_PORT_STAT_POWER) { ++ info->ports[port].status = 0; ++ if (info->devices[port].status != USB_STATE_NOTATTACHED) ++ info->devices[port].status = USB_STATE_ATTACHED; ++ } ++} ++ ++/* ++ * ClearPortFeature(PORT_ENABLE) ++ */ ++void rhport_disable(struct usbfront_info *info, int portnum) ++{ ++ int port; ++ ++ port = portnum - 1; ++ info->ports[port].status &= ~USB_PORT_STAT_ENABLE; ++ info->ports[port].status &= ~USB_PORT_STAT_SUSPEND; ++ info->ports[port].resuming = 0; ++ if (info->devices[port].status != USB_STATE_NOTATTACHED) ++ info->devices[port].status = USB_STATE_POWERED; ++} ++ ++/* ++ * SetPortFeature(PORT_RESET) ++ */ ++void rhport_reset(struct usbfront_info *info, int portnum) ++{ ++ int port; ++ ++ port = portnum - 1; ++ info->ports[port].status &= ~(USB_PORT_STAT_ENABLE ++ | USB_PORT_STAT_LOW_SPEED ++ | USB_PORT_STAT_HIGH_SPEED); ++ info->ports[port].status |= USB_PORT_STAT_RESET; ++ ++ if (info->devices[port].status != USB_STATE_NOTATTACHED) ++ info->devices[port].status = USB_STATE_ATTACHED; ++ ++ /* 10msec reset signaling */ ++ info->ports[port].timeout = jiffies + msecs_to_jiffies(10); ++} ++ ++#ifdef XENHCD_PM ++#ifdef CONFIG_PM ++static int 
xenhcd_bus_suspend(struct usb_hcd *hcd) ++{ ++ struct usbfront_info *info = hcd_to_info(hcd); ++ int ret = 0; ++ int i, ports; ++ ++ ports = info->rh_numports; ++ ++ spin_lock_irq(&info->lock); ++ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) ++ ret = -ESHUTDOWN; ++ else { ++ /* suspend any active ports*/ ++ for (i = 1; i <= ports; i++) ++ rhport_suspend(info, i); ++ } ++ spin_unlock_irq(&info->lock); ++ ++ del_timer_sync(&info->watchdog); ++ ++ return ret; ++} ++ ++static int xenhcd_bus_resume(struct usb_hcd *hcd) ++{ ++ struct usbfront_info *info = hcd_to_info(hcd); ++ int ret = 0; ++ int i, ports; ++ ++ ports = info->rh_numports; ++ ++ spin_lock_irq(&info->lock); ++ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) ++ ret = -ESHUTDOWN; ++ else { ++ /* resume any suspended ports*/ ++ for (i = 1; i <= ports; i++) ++ rhport_resume(info, i); ++ } ++ spin_unlock_irq(&info->lock); ++ ++ return ret; ++} ++#endif ++#endif ++ ++static void xenhcd_hub_descriptor(struct usbfront_info *info, ++ struct usb_hub_descriptor *desc) ++{ ++ u16 temp; ++ int ports = info->rh_numports; ++ ++ desc->bDescriptorType = 0x29; ++ desc->bPwrOn2PwrGood = 10; /* EHCI says 20ms max */ ++ desc->bHubContrCurrent = 0; ++ desc->bNbrPorts = ports; ++ ++ /* size of DeviceRemovable and PortPwrCtrlMask fields*/ ++ temp = 1 + (ports / 8); ++ desc->bDescLength = 7 + 2 * temp; ++ ++ /* bitmaps for DeviceRemovable and PortPwrCtrlMask */ ++ memset(&desc->u.hs.DeviceRemovable[0], 0, temp); ++ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp); ++ ++ /* per-port over current reporting and no power switching */ ++ temp = 0x000a; ++ desc->wHubCharacteristics = cpu_to_le16(temp); ++} ++ ++/* port status change mask for hub_status_data */ ++#define PORT_C_MASK \ ++ ((USB_PORT_STAT_C_CONNECTION \ ++ | USB_PORT_STAT_C_ENABLE \ ++ | USB_PORT_STAT_C_SUSPEND \ ++ | USB_PORT_STAT_C_OVERCURRENT \ ++ | USB_PORT_STAT_C_RESET) << 16) ++ ++/* ++ * See USB 2.0 Spec, 11.12.4 Hub and Port Status Change Bitmap. 
++ * If port status changed, writes the bitmap to buf and return ++ * that length(number of bytes). ++ * If Nothing changed, return 0. ++ */ ++static int xenhcd_hub_status_data(struct usb_hcd *hcd, char *buf) ++{ ++ struct usbfront_info *info = hcd_to_info(hcd); ++ ++ int ports; ++ int i; ++ int length; ++ ++ unsigned long flags; ++ int ret = 0; ++ ++ int changed = 0; ++ ++ if (!HC_IS_RUNNING(hcd->state)) ++ return 0; ++ ++ /* initialize the status to no-changes */ ++ ports = info->rh_numports; ++ length = 1 + (ports / 8); ++ for (i = 0; i < length; i++) { ++ buf[i] = 0; ++ ret++; ++ } ++ ++ spin_lock_irqsave(&info->lock, flags); ++ ++ for (i = 0; i < ports; i++) { ++ /* check status for each port */ ++ if (info->ports[i].status & PORT_C_MASK) { ++ if (i < 7) ++ buf[0] |= 1 << (i + 1); ++ else if (i < 15) ++ buf[1] |= 1 << (i - 7); ++ else if (i < 23) ++ buf[2] |= 1 << (i - 15); ++ else ++ buf[3] |= 1 << (i - 23); ++ changed = 1; ++ } ++ } ++ ++ if (!changed) ++ ret = 0; ++ ++ spin_unlock_irqrestore(&info->lock, flags); ++ ++ return ret; ++} ++ ++static int xenhcd_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, ++ u16 wIndex, char *buf, u16 wLength) ++{ ++ struct usbfront_info *info = hcd_to_info(hcd); ++ int ports = info->rh_numports; ++ unsigned long flags; ++ int ret = 0; ++ int i; ++ int changed = 0; ++ ++ spin_lock_irqsave(&info->lock, flags); ++ switch (typeReq) { ++ case ClearHubFeature: ++ /* ignore this request */ ++ break; ++ case ClearPortFeature: ++ if (!wIndex || wIndex > ports) ++ goto error; ++ ++ switch (wValue) { ++ case USB_PORT_FEAT_SUSPEND: ++ rhport_resume(info, wIndex); ++ break; ++ case USB_PORT_FEAT_POWER: ++ rhport_power_off(info, wIndex); ++ break; ++ case USB_PORT_FEAT_ENABLE: ++ rhport_disable(info, wIndex); ++ break; ++ case USB_PORT_FEAT_C_CONNECTION: ++ info->ports[wIndex-1].c_connection = 0; ++ /* falling through */ ++ default: ++ info->ports[wIndex-1].status &= ~(1 << wValue); ++ break; ++ } ++ break; ++ case 
GetHubDescriptor: ++ xenhcd_hub_descriptor(info, (struct usb_hub_descriptor *) buf); ++ break; ++ case GetHubStatus: ++ /* always local power supply good and no over-current exists. */ ++ *(__le32 *)buf = cpu_to_le32(0); ++ break; ++ case GetPortStatus: ++ if (!wIndex || wIndex > ports) ++ goto error; ++ ++ wIndex--; ++ ++ /* resume completion */ ++ if (info->ports[wIndex].resuming && ++ time_after_eq(jiffies, info->ports[wIndex].timeout)) { ++ info->ports[wIndex].status |= ++ (USB_PORT_STAT_C_SUSPEND << 16); ++ info->ports[wIndex].status &= ~USB_PORT_STAT_SUSPEND; ++ } ++ ++ /* reset completion */ ++ if ((info->ports[wIndex].status & USB_PORT_STAT_RESET) != 0 && ++ time_after_eq(jiffies, info->ports[wIndex].timeout)) { ++ info->ports[wIndex].status |= ++ (USB_PORT_STAT_C_RESET << 16); ++ info->ports[wIndex].status &= ~USB_PORT_STAT_RESET; ++ ++ if (info->devices[wIndex].status != ++ USB_STATE_NOTATTACHED) { ++ info->ports[wIndex].status |= ++ USB_PORT_STAT_ENABLE; ++ info->devices[wIndex].status = ++ USB_STATE_DEFAULT; ++ } ++ ++ switch (info->devices[wIndex].speed) { ++ case USB_SPEED_LOW: ++ info->ports[wIndex].status |= ++ USB_PORT_STAT_LOW_SPEED; ++ break; ++ case USB_SPEED_HIGH: ++ info->ports[wIndex].status |= ++ USB_PORT_STAT_HIGH_SPEED; ++ break; ++ default: ++ break; ++ } ++ } ++ ++ ((u16 *) buf)[0] = cpu_to_le16(info->ports[wIndex].status); ++ ((u16 *) buf)[1] = cpu_to_le16(info->ports[wIndex].status ++ >> 16); ++ break; ++ case SetHubFeature: ++ /* not supported */ ++ goto error; ++ case SetPortFeature: ++ if (!wIndex || wIndex > ports) ++ goto error; ++ ++ switch (wValue) { ++ case USB_PORT_FEAT_POWER: ++ rhport_power_on(info, wIndex); ++ break; ++ case USB_PORT_FEAT_RESET: ++ rhport_reset(info, wIndex); ++ break; ++ case USB_PORT_FEAT_SUSPEND: ++ rhport_suspend(info, wIndex); ++ break; ++ default: ++ if ((info->ports[wIndex-1].status & ++ USB_PORT_STAT_POWER) != 0) ++ info->ports[wIndex-1].status |= (1 << wValue); ++ } ++ break; ++ ++ default: 
++error: ++ ret = -EPIPE; ++ } ++ spin_unlock_irqrestore(&info->lock, flags); ++ ++ /* check status for each port */ ++ for (i = 0; i < ports; i++) { ++ if (info->ports[i].status & PORT_C_MASK) ++ changed = 1; ++ } ++ if (changed) ++ usb_hcd_poll_rh_status(hcd); ++ ++ return ret; ++} ++ ++struct kmem_cache *xenhcd_urbp_cachep; ++ ++static struct urb_priv *alloc_urb_priv(struct urb *urb) ++{ ++ struct urb_priv *urbp; ++ ++ urbp = kmem_cache_zalloc(xenhcd_urbp_cachep, GFP_ATOMIC); ++ if (!urbp) ++ return NULL; ++ ++ urbp->urb = urb; ++ urb->hcpriv = urbp; ++ urbp->req_id = ~0; ++ urbp->unlink_req_id = ~0; ++ INIT_LIST_HEAD(&urbp->list); ++ ++ return urbp; ++} ++ ++static void free_urb_priv(struct urb_priv *urbp) ++{ ++ urbp->urb->hcpriv = NULL; ++ kmem_cache_free(xenhcd_urbp_cachep, urbp); ++} ++ ++static inline int get_id_from_freelist(struct usbfront_info *info) ++{ ++ unsigned long free; ++ free = info->shadow_free; ++ BUG_ON(free >= USB_URB_RING_SIZE); ++ info->shadow_free = info->shadow[free].req.id; ++ info->shadow[free].req.id = (unsigned int)0x0fff; /* debug */ ++ return free; ++} ++ ++static inline void add_id_to_freelist(struct usbfront_info *info, ++ unsigned long id) ++{ ++ info->shadow[id].req.id = info->shadow_free; ++ info->shadow[id].urb = NULL; ++ info->shadow_free = id; ++} ++ ++static inline int count_pages(void *addr, int length) ++{ ++ unsigned long start = (unsigned long) addr >> PAGE_SHIFT; ++ unsigned long end = (unsigned long) ++ (addr + length + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ return end - start; ++} ++ ++static inline void xenhcd_gnttab_map(struct usbfront_info *info, void *addr, ++ int length, grant_ref_t *gref_head, ++ struct usbif_request_segment *seg, ++ int nr_pages, int flags) ++{ ++ grant_ref_t ref; ++ unsigned long mfn; ++ unsigned int offset; ++ unsigned int len; ++ unsigned int bytes; ++ int i; ++ ++ len = length; ++ ++ for (i = 0; i < nr_pages; i++) { ++ BUG_ON(!len); ++ ++ mfn = virt_to_mfn(addr); ++ offset = 
offset_in_page(addr); ++ ++ bytes = PAGE_SIZE - offset; ++ if (bytes > len) ++ bytes = len; ++ ++ ref = gnttab_claim_grant_reference(gref_head); ++ BUG_ON(ref == -ENOSPC); ++ gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id, ++ mfn, flags); ++ seg[i].gref = ref; ++ seg[i].offset = (uint16_t)offset; ++ seg[i].length = (uint16_t)bytes; ++ ++ addr += bytes; ++ len -= bytes; ++ } ++} ++ ++static int map_urb_for_request(struct usbfront_info *info, struct urb *urb, ++ struct usbif_urb_request *req) ++{ ++ grant_ref_t gref_head; ++ int nr_buff_pages = 0; ++ int nr_isodesc_pages = 0; ++ int ret = 0; ++ ++ if (urb->transfer_buffer_length) { ++ nr_buff_pages = count_pages(urb->transfer_buffer, ++ urb->transfer_buffer_length); ++ ++ if (usb_pipeisoc(urb->pipe)) ++ nr_isodesc_pages = count_pages(&urb->iso_frame_desc[0], ++ sizeof(struct usb_iso_packet_descriptor) * ++ urb->number_of_packets); ++ ++ if (nr_buff_pages + nr_isodesc_pages > ++ USBIF_MAX_SEGMENTS_PER_REQUEST) ++ return -E2BIG; ++ ++ ret = gnttab_alloc_grant_references( ++ USBIF_MAX_SEGMENTS_PER_REQUEST, &gref_head); ++ if (ret) { ++ printk(KERN_ERR "usbfront: " ++ "gnttab_alloc_grant_references() error\n"); ++ return -ENOMEM; ++ } ++ ++ xenhcd_gnttab_map(info, urb->transfer_buffer, ++ urb->transfer_buffer_length, &gref_head, ++ &req->seg[0], nr_buff_pages, ++ usb_pipein(urb->pipe) ? 
0 : GTF_readonly); ++ ++ if (!usb_pipeisoc(urb->pipe)) ++ gnttab_free_grant_references(gref_head); ++ } ++ ++ req->pipe = usbif_setportnum_pipe(urb->pipe, urb->dev->portnum); ++ req->transfer_flags = urb->transfer_flags; ++ req->buffer_length = urb->transfer_buffer_length; ++ req->nr_buffer_segs = nr_buff_pages; ++ ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_ISOCHRONOUS: ++ req->u.isoc.interval = urb->interval; ++ req->u.isoc.start_frame = urb->start_frame; ++ req->u.isoc.number_of_packets = urb->number_of_packets; ++ req->u.isoc.nr_frame_desc_segs = nr_isodesc_pages; ++ /* urb->number_of_packets must be > 0 */ ++ if (unlikely(urb->number_of_packets <= 0)) ++ BUG(); ++ xenhcd_gnttab_map(info, &urb->iso_frame_desc[0], ++ sizeof(struct usb_iso_packet_descriptor) * ++ urb->number_of_packets, &gref_head, ++ &req->seg[nr_buff_pages], nr_isodesc_pages, 0); ++ gnttab_free_grant_references(gref_head); ++ break; ++ case PIPE_INTERRUPT: ++ req->u.intr.interval = urb->interval; ++ break; ++ case PIPE_CONTROL: ++ if (urb->setup_packet) ++ memcpy(req->u.ctrl, urb->setup_packet, 8); ++ break; ++ case PIPE_BULK: ++ break; ++ default: ++ ret = -EINVAL; ++ } ++ ++ return ret; ++} ++ ++static void xenhcd_gnttab_done(struct usb_shadow *shadow) ++{ ++ int nr_segs = 0; ++ int i; ++ ++ nr_segs = shadow->req.nr_buffer_segs; ++ ++ if (usb_pipeisoc(shadow->req.pipe)) ++ nr_segs += shadow->req.u.isoc.nr_frame_desc_segs; ++ ++ for (i = 0; i < nr_segs; i++) ++ gnttab_end_foreign_access(shadow->req.seg[i].gref, 0, 0UL); ++ ++ shadow->req.nr_buffer_segs = 0; ++ shadow->req.u.isoc.nr_frame_desc_segs = 0; ++} ++ ++static void xenhcd_giveback_urb(struct usbfront_info *info, struct urb *urb, ++ int status) ++__releases(info->lock) ++__acquires(info->lock) ++{ ++ struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; ++ ++ list_del_init(&urbp->list); ++ free_urb_priv(urbp); ++ switch (urb->status) { ++ case -ECONNRESET: ++ case -ENOENT: ++ COUNT(info->stats.unlink); ++ break; ++ case 
-EINPROGRESS: ++ urb->status = status; ++ /* falling through */ ++ default: ++ COUNT(info->stats.complete); ++ } ++ spin_unlock(&info->lock); ++ usb_hcd_giveback_urb(info_to_hcd(info), urb, ++ urbp->status <= 0 ? urbp->status : urb->status); ++ spin_lock(&info->lock); ++} ++ ++static inline int xenhcd_do_request(struct usbfront_info *info, ++ struct urb_priv *urbp) ++{ ++ struct usbif_urb_request *req; ++ struct urb *urb = urbp->urb; ++ uint16_t id; ++ int notify; ++ int ret = 0; ++ ++ req = RING_GET_REQUEST(&info->urb_ring, info->urb_ring.req_prod_pvt); ++ id = get_id_from_freelist(info); ++ req->id = id; ++ ++ if (unlikely(urbp->unlinked)) { ++ req->u.unlink.unlink_id = urbp->req_id; ++ req->pipe = usbif_setunlink_pipe(usbif_setportnum_pipe( ++ urb->pipe, urb->dev->portnum)); ++ urbp->unlink_req_id = id; ++ } else { ++ ret = map_urb_for_request(info, urb, req); ++ if (ret < 0) { ++ add_id_to_freelist(info, id); ++ return ret; ++ } ++ urbp->req_id = id; ++ } ++ ++ info->urb_ring.req_prod_pvt++; ++ info->shadow[id].urb = urb; ++ info->shadow[id].req = *req; ++ ++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->urb_ring, notify); ++ if (notify) ++ notify_remote_via_irq(info->irq); ++ ++ return ret; ++} ++ ++static void xenhcd_kick_pending_urbs(struct usbfront_info *info) ++{ ++ struct urb_priv *urbp; ++ int ret; ++ ++ while (!list_empty(&info->pending_submit_list)) { ++ if (RING_FULL(&info->urb_ring)) { ++ COUNT(info->stats.ring_full); ++ timer_action(info, TIMER_RING_WATCHDOG); ++ goto done; ++ } ++ ++ urbp = list_entry(info->pending_submit_list.next, ++ struct urb_priv, list); ++ ret = xenhcd_do_request(info, urbp); ++ if (ret == 0) ++ list_move_tail(&urbp->list, &info->in_progress_list); ++ else ++ xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN); ++ } ++ timer_action_done(info, TIMER_SCAN_PENDING_URBS); ++ ++done: ++ return; ++} ++ ++/* ++ * caller must lock info->lock ++ */ ++static void xenhcd_cancel_all_enqueued_urbs(struct usbfront_info *info) ++{ ++ struct 
urb_priv *urbp, *tmp; ++ ++ list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) { ++ if (!urbp->unlinked) { ++ xenhcd_gnttab_done(&info->shadow[urbp->req_id]); ++ barrier(); ++ if (urbp->urb->status == -EINPROGRESS)/* not dequeued */ ++ xenhcd_giveback_urb(info, urbp->urb, ++ -ESHUTDOWN); ++ else /* dequeued */ ++ xenhcd_giveback_urb(info, urbp->urb, ++ urbp->urb->status); ++ } ++ info->shadow[urbp->req_id].urb = NULL; ++ } ++ ++ list_for_each_entry_safe(urbp, tmp, &info->pending_submit_list, list) { ++ xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN); ++ } ++ ++ return; ++} ++ ++/* ++ * caller must lock info->lock ++ */ ++static void xenhcd_giveback_unlinked_urbs(struct usbfront_info *info) ++{ ++ struct urb_priv *urbp, *tmp; ++ ++ list_for_each_entry_safe(urbp, tmp, ++ &info->giveback_waiting_list, list) { ++ xenhcd_giveback_urb(info, urbp->urb, urbp->urb->status); ++ } ++} ++ ++static int xenhcd_submit_urb(struct usbfront_info *info, struct urb_priv *urbp) ++{ ++ int ret = 0; ++ ++ if (RING_FULL(&info->urb_ring)) { ++ list_add_tail(&urbp->list, &info->pending_submit_list); ++ COUNT(info->stats.ring_full); ++ timer_action(info, TIMER_RING_WATCHDOG); ++ goto done; ++ } ++ ++ if (!list_empty(&info->pending_submit_list)) { ++ list_add_tail(&urbp->list, &info->pending_submit_list); ++ timer_action(info, TIMER_SCAN_PENDING_URBS); ++ goto done; ++ } ++ ++ ret = xenhcd_do_request(info, urbp); ++ if (ret == 0) ++ list_add_tail(&urbp->list, &info->in_progress_list); ++ ++done: ++ return ret; ++} ++ ++static int xenhcd_unlink_urb(struct usbfront_info *info, struct urb_priv *urbp) ++{ ++ int ret = 0; ++ ++ /* already unlinked? 
*/ ++ if (urbp->unlinked) ++ return -EBUSY; ++ ++ urbp->unlinked = 1; ++ ++ /* the urb is still in pending_submit queue */ ++ if (urbp->req_id == ~0) { ++ list_move_tail(&urbp->list, &info->giveback_waiting_list); ++ timer_action(info, TIMER_SCAN_PENDING_URBS); ++ goto done; ++ } ++ ++ /* send unlink request to backend */ ++ if (RING_FULL(&info->urb_ring)) { ++ list_move_tail(&urbp->list, &info->pending_unlink_list); ++ COUNT(info->stats.ring_full); ++ timer_action(info, TIMER_RING_WATCHDOG); ++ goto done; ++ } ++ ++ if (!list_empty(&info->pending_unlink_list)) { ++ list_move_tail(&urbp->list, &info->pending_unlink_list); ++ timer_action(info, TIMER_SCAN_PENDING_URBS); ++ goto done; ++ } ++ ++ ret = xenhcd_do_request(info, urbp); ++ if (ret == 0) ++ list_move_tail(&urbp->list, &info->in_progress_list); ++ ++done: ++ return ret; ++} ++ ++static int xenhcd_urb_request_done(struct usbfront_info *info) ++{ ++ struct usbif_urb_response *res; ++ struct urb *urb; ++ ++ RING_IDX i, rp; ++ uint16_t id; ++ int more_to_do = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&info->lock, flags); ++ ++ rp = info->urb_ring.sring->rsp_prod; ++ rmb(); /* ensure we see queued responses up to "rp" */ ++ ++ for (i = info->urb_ring.rsp_cons; i != rp; i++) { ++ res = RING_GET_RESPONSE(&info->urb_ring, i); ++ id = res->id; ++ ++ if (likely(usbif_pipesubmit(info->shadow[id].req.pipe))) { ++ xenhcd_gnttab_done(&info->shadow[id]); ++ urb = info->shadow[id].urb; ++ barrier(); ++ if (likely(urb)) { ++ urb->actual_length = res->actual_length; ++ urb->error_count = res->error_count; ++ urb->start_frame = res->start_frame; ++ barrier(); ++ xenhcd_giveback_urb(info, urb, res->status); ++ } ++ } ++ ++ add_id_to_freelist(info, id); ++ } ++ info->urb_ring.rsp_cons = i; ++ ++ if (i != info->urb_ring.req_prod_pvt) ++ RING_FINAL_CHECK_FOR_RESPONSES(&info->urb_ring, more_to_do); ++ else ++ info->urb_ring.sring->rsp_event = i + 1; ++ ++ spin_unlock_irqrestore(&info->lock, flags); ++ ++ cond_resched(); 
++ ++ return more_to_do; ++} ++ ++static int xenhcd_conn_notify(struct usbfront_info *info) ++{ ++ struct usbif_conn_response *res; ++ struct usbif_conn_request *req; ++ RING_IDX rc, rp; ++ uint16_t id; ++ uint8_t portnum, speed; ++ int more_to_do = 0; ++ int notify; ++ int port_changed = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&info->lock, flags); ++ ++ rc = info->conn_ring.rsp_cons; ++ rp = info->conn_ring.sring->rsp_prod; ++ rmb(); /* ensure we see queued responses up to "rp" */ ++ ++ while (rc != rp) { ++ res = RING_GET_RESPONSE(&info->conn_ring, rc); ++ id = res->id; ++ portnum = res->portnum; ++ speed = res->speed; ++ info->conn_ring.rsp_cons = ++rc; ++ ++ rhport_connect(info, portnum, speed); ++ if (info->ports[portnum-1].c_connection) ++ port_changed = 1; ++ ++ barrier(); ++ ++ req = RING_GET_REQUEST(&info->conn_ring, ++ info->conn_ring.req_prod_pvt); ++ req->id = id; ++ info->conn_ring.req_prod_pvt++; ++ } ++ ++ if (rc != info->conn_ring.req_prod_pvt) ++ RING_FINAL_CHECK_FOR_RESPONSES(&info->conn_ring, more_to_do); ++ else ++ info->conn_ring.sring->rsp_event = rc + 1; ++ ++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify); ++ if (notify) ++ notify_remote_via_irq(info->irq); ++ ++ spin_unlock_irqrestore(&info->lock, flags); ++ ++ if (port_changed) ++ usb_hcd_poll_rh_status(info_to_hcd(info)); ++ ++ cond_resched(); ++ ++ return more_to_do; ++} ++ ++int xenhcd_schedule(void *arg) ++{ ++ struct usbfront_info *info = (struct usbfront_info *) arg; ++ ++ while (!kthread_should_stop()) { ++ wait_event_interruptible(info->wq, ++ info->waiting_resp || kthread_should_stop()); ++ info->waiting_resp = 0; ++ smp_mb(); ++ ++ if (xenhcd_urb_request_done(info)) ++ info->waiting_resp = 1; ++ ++ if (xenhcd_conn_notify(info)) ++ info->waiting_resp = 1; ++ } ++ ++ return 0; ++} ++ ++static void xenhcd_notify_work(struct usbfront_info *info) ++{ ++ info->waiting_resp = 1; ++ wake_up(&info->wq); ++} ++ ++irqreturn_t xenhcd_int(int irq, void *dev_id) ++{ 
++ xenhcd_notify_work((struct usbfront_info *) dev_id); ++ return IRQ_HANDLED; ++} ++ ++static void xenhcd_watchdog(unsigned long param) ++{ ++ struct usbfront_info *info = (struct usbfront_info *) param; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&info->lock, flags); ++ if (likely(HC_IS_RUNNING(info_to_hcd(info)->state))) { ++ timer_action_done(info, TIMER_RING_WATCHDOG); ++ xenhcd_giveback_unlinked_urbs(info); ++ xenhcd_kick_pending_urbs(info); ++ } ++ spin_unlock_irqrestore(&info->lock, flags); ++} ++ ++/* ++ * one-time HC init ++ */ ++static int xenhcd_setup(struct usb_hcd *hcd) ++{ ++ struct usbfront_info *info = hcd_to_info(hcd); ++ ++ spin_lock_init(&info->lock); ++ INIT_LIST_HEAD(&info->pending_submit_list); ++ INIT_LIST_HEAD(&info->pending_unlink_list); ++ INIT_LIST_HEAD(&info->in_progress_list); ++ INIT_LIST_HEAD(&info->giveback_waiting_list); ++ init_timer(&info->watchdog); ++ info->watchdog.function = xenhcd_watchdog; ++ info->watchdog.data = (unsigned long) info; ++ return 0; ++} ++ ++/* ++ * start HC running ++ */ ++static int xenhcd_run(struct usb_hcd *hcd) ++{ ++ hcd->uses_new_polling = 1; ++ hcd->state = HC_STATE_RUNNING; ++ create_debug_file(hcd_to_info(hcd)); ++ return 0; ++} ++ ++/* ++ * stop running HC ++ */ ++static void xenhcd_stop(struct usb_hcd *hcd) ++{ ++ struct usbfront_info *info = hcd_to_info(hcd); ++ ++ del_timer_sync(&info->watchdog); ++ remove_debug_file(info); ++ spin_lock_irq(&info->lock); ++ /* cancel all urbs */ ++ hcd->state = HC_STATE_HALT; ++ xenhcd_cancel_all_enqueued_urbs(info); ++ xenhcd_giveback_unlinked_urbs(info); ++ spin_unlock_irq(&info->lock); ++} ++ ++/* ++ * called as .urb_enqueue() ++ * non-error returns are promise to giveback the urb later ++ */ ++static int xenhcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, ++ gfp_t mem_flags) ++{ ++ struct usbfront_info *info = hcd_to_info(hcd); ++ struct urb_priv *urbp; ++ unsigned long flags; ++ int ret = 0; ++ ++ spin_lock_irqsave(&info->lock, flags); ++ ++ urbp 
= alloc_urb_priv(urb); ++ if (!urbp) { ++ ret = -ENOMEM; ++ goto done; ++ } ++ urbp->status = 1; ++ ++ ret = xenhcd_submit_urb(info, urbp); ++ if (ret != 0) ++ free_urb_priv(urbp); ++ ++done: ++ spin_unlock_irqrestore(&info->lock, flags); ++ return ret; ++} ++ ++/* ++ * called as .urb_dequeue() ++ */ ++static int xenhcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) ++{ ++ struct usbfront_info *info = hcd_to_info(hcd); ++ struct urb_priv *urbp; ++ unsigned long flags; ++ int ret = 0; ++ ++ spin_lock_irqsave(&info->lock, flags); ++ ++ urbp = urb->hcpriv; ++ if (!urbp) ++ goto done; ++ ++ urbp->status = status; ++ ret = xenhcd_unlink_urb(info, urbp); ++ ++done: ++ spin_unlock_irqrestore(&info->lock, flags); ++ return ret; ++} ++ ++/* ++ * called from usb_get_current_frame_number(), ++ * but, almost all drivers not use such function. ++ */ ++static int xenhcd_get_frame(struct usb_hcd *hcd) ++{ ++ /* it means error, but probably no problem :-) */ ++ return 0; ++} ++ ++static const char hcd_name[] = "xen_hcd"; ++ ++struct hc_driver xen_usb20_hc_driver = { ++ .description = hcd_name, ++ .product_desc = "Xen USB2.0 Virtual Host Controller", ++ .hcd_priv_size = sizeof(struct usbfront_info), ++ .flags = HCD_USB2, ++ ++ /* basic HC lifecycle operations */ ++ .reset = xenhcd_setup, ++ .start = xenhcd_run, ++ .stop = xenhcd_stop, ++ ++ /* managing urb I/O */ ++ .urb_enqueue = xenhcd_urb_enqueue, ++ .urb_dequeue = xenhcd_urb_dequeue, ++ .get_frame_number = xenhcd_get_frame, ++ ++ /* root hub operations */ ++ .hub_status_data = xenhcd_hub_status_data, ++ .hub_control = xenhcd_hub_control, ++#ifdef XENHCD_PM ++#ifdef CONFIG_PM ++ .bus_suspend = xenhcd_bus_suspend, ++ .bus_resume = xenhcd_bus_resume, ++#endif ++#endif ++}; ++ ++struct hc_driver xen_usb11_hc_driver = { ++ .description = hcd_name, ++ .product_desc = "Xen USB1.1 Virtual Host Controller", ++ .hcd_priv_size = sizeof(struct usbfront_info), ++ .flags = HCD_USB11, ++ ++ /* basic HC lifecycle operations */ 
++ .reset = xenhcd_setup, ++ .start = xenhcd_run, ++ .stop = xenhcd_stop, ++ ++ /* managing urb I/O */ ++ .urb_enqueue = xenhcd_urb_enqueue, ++ .urb_dequeue = xenhcd_urb_dequeue, ++ .get_frame_number = xenhcd_get_frame, ++ ++ /* root hub operations */ ++ .hub_status_data = xenhcd_hub_status_data, ++ .hub_control = xenhcd_hub_control, ++#ifdef XENHCD_PM ++#ifdef CONFIG_PM ++ .bus_suspend = xenhcd_bus_suspend, ++ .bus_resume = xenhcd_bus_resume, ++#endif ++#endif ++}; ++ ++#define GRANT_INVALID_REF 0 ++ ++static void destroy_rings(struct usbfront_info *info) ++{ ++ if (info->irq) ++ unbind_from_irqhandler(info->irq, info); ++ info->evtchn = info->irq = 0; ++ ++ if (info->urb_ring_ref != GRANT_INVALID_REF) { ++ gnttab_end_foreign_access(info->urb_ring_ref, 0, ++ (unsigned long)info->urb_ring.sring); ++ info->urb_ring_ref = GRANT_INVALID_REF; ++ } ++ info->urb_ring.sring = NULL; ++ ++ if (info->conn_ring_ref != GRANT_INVALID_REF) { ++ gnttab_end_foreign_access(info->conn_ring_ref, 0, ++ (unsigned long)info->conn_ring.sring); ++ info->conn_ring_ref = GRANT_INVALID_REF; ++ } ++ info->conn_ring.sring = NULL; ++} ++ ++static int setup_rings(struct xenbus_device *dev, struct usbfront_info *info) ++{ ++ struct usbif_urb_sring *urb_sring; ++ struct usbif_conn_sring *conn_sring; ++ int err; ++ ++ info->urb_ring_ref = GRANT_INVALID_REF; ++ info->conn_ring_ref = GRANT_INVALID_REF; ++ ++ urb_sring = (struct usbif_urb_sring *) ++ get_zeroed_page(GFP_NOIO|__GFP_HIGH); ++ if (!urb_sring) { ++ xenbus_dev_fatal(dev, -ENOMEM, "allocating urb ring"); ++ return -ENOMEM; ++ } ++ SHARED_RING_INIT(urb_sring); ++ FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE); ++ ++ err = xenbus_grant_ring(dev, virt_to_mfn(info->urb_ring.sring)); ++ if (err < 0) { ++ free_page((unsigned long)urb_sring); ++ info->urb_ring.sring = NULL; ++ goto fail; ++ } ++ info->urb_ring_ref = err; ++ ++ conn_sring = (struct usbif_conn_sring *) ++ get_zeroed_page(GFP_NOIO|__GFP_HIGH); ++ if (!conn_sring) { ++ 
xenbus_dev_fatal(dev, -ENOMEM, "allocating conn ring"); ++ return -ENOMEM; ++ } ++ SHARED_RING_INIT(conn_sring); ++ FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE); ++ ++ err = xenbus_grant_ring(dev, virt_to_mfn(info->conn_ring.sring)); ++ if (err < 0) { ++ free_page((unsigned long)conn_sring); ++ info->conn_ring.sring = NULL; ++ goto fail; ++ } ++ info->conn_ring_ref = err; ++ ++ err = xenbus_alloc_evtchn(dev, &info->evtchn); ++ if (err) ++ goto fail; ++ ++ err = bind_evtchn_to_irqhandler(info->evtchn, xenhcd_int, 0, ++ "usbif", info); ++ if (err <= 0) { ++ xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); ++ goto fail; ++ } ++ info->irq = err; ++ ++ return 0; ++fail: ++ destroy_rings(info); ++ return err; ++} ++ ++static int talk_to_usbback(struct xenbus_device *dev, ++ struct usbfront_info *info) ++{ ++ const char *message; ++ struct xenbus_transaction xbt; ++ int err; ++ ++ err = setup_rings(dev, info); ++ if (err) ++ goto out; ++ ++again: ++ err = xenbus_transaction_start(&xbt); ++ if (err) { ++ xenbus_dev_fatal(dev, err, "starting transaction"); ++ goto destroy_ring; ++ } ++ ++ err = xenbus_printf(xbt, dev->nodename, "urb-ring-ref", ++ "%u", info->urb_ring_ref); ++ if (err) { ++ message = "writing urb-ring-ref"; ++ goto abort_transaction; ++ } ++ ++ err = xenbus_printf(xbt, dev->nodename, "conn-ring-ref", ++ "%u", info->conn_ring_ref); ++ if (err) { ++ message = "writing conn-ring-ref"; ++ goto abort_transaction; ++ } ++ ++ err = xenbus_printf(xbt, dev->nodename, "event-channel", ++ "%u", info->evtchn); ++ if (err) { ++ message = "writing event-channel"; ++ goto abort_transaction; ++ } ++ ++ err = xenbus_transaction_end(xbt, 0); ++ if (err) { ++ if (err == -EAGAIN) ++ goto again; ++ xenbus_dev_fatal(dev, err, "completing transaction"); ++ goto destroy_ring; ++ } ++ ++ return 0; ++ ++abort_transaction: ++ xenbus_transaction_end(xbt, 1); ++ xenbus_dev_fatal(dev, err, "%s", message); ++ ++destroy_ring: ++ destroy_rings(info); ++ ++out: ++ 
return err; ++} ++ ++static int connect(struct xenbus_device *dev) ++{ ++ struct usbfront_info *info = dev_get_drvdata(&dev->dev); ++ ++ struct usbif_conn_request *req; ++ int i, idx, err; ++ int notify; ++ char name[TASK_COMM_LEN]; ++ struct usb_hcd *hcd; ++ ++ hcd = info_to_hcd(info); ++ snprintf(name, TASK_COMM_LEN, "xenhcd.%d", hcd->self.busnum); ++ ++ err = talk_to_usbback(dev, info); ++ if (err) ++ return err; ++ ++ info->kthread = kthread_run(xenhcd_schedule, info, name); ++ if (IS_ERR(info->kthread)) { ++ err = PTR_ERR(info->kthread); ++ info->kthread = NULL; ++ xenbus_dev_fatal(dev, err, "Error creating thread"); ++ return err; ++ } ++ /* prepare ring for hotplug notification */ ++ for (idx = 0, i = 0; i < USB_CONN_RING_SIZE; i++) { ++ req = RING_GET_REQUEST(&info->conn_ring, idx); ++ req->id = idx; ++ idx++; ++ } ++ info->conn_ring.req_prod_pvt = idx; ++ ++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify); ++ if (notify) ++ notify_remote_via_irq(info->irq); ++ ++ return 0; ++} ++ ++static struct usb_hcd *create_hcd(struct xenbus_device *dev) ++{ ++ int i; ++ int err = 0; ++ int num_ports; ++ int usb_ver; ++ struct usb_hcd *hcd = NULL; ++ struct usbfront_info *info = NULL; ++ ++ err = xenbus_scanf(XBT_NIL, dev->otherend, "num-ports", ++ "%d", &num_ports); ++ if (err != 1) { ++ xenbus_dev_fatal(dev, err, "reading num-ports"); ++ return ERR_PTR(-EINVAL); ++ } ++ if (num_ports < 1 || num_ports > USB_MAXCHILDREN) { ++ xenbus_dev_fatal(dev, err, "invalid num-ports"); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ err = xenbus_scanf(XBT_NIL, dev->otherend, "usb-ver", "%d", &usb_ver); ++ if (err != 1) { ++ xenbus_dev_fatal(dev, err, "reading usb-ver"); ++ return ERR_PTR(-EINVAL); ++ } ++ switch (usb_ver) { ++ case USB_VER_USB11: ++ hcd = usb_create_hcd(&xen_usb11_hc_driver, ++ &dev->dev, dev_name(&dev->dev)); ++ break; ++ case USB_VER_USB20: ++ hcd = usb_create_hcd(&xen_usb20_hc_driver, ++ &dev->dev, dev_name(&dev->dev)); ++ break; ++ default: ++ 
xenbus_dev_fatal(dev, err, "invalid usb-ver"); ++ return ERR_PTR(-EINVAL); ++ } ++ if (!hcd) { ++ xenbus_dev_fatal(dev, err, ++ "fail to allocate USB host controller"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ info = hcd_to_info(hcd); ++ info->xbdev = dev; ++ info->rh_numports = num_ports; ++ ++ for (i = 0; i < USB_URB_RING_SIZE; i++) { ++ info->shadow[i].req.id = i + 1; ++ info->shadow[i].urb = NULL; ++ } ++ info->shadow[USB_URB_RING_SIZE-1].req.id = 0x0fff; ++ ++ return hcd; ++} ++ ++static int usbfront_probe(struct xenbus_device *dev, ++ const struct xenbus_device_id *id) ++{ ++ int err; ++ struct usb_hcd *hcd; ++ struct usbfront_info *info; ++ ++ if (usb_disabled()) ++ return -ENODEV; ++ ++ hcd = create_hcd(dev); ++ if (IS_ERR(hcd)) { ++ err = PTR_ERR(hcd); ++ xenbus_dev_fatal(dev, err, ++ "failed to create usb host controller"); ++ goto fail; ++ } ++ ++ info = hcd_to_info(hcd); ++ dev_set_drvdata(&dev->dev, info); ++ ++ err = usb_add_hcd(hcd, 0, 0); ++ if (err != 0) { ++ xenbus_dev_fatal(dev, err, "fail to add USB host controller"); ++ goto fail; ++ } ++ ++ init_waitqueue_head(&info->wq); ++ ++ return 0; ++ ++fail: ++ usb_put_hcd(hcd); ++ dev_set_drvdata(&dev->dev, NULL); ++ return err; ++} ++ ++static void usbfront_disconnect(struct xenbus_device *dev) ++{ ++ struct usbfront_info *info = dev_get_drvdata(&dev->dev); ++ struct usb_hcd *hcd = info_to_hcd(info); ++ ++ usb_remove_hcd(hcd); ++ if (info->kthread) { ++ kthread_stop(info->kthread); ++ info->kthread = NULL; ++ } ++ xenbus_frontend_closed(dev); ++} ++ ++static void usbback_changed(struct xenbus_device *dev, ++ enum xenbus_state backend_state) ++{ ++ switch (backend_state) { ++ case XenbusStateInitialising: ++ case XenbusStateInitialised: ++ case XenbusStateConnected: ++ case XenbusStateReconfiguring: ++ case XenbusStateReconfigured: ++ case XenbusStateUnknown: ++ case XenbusStateClosed: ++ break; ++ ++ case XenbusStateInitWait: ++ if (dev->state != XenbusStateInitialising) ++ break; ++ if (!connect(dev)) 
++ xenbus_switch_state(dev, XenbusStateConnected); ++ break; ++ ++ case XenbusStateClosing: ++ usbfront_disconnect(dev); ++ break; ++ ++ default: ++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend", ++ backend_state); ++ break; ++ } ++} ++ ++static int usbfront_remove(struct xenbus_device *dev) ++{ ++ struct usbfront_info *info = dev_get_drvdata(&dev->dev); ++ struct usb_hcd *hcd = info_to_hcd(info); ++ ++ destroy_rings(info); ++ usb_put_hcd(hcd); ++ ++ return 0; ++} ++ ++static const struct xenbus_device_id usbfront_ids[] = { ++ { "vusb" }, ++ { "" }, ++}; ++MODULE_ALIAS("xen:vusb"); ++ ++static DEFINE_XENBUS_DRIVER(usbfront, , ++ .probe = usbfront_probe, ++ .remove = usbfront_remove, ++ .otherend_changed = usbback_changed, ++); ++ ++static int __init usbfront_init(void) ++{ ++ if (!xen_domain()) ++ return -ENODEV; ++ ++ xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv", ++ sizeof(struct urb_priv), 0, 0, NULL); ++ if (!xenhcd_urbp_cachep) { ++ printk(KERN_ERR "usbfront failed to create kmem cache\n"); ++ return -ENOMEM; ++ } ++ ++ return xenbus_register_frontend(&usbfront_driver); ++} ++ ++static void __exit usbfront_exit(void) ++{ ++ kmem_cache_destroy(xenhcd_urbp_cachep); ++ xenbus_unregister_driver(&usbfront_driver); ++} ++ ++module_init(usbfront_init); ++module_exit(usbfront_exit); ++ ++MODULE_AUTHOR(""); ++MODULE_DESCRIPTION("Xen USB Virtual Host Controller driver (usbfront)"); ++MODULE_LICENSE("Dual BSD/GPL"); +diff --git a/include/xen/interface/io/usbif.h b/include/xen/interface/io/usbif.h +new file mode 100644 +index 0000000..f3bb1b2 +--- /dev/null ++++ b/include/xen/interface/io/usbif.h +@@ -0,0 +1,150 @@ ++/* ++ * usbif.h ++ * ++ * USB I/O interface for Xen guest OSes. ++ * ++ * Copyright (C) 2009, FUJITSU LABORATORIES LTD. 
++ * Author: Noboru Iwamatsu ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to ++ * deal in the Software without restriction, including without limitation the ++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __XEN_PUBLIC_IO_USBIF_H__ ++#define __XEN_PUBLIC_IO_USBIF_H__ ++ ++#include "ring.h" ++#include "../grant_table.h" ++ ++enum usb_spec_version { ++ USB_VER_UNKNOWN = 0, ++ USB_VER_USB11, ++ USB_VER_USB20, ++ USB_VER_USB30, /* not supported yet */ ++}; ++ ++/* ++ * USB pipe in usbif_request ++ * ++ * bits 0-5 are specific bits for virtual USB driver. ++ * bits 7-31 are standard urb pipe. 
++ * ++ * - port number(NEW): bits 0-4 ++ * (USB_MAXCHILDREN is 31) ++ * ++ * - operation flag(NEW): bit 5 ++ * (0 = submit urb, ++ * 1 = unlink urb) ++ * ++ * - direction: bit 7 ++ * (0 = Host-to-Device [Out] ++ * 1 = Device-to-Host [In]) ++ * ++ * - device address: bits 8-14 ++ * ++ * - endpoint: bits 15-18 ++ * ++ * - pipe type: bits 30-31 ++ * (00 = isochronous, 01 = interrupt, ++ * 10 = control, 11 = bulk) ++ */ ++#define usbif_pipeportnum(pipe) ((pipe) & 0x1f) ++#define usbif_setportnum_pipe(pipe, portnum) \ ++ ((pipe)|(portnum)) ++ ++#define usbif_pipeunlink(pipe) ((pipe) & 0x20) ++#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe)) ++#define usbif_setunlink_pipe(pipe) ((pipe)|(0x20)) ++ ++#define USBIF_BACK_MAX_PENDING_REQS (128) ++#define USBIF_MAX_SEGMENTS_PER_REQUEST (16) ++ ++/* ++ * RING for transferring urbs. ++ */ ++struct usbif_request_segment { ++ grant_ref_t gref; ++ uint16_t offset; ++ uint16_t length; ++}; ++ ++struct usbif_urb_request { ++ uint16_t id; /* request id */ ++ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */ ++ ++ /* basic urb parameter */ ++ uint32_t pipe; ++ uint16_t transfer_flags; ++ uint16_t buffer_length; ++ union { ++ uint8_t ctrl[8]; /* setup_packet (Ctrl) */ ++ ++ struct { ++ uint16_t interval; /* maximum (1024*8) in usb core */ ++ uint16_t start_frame; /* start frame */ ++ uint16_t number_of_packets; /* number of ISO packet */ ++ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc ++ segments */ ++ } isoc; ++ ++ struct { ++ uint16_t interval; /* maximum (1024*8) in usb core */ ++ uint16_t pad[3]; ++ } intr; ++ ++ struct { ++ uint16_t unlink_id; /* unlink request id */ ++ uint16_t pad[3]; ++ } unlink; ++ ++ } u; ++ ++ /* urb data segments */ ++ struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST]; ++}; ++ ++struct usbif_urb_response { ++ uint16_t id; /* request id */ ++ uint16_t start_frame; /* start frame (ISO) */ ++ int32_t status; /* status (non-ISO) */ ++ int32_t 
actual_length; /* actual transfer length */ ++ int32_t error_count; /* number of ISO errors */ ++}; ++ ++DEFINE_RING_TYPES(usbif_urb, struct usbif_urb_request, ++ struct usbif_urb_response); ++#define USB_URB_RING_SIZE __CONST_RING_SIZE(usbif_urb, PAGE_SIZE) ++ ++/* ++ * RING for notifying connect/disconnect events to frontend ++ */ ++struct usbif_conn_request { ++ uint16_t id; ++}; ++ ++struct usbif_conn_response { ++ uint16_t id; /* request id */ ++ uint8_t portnum; /* port number */ ++ uint8_t speed; /* usb_device_speed */ ++}; ++ ++DEFINE_RING_TYPES(usbif_conn, struct usbif_conn_request, ++ struct usbif_conn_response); ++#define USB_CONN_RING_SIZE __CONST_RING_SIZE(usbif_conn, PAGE_SIZE) ++ ++#endif /* __XEN_PUBLIC_IO_USBIF_H__ */ diff --git a/series-pvops.conf b/series-pvops.conf index bde4589..a638e90 100644 --- a/series-pvops.conf +++ b/series-pvops.conf @@ -1,2 +1,3 @@ patches.xen/pvops-3.4-enable-netfront-in-dom0.patch patches.xen/pvops-netback-calculate-correctly-the-SKB-slots.patch +patches.xen/pvops-3.4-0100-usb-xen-pvusb-driver.patch From dc079b0edd2e81074829ef530bd1aaf676ec6e4d Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 24 May 2012 09:34:00 +0200 Subject: [PATCH 03/56] pvops: disable memory cgroups We don't use this, so save some wasted memory (usage tracking struct). 
--- config-pvops | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/config-pvops b/config-pvops index bb17523..e987c8e 100644 --- a/config-pvops +++ b/config-pvops @@ -124,9 +124,7 @@ CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_CPUACCT=y CONFIG_RESOURCE_COUNTERS=y -CONFIG_CGROUP_MEM_RES_CTLR=y -# CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not set -CONFIG_CGROUP_MEM_RES_CTLR_KMEM=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set # CONFIG_CGROUP_PERF is not set CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y @@ -142,7 +140,6 @@ CONFIG_USER_NS=y CONFIG_PID_NS=y CONFIG_NET_NS=y # CONFIG_SCHED_AUTOGROUP is not set -CONFIG_MM_OWNER=y # CONFIG_SYSFS_DEPRECATED is not set CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y @@ -188,7 +185,6 @@ CONFIG_PCI_QUIRKS=y # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y # CONFIG_SLUB is not set -# CONFIG_SLOB is not set CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y CONFIG_OPROFILE=m From 81c2c73ebfee9735a0889d6de6879a4d21fa663a Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 9 May 2012 13:16:34 +0200 Subject: [PATCH 04/56] include generated includes in -devel package Required to compile external modules - there are eg unistd_{32,64}.h. 
--- kernel.spec | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel.spec b/kernel.spec index 81c1e7c..4e4c806 100644 --- a/kernel.spec +++ b/kernel.spec @@ -225,6 +225,7 @@ rm -f %buildroot/lib/modules/%kernelrelease/build/scripts/*/*.o cp -a scripts/* %buildroot/lib/modules/%kernelrelease/build/scripts/ cp -a include/* %buildroot/lib/modules/%kernelrelease/build/include +cp -a --parents arch/x86/include/generated %buildroot/lib/modules/%kernelrelease/build/ # Make sure the Makefile and version.h have a matching timestamp so that # external modules can be built From dcecd5104cb13b471d89cd24860029cee876b471 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 24 May 2012 16:19:53 +0200 Subject: [PATCH 05/56] spec: dont check for stale symlinks --- kernel.spec | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel.spec b/kernel.spec index 4e4c806..666c202 100644 --- a/kernel.spec +++ b/kernel.spec @@ -163,6 +163,11 @@ make -C %kernel_build_dir SUBDIRS=%_builddir/u2mfn modules export NO_BRP_STRIP_DEBUG=true export STRIP_KEEP_SYMTAB='*/vmlinux-*' +# /lib/modules/%kernelrelease-%build_flavor/build will be a stale symlink until the +# kernel-devel package is installed. Don't check for stale symlinks +# in the brp-symlink check: +export NO_BRP_STALE_LINK_ERROR=yes + cd %kernel_build_dir mkdir -p %buildroot/boot From 041f871514dac3c7030ccaa62f9ea2c7e4ad0611 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 25 May 2012 23:46:26 +0200 Subject: [PATCH 06/56] vm-initrd: support new dracut folder layout This enables building kernel under FC15. 
--- .../{ => lib/dracut/hooks}/pre-pivot/50_mount_modules.sh | 0 .../{ => lib/dracut/hooks}/pre-udev/90_qubes_cow_setup.sh | 0 vm-initramfs/pre-pivot | 1 + vm-initramfs/pre-udev | 1 + 4 files changed, 2 insertions(+) rename vm-initramfs/{ => lib/dracut/hooks}/pre-pivot/50_mount_modules.sh (100%) rename vm-initramfs/{ => lib/dracut/hooks}/pre-udev/90_qubes_cow_setup.sh (100%) create mode 120000 vm-initramfs/pre-pivot create mode 120000 vm-initramfs/pre-udev diff --git a/vm-initramfs/pre-pivot/50_mount_modules.sh b/vm-initramfs/lib/dracut/hooks/pre-pivot/50_mount_modules.sh similarity index 100% rename from vm-initramfs/pre-pivot/50_mount_modules.sh rename to vm-initramfs/lib/dracut/hooks/pre-pivot/50_mount_modules.sh diff --git a/vm-initramfs/pre-udev/90_qubes_cow_setup.sh b/vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh similarity index 100% rename from vm-initramfs/pre-udev/90_qubes_cow_setup.sh rename to vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh diff --git a/vm-initramfs/pre-pivot b/vm-initramfs/pre-pivot new file mode 120000 index 0000000..8460cde --- /dev/null +++ b/vm-initramfs/pre-pivot @@ -0,0 +1 @@ +lib/dracut/hooks/pre-pivot \ No newline at end of file diff --git a/vm-initramfs/pre-udev b/vm-initramfs/pre-udev new file mode 120000 index 0000000..f8c9b04 --- /dev/null +++ b/vm-initramfs/pre-udev @@ -0,0 +1 @@ +lib/dracut/hooks/pre-udev \ No newline at end of file From 3c69343cca1df47b762d66683af03e55e06dd055 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 25 May 2012 23:47:23 +0200 Subject: [PATCH 07/56] spec: build scripts_base explicitly Prevent race when building with -j4. 
--- kernel.spec | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel.spec b/kernel.spec index 666c202..4456ee3 100644 --- a/kernel.spec +++ b/kernel.spec @@ -128,6 +128,7 @@ fi make prepare $MAKE_ARGS make scripts $MAKE_ARGS +make scripts_basic $MAKE_ARGS krel=$(make -s kernelrelease $MAKE_ARGS) if [ "$krel" != "%kernelrelease" ]; then From 2b14e535adc4fe734f6588d80b20a0dd8575a4f8 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 25 May 2012 23:30:15 +0200 Subject: [PATCH 08/56] pvops: ACPI S3 patches for 3.4 kernel --- ...rovide-registration-for-acpi_suspend.patch | 112 ++++++++++ ...nable-ACPI-sleep-via-the-__acpi_os_p.patch | 197 ++++++++++++++++++ ...egister-to-the-acpi_suspend_lowlevel.patch | 53 +++++ series-pvops.conf | 3 + 4 files changed, 365 insertions(+) create mode 100644 patches.xen/pvops-3.4-0001-x86-acpi-sleep-Provide-registration-for-acpi_suspend.patch create mode 100644 patches.xen/pvops-3.4-0002-xen-acpi-sleep-Enable-ACPI-sleep-via-the-__acpi_os_p.patch create mode 100644 patches.xen/pvops-3.4-0003-xen-acpi-sleep-Register-to-the-acpi_suspend_lowlevel.patch diff --git a/patches.xen/pvops-3.4-0001-x86-acpi-sleep-Provide-registration-for-acpi_suspend.patch b/patches.xen/pvops-3.4-0001-x86-acpi-sleep-Provide-registration-for-acpi_suspend.patch new file mode 100644 index 0000000..55cc9f1 --- /dev/null +++ b/patches.xen/pvops-3.4-0001-x86-acpi-sleep-Provide-registration-for-acpi_suspend.patch @@ -0,0 +1,112 @@ +From b2ed886e43ec90bae86d6cae6582b457e76d1fd8 Mon Sep 17 00:00:00 2001 +From: Konrad Rzeszutek Wilk +Date: Thu, 8 Dec 2011 17:16:43 +0800 +Subject: x86/acpi/sleep: Provide registration for acpi_suspend_lowlevel. + +Which by default will be x86_acpi_suspend_lowlevel. +This registration allows us to register another callback +if there is a need to use another platform specific callback. + +CC: Thomas Gleixner +CC: "H. 
Peter Anvin" +CC: x86@kernel.org +CC: Len Brown +CC: Joseph Cihula +CC: Shane Wang +CC: linux-pm@lists.linux-foundation.org +CC: linux-acpi@vger.kernel.org +CC: Len Brown +Signed-off-by: Liang Tang +[v1: Fix when CONFIG_ACPI_SLEEP is not set] +Signed-off-by: Konrad Rzeszutek Wilk +--- + arch/x86/include/asm/acpi.h | 2 +- + arch/x86/kernel/acpi/boot.c | 7 +++++++ + arch/x86/kernel/acpi/sleep.c | 4 ++-- + arch/x86/kernel/acpi/sleep.h | 2 ++ + drivers/acpi/sleep.c | 2 ++ + 5 files changed, 14 insertions(+), 3 deletions(-) + +diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h +index 610001d..68cf060 100644 +--- a/arch/x86/include/asm/acpi.h ++++ b/arch/x86/include/asm/acpi.h +@@ -115,7 +115,7 @@ static inline void acpi_disable_pci(void) + } + + /* Low-level suspend routine. */ +-extern int acpi_suspend_lowlevel(void); ++extern int (*acpi_suspend_lowlevel)(void); + + extern const unsigned char acpi_wakeup_code[]; + #define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code))) +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c +index ce664f3..c3a5b95 100644 +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -44,6 +44,7 @@ + #include + #include + ++#include "sleep.h" /* To include x86_acpi_suspend_lowlevel */ + static int __initdata acpi_force = 0; + u32 acpi_rsdt_forced; + int acpi_disabled; +@@ -558,6 +559,12 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi, + int (*__acpi_register_gsi)(struct device *dev, u32 gsi, + int trigger, int polarity) = acpi_register_gsi_pic; + ++#ifdef CONFIG_ACPI_SLEEP ++int (*acpi_suspend_lowlevel)(void) = x86_acpi_suspend_lowlevel; ++#else ++int (*acpi_suspend_lowlevel)(void); ++#endif ++ + /* + * success: return IRQ number (>=0) + * failure: return < 0 +diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c +index 103b6ab..4d2d0b1 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -25,12 +25,12 @@ 
static char temp_stack[4096]; + acpi_enter_sleep_state(3, wake_sleep_flags); + } + /** +- * acpi_suspend_lowlevel - save kernel state ++ * x86_acpi_suspend_lowlevel - save kernel state + * + * Create an identity mapped page table and copy the wakeup routine to + * low memory. + */ +-int acpi_suspend_lowlevel(void) ++int x86_acpi_suspend_lowlevel(void) + { + struct wakeup_header *header; + /* address in low memory of the wakeup routine. */ +diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h +index 416d4be..4d3feb5 100644 +--- a/arch/x86/kernel/acpi/sleep.h ++++ b/arch/x86/kernel/acpi/sleep.h +@@ -13,3 +13,5 @@ extern unsigned long acpi_copy_wakeup_routine(unsigned long); + extern void wakeup_long64(void); + + extern void do_suspend_lowlevel(void); ++ ++extern int x86_acpi_suspend_lowlevel(void); +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c +index 0a7ed69..44dbdde 100644 +--- a/drivers/acpi/sleep.c ++++ b/drivers/acpi/sleep.c +@@ -254,6 +254,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state) + break; + + case ACPI_STATE_S3: ++ if (!acpi_suspend_lowlevel) ++ return -ENOSYS; + error = acpi_suspend_lowlevel(); + if (error) + return error; +-- +1.7.6.4 + diff --git a/patches.xen/pvops-3.4-0002-xen-acpi-sleep-Enable-ACPI-sleep-via-the-__acpi_os_p.patch b/patches.xen/pvops-3.4-0002-xen-acpi-sleep-Enable-ACPI-sleep-via-the-__acpi_os_p.patch new file mode 100644 index 0000000..48da1d1 --- /dev/null +++ b/patches.xen/pvops-3.4-0002-xen-acpi-sleep-Enable-ACPI-sleep-via-the-__acpi_os_p.patch @@ -0,0 +1,197 @@ +From 9b10575276a220543b8791f2cb8268fbd4a0bc2e Mon Sep 17 00:00:00 2001 +From: Konrad Rzeszutek Wilk +Date: Thu, 8 Dec 2011 17:32:23 +0800 +Subject: xen/acpi/sleep: Enable ACPI sleep via the __acpi_os_prepare_sleep + +Provide the registration callback to call in the Xen's +ACPI sleep functionality. This means that during S3/S5 +we make a hypercall XENPF_enter_acpi_sleep with the +proper PM1A/PM1B registers. 
+ +Based of Ke Yu's initial idea. +[ From http://xenbits.xensource.com/linux-2.6.18-xen.hg +change c68699484a65 ] + +[v1: Added Copyright and license] +[v2: Added check if PM1A/B the 16-bits MSB contain something. The spec + only uses 16-bits but might have more in future] +Signed-off-by: Liang Tang +Signed-off-by: Konrad Rzeszutek Wilk +--- + arch/x86/xen/enlighten.c | 3 ++ + drivers/xen/Makefile | 2 +- + drivers/xen/acpi.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++ + include/xen/acpi.h | 58 +++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 124 insertions(+), 1 deletions(-) + create mode 100644 drivers/xen/acpi.c + create mode 100644 include/xen/acpi.h + +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index 12eb07b..a5277c2 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -42,6 +42,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1275,6 +1276,8 @@ asmlinkage void __init xen_start_kernel(void) + + /* Make sure ACS will be enabled */ + pci_request_acs(); ++ ++ xen_acpi_sleep_register(); + } + #ifdef CONFIG_PCI + /* PCI BIOS service won't work from a PV guest. 
*/ +diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile +index aa31337..77a845f 100644 +--- a/drivers/xen/Makefile ++++ b/drivers/xen/Makefile +@@ -17,7 +17,7 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o + obj-$(CONFIG_XEN_PVHVM) += platform-pci.o + obj-$(CONFIG_XEN_TMEM) += tmem.o + obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o +-obj-$(CONFIG_XEN_DOM0) += pci.o ++obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o + obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ + obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o + obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o +diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c +new file mode 100644 +index 0000000..119d42a +--- /dev/null ++++ b/drivers/xen/acpi.c +@@ -0,0 +1,62 @@ ++/****************************************************************************** ++ * acpi.c ++ * acpi file for domain 0 kernel ++ * ++ * Copyright (c) 2011 Konrad Rzeszutek Wilk ++ * Copyright (c) 2011 Yu Ke ke.yu@intel.com ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License version 2 ++ * as published by the Free Software Foundation; or, when distributed ++ * separately from the Linux kernel or incorporated into other ++ * software packages, subject to the following license: ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this source file (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, copy, modify, ++ * merge, publish, distribute, sublicense, and/or sell copies of the Software, ++ * and to permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ */ ++ ++#include ++#include ++#include ++#include ++ ++int xen_acpi_notify_hypervisor_state(u8 sleep_state, ++ u32 pm1a_cnt, u32 pm1b_cnt) ++{ ++ struct xen_platform_op op = { ++ .cmd = XENPF_enter_acpi_sleep, ++ .interface_version = XENPF_INTERFACE_VERSION, ++ .u = { ++ .enter_acpi_sleep = { ++ .pm1a_cnt_val = (u16)pm1a_cnt, ++ .pm1b_cnt_val = (u16)pm1b_cnt, ++ .sleep_state = sleep_state, ++ }, ++ }, ++ }; ++ ++ if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) { ++ WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!" 
++ "Email xen-devel@lists.xensource.com Thank you.\n", \ ++ pm1a_cnt, pm1b_cnt); ++ return -1; ++ } ++ ++ HYPERVISOR_dom0_op(&op); ++ return 1; ++} +diff --git a/include/xen/acpi.h b/include/xen/acpi.h +new file mode 100644 +index 0000000..48a9c01 +--- /dev/null ++++ b/include/xen/acpi.h +@@ -0,0 +1,58 @@ ++/****************************************************************************** ++ * acpi.h ++ * acpi file for domain 0 kernel ++ * ++ * Copyright (c) 2011 Konrad Rzeszutek Wilk ++ * Copyright (c) 2011 Yu Ke ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License version 2 ++ * as published by the Free Software Foundation; or, when distributed ++ * separately from the Linux kernel or incorporated into other ++ * software packages, subject to the following license: ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this source file (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, copy, modify, ++ * merge, publish, distribute, sublicense, and/or sell copies of the Software, ++ * and to permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. 
++ */ ++ ++#ifndef _XEN_ACPI_H ++#define _XEN_ACPI_H ++ ++#include ++ ++#ifdef CONFIG_XEN_DOM0 ++#include ++#include ++#include ++ ++int xen_acpi_notify_hypervisor_state(u8 sleep_state, ++ u32 pm1a_cnt, u32 pm1b_cnd); ++ ++static inline void xen_acpi_sleep_register(void) ++{ ++ if (xen_initial_domain()) ++ acpi_os_set_prepare_sleep( ++ &xen_acpi_notify_hypervisor_state); ++} ++#else ++static inline void xen_acpi_sleep_register(void) ++{ ++} ++#endif ++ ++#endif /* _XEN_ACPI_H */ +-- +1.7.6.4 + diff --git a/patches.xen/pvops-3.4-0003-xen-acpi-sleep-Register-to-the-acpi_suspend_lowlevel.patch b/patches.xen/pvops-3.4-0003-xen-acpi-sleep-Register-to-the-acpi_suspend_lowlevel.patch new file mode 100644 index 0000000..a348c8d --- /dev/null +++ b/patches.xen/pvops-3.4-0003-xen-acpi-sleep-Register-to-the-acpi_suspend_lowlevel.patch @@ -0,0 +1,53 @@ +From 5000cd48f33e3e4d31cdeda0751188794f8bebf4 Mon Sep 17 00:00:00 2001 +From: Konrad Rzeszutek Wilk +Date: Thu, 8 Dec 2011 17:34:58 +0800 +Subject: xen/acpi/sleep: Register to the acpi_suspend_lowlevel a callback. + +We piggyback on "x86/acpi: Provide registration for acpi_suspend_lowlevel." +to register a Xen version of the callback. The callback does not +do anything special - except it omits the x86_acpi_suspend_lowlevel. +It does that b/c during suspend it tries to save cr8 values (which +the hypervisor does not support), and then on resume path the +cr3, cr8, idt, and gdt are all resumed which clashes with what +the hypervisor has set up for the guest. 
+ +Signed-off-by: Liang Tang +Signed-off-by: Konrad Rzeszutek Wilk +--- + include/xen/acpi.h | 16 +++++++++++++++- + 1 files changed, 15 insertions(+), 1 deletions(-) + +diff --git a/include/xen/acpi.h b/include/xen/acpi.h +index 48a9c01..ebaabbb 100644 +--- a/include/xen/acpi.h ++++ b/include/xen/acpi.h +@@ -43,11 +43,25 @@ + int xen_acpi_notify_hypervisor_state(u8 sleep_state, + u32 pm1a_cnt, u32 pm1b_cnd); + ++static inline int xen_acpi_suspend_lowlevel(void) ++{ ++ /* ++ * Xen will save and restore CPU context, so ++ * we can skip that and just go straight to ++ * the suspend. ++ */ ++ acpi_enter_sleep_state(ACPI_STATE_S3, 0); ++ return 0; ++} ++ + static inline void xen_acpi_sleep_register(void) + { +- if (xen_initial_domain()) ++ if (xen_initial_domain()) { + acpi_os_set_prepare_sleep( + &xen_acpi_notify_hypervisor_state); ++ ++ acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel; ++ } + } + #else + static inline void xen_acpi_sleep_register(void) +-- +1.7.6.4 + diff --git a/series-pvops.conf b/series-pvops.conf index a638e90..2c2f308 100644 --- a/series-pvops.conf +++ b/series-pvops.conf @@ -1,3 +1,6 @@ +patches.xen/pvops-3.4-0001-x86-acpi-sleep-Provide-registration-for-acpi_suspend.patch +patches.xen/pvops-3.4-0002-xen-acpi-sleep-Enable-ACPI-sleep-via-the-__acpi_os_p.patch +patches.xen/pvops-3.4-0003-xen-acpi-sleep-Register-to-the-acpi_suspend_lowlevel.patch patches.xen/pvops-3.4-enable-netfront-in-dom0.patch patches.xen/pvops-netback-calculate-correctly-the-SKB-slots.patch patches.xen/pvops-3.4-0100-usb-xen-pvusb-driver.patch From ca2b2ba12ab29e0b7ff4771f6954bdf1390e2f5d Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sun, 27 May 2012 22:59:09 +0200 Subject: [PATCH 09/56] spec: allow to install multiple releases of single kernel Include release in kernel subdir name. 
--- kernel.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel.spec b/kernel.spec index 4456ee3..4d3aa94 100644 --- a/kernel.spec +++ b/kernel.spec @@ -19,7 +19,7 @@ %define build_src_dir %my_builddir/linux-%version %define src_install_dir /usr/src/kernels/%kernelrelease %define kernel_build_dir %my_builddir/linux-obj -%define vm_install_dir /var/lib/qubes/vm-kernels/%version +%define vm_install_dir /var/lib/qubes/vm-kernels/%version-%rel %(chmod +x %_sourcedir/{guards,apply-patches,check-for-config-changes}) From 4ce6980d66bb76fe4b6b79b5e60478a772d0cbc9 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 28 May 2012 00:10:21 +0200 Subject: [PATCH 10/56] pvops: suppress debug code in wifi drivers --- config-pvops | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/config-pvops b/config-pvops index e987c8e..e226bcf 100644 --- a/config-pvops +++ b/config-pvops @@ -2376,23 +2376,22 @@ CONFIG_HOSTAP_PCI=m CONFIG_HOSTAP_CS=m CONFIG_IPW2100=m CONFIG_IPW2100_MONITOR=y -CONFIG_IPW2100_DEBUG=y +# CONFIG_IPW2100_DEBUG is not set CONFIG_IPW2200=m CONFIG_IPW2200_MONITOR=y CONFIG_IPW2200_RADIOTAP=y CONFIG_IPW2200_PROMISCUOUS=y CONFIG_IPW2200_QOS=y -CONFIG_IPW2200_DEBUG=y +# CONFIG_IPW2200_DEBUG is not set CONFIG_LIBIPW=m -CONFIG_LIBIPW_DEBUG=y +# CONFIG_LIBIPW_DEBUG is not set CONFIG_IWLWIFI=m # # Debugging Options # -CONFIG_IWLWIFI_DEBUG=y +# CONFIG_IWLWIFI_DEBUG is not set CONFIG_IWLWIFI_DEBUGFS=y -# CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE is not set # CONFIG_IWLWIFI_DEVICE_TRACING is not set CONFIG_IWLWIFI_DEVICE_TESTMODE=y # CONFIG_IWLWIFI_P2P is not set From 02f451a3fcdfeeace472577d79368647495afa9c Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 4 Jun 2012 15:33:31 +0200 Subject: [PATCH 11/56] include generated includes in -devel package - only for pvops Required to compile external modules - there are eg unistd_{32,64}.h. 
--- kernel.spec | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel.spec b/kernel.spec index 4d3aa94..f6a37ba 100644 --- a/kernel.spec +++ b/kernel.spec @@ -231,7 +231,9 @@ rm -f %buildroot/lib/modules/%kernelrelease/build/scripts/*/*.o cp -a scripts/* %buildroot/lib/modules/%kernelrelease/build/scripts/ cp -a include/* %buildroot/lib/modules/%kernelrelease/build/include -cp -a --parents arch/x86/include/generated %buildroot/lib/modules/%kernelrelease/build/ +if [ "%{build_flavor}" != "xenlinux" ]; then + cp -a --parents arch/x86/include/generated %buildroot/lib/modules/%kernelrelease/build/ +fi # Make sure the Makefile and version.h have a matching timestamp so that # external modules can be built From d1dac3e56d6879dad9927f0b6c719bcf610092ac Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 8 Jun 2012 01:22:15 +0200 Subject: [PATCH 12/56] pvops: version 3.4.1 --- linux-3.4.1.tar.bz2.sha1sum | 1 + linux-3.4.1.tar.sign | 17 +++++++++++++++++ version-pvops | 2 +- 3 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 linux-3.4.1.tar.bz2.sha1sum create mode 100644 linux-3.4.1.tar.sign diff --git a/linux-3.4.1.tar.bz2.sha1sum b/linux-3.4.1.tar.bz2.sha1sum new file mode 100644 index 0000000..c4d2475 --- /dev/null +++ b/linux-3.4.1.tar.bz2.sha1sum @@ -0,0 +1 @@ +152828418bbea9d8cc2a08f501e9cee3c6d48d03 linux-3.4.1.tar.bz2 diff --git a/linux-3.4.1.tar.sign b/linux-3.4.1.tar.sign new file mode 100644 index 0000000..d93d9ee --- /dev/null +++ b/linux-3.4.1.tar.sign @@ -0,0 +1,17 @@ +-----BEGIN PGP SIGNATURE----- +Version: GnuPG v2.0.18 (GNU/Linux) + +iQIcBAABAgAGBQJPyGzqAAoJEDjbvchgkmk+L5sQAI4LU+CQkCTBSwbUtKQqVTW7 +HEK+zyipn0+TGqzuQEMFDiBnWRBn/uTl7vw4S7dsIskDxDDiLIK3k+FG677S8CIX +e62qwQQHh3NyIE6P5rXuITIfUtQgT4iQ5HYrdfKZ5X6aVxsw2hLm370xKzr76mx/ +eagIHSVYbmi4b2dKAItcD+aZ3482jLYhytIO7yyoqaQrW5ql90/z2v85oQD3pM6J +lPOyU10EQo7ymKUT2ey7uc9Eodp2VTi538qfvEHYBWMpv08bW8zd/uvgiTYIR1Wy 
+Xpf5fpfnVHbVWrwNycLpMXyubfYJXCIlfQEL9/7e1VFDRGdydLcDLVVS+rjy+zxM +Ua2ZdpKbhz4vrA4GMWW91FEWt37zCmpL1lEG8KB45bVOc2WcRN4Im9oeSVym5+Gw +KyL/Vq54vdHdIzaXd9JFkB8qspxQ6Z9uZjrdHCa4hoeonwfse4MlJ+Jkp603ztzg +wziBlktjDqB1NVR1suJU3TnoNg6jMpI6hmGSch/GXj1ImAgvA0Ti5+Ifx2oiWcrl +sO9bjVX3HBGDzNd1g8qo7l17JsTyhgEK7nCrT78aMqwUR8zuWoMR8RqHjs/7A5hO +UTIwRmViursL6JiBMibqnStLQEDQO1yOi9Jmsr9SuIEGRFzRWw7cCaVu7eiz5Wc5 +5qJFOz6f9ye6w/6OnzDM +=BE9n +-----END PGP SIGNATURE----- diff --git a/version-pvops b/version-pvops index 2f4b607..47b322c 100644 --- a/version-pvops +++ b/version-pvops @@ -1 +1 @@ -3.4 +3.4.1 From 550b5f7d956e95af8786b3ceb2ef2e2d6472d9bf Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sun, 10 Jun 2012 02:01:47 +0200 Subject: [PATCH 13/56] initramfs: setup dmroot as non-persistent snapshot The volatile.img is cleared before each startup anyway. This should speed up AppVM start and reduce ssd wearing. --- vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh b/vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh index 117d257..fa1616f 100755 --- a/vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh +++ b/vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh @@ -20,7 +20,7 @@ if [ `blockdev --getro /dev/xvda` = 1 ] ; then while ! [ -e /dev/xvdc ]; do sleep 0.1; done while ! [ -e /dev/xvdc2 ]; do sleep 0.1; done - echo "0 `blockdev --getsz /dev/xvda` snapshot /dev/xvda /dev/xvdc2 P 16" | \ + echo "0 `blockdev --getsz /dev/xvda` snapshot /dev/xvda /dev/xvdc2 N 16" | \ dmsetup create dmroot || { echo "Qubes: FATAL: cannot create dmroot!"; } echo Qubes: done. 
else From 5751db78f995498bb0196ff1aeee9a63a3d6f10b Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 11 Jun 2012 22:49:31 +0200 Subject: [PATCH 14/56] pvops: respect 'removable' xenstore flag for block devices Especially this is needed by pmount to allow mount qvm-block attached devices by normal user. --- .../pvops-blkfront-removable-flag.patch | 26 +++++++++++++++++++ series-pvops.conf | 1 + 2 files changed, 27 insertions(+) create mode 100644 patches.xen/pvops-blkfront-removable-flag.patch diff --git a/patches.xen/pvops-blkfront-removable-flag.patch b/patches.xen/pvops-blkfront-removable-flag.patch new file mode 100644 index 0000000..633f708 --- /dev/null +++ b/patches.xen/pvops-blkfront-removable-flag.patch @@ -0,0 +1,26 @@ +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c +index 4e86393..34493d7 100644 +--- a/drivers/block/xen-blkfront.c ++++ b/drivers/block/xen-blkfront.c +@@ -1188,7 +1188,7 @@ static void blkfront_connect(struct blkfront_info *info) + unsigned long sector_size; + unsigned int binfo; + int err; +- int barrier, flush, discard; ++ int barrier, flush, discard, removable; + + switch (info->connected) { + case BLKIF_STATE_CONNECTED: +@@ -1266,6 +1266,12 @@ static void blkfront_connect(struct blkfront_info *info) + if (!err && discard) + blkfront_setup_discard(info); + ++ err = xenbus_gather(XBT_NIL, info->xbdev->otherend, ++ "removable", "%d", &removable, ++ NULL); ++ if (!err && removable) ++ binfo |= VDISK_REMOVABLE; ++ + err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); + if (err) { + xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", diff --git a/series-pvops.conf b/series-pvops.conf index 2c2f308..6c8b00f 100644 --- a/series-pvops.conf +++ b/series-pvops.conf @@ -4,3 +4,4 @@ patches.xen/pvops-3.4-0003-xen-acpi-sleep-Register-to-the-acpi_suspend_lowlevel. 
patches.xen/pvops-3.4-enable-netfront-in-dom0.patch patches.xen/pvops-netback-calculate-correctly-the-SKB-slots.patch patches.xen/pvops-3.4-0100-usb-xen-pvusb-driver.patch +patches.xen/pvops-blkfront-removable-flag.patch From 3541d3d0126c62cdf10c8f76862d4ae90bbda472 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 14 Jun 2012 00:26:45 +0200 Subject: [PATCH 15/56] pvops: add a couple of fixes from 3.5-rc kernels Especially for block backend. --- ...py-id-field-when-doing-BLKIF_DISCARD.patch | 54 +++++++ ...-WARN-to-deal-with-misbehaving-backe.patch | 141 ++++++++++++++++++ ...ont-module-exit-handling-adjustments.patch | 33 ++++ ...-filter-APERFMPERF-cpuid-feature-out.patch | 56 +++++++ series-pvops.conf | 9 ++ 5 files changed, 293 insertions(+) create mode 100644 patches.xen/pvops-xen-blkback-Copy-id-field-when-doing-BLKIF_DISCARD.patch create mode 100644 patches.xen/pvops-xen-blkfront-Add-WARN-to-deal-with-misbehaving-backe.patch create mode 100644 patches.xen/pvops-xen-blkfront-module-exit-handling-adjustments.patch create mode 100644 patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch diff --git a/patches.xen/pvops-xen-blkback-Copy-id-field-when-doing-BLKIF_DISCARD.patch b/patches.xen/pvops-xen-blkback-Copy-id-field-when-doing-BLKIF_DISCARD.patch new file mode 100644 index 0000000..edf6398 --- /dev/null +++ b/patches.xen/pvops-xen-blkback-Copy-id-field-when-doing-BLKIF_DISCARD.patch @@ -0,0 +1,54 @@ +From 8c9ce606a60e4a0cb447bdc082ce383b96b227b4 Mon Sep 17 00:00:00 2001 +From: Konrad Rzeszutek Wilk +Date: Fri, 25 May 2012 16:11:09 -0400 +Subject: [PATCH 2/3] xen/blkback: Copy id field when doing BLKIF_DISCARD. + +We weren't copying the id field so when we sent the response +back to the frontend (especially with a 64-bit host and 32-bit +guest), we ended up using a random value. 
This lead to the +frontend crashing as it would try to pass to __blk_end_request_all +a NULL 'struct request' (b/c it would use the 'id' to find the +proper 'struct request' in its shadow array) and end up crashing: + +BUG: unable to handle kernel NULL pointer dereference at 000000e4 +IP: [] __blk_end_request_all+0xc/0x40 +.. snip.. +EIP is at __blk_end_request_all+0xc/0x40 +.. snip.. + [] blkif_interrupt+0x172/0x330 [xen_blkfront] + +This fixes the bug by passing in the proper id for the response. + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=824641 + +CC: stable@kernel.org +Tested-by: William Dauchy +Acked-by: Stefano Stabellini +Signed-off-by: Konrad Rzeszutek Wilk +--- + drivers/block/xen-blkback/common.h | 2 ++ + 1 files changed, 2 insertions(+), 0 deletions(-) + +diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h +index 773cf27..9ad3b5e 100644 +--- a/drivers/block/xen-blkback/common.h ++++ b/drivers/block/xen-blkback/common.h +@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, + break; + case BLKIF_OP_DISCARD: + dst->u.discard.flag = src->u.discard.flag; ++ dst->u.discard.id = src->u.discard.id; + dst->u.discard.sector_number = src->u.discard.sector_number; + dst->u.discard.nr_sectors = src->u.discard.nr_sectors; + break; +@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, + break; + case BLKIF_OP_DISCARD: + dst->u.discard.flag = src->u.discard.flag; ++ dst->u.discard.id = src->u.discard.id; + dst->u.discard.sector_number = src->u.discard.sector_number; + dst->u.discard.nr_sectors = src->u.discard.nr_sectors; + break; +-- +1.7.4.4 + diff --git a/patches.xen/pvops-xen-blkfront-Add-WARN-to-deal-with-misbehaving-backe.patch b/patches.xen/pvops-xen-blkfront-Add-WARN-to-deal-with-misbehaving-backe.patch new file mode 100644 index 0000000..96968dd --- /dev/null +++ b/patches.xen/pvops-xen-blkfront-Add-WARN-to-deal-with-misbehaving-backe.patch @@ -0,0 
+1,141 @@ +From 6878c32e5cc0e40980abe51d1f02fb453e27493e Mon Sep 17 00:00:00 2001 +From: Konrad Rzeszutek Wilk +Date: Fri, 25 May 2012 17:34:51 -0400 +Subject: [PATCH 3/3] xen/blkfront: Add WARN to deal with misbehaving + backends. + +Part of the ring structure is the 'id' field which is under +control of the frontend. The frontend stamps it with "some" +value (this some in this implementation being a value less +than BLK_RING_SIZE), and when it gets a response expects +said value to be in the response structure. We have a check +for the id field when spolling new requests but not when +de-spolling responses. + +We also add an extra check in add_id_to_freelist to make +sure that the 'struct request' was not NULL - as we cannot +pass a NULL to __blk_end_request_all, otherwise that crashes +(and all the operations that the response is dealing with +end up with __blk_end_request_all). + +Lastly we also print the name of the operation that failed. + +[v1: s/BUG/WARN/ suggested by Stefano] +[v2: Add extra check in add_id_to_freelist] +[v3: Redid op_name per Jan's suggestion] +[v4: add const * and add WARN on failure returns] +Acked-by: Jan Beulich +Acked-by: Stefano Stabellini +Signed-off-by: Konrad Rzeszutek Wilk +--- + drivers/block/xen-blkfront.c | 58 +++++++++++++++++++++++++++++++++-------- + 1 files changed, 46 insertions(+), 12 deletions(-) + +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c +index 60eed4b..e4fb337 100644 +--- a/drivers/block/xen-blkfront.c ++++ b/drivers/block/xen-blkfront.c +@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info) + return free; + } + +-static void add_id_to_freelist(struct blkfront_info *info, ++static int add_id_to_freelist(struct blkfront_info *info, + unsigned long id) + { ++ if (info->shadow[id].req.u.rw.id != id) ++ return -EINVAL; ++ if (info->shadow[id].request == NULL) ++ return -EINVAL; + info->shadow[id].req.u.rw.id = info->shadow_free; + info->shadow[id].request = 
NULL; + info->shadow_free = id; ++ return 0; + } + ++static const char *op_name(int op) ++{ ++ static const char *const names[] = { ++ [BLKIF_OP_READ] = "read", ++ [BLKIF_OP_WRITE] = "write", ++ [BLKIF_OP_WRITE_BARRIER] = "barrier", ++ [BLKIF_OP_FLUSH_DISKCACHE] = "flush", ++ [BLKIF_OP_DISCARD] = "discard" }; ++ ++ if (op < 0 || op >= ARRAY_SIZE(names)) ++ return "unknown"; ++ ++ if (!names[op]) ++ return "reserved"; ++ ++ return names[op]; ++} + static int xlbd_reserve_minors(unsigned int minor, unsigned int nr) + { + unsigned int end = minor + nr; +@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) + + bret = RING_GET_RESPONSE(&info->ring, i); + id = bret->id; ++ /* ++ * The backend has messed up and given us an id that we would ++ * never have given to it (we stamp it up to BLK_RING_SIZE - ++ * look in get_id_from_freelist. ++ */ ++ if (id >= BLK_RING_SIZE) { ++ WARN(1, "%s: response to %s has incorrect id (%ld)\n", ++ info->gd->disk_name, op_name(bret->operation), id); ++ /* We can't safely get the 'struct request' as ++ * the id is busted. */ ++ continue; ++ } + req = info->shadow[id].request; + + if (bret->operation != BLKIF_OP_DISCARD) + blkif_completion(&info->shadow[id]); + +- add_id_to_freelist(info, id); ++ if (add_id_to_freelist(info, id)) { ++ WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n", ++ info->gd->disk_name, op_name(bret->operation), id); ++ continue; ++ } + + error = (bret->status == BLKIF_RSP_OKAY) ? 
0 : -EIO; + switch (bret->operation) { + case BLKIF_OP_DISCARD: + if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { + struct request_queue *rq = info->rq; +- printk(KERN_WARNING "blkfront: %s: discard op failed\n", +- info->gd->disk_name); ++ printk(KERN_WARNING "blkfront: %s: %s op failed\n", ++ info->gd->disk_name, op_name(bret->operation)); + error = -EOPNOTSUPP; + info->feature_discard = 0; + info->feature_secdiscard = 0; +@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) + case BLKIF_OP_FLUSH_DISKCACHE: + case BLKIF_OP_WRITE_BARRIER: + if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { +- printk(KERN_WARNING "blkfront: %s: write %s op failed\n", +- info->flush_op == BLKIF_OP_WRITE_BARRIER ? +- "barrier" : "flush disk cache", +- info->gd->disk_name); ++ printk(KERN_WARNING "blkfront: %s: %s op failed\n", ++ info->gd->disk_name, op_name(bret->operation)); + error = -EOPNOTSUPP; + } + if (unlikely(bret->status == BLKIF_RSP_ERROR && + info->shadow[id].req.u.rw.nr_segments == 0)) { +- printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n", +- info->flush_op == BLKIF_OP_WRITE_BARRIER ? 
+- "barrier" : "flush disk cache", +- info->gd->disk_name); ++ printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", ++ info->gd->disk_name, op_name(bret->operation)); + error = -EOPNOTSUPP; + } + if (unlikely(error)) { +-- +1.7.4.4 + diff --git a/patches.xen/pvops-xen-blkfront-module-exit-handling-adjustments.patch b/patches.xen/pvops-xen-blkfront-module-exit-handling-adjustments.patch new file mode 100644 index 0000000..c3479ff --- /dev/null +++ b/patches.xen/pvops-xen-blkfront-module-exit-handling-adjustments.patch @@ -0,0 +1,33 @@ +From 8605067fb9b8e34aecf44ec258657c9cc009fc5a Mon Sep 17 00:00:00 2001 +From: Jan Beulich +Date: Thu, 5 Apr 2012 16:04:52 +0100 +Subject: [PATCH 1/3] xen-blkfront: module exit handling adjustments + +The blkdev major must be released upon exit, or else the module can't +attach to devices using the same majors upon being loaded again. Also +avoid leaking the minor tracking bitmap. + +Signed-off-by: Jan Beulich +Signed-off-by: Konrad Rzeszutek Wilk +--- + drivers/block/xen-blkfront.c | 4 +++- + 1 files changed, 3 insertions(+), 1 deletions(-) + +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c +index 4f2b460..60eed4b 100644 +--- a/drivers/block/xen-blkfront.c ++++ b/drivers/block/xen-blkfront.c +@@ -1500,7 +1500,9 @@ module_init(xlblk_init); + + static void __exit xlblk_exit(void) + { +- return xenbus_unregister_driver(&blkfront_driver); ++ xenbus_unregister_driver(&blkfront_driver); ++ unregister_blkdev(XENVBD_MAJOR, DEV_NAME); ++ kfree(minors); + } + module_exit(xlblk_exit); + +-- +1.7.4.4 + diff --git a/patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch b/patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch new file mode 100644 index 0000000..106ff6b --- /dev/null +++ b/patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch @@ -0,0 +1,56 @@ +From 5e626254206a709c6e937f3dda69bf26c7344f6f Mon Sep 17 00:00:00 2001 +From: Andre Przywara +Date: Tue, 29 May 
2012 13:07:31 +0200 +Subject: [PATCH] xen/setup: filter APERFMPERF cpuid feature out + +Xen PV kernels allow access to the APERF/MPERF registers to read the +effective frequency. Access to the MSRs is however redirected to the +currently scheduled physical CPU, making consecutive read and +compares unreliable. In addition each rdmsr traps into the hypervisor. +So to avoid bogus readouts and expensive traps, disable the kernel +internal feature flag for APERF/MPERF if running under Xen. +This will +a) remove the aperfmperf flag from /proc/cpuinfo +b) not mislead the power scheduler (arch/x86/kernel/cpu/sched.c) to + use the feature to improve scheduling (by default disabled) +c) not mislead the cpufreq driver to use the MSRs + +This does not cover userland programs which access the MSRs via the +device file interface, but this will be addressed separately. + +Signed-off-by: Andre Przywara +Cc: stable@vger.kernel.org # v3.0+ +Signed-off-by: Konrad Rzeszutek Wilk +--- + arch/x86/xen/enlighten.c | 8 ++++++++ + 1 files changed, 8 insertions(+), 0 deletions(-) + +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index d1f9a04..272ebd0 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -208,6 +208,9 @@ static void __init xen_banner(void) + xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? 
" (preserve-AD)" : ""); + } + ++#define CPUID_THERM_POWER_LEAF 6 ++#define APERFMPERF_PRESENT 0 ++ + static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; + static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; + +@@ -241,6 +244,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, + *dx = cpuid_leaf5_edx_val; + return; + ++ case CPUID_THERM_POWER_LEAF: ++ /* Disabling APERFMPERF for kernel usage */ ++ maskecx = ~(1 << APERFMPERF_PRESENT); ++ break; ++ + case 0xb: + /* Suppress extended topology stuff */ + maskebx = 0; +-- +1.7.4.4 + diff --git a/series-pvops.conf b/series-pvops.conf index 6c8b00f..ecaa364 100644 --- a/series-pvops.conf +++ b/series-pvops.conf @@ -1,7 +1,16 @@ +# ACPI S3 patches.xen/pvops-3.4-0001-x86-acpi-sleep-Provide-registration-for-acpi_suspend.patch patches.xen/pvops-3.4-0002-xen-acpi-sleep-Enable-ACPI-sleep-via-the-__acpi_os_p.patch patches.xen/pvops-3.4-0003-xen-acpi-sleep-Register-to-the-acpi_suspend_lowlevel.patch + +# Fixes which should will go in 3.5 +patches.xen/pvops-xen-blkfront-module-exit-handling-adjustments.patch +patches.xen/pvops-xen-blkback-Copy-id-field-when-doing-BLKIF_DISCARD.patch +patches.xen/pvops-xen-blkfront-Add-WARN-to-deal-with-misbehaving-backe.patch +patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch patches.xen/pvops-3.4-enable-netfront-in-dom0.patch patches.xen/pvops-netback-calculate-correctly-the-SKB-slots.patch + +# Additional features patches.xen/pvops-3.4-0100-usb-xen-pvusb-driver.patch patches.xen/pvops-blkfront-removable-flag.patch From a1eea488620cc9715302d74720b7ebff8b333ff0 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 14 Jun 2012 02:52:26 +0200 Subject: [PATCH 16/56] spec: fix vm-kernel dir name Use simple x.y.z-r instead of x.y.z-r.pvops.qubes --- kernel.spec | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/kernel.spec b/kernel.spec index f6a37ba..ab8fc44 100644 --- a/kernel.spec +++ b/kernel.spec @@ -4,7 +4,8 @@ 
#%define _unpackaged_files_terminate_build 0 %define variant %{build_flavor}.qubes -%define rel %(cat rel-%{build_flavor}).%{variant} +%define plainrel %(cat rel-%{build_flavor}) +%define rel %{plainrel}.%{variant} %define version %(cat version-%{build_flavor}) %define _buildshell /bin/bash @@ -19,7 +20,7 @@ %define build_src_dir %my_builddir/linux-%version %define src_install_dir /usr/src/kernels/%kernelrelease %define kernel_build_dir %my_builddir/linux-obj -%define vm_install_dir /var/lib/qubes/vm-kernels/%version-%rel +%define vm_install_dir /var/lib/qubes/vm-kernels/%version-%{plainrel} %(chmod +x %_sourcedir/{guards,apply-patches,check-for-config-changes}) @@ -442,7 +443,7 @@ umount /tmp/qubes-modules-%kernelrelease rmdir /tmp/qubes-modules-%kernelrelease mv /tmp/qubes-modules-%kernelrelease.img %vm_install_dir/modules.img -qubes-prefs --set default-kernel %version +qubes-prefs --set default-kernel %version-%plainrel %files qubes-vm %defattr(-, root, root) From d1eb185f1be1bde17017de8f07ba0ef43c0eaffd Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 14 Jun 2012 03:51:11 +0200 Subject: [PATCH 17/56] vm-initrd: fix compatibility with older dracut --- vm-initramfs/pre-pivot | 1 - vm-initramfs/pre-pivot/50_mount_modules.sh | 1 + vm-initramfs/pre-udev | 1 - vm-initramfs/pre-udev/90_qubes_cow_setup.sh | 1 + 4 files changed, 2 insertions(+), 2 deletions(-) delete mode 120000 vm-initramfs/pre-pivot create mode 120000 vm-initramfs/pre-pivot/50_mount_modules.sh delete mode 120000 vm-initramfs/pre-udev create mode 120000 vm-initramfs/pre-udev/90_qubes_cow_setup.sh diff --git a/vm-initramfs/pre-pivot b/vm-initramfs/pre-pivot deleted file mode 120000 index 8460cde..0000000 --- a/vm-initramfs/pre-pivot +++ /dev/null @@ -1 +0,0 @@ -lib/dracut/hooks/pre-pivot \ No newline at end of file diff --git a/vm-initramfs/pre-pivot/50_mount_modules.sh b/vm-initramfs/pre-pivot/50_mount_modules.sh new file mode 120000 index 0000000..968f123 --- /dev/null +++ 
b/vm-initramfs/pre-pivot/50_mount_modules.sh @@ -0,0 +1 @@ +../lib/dracut/hooks/pre-pivot/50_mount_modules.sh \ No newline at end of file diff --git a/vm-initramfs/pre-udev b/vm-initramfs/pre-udev deleted file mode 120000 index f8c9b04..0000000 --- a/vm-initramfs/pre-udev +++ /dev/null @@ -1 +0,0 @@ -lib/dracut/hooks/pre-udev \ No newline at end of file diff --git a/vm-initramfs/pre-udev/90_qubes_cow_setup.sh b/vm-initramfs/pre-udev/90_qubes_cow_setup.sh new file mode 120000 index 0000000..7c3643c --- /dev/null +++ b/vm-initramfs/pre-udev/90_qubes_cow_setup.sh @@ -0,0 +1 @@ +../lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh \ No newline at end of file From d3b78a4d48b4332b45eca1a76bb24b5f360ef78d Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 15 Jun 2012 13:15:44 +0200 Subject: [PATCH 18/56] pvops: version 3.4.2 This version include fix for misbehaving loop devices (reads of pages not present in pagecache were messed up). The commit is: "radix-tree: fix contiguous iterator". 
--- linux-3.4.2.tar.bz2.sha1sum | 1 + linux-3.4.2.tar.sign | 17 +++++++++++++++++ version-pvops | 2 +- 3 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 linux-3.4.2.tar.bz2.sha1sum create mode 100644 linux-3.4.2.tar.sign diff --git a/linux-3.4.2.tar.bz2.sha1sum b/linux-3.4.2.tar.bz2.sha1sum new file mode 100644 index 0000000..32f6624 --- /dev/null +++ b/linux-3.4.2.tar.bz2.sha1sum @@ -0,0 +1 @@ +326d0ae0c438454283a9230eb65c074920d1b6b1 linux-3.4.2.tar.bz2 diff --git a/linux-3.4.2.tar.sign b/linux-3.4.2.tar.sign new file mode 100644 index 0000000..3a08eb1 --- /dev/null +++ b/linux-3.4.2.tar.sign @@ -0,0 +1,17 @@ +-----BEGIN PGP SIGNATURE----- +Version: GnuPG v2.0.18 (GNU/Linux) + +iQIcBAABAgAGBQJP022nAAoJEDjbvchgkmk+VdsQAJKaQhijelIyQ7n9YHKXXBxI +55iGvqzlFgo555t+FoUuFr30lOCS0+oLDX/v+tnfFASFx4OgqVRV0hnZS+qfE+0V +RmJGqTK7mxxl24Tz9bEKtMJyfl16yAexBAJm859TdP0D6rNTcpf/GqKV35B/8Wvx +cXCbXgD3Eh1/osef1j68lwmujXhJxOxxkjlH2dzrZgLHJmE2yof7GzgS0JXfKTpF +YouTJPGlCOQathZmxR7vMZP1mCdAI2zqm1jSwqhIiospDQ0noIVBD1WxVyccvg2q +Ef0mW+eCYETrJtG54bjKccjfzsLWov6ECVsz7KwvGwymT0CrumgTJnsW1s2pPiJD +7PoMGyFnMdrCa4PVsoU04Sey2iw8a43GK4bOFLen54Gx+njwcOrKnqYsj3I6JhhY +rlqiZo56in1wI4IYAynQjQ1AkSlGoJNM0nh4kzDon6vC00WBVwuNmVsUEBdqg/1A +PlbDaxTjQ0UZbneOvZZC3BVBs2D1+XvHoOSPOvKtjnrEBMnkvVCUGp9yNDzGaY1G +YIiShlFsMbnHrqVE5zuquqMlPj7hcvBta8yhvL66x6fssbD2awnzICpxx9xm69s5 +oiNy22epZ0+DIKC7puD5e7bAb6fk+ZpnJmlivenWv0ihnlDapU230zQgnwCAxtG7 +YzIiWqXXts9yvIubdvvD +=iHjT +-----END PGP SIGNATURE----- diff --git a/version-pvops b/version-pvops index 47b322c..4d9d11c 100644 --- a/version-pvops +++ b/version-pvops @@ -1 +1 @@ -3.4.1 +3.4.2 From b5606021c954a99f847f7148854c7daacc2a8f58 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 15 Jun 2012 13:19:44 +0200 Subject: [PATCH 19/56] vm-initramfs: use sysfs directly to get block device parameters Don't use blkdev program, as isn't installed by default by dracut in FC17. 
Actually it isn't necessary to get 'ro' and 'size' parameters, as they can be read directly from sysfs. --- .../lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh b/vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh index fa1616f..19bb9d3 100755 --- a/vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh +++ b/vm-initramfs/lib/dracut/hooks/pre-udev/90_qubes_cow_setup.sh @@ -14,18 +14,18 @@ modprobe xenblk || modprobe xen-blkfront || echo "Qubes: Cannot load Xen Block F echo "Waiting for /dev/xvda* devices..." while ! [ -e /dev/xvda ]; do sleep 0.1; done -if [ `blockdev --getro /dev/xvda` = 1 ] ; then +if [ `cat /sys/block/xvda/ro` = 1 ] ; then echo "Qubes: Doing COW setup for AppVM..." while ! [ -e /dev/xvdc ]; do sleep 0.1; done while ! [ -e /dev/xvdc2 ]; do sleep 0.1; done - echo "0 `blockdev --getsz /dev/xvda` snapshot /dev/xvda /dev/xvdc2 N 16" | \ + echo "0 `cat /sys/block/xvda/size` snapshot /dev/xvda /dev/xvdc2 N 16" | \ dmsetup create dmroot || { echo "Qubes: FATAL: cannot create dmroot!"; } echo Qubes: done. else echo "Qubes: Doing R/W setup for TemplateVM..." - echo "0 `blockdev --getsz /dev/xvda` linear /dev/xvda 0" | \ + echo "0 `cat /sys/block/xvda/size` linear /dev/xvda 0" | \ dmsetup create dmroot || { echo "Qubes: FATAL: cannot create dmroot!"; exit 1; } echo Qubes: done. 
fi From 394e2aaef1fd24daff895ddf08bdac754c2cbfe8 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sat, 23 Jun 2012 20:11:13 +0200 Subject: [PATCH 20/56] pvops: fix for GPU performance Details here: http://groups.google.com/group/qubes-devel/t/33256d29a29fa883 --- ...-xen-pat-Disable-PAT-support-for-now.patch | 72 +++++++ ...attrs-instead-of-pte_flags-on-CPA-se.patch | 196 ++++++++++++++++++ series-pvops.conf | 4 + 3 files changed, 272 insertions(+) create mode 100644 patches.xen/pvops-3.4-Revert-xen-pat-Disable-PAT-support-for-now.patch create mode 100644 patches.xen/pvops-3.4-x86-cpa-Use-pte_attrs-instead-of-pte_flags-on-CPA-se.patch diff --git a/patches.xen/pvops-3.4-Revert-xen-pat-Disable-PAT-support-for-now.patch b/patches.xen/pvops-3.4-Revert-xen-pat-Disable-PAT-support-for-now.patch new file mode 100644 index 0000000..52a33e2 --- /dev/null +++ b/patches.xen/pvops-3.4-Revert-xen-pat-Disable-PAT-support-for-now.patch @@ -0,0 +1,72 @@ +From 433928d3823f561919ead305194e46e5311b573d Mon Sep 17 00:00:00 2001 +From: Marek Marczykowski +Date: Sat, 23 Jun 2012 19:50:44 +0200 +Subject: [PATCH 1/2] Revert "xen/pat: Disable PAT support for now." +Organization: Invisible Things Lab + +This reverts commit 8eaffa67b43e99ae581622c5133e20b0f48bcef1. + +We haven't observed failure which is workarounded by this patch, but it caused +horrible GPU performance. Anyway there is "nopat" option. + +Signed-off-by: Marek Marczykowski +--- + arch/x86/xen/enlighten.c | 2 -- + arch/x86/xen/mmu.c | 8 ++++---- + 2 files changed, 4 insertions(+), 6 deletions(-) + +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index 6c7f1e8..bf3319c 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -1269,9 +1269,7 @@ asmlinkage void __init xen_start_kernel(void) + + /* Prevent unwanted bits from being set in PTEs. 
*/ + __supported_pte_mask &= ~_PAGE_GLOBAL; +-#if 0 + if (!xen_initial_domain()) +-#endif + __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); + + __supported_pte_mask |= _PAGE_IOMAP; +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c +index 69f5857..a5d252a 100644 +--- a/arch/x86/xen/mmu.c ++++ b/arch/x86/xen/mmu.c +@@ -420,13 +420,13 @@ static pteval_t iomap_pte(pteval_t val) + static pteval_t xen_pte_val(pte_t pte) + { + pteval_t pteval = pte.pte; +-#if 0 ++ + /* If this is a WC pte, convert back from Xen WC to Linux WC */ + if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) { + WARN_ON(!pat_enabled); + pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT; + } +-#endif ++ + if (xen_initial_domain() && (pteval & _PAGE_IOMAP)) + return pteval; + +@@ -468,7 +468,7 @@ void xen_set_pat(u64 pat) + static pte_t xen_make_pte(pteval_t pte) + { + phys_addr_t addr = (pte & PTE_PFN_MASK); +-#if 0 ++ + /* If Linux is trying to set a WC pte, then map to the Xen WC. + * If _PAGE_PAT is set, then it probably means it is really + * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope +@@ -481,7 +481,7 @@ static pte_t xen_make_pte(pteval_t pte) + if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT) + pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; + } +-#endif ++ + /* + * Unprivileged domains are allowed to do IOMAPpings for + * PCI passthrough, but not map ISA space. The ISA +-- +1.7.4.4 + diff --git a/patches.xen/pvops-3.4-x86-cpa-Use-pte_attrs-instead-of-pte_flags-on-CPA-se.patch b/patches.xen/pvops-3.4-x86-cpa-Use-pte_attrs-instead-of-pte_flags-on-CPA-se.patch new file mode 100644 index 0000000..52cb5eb --- /dev/null +++ b/patches.xen/pvops-3.4-x86-cpa-Use-pte_attrs-instead-of-pte_flags-on-CPA-se.patch @@ -0,0 +1,196 @@ +From f37a97dead89d07bce4d8fedc4c295c9bc700ab5 Mon Sep 17 00:00:00 2001 +From: Konrad Rzeszutek Wilk +Date: Fri, 4 Nov 2011 11:59:34 -0400 +Subject: [PATCH 2/2] x86/cpa: Use pte_attrs instead of pte_flags on + CPA/set_p.._wb/wc operations. 
+ +When using the paravirt interface, most of the page operations are wrapped +in the pvops interface. The one that is not is the pte_flags. The reason +being that for most cases, the "raw" PTE flag values for baremetal and whatever +pvops platform is running (in this case) - share the same bit meaning. + +Except for PAT. Under Linux, the PAT MSR is written to be: + + PAT4 PAT0 ++---+----+----+----+-----+----+----+ + WC | WC | WB | UC | UC- | WC | WB | <= Linux ++---+----+----+----+-----+----+----+ + WC | WT | WB | UC | UC- | WT | WB | <= BIOS ++---+----+----+----+-----+----+----+ + WC | WP | WC | UC | UC- | WT | WB | <= Xen ++---+----+----+----+-----+----+----+ + +The lookup of this index table translates to looking up +Bit 7, Bit 4, and Bit 3 of PTE: + + PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3). + +If all bits are off, then we are using PAT0. If bit 3 turned on, +then we are using PAT1, if bit 3 and bit 4, then PAT2.. + +Back to the PAT MSR table: + +As you can see, the PAT1 translates to PAT4 under Xen. Under Linux +we only use PAT0, PAT1, and PAT2 for the caching as: + + WB = none (so PAT0) + WC = PWT (bit 3 on) + UC = PWT | PCD (bit 3 and 4 are on). + +But to make it work with Xen, we end up doing for WC a translation: + + PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3 + +And to translate back (when the paravirt pte_val is used) we would: + + PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7. + +This works quite well, except if code uses the pte_flags, as pte_flags +reads the raw value and does not go through the paravirt. Which means +that if (when running under Xen): + + 1) we allocate some pages. + 2) call set_pages_array_wc, which ends up calling: + __page_change_att_set_clr(.., __pgprot(__PAGE_WC), /* set */ + , __pgprot(__PAGE_MASK), /* clear */ + which ends up reading the _raw_ PTE flags and _only_ look at the + _PTE_FLAG_MASK contents with __PAGE_MASK cleared (0x18) and + __PAGE_WC (0x8) set. 
+ + read raw *pte -> 0x67 + *pte = 0x67 & ^0x18 | 0x8 + *pte = 0x67 & 0xfffffe7 | 0x8 + *pte = 0x6f + + [now set_pte_atomic is called, and 0x6f is written in, but under + xen_make_pte, the bit 3 is translated to bit 7, so it ends up + writting 0xa7, which is correct] + + 3) do something to them. + 4) call set_pages_array_wb + __page_change_att_set_clr(.., __pgprot(__PAGE_WB), /* set */ + , __pgprot(__PAGE_MASK), /* clear */ + which ends up reading the _raw_ PTE and _only_ look at the + _PTE_FLAG_MASK contents with _PAGE_MASK cleared (0x18) and + __PAGE_WB (0x0) set: + + read raw *pte -> 0xa7 + *pte = 0xa7 & &0x18 | 0 + *pte = 0xa7 & 0xfffffe7 | 0 + *pte = 0xa7 + + [we check whether the old PTE is different from the new one + + if (pte_val(old_pte) != pte_val(new_pte)) { + set_pte_atomic(kpte, new_pte); + ... + + and find out that 0xA7 == 0xA7 so we do not write the new PTE value in] + + End result is that we failed at removing the WC caching bit! + + 5) free them. + [and have pages with PAT4 (bit 7) set, so other subsystems end up using + the pages that have the write combined bit set resulting in crashes. Yikes!]. + +The fix, which this patch proposes, is to wrap the pte_pgprot in the CPA +code with newly introduced pte_attrs which can go through the pvops interface +to get the "emulated" value instead of the raw. Naturally if CONFIG_PARAVIRT is +not set, it would end calling native_pte_val. + +The other way to fix this is by wrapping pte_flags and go through the pvops +interface and it really is the Right Thing to do. The problem is, that past +experience with mprotect stuff demonstrates that it be really expensive in inner +loops, and pte_flags() is used in some very perf-critical areas. 
+ +Example code to run this and see the various mysterious subsystems/applications +crashing + +MODULE_AUTHOR("Konrad Rzeszutek Wilk "); +MODULE_DESCRIPTION("wb_to_wc_and_back"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(WB_TO_WC); + +static int thread(void *arg) +{ + struct page *a[MAX_PAGES]; + unsigned int i, j; + do { + for (j = 0, i = 0;i < MAX_PAGES; i++, j++) { + a[i] = alloc_page(GFP_KERNEL); + if (!a[i]) + break; + } + set_pages_array_wc(a, j); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout_interruptible(HZ); + for (i = 0; i < j; i++) { + unsigned long *addr = page_address(a[i]); + if (addr) { + memset(addr, 0xc2, PAGE_SIZE); + } + } + set_pages_array_wb(a, j); + for (i = 0; i< MAX_PAGES; i++) { + if (a[i]) + __free_page(a[i]); + a[i] = NULL; + } + } while (!kthread_should_stop()); + return 0; +} +static struct task_struct *t; +static int __init wb_to_wc_init(void) +{ + t = kthread_run(thread, NULL, "wb_to_wc_and_back"); + return 0; +} +static void __exit wb_to_wc_exit(void) +{ + if (t) + kthread_stop(t); +} +module_init(wb_to_wc_init); +module_exit(wb_to_wc_exit); + +This fixes RH BZ #742032, #787403, and #745574 +Signed-off-by: Konrad Rzeszutek Wilk +Tested-by: Tom Goetz +CC: stable@kernel.org +--- + arch/x86/include/asm/pgtable.h | 5 +++++ + arch/x86/mm/pageattr.c | 2 +- + 2 files changed, 6 insertions(+), 1 deletions(-) + +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h +index 49afb3f..fa7bd2c 100644 +--- a/arch/x86/include/asm/pgtable.h ++++ b/arch/x86/include/asm/pgtable.h +@@ -349,6 +349,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) + return __pgprot(preservebits | addbits); + } + ++static inline pgprot_t pte_attrs(pte_t pte) ++{ ++ return __pgprot(pte_val(pte) & PTE_FLAGS_MASK); ++} ++ + #define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK) + + #define canon_pgprot(p) __pgprot(massage_pgprot(p)) +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c +index 
e1ebde3..1ae1b4b 100644 +--- a/arch/x86/mm/pageattr.c ++++ b/arch/x86/mm/pageattr.c +@@ -651,7 +651,7 @@ repeat: + + if (level == PG_LEVEL_4K) { + pte_t new_pte; +- pgprot_t new_prot = pte_pgprot(old_pte); ++ pgprot_t new_prot = pte_attrs(old_pte); + unsigned long pfn = pte_pfn(old_pte); + + pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); +-- +1.7.4.4 + diff --git a/series-pvops.conf b/series-pvops.conf index ecaa364..6aa9887 100644 --- a/series-pvops.conf +++ b/series-pvops.conf @@ -11,6 +11,10 @@ patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch patches.xen/pvops-3.4-enable-netfront-in-dom0.patch patches.xen/pvops-netback-calculate-correctly-the-SKB-slots.patch +# fix for GPU performance (revert workaround and apply proper fix), should go in 3.5 +patches.xen/pvops-3.4-Revert-xen-pat-Disable-PAT-support-for-now.patch +patches.xen/pvops-3.4-x86-cpa-Use-pte_attrs-instead-of-pte_flags-on-CPA-se.patch + # Additional features patches.xen/pvops-3.4-0100-usb-xen-pvusb-driver.patch patches.xen/pvops-blkfront-removable-flag.patch From dd4e419e25bc234a3213918e01050dc8b9cf9fbd Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sat, 23 Jun 2012 20:23:34 +0200 Subject: [PATCH 21/56] pvops: version 3.4.4 One patch already in upstream, so remove it here. 
--- linux-3.4.4.tar.bz2.sha1sum | 1 + ...-filter-APERFMPERF-cpuid-feature-out.patch | 56 ------------------- series-pvops.conf | 1 - version-pvops | 2 +- 4 files changed, 2 insertions(+), 58 deletions(-) create mode 100644 linux-3.4.4.tar.bz2.sha1sum delete mode 100644 patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch diff --git a/linux-3.4.4.tar.bz2.sha1sum b/linux-3.4.4.tar.bz2.sha1sum new file mode 100644 index 0000000..eae2cdd --- /dev/null +++ b/linux-3.4.4.tar.bz2.sha1sum @@ -0,0 +1 @@ +edf887dd7f5805345da38bd54ae01dd5da757894 linux-3.4.4.tar.bz2 diff --git a/patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch b/patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch deleted file mode 100644 index 106ff6b..0000000 --- a/patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 5e626254206a709c6e937f3dda69bf26c7344f6f Mon Sep 17 00:00:00 2001 -From: Andre Przywara -Date: Tue, 29 May 2012 13:07:31 +0200 -Subject: [PATCH] xen/setup: filter APERFMPERF cpuid feature out - -Xen PV kernels allow access to the APERF/MPERF registers to read the -effective frequency. Access to the MSRs is however redirected to the -currently scheduled physical CPU, making consecutive read and -compares unreliable. In addition each rdmsr traps into the hypervisor. -So to avoid bogus readouts and expensive traps, disable the kernel -internal feature flag for APERF/MPERF if running under Xen. -This will -a) remove the aperfmperf flag from /proc/cpuinfo -b) not mislead the power scheduler (arch/x86/kernel/cpu/sched.c) to - use the feature to improve scheduling (by default disabled) -c) not mislead the cpufreq driver to use the MSRs - -This does not cover userland programs which access the MSRs via the -device file interface, but this will be addressed separately. 
- -Signed-off-by: Andre Przywara -Cc: stable@vger.kernel.org # v3.0+ -Signed-off-by: Konrad Rzeszutek Wilk ---- - arch/x86/xen/enlighten.c | 8 ++++++++ - 1 files changed, 8 insertions(+), 0 deletions(-) - -diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c -index d1f9a04..272ebd0 100644 ---- a/arch/x86/xen/enlighten.c -+++ b/arch/x86/xen/enlighten.c -@@ -208,6 +208,9 @@ static void __init xen_banner(void) - xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); - } - -+#define CPUID_THERM_POWER_LEAF 6 -+#define APERFMPERF_PRESENT 0 -+ - static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; - static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; - -@@ -241,6 +244,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, - *dx = cpuid_leaf5_edx_val; - return; - -+ case CPUID_THERM_POWER_LEAF: -+ /* Disabling APERFMPERF for kernel usage */ -+ maskecx = ~(1 << APERFMPERF_PRESENT); -+ break; -+ - case 0xb: - /* Suppress extended topology stuff */ - maskebx = 0; --- -1.7.4.4 - diff --git a/series-pvops.conf b/series-pvops.conf index 6aa9887..f4df209 100644 --- a/series-pvops.conf +++ b/series-pvops.conf @@ -7,7 +7,6 @@ patches.xen/pvops-3.4-0003-xen-acpi-sleep-Register-to-the-acpi_suspend_lowlevel. 
patches.xen/pvops-xen-blkfront-module-exit-handling-adjustments.patch patches.xen/pvops-xen-blkback-Copy-id-field-when-doing-BLKIF_DISCARD.patch patches.xen/pvops-xen-blkfront-Add-WARN-to-deal-with-misbehaving-backe.patch -patches.xen/pvops-xen-setup-filter-APERFMPERF-cpuid-feature-out.patch patches.xen/pvops-3.4-enable-netfront-in-dom0.patch patches.xen/pvops-netback-calculate-correctly-the-SKB-slots.patch diff --git a/version-pvops b/version-pvops index 4d9d11c..f989260 100644 --- a/version-pvops +++ b/version-pvops @@ -1 +1 @@ -3.4.2 +3.4.4 From 2411f1bba2780aba5e68bfa0b2a4aab3ce473b01 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sat, 7 Jul 2012 02:09:38 +0200 Subject: [PATCH 22/56] spec: add flavor-dependent provides (#581) --- kernel.spec | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel.spec b/kernel.spec index ab8fc44..ff437e8 100644 --- a/kernel.spec +++ b/kernel.spec @@ -42,6 +42,7 @@ Provides: %name = %version-%kernelrelease Provides: kernel-xen-dom0 Provides: kernel-qubes-dom0 +Provides: kernel-qubes-dom0-%{build_flavor} Provides: kernel-drm-nouveau = 16 Requires: xen >= 3.4.3 From e45b11c7736d4332fc75104f4777ccf5c7f61d22 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sun, 15 Jul 2012 19:57:47 +0200 Subject: [PATCH 23/56] pvops/xen-blkfront: handle FDEJECT as detach request (#630) --- .../pvops-blkfront-eject-support.patch | 20 +++++++++++++++++++ series-pvops.conf | 1 + 2 files changed, 21 insertions(+) create mode 100644 patches.xen/pvops-blkfront-eject-support.patch diff --git a/patches.xen/pvops-blkfront-eject-support.patch b/patches.xen/pvops-blkfront-eject-support.patch new file mode 100644 index 0000000..e304ed6 --- /dev/null +++ b/patches.xen/pvops-blkfront-eject-support.patch @@ -0,0 +1,20 @@ +--- linux-3.4.1.orig/drivers/block/xen-blkfront.c 2012-06-01 09:18:44.000000000 +0200 ++++ linux-3.4.1/drivers/block/xen-blkfront.c 2012-07-15 15:54:31.350255623 +0200 +@@ -44,6 +44,7 @@ + #include + #include + #include 
++#include + + #include + #include +@@ -241,6 +264,9 @@ + return 0; + return -EINVAL; + } ++ case FDEJECT: ++ xenbus_switch_state(info->xbdev, XenbusStateClosing); ++ return 0; + + default: + /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", diff --git a/series-pvops.conf b/series-pvops.conf index f4df209..f143a53 100644 --- a/series-pvops.conf +++ b/series-pvops.conf @@ -17,3 +17,4 @@ patches.xen/pvops-3.4-x86-cpa-Use-pte_attrs-instead-of-pte_flags-on-CPA-se.patch # Additional features patches.xen/pvops-3.4-0100-usb-xen-pvusb-driver.patch patches.xen/pvops-blkfront-removable-flag.patch +patches.xen/pvops-blkfront-eject-support.patch From e9371ef60d2f3d636c237cd24f6a496252913818 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 24 Sep 2012 16:10:24 +0200 Subject: [PATCH 24/56] gitignore (.sign files) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index d8b1164..6e59496 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ linux-*.tar.bz2 +linux-*.sign kernel-*/ From e241f60e7a8238c727727877c1d589ae141e7472 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 24 Sep 2012 16:10:40 +0200 Subject: [PATCH 25/56] pvops: linux 3.4.11 --- config-pvops | 1 + linux-3.4.11.tar.bz2.sha1sum | 1 + series-pvops.conf | 1 - version-pvops | 2 +- 4 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 linux-3.4.11.tar.bz2.sha1sum diff --git a/config-pvops b/config-pvops index e226bcf..ca0bcca 100644 --- a/config-pvops +++ b/config-pvops @@ -3007,6 +3007,7 @@ CONFIG_I2C_SCMI=m # # I2C system bus drivers (mostly embedded / system-on-chip) # +CONFIG_I2C_DESIGNWARE_CORE=m CONFIG_I2C_DESIGNWARE_PCI=m CONFIG_I2C_EG20T=m CONFIG_I2C_GPIO=m diff --git a/linux-3.4.11.tar.bz2.sha1sum b/linux-3.4.11.tar.bz2.sha1sum new file mode 100644 index 0000000..45e1fc6 --- /dev/null +++ b/linux-3.4.11.tar.bz2.sha1sum @@ -0,0 +1 @@ +af77cf477f8943046e820c265c24ec4de7711cf2 linux-3.4.11.tar.bz2 diff --git 
a/series-pvops.conf b/series-pvops.conf index f143a53..1759224 100644 --- a/series-pvops.conf +++ b/series-pvops.conf @@ -5,7 +5,6 @@ patches.xen/pvops-3.4-0003-xen-acpi-sleep-Register-to-the-acpi_suspend_lowlevel. # Fixes which should will go in 3.5 patches.xen/pvops-xen-blkfront-module-exit-handling-adjustments.patch -patches.xen/pvops-xen-blkback-Copy-id-field-when-doing-BLKIF_DISCARD.patch patches.xen/pvops-xen-blkfront-Add-WARN-to-deal-with-misbehaving-backe.patch patches.xen/pvops-3.4-enable-netfront-in-dom0.patch patches.xen/pvops-netback-calculate-correctly-the-SKB-slots.patch diff --git a/version-pvops b/version-pvops index f989260..0ce07b7 100644 --- a/version-pvops +++ b/version-pvops @@ -1 +1 @@ -3.4.4 +3.4.11 From 6148e5d1dabb5886f633cf25bf906db8a74f20c0 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 4 Oct 2012 04:29:45 +0200 Subject: [PATCH 26/56] pvops: enable CONFIG_PREEMPT_VOLUNTARY This should improve responsiveness of system. --- config-pvops | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config-pvops b/config-pvops index ca0bcca..13f7742 100644 --- a/config-pvops +++ b/config-pvops @@ -365,8 +365,8 @@ CONFIG_NR_CPUS=512 # CONFIG_SCHED_SMT is not set CONFIG_SCHED_MC=y # CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set CONFIG_X86_LOCAL_APIC=y CONFIG_X86_IO_APIC=y From fa53c91147abb8f5c45dfa85f7e86142ad940d6d Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 27 Sep 2012 03:08:59 +0200 Subject: [PATCH 27/56] makefile: do not require sha1sum when signature available --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 397e71a..8a9ed62 100644 --- a/Makefile +++ b/Makefile @@ -69,8 +69,8 @@ else # The key has been compromised # and kernel.org decided not to release signature # with a new key... oh, well... 
-endif sha1sum -c ${HASH_FILE} +endif .PHONY: clean-sources clean-sources: From 4e13372d7ebb7801faf2da884ae9fc66fa562c8b Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sun, 23 Sep 2012 23:42:48 +0200 Subject: [PATCH 28/56] spec: include kernel-specific firmware dir in initramfs --- kernel.spec | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel.spec b/kernel.spec index ff437e8..cffb4fc 100644 --- a/kernel.spec +++ b/kernel.spec @@ -315,6 +315,11 @@ if [ "%{build_flavor}" == "pvops" ]; then mkdir -p %buildroot/lib/firmware mv %buildroot/lib/firmware-all %buildroot/lib/firmware/%kernelrelease fi + +# Include firmware in initramfs +mkdir -p %buildroot/etc/dracut.conf.d +echo "fw_dir+=\"/lib/firmware/%kernelrelease\"" > %buildroot/etc/dracut.conf.d/firmware-%kernelrelease.conf + # Prepare initramfs for Qubes VM mkdir -p %buildroot/%vm_install_dir /sbin/dracut --nomdadmconf --nolvmconf \ @@ -372,6 +377,7 @@ fi %attr(0644, root, root) /boot/vmlinuz-%{kernelrelease} /lib/firmware/%{kernelrelease} /lib/modules/%{kernelrelease} +/etc/dracut.conf.d/firmware-%kernelrelease.conf %package devel Summary: Development files necessary for building kernel modules From 4f53ec6a3c8b31670c56bc3396336c9848b5b955 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 24 Jul 2012 23:59:25 +0200 Subject: [PATCH 29/56] spec: fix default xen console parameter (#644) --- kernel.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel.spec b/kernel.spec index cffb4fc..79a0809 100644 --- a/kernel.spec +++ b/kernel.spec @@ -353,7 +353,7 @@ INITRD_OPT="--mkinitrd --dracut" /sbin/new-kernel-pkg --package %{name}-%{kernelrelease}\ $INITRD_OPT \ --depmod --kernel-args="max_loop=255"\ - --multiboot=/boot/xen.gz --mbargs="console=com1" \ + --multiboot=/boot/xen.gz --mbargs="console=none" \ --banner="Qubes"\ --make-default --install %{kernelrelease} From 66dc83ceb2cbcb36cbc31802862508cc626cff86 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 4 
Oct 2012 05:47:45 +0200 Subject: [PATCH 30/56] pvops: linux 3.4.12 --- version-pvops | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version-pvops b/version-pvops index 0ce07b7..25e8632 100644 --- a/version-pvops +++ b/version-pvops @@ -1 +1 @@ -3.4.11 +3.4.12 From cd7dc1f7dda4a8ea7a8e7a73da9eb98a99ccdde3 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 4 Oct 2012 08:02:52 +0200 Subject: [PATCH 31/56] spec: fix firmware config syntax --- kernel.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel.spec b/kernel.spec index 79a0809..ee1ba5b 100644 --- a/kernel.spec +++ b/kernel.spec @@ -318,7 +318,7 @@ fi # Include firmware in initramfs mkdir -p %buildroot/etc/dracut.conf.d -echo "fw_dir+=\"/lib/firmware/%kernelrelease\"" > %buildroot/etc/dracut.conf.d/firmware-%kernelrelease.conf +echo 'fw_dir="$fw_dir:/lib/firmware/%kernelrelease"' > %buildroot/etc/dracut.conf.d/firmware-%kernelrelease.conf # Prepare initramfs for Qubes VM mkdir -p %buildroot/%vm_install_dir From 9ceaf53181212a9a64a1fe4b6a461443ec7fa5f8 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 4 Oct 2012 08:03:55 +0200 Subject: [PATCH 32/56] pvops: linux 3.4.12-2 --- rel-pvops | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rel-pvops b/rel-pvops index d00491f..0cfbf08 100644 --- a/rel-pvops +++ b/rel-pvops @@ -1 +1 @@ -1 +2 From 0e068253ac6a2993ba6e591709518b65220fd1ee Mon Sep 17 00:00:00 2001 From: Bruce A Downs Date: Mon, 8 Oct 2012 16:26:33 -0600 Subject: [PATCH 33/56] kernel: Turn on time-stamping (-N) for wget when getting kernel sources wget downloads new linux-*.sign files and backs up the existing files as file.1, file.2, etc. This causes false positives during 'git status' or 'make check' i.e. * linux-2.6.38.3.tar.bz2.sign.1 * linux-3.2.7.tar.sign.1 Adding -N causes wget to smartly overwrite as required. 
--- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 8a9ed62..1c6b3e0 100644 --- a/Makefile +++ b/Makefile @@ -54,8 +54,8 @@ get-sources: $(SRC_FILE) $(SRC_FILE): @echo -n "Downloading $(URL)... " - @wget -q $(URL) - @wget -q $(URL_SIGN) + @wget -q -N $(URL) + @wget -q -N $(URL_SIGN) @echo "OK." import-keys: From 2d04cac5e5b25938e0dc446a12b2c6162afeca38 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 7 Nov 2012 18:36:00 +0100 Subject: [PATCH 34/56] makefile: make sure that sign file is downloaded --- Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1c6b3e0..935dafd 100644 --- a/Makefile +++ b/Makefile @@ -50,11 +50,15 @@ HASH_FILE :=${SRC_FILE}.sha1sum URL := $(SRC_BASEURL)/$(SRC_FILE) URL_SIGN := $(SRC_BASEURL)/$(SIGN_FILE) -get-sources: $(SRC_FILE) +get-sources: $(SRC_FILE) $(SIGN_FILE) $(SRC_FILE): @echo -n "Downloading $(URL)... " @wget -q -N $(URL) + @echo "OK." + +$(SIGN_FILE): + @echo -n "Downloading $(URL_SIGN)... " @wget -q -N $(URL_SIGN) @echo "OK." From 8a1ac2f7a9709e17ea6194505f5378fc02cd4a7f Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 7 Nov 2012 18:35:50 +0100 Subject: [PATCH 35/56] Add buils-deps file (#666) --- build-deps.list | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 build-deps.list diff --git a/build-deps.list b/build-deps.list new file mode 100644 index 0000000..a093065 --- /dev/null +++ b/build-deps.list @@ -0,0 +1,4 @@ +sparse +tar +qubes-core-vm-devel-*DIST* +dracut From 16520270d4e05e2d5e00d1c571e142287b48f21f Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 8 Nov 2012 01:59:04 +0100 Subject: [PATCH 36/56] nuke xenlinux kernel files We've completly migrated to upstream kernel with pvops xen support. 
--- Makefile | 4 +- config-xenlinux | 5371 -- linux-2.6.38.3.tar.bz2.sha1sum | 1 - linux-2.6.38.3.tar.bz2.sign | 8 - ...ckward-compatibility-with-broken-userspace | 64 - ...armor-compatibility-patch-for-v5-interface | 379 - ...compatibility-patch-for-v5-network-control | 518 - ...ioapic-Fix-potential-resume-deadlock.patch | 50 - patches.arch/acpi-export-hotplug_execute | 24 - ...rovide_non_interrupt_mode_boot_param.patch | 67 - .../acpi_fix_fadt_32_bit_zero_length.patch | 30 - patches.arch/acpi_srat-pxm-rev-ia64.patch | 59 - patches.arch/acpi_srat-pxm-rev-store.patch | 52 - patches.arch/acpi_srat-pxm-rev-x86-64.patch | 42 - .../acpi_thermal_passive_blacklist.patch | 125 - ...introduce_acpi_root_table_boot_param.patch | 118 - patches.arch/i386-unwind-annotations | 15 - patches.arch/ia64-page-migration | 603 - patches.arch/ia64-page-migration.fix | 159 - patches.arch/kmsg-fix-parameter-limitations | 54 - ...nly-export-selected-pv-ops-feature-structs | 139 - ...place-kvm-io-delay-pv-ops-with-linux-magic | 80 - .../kvm-split-paravirt-ops-by-functionality | 728 - ...vm-split-the-KVM-pv-ops-support-by-feature | 125 - patches.arch/mm-avoid-bad-page-on-lru | 148 - .../perf_timechart_fix_zero_timestamps.patch | 32 - .../ppc-ipic-suspend-without-83xx-fix | 33 - .../ppc-pegasos-console-autodetection.patch | 19 - patches.arch/ppc-prom-nodisplay.patch | 77 - patches.arch/ppc64-xmon-dmesg-printing.patch | 119 - patches.arch/s390-add-FREE_PTE_NR | 43 - patches.arch/s390-message-catalog-fix.diff | 23 - patches.arch/s390-message-catalog.diff | 8630 --- ...apic-force-bigsmp-apic-on-IBM-EXA3-4.patch | 87 - patches.arch/x86-hpet-pre-read | 26 - patches.arch/x86-mcp51-no-dac | 38 - patches.arch/x86_64-hpet-64bit-timer.patch | 223 - patches.arch/x86_64-unwind-annotations | 439 - .../x86_agpgart-g33-stoeln-fix-2.patch | 74 - ...ze-the-output-registers-after-resume.patch | 126 - ...CPI-OpRegion-to-determine-lid-status.patch | 64 - ...nable-plane-pipe-and-PLL-prematurely.patch | 35 - 
...-pipe-plane-enable-disable-functions.patch | 429 - ...sa-asihpi-check-adapter-index-in-hpi_ioctl | 35 - ...a-hda-0018-Fix-pin-config-of-Gigabyte-mobo | 96 - ...alsa-hda-0019-Increase-default-buffer-size | 22 - patches.drivers/bnx2-entropy-source.patch | 40 - ...y-default-to-avoid-problems-with-eeh.patch | 45 - patches.drivers/e1000-entropy-source.patch | 47 - patches.drivers/e1000e-entropy-source.patch | 86 - patches.drivers/ehea-modinfo.patch | 43 - patches.drivers/elousb.patch | 380 - patches.drivers/igb-entropy-source.patch | 70 - .../input-Add-LED-support-to-Synaptics-device | 218 - patches.drivers/ixgbe-entropy-source.patch | 90 - patches.drivers/libata-unlock-hpa-by-default | 24 - patches.drivers/megaraid-mbox-fix-SG_IO | 70 - patches.drivers/mpt-fusion-4.22.00.00-update | 18610 ----- patches.drivers/ppc64-adb | 53 - ...4xxx-5.01.00-k9-5.01.00.00.11.01-k10.patch | 2809 - ...ptop-add-support-for-lots-of-laptops.patch | 707 - ...5785-and-57780-asic-revs-not-working.patch | 193 - patches.drivers/tg3-entropy-source.patch | 61 - ..._sys_access_user_space_with_get_user.patch | 78 - patches.fixes/aggressive-zone-reclaim.patch | 67 - .../bonding-Incorrect-TX-queue-offset.patch | 61 - patches.fixes/bridge-module-get-put.patch | 45 - ...dc-phonet-handle-empty-phonet-header.patch | 74 - ...erformance_optimise_default_settings.patch | 65 - patches.fixes/dm-mpath-reattach-dh | 29 - .../dm-release-map_lock-before-set_disk_ro | 37 - patches.fixes/dm-table-switch-to-readonly | 90 - patches.fixes/fix-nf_conntrack_slp | 63 - ...cop-fix-registering-braindead-stupid-names | 43 - ...uid-partition-tables-can-cause-kernel-oops | 54 - patches.fixes/grab-swap-token-oops | 30 - .../hfs-avoid-crash-in-hfs_bnode_create | 30 - ...for-Skycable-0x3f07-wireless-present.patch | 102 - ...HAVE_UNSTABLE_SCHED_CLOCK-for-SGI_SN.patch | 44 - patches.fixes/ia64-sparse-fixes.diff | 53 - .../input-add-acer-aspire-5710-to-nomux.patch | 30 - ...wer-configuration-on-3945-and-4965-devices | 60 - 
patches.fixes/kvm-ioapic.patch | 21 - patches.fixes/kvm-macos.patch | 73 - ...a-video-sn9c102-world-wirtable-sysfs-files | 37 - ...sdn-add-support-for-group-membership-check | 69 - ...ilter-implement-rfc-1123-for-ftp-conntrack | 104 - patches.fixes/nfs-acl-caching.diff | 46 - patches.fixes/nfs-adaptive-readdir-plus | 80 - patches.fixes/nfs-slot-table-alloc | 31 - ...ry-cache-lookups-that-return-ETIMEDO.patch | 127 - patches.fixes/novfs-LFS-initialization | 25 - patches.fixes/novfs-bdi-init.diff | 54 - patches.fixes/novfs-dentry-cache-limit.patch | 46 - patches.fixes/novfs-err_ptr-fix.diff | 34 - patches.fixes/novfs-fix-inode-uid | 34 - patches.fixes/novfs-fix-oops-in-scope-finding | 31 - patches.fixes/novfs-fragment-size-fix.patch | 26 - patches.fixes/novfs-incorrect-filesize-fix | 39 - patches.fixes/novfs-lindent | 8008 --- patches.fixes/novfs-overflow-fixes | 1720 - ...eturn-ENOTEMPTY-when-deleting-nonempty-dir | 37 - patches.fixes/novfs-truncate-fix | 58 - patches.fixes/novfs-unlink-oops | 36 - patches.fixes/novfs-xattr-errcode-cleanup | 40 - patches.fixes/novfs-xattr-errcode-cleanup2 | 32 - patches.fixes/novfs-xattr-memleak | 30 - patches.fixes/oom-warning | 30 - patches.fixes/oprofile_bios_ctr.patch | 103 - patches.fixes/parport-mutex | 42 - patches.fixes/proc-scsi-scsi-fix.diff | 110 - patches.fixes/ptrace-getsiginfo | 79 - ...ode-evictions-before-umount-to-avoid-crash | 60 - .../reiserfs-remove-2-tb-file-size-limit | 66 - patches.fixes/remount-no-shrink-dcache | 89 - patches.fixes/scsi-add-tgps-setting | 325 - ...94xx-world-writable-sysfs-update_bios-file | 26 - patches.fixes/scsi-check-host-lookup-failure | 29 - patches.fixes/scsi-dh-alua-retry-UA | 53 - patches.fixes/scsi-dh-alua-send-stpg | 33 - patches.fixes/scsi-dh-queuedata-accessors | 98 - patches.fixes/scsi-dh-rdac-add-stk | 25 - .../scsi-ibmvscsi-module_alias.patch | 39 - patches.fixes/scsi-ibmvscsi-show-config.patch | 80 - .../scsi-inquiry-too-short-ratelimit | 26 - 
.../scsi-retry-alua-transition-in-progress | 33 - patches.fixes/scsi-scan-blist-update | 26 - .../sd_liberal_28_sense_invalid.diff | 28 - patches.fixes/seccomp-disable-tsc-option | 56 - patches.fixes/tg3-fix-default-wol.patch | 43 - patches.fixes/tulip-quad-NIC-ifdown | 27 - ...bifs-restrict-world-writable-debugfs-files | 45 - .../xen-blkfront-connect-overflow.patch | 14 - .../xen-disable-cdrom-dbgprints.diff | 18 - patches.fixes/xencons_close_deadlock.patch | 15 - patches.kernel.org/patch-2.6.38.1 | 2788 - patches.kernel.org/patch-2.6.38.1-2 | 2543 - patches.kernel.org/patch-2.6.38.2-3 | 3905 - .../nuke_balloon_minimum_target.patch | 12 - patches.rpmify/buildhost | 37 - patches.rpmify/cloneconfig.diff | 41 - patches.rpmify/dw_spi-fix-PPC-build.patch | 37 - patches.rpmify/firmware-path | 26 - ...a-fix-cast-from-integer-to-pointer-warning | 35 - .../qla4xx-missing-readq-definition | 38 - patches.rpmify/rpm-kernel-config | 22 - patches.rpmify/split-package | 33 - ...ignment-from-incompatible-pointer-warnings | 23 - ...re-fine-grained-directory-permission.patch | 221 - ...-generic-IS_ACL-test-for-acl-support.patch | 73 - ...-IS_RICHACL-test-for-richacl-support.patch | 42 - ...y-representation-and-helper-function.patch | 415 - ...richacl-Permission-mapping-functions.patch | 167 - ...mpute-maximum-file-masks-from-an-acl.patch | 164 - ...chacl-Update-the-file-masks-in-chmod.patch | 79 - ...8-richacl-Permission-check-algorithm.patch | 130 - ...unctions-for-implementing-richacl-in.patch | 252 - ...0010-richacl-Create-time-inheritance.patch | 127 - ...-an-acl-is-equivalent-to-a-file-mode.patch | 79 - .../0012-richacl-Automatic-Inheritance.patch | 143 - ...hacl-Restrict-access-check-algorithm.patch | 52 - ...0014-richacl-xattr-mapping-functions.patch | 237 - ...IXACL-to-check-for-POSIX-ACL-support.patch | 156 - ...t4-Implement-richacl-support-in-ext4.patch | 690 - patches.suse/8250-sysrq-ctrl_o.patch | 135 - ...p-and-make-boot-splash-work-with-KMS.patch | 1500 - 
.../SoN-01-mm-setup_per_zone_wmarks.patch | 65 - patches.suse/SoN-02-doc.patch | 286 - .../SoN-03-mm-gfp-to-alloc_flags-expose.patch | 70 - patches.suse/SoN-04-page_alloc-reserve.patch | 43 - patches.suse/SoN-05-reserve-slub.patch | 425 - .../SoN-06-mm-kmem_estimate_pages.patch | 314 - .../SoN-07-mm-PF_MEMALLOC-softirq.patch | 83 - patches.suse/SoN-08-mm-page_alloc-emerg.patch | 219 - .../SoN-08a-mm-page_alloc-emerg.patch | 30 - .../SoN-09-global-ALLOC_NO_WATERMARKS.patch | 36 - .../SoN-10-mm-page_alloc-GFP_EMERGENCY.patch | 57 - patches.suse/SoN-11-mm-reserve.patch | 873 - .../SoN-12-mm-selinux-emergency.patch | 24 - patches.suse/SoN-13-net-ps_rx.patch | 184 - patches.suse/SoN-14-net-sk_allocation.patch | 156 - patches.suse/SoN-15-netvm-reserve.patch | 254 - patches.suse/SoN-16-netvm-reserve-inet.patch | 504 - patches.suse/SoN-16a-netvm-reserve-inet.patch | 71 - .../SoN-17-netvm-reserve-inet.patch-fix | 23 - .../SoN-18-netvm-skbuff-reserve.patch | 445 - patches.suse/SoN-19-netvm-sk_filter.patch | 28 - patches.suse/SoN-20-netvm-tcp-deadlock.patch | 118 - patches.suse/SoN-21-emergency-nf_queue.patch | 31 - patches.suse/SoN-22-netvm.patch | 183 - patches.suse/SoN-23-mm-swapfile.patch | 348 - .../SoN-24-mm-page_file_methods.patch | 112 - patches.suse/SoN-25-nfs-swapcache.patch | 292 - patches.suse/SoN-25a-nfs-swapcache.patch | 31 - patches.suse/SoN-25b-nfs-swapcache.patch | 29 - patches.suse/SoN-26-nfs-swapper.patch | 164 - patches.suse/SoN-27-nfs-swap_ops.patch | 318 - patches.suse/SoN-27a-nfs-swap_ops.patch | 46 - .../SoN-28-nfs-alloc-recursions.patch | 59 - patches.suse/SoN-29-fix-swap_sync_page-race | 58 - .../SoN-30-fix-uninitialized-var.patch | 40 - .../SoN-31-fix-null-pointer-dereference | 37 - ...-32-fix-kernel-bug-with-multiple-swapfiles | 45 - patches.suse/SoN-33-slab-leak-fix.patch | 45 - patches.suse/SoN-fix | 22 - .../acpi-don-t-preempt-until-the-system-is-up | 25 - .../acpi-dsdt-initrd-v0.9a-2.6.25.patch | 410 - 
...i-generic-initramfs-table-override-support | 401 - patches.suse/acpi_osi_sle11_ident.patch | 29 - patches.suse/add-initramfs-file_read_write | 207 - patches.suse/audit-export-logging.patch | 46 - patches.suse/b43-missing-firmware-info.patch | 37 - patches.suse/bootsplash | 2872 - patches.suse/bootsplash-console-fix | 65 - patches.suse/bootsplash-keep-multiple-data | 321 - patches.suse/bootsplash-scaler | 1282 - patches.suse/connector-read-mostly | 23 - patches.suse/crasher-26.diff | 264 - patches.suse/dm-emulate-blkrrpart-ioctl | 51 - patches.suse/dm-mpath-accept-failed-paths | 224 - .../dm-mpath-detach-existing-hardware-handler | 59 - ...dm-mpath-evaluate-request-result-and-sense | 158 - .../dm-mpath-leastpending-path-update | 301 - .../dm-mpath-no-activate-for-offlined-paths | 85 - patches.suse/dm-mpath-no-partitions-feature | 67 - patches.suse/dm-mpath-null-pgs | 27 - patches.suse/dm-raid45-26-Nov-2009.patch | 5286 -- patches.suse/dm-raid45-api-update-no-barriers | 34 - ...update-remove-dm_put-after-dm_table_get_md | 45 - .../dmraid45-dm_dirty_log_create-api-fix | 25 - ...raid45-dm_get_device-takes-fewer-arguments | 26 - patches.suse/elousb-2.6.35-api-changes | 51 - patches.suse/export-release_open_intent | 23 - patches.suse/export-security_inode_permission | 21 - patches.suse/ext3-barrier-default | 77 - .../file-capabilities-disable-by-default.diff | 56 - patches.suse/files-slab-rcu.patch | 330 - patches.suse/genksyms-add-override-flag.diff | 116 - .../hung_task_timeout-configurable-default | 54 - ...-move-populate_rootfs-back-to-start_kernel | 111 - patches.suse/kbd-ignore-gfx.patch | 37 - patches.suse/kconfig-automate-kernel-desktop | 54 - patches.suse/kdump-dump_after_notifier.patch | 136 - patches.suse/led_classdev.sysfs-name.patch | 22 - .../linux-2.6.29-dont-wait-for-mouse.patch | 46 - .../linux-2.6.29-even-faster-kms.patch | 30 - ...ux-2.6.29-jbd-longer-commit-interval.patch | 26 - .../linux-2.6.29-kms-after-sata.patch | 46 - 
patches.suse/linux-2.6.29-touchkit.patch | 135 - patches.suse/mm-devzero-optimisation.patch | 260 - patches.suse/mm-increase-dirty-limits.patch | 26 - patches.suse/mm-tune-dirty-limits.patch | 77 - patches.suse/mpath-fix | 50 - patches.suse/nameif-track-rename.patch | 53 - patches.suse/netfilter-ip_conntrack_slp.patch | 181 - patches.suse/nfsacl-client-cache-CHECK.diff | 76 - patches.suse/no-frame-pointer-select | 41 - patches.suse/no-partition-scan | 108 - patches.suse/novfs-2.6.35-api-changes | 54 - patches.suse/novfs-2.6.37-api-changes | 298 - patches.suse/novfs-build-fix | 142 - patches.suse/novfs-client-module | 15977 ----- patches.suse/novfs-fix-debug-message.patch | 22 - patches.suse/novfs-fix-ioctl-usage | 202 - patches.suse/novfs-use-evict_inode | 47 - patches.suse/osync-error | 49 - ...panic-on-io-nmi-SLE11-user-space-api.patch | 47 - patches.suse/ppc-no-LDFLAGS_MODULE.patch | 32 - .../ppc-powerbook-usb-fn-key-default.patch | 32 - patches.suse/radeon-monitor-jsxx-quirk.patch | 65 - patches.suse/raw_device_max_minors_param.diff | 112 - patches.suse/readahead-request-tunables.patch | 44 - patches.suse/reiser4-dependencies | 225 - patches.suse/reiserfs-barrier-default | 56 - patches.suse/richacl-fix | 235 - patches.suse/s390-Kerntypes.diff | 387 - patches.suse/s390-System.map.diff | 30 - patches.suse/sched-revert-latency-defaults | 95 - .../scsi-error-test-unit-ready-timeout | 35 - patches.suse/scsi-netlink-ml | 215 - patches.suse/setuid-dumpable-wrongdir | 48 - patches.suse/shmall-bigger | 50 - .../slab-handle-memoryless-nodes-v2a.patch | 308 - patches.suse/stack-unwind | 2193 - patches.suse/supported-flag | 442 - patches.suse/supported-flag-enterprise | 245 - patches.suse/suse-ppc64-branding | 21 - patches.suse/unmap_vmas-lat | 33 - ...deo-ignore-hue-control-for-5986-0241.patch | 57 - patches.suse/wireless-no-aes-select | 33 - patches.suse/x86-mark_rodata_rw.patch | 184 - patches.trace/utrace-core | 4101 -- patches.xen/add-console-use-vt | 46 - 
patches.xen/ipv6-no-autoconf | 35 - ....19-rc1-kexec-move_segment_code-i386.patch | 155 - ...9-rc1-kexec-move_segment_code-x86_64.patch | 150 - patches.xen/pci-guestdev | 2696 - patches.xen/pci-reserve | 236 - patches.xen/sfc-driverlink | 1133 - patches.xen/sfc-driverlink-conditional | 248 - patches.xen/sfc-endianness | 18 - patches.xen/sfc-external-sram | 299 - patches.xen/sfc-resource-driver | 15053 ---- patches.xen/sfc-set-arch | 38 - patches.xen/tmem | 1410 - patches.xen/xen-balloon-max-target | 78 - patches.xen/xen-blkback-bimodal-suse | 39 - patches.xen/xen-blkback-cdrom | 233 - .../xen-blkback-disable-barriers.patch | 11 - patches.xen/xen-blkfront-cdrom | 709 - patches.xen/xen-blkif-protocol-fallback-hack | 225 - patches.xen/xen-blktap-modular | 47 - patches.xen/xen-blktap-write-barriers | 79 - patches.xen/xen-blktap2-use-after-free | 27 - patches.xen/xen-block-backends-cleanup | 242 - patches.xen/xen-clockevents | 1015 - patches.xen/xen-configurable-guest-devices | 88 - patches.xen/xen-cpufreq-report | 57 - patches.xen/xen-cxgb3 | 151 - patches.xen/xen-dcdbas | 295 - patches.xen/xen-floppy | 28 - patches.xen/xen-ipi-per-cpu-irq | 901 - patches.xen/xen-kconfig-compat | 40 - patches.xen/xen-kzalloc | 186 - patches.xen/xen-mem-hotplug | 285 - patches.xen/xen-netback-generalize | 1317 - patches.xen/xen-netback-kernel-threads | 320 - patches.xen/xen-netback-multiple-tasklets | 183 - patches.xen/xen-netback-notify-multi | 91 - patches.xen/xen-netback-nr-irqs | 61 - patches.xen/xen-op-packet | 287 - patches.xen/xen-pcpu-hotplug | 644 - patches.xen/xen-sections | 105 - patches.xen/xen-setup-gsi | 158 - patches.xen/xen-spinlock-poll-early | 184 - patches.xen/xen-swiotlb-heuristics | 32 - patches.xen/xen-sysdev-suspend | 532 - patches.xen/xen-tmem-v1 | 348 - patches.xen/xen-unpriv-build | 372 - patches.xen/xen-virq-per-cpu-irq | 652 - patches.xen/xen-watchdog | 398 - patches.xen/xen-x86-bigmem | 143 - patches.xen/xen-x86-dcr-fallback | 168 - 
patches.xen/xen-x86-exit-mmap | 73 - patches.xen/xen-x86-msr-on-pcpu | 822 - patches.xen/xen-x86-no-lapic | 274 - patches.xen/xen-x86-panic-no-reboot | 32 - patches.xen/xen-x86-per-cpu-vcpu-info | 636 - patches.xen/xen-x86-pmd-handling | 617 - patches.xen/xen-x86_64-dump-user-pgt | 51 - patches.xen/xen-x86_64-note-init-p2m | 343 - patches.xen/xen-x86_64-pgd-alloc-order | 337 - patches.xen/xen-x86_64-pgd-pin | 111 - patches.xen/xen-x86_64-unmapped-initrd | 252 - patches.xen/xen3-auto-arch-i386.diff | 193 - patches.xen/xen3-auto-arch-x86.diff | 516 - patches.xen/xen3-auto-arch-x86_64.diff | 211 - patches.xen/xen3-auto-blktap2-pvops.diff | 2373 - patches.xen/xen3-auto-common.diff | 3992 -- .../xen3-auto-include-xen-interface.diff | 6063 -- patches.xen/xen3-auto-xen-arch.diff | 44652 ------------ patches.xen/xen3-auto-xen-drivers.diff | 59006 ---------------- patches.xen/xen3-auto-xen-kconfig.diff | 856 - patches.xen/xen3-fixup-arch-x86 | 44 - patches.xen/xen3-fixup-blktap2-pvops | 150 - patches.xen/xen3-fixup-common | 409 - patches.xen/xen3-fixup-kconfig | 125 - patches.xen/xen3-fixup-xen | 7054 -- patches.xen/xen3-patch-2.6.18 | 591 - patches.xen/xen3-patch-2.6.19 | 12863 ---- patches.xen/xen3-patch-2.6.20 | 7058 -- patches.xen/xen3-patch-2.6.21 | 5018 -- patches.xen/xen3-patch-2.6.22 | 8241 --- patches.xen/xen3-patch-2.6.23 | 5261 -- patches.xen/xen3-patch-2.6.24 | 10604 --- patches.xen/xen3-patch-2.6.25 | 28406 -------- patches.xen/xen3-patch-2.6.26 | 21121 ------ patches.xen/xen3-patch-2.6.27 | 25807 ------- patches.xen/xen3-patch-2.6.28 | 23572 ------ patches.xen/xen3-patch-2.6.29 | 11362 --- patches.xen/xen3-patch-2.6.30 | 18266 ----- patches.xen/xen3-patch-2.6.31 | 7754 -- patches.xen/xen3-patch-2.6.32 | 7001 -- patches.xen/xen3-patch-2.6.33 | 5033 -- patches.xen/xen3-patch-2.6.34 | 3896 - patches.xen/xen3-patch-2.6.35 | 2667 - patches.xen/xen3-patch-2.6.36 | 2833 - patches.xen/xen3-patch-2.6.37 | 7443 -- patches.xen/xen3-patch-2.6.38 | 3670 - 
patches.xen/xen3-seccomp-disable-tsc-option | 33 - patches.xen/xen3-stack-unwind | 126 - patches.xen/xen3-x86-mark_rodata_rw.patch | 153 - patches.xen/xen3-x86-mcp51-no-dac | 36 - patches.xen/xen3-x86_64-unwind-annotations | 249 - rel-xenlinux | 1 - series-xenlinux.conf | 857 - version-xenlinux | 1 - 397 files changed, 1 insertion(+), 499151 deletions(-) delete mode 100644 config-xenlinux delete mode 100644 linux-2.6.38.3.tar.bz2.sha1sum delete mode 100644 linux-2.6.38.3.tar.bz2.sign delete mode 100644 patches.apparmor/apparmor-allow-dfa-backward-compatibility-with-broken-userspace delete mode 100644 patches.apparmor/apparmor-compatibility-patch-for-v5-interface delete mode 100644 patches.apparmor/apparmor-compatibility-patch-for-v5-network-control delete mode 100644 patches.arch/0001-x86-ioapic-Fix-potential-resume-deadlock.patch delete mode 100644 patches.arch/acpi-export-hotplug_execute delete mode 100644 patches.arch/acpi_ec_provide_non_interrupt_mode_boot_param.patch delete mode 100644 patches.arch/acpi_fix_fadt_32_bit_zero_length.patch delete mode 100644 patches.arch/acpi_srat-pxm-rev-ia64.patch delete mode 100644 patches.arch/acpi_srat-pxm-rev-store.patch delete mode 100644 patches.arch/acpi_srat-pxm-rev-x86-64.patch delete mode 100644 patches.arch/acpi_thermal_passive_blacklist.patch delete mode 100644 patches.arch/acpi_thinkpad_introduce_acpi_root_table_boot_param.patch delete mode 100644 patches.arch/i386-unwind-annotations delete mode 100644 patches.arch/ia64-page-migration delete mode 100644 patches.arch/ia64-page-migration.fix delete mode 100644 patches.arch/kmsg-fix-parameter-limitations delete mode 100644 patches.arch/kvm-only-export-selected-pv-ops-feature-structs delete mode 100644 patches.arch/kvm-replace-kvm-io-delay-pv-ops-with-linux-magic delete mode 100644 patches.arch/kvm-split-paravirt-ops-by-functionality delete mode 100644 patches.arch/kvm-split-the-KVM-pv-ops-support-by-feature delete mode 100644 patches.arch/mm-avoid-bad-page-on-lru 
delete mode 100644 patches.arch/perf_timechart_fix_zero_timestamps.patch delete mode 100644 patches.arch/ppc-ipic-suspend-without-83xx-fix delete mode 100644 patches.arch/ppc-pegasos-console-autodetection.patch delete mode 100644 patches.arch/ppc-prom-nodisplay.patch delete mode 100644 patches.arch/ppc64-xmon-dmesg-printing.patch delete mode 100644 patches.arch/s390-add-FREE_PTE_NR delete mode 100644 patches.arch/s390-message-catalog-fix.diff delete mode 100644 patches.arch/s390-message-catalog.diff delete mode 100644 patches.arch/x86-apic-force-bigsmp-apic-on-IBM-EXA3-4.patch delete mode 100644 patches.arch/x86-hpet-pre-read delete mode 100644 patches.arch/x86-mcp51-no-dac delete mode 100644 patches.arch/x86_64-hpet-64bit-timer.patch delete mode 100644 patches.arch/x86_64-unwind-annotations delete mode 100644 patches.arch/x86_agpgart-g33-stoeln-fix-2.patch delete mode 100644 patches.drivers/0001-drm-i915-Sanitize-the-output-registers-after-resume.patch delete mode 100644 patches.drivers/0001-drm-i915-Use-ACPI-OpRegion-to-determine-lid-status.patch delete mode 100644 patches.drivers/0002-drm-i915-don-t-enable-plane-pipe-and-PLL-prematurely.patch delete mode 100644 patches.drivers/0003-drm-i915-add-pipe-plane-enable-disable-functions.patch delete mode 100644 patches.drivers/alsa-asihpi-check-adapter-index-in-hpi_ioctl delete mode 100644 patches.drivers/alsa-hda-0018-Fix-pin-config-of-Gigabyte-mobo delete mode 100644 patches.drivers/alsa-hda-0019-Increase-default-buffer-size delete mode 100644 patches.drivers/bnx2-entropy-source.patch delete mode 100644 patches.drivers/disable-catas_reset-by-default-to-avoid-problems-with-eeh.patch delete mode 100644 patches.drivers/e1000-entropy-source.patch delete mode 100644 patches.drivers/e1000e-entropy-source.patch delete mode 100644 patches.drivers/ehea-modinfo.patch delete mode 100644 patches.drivers/elousb.patch delete mode 100644 patches.drivers/igb-entropy-source.patch delete mode 100644 
patches.drivers/input-Add-LED-support-to-Synaptics-device delete mode 100644 patches.drivers/ixgbe-entropy-source.patch delete mode 100644 patches.drivers/libata-unlock-hpa-by-default delete mode 100644 patches.drivers/megaraid-mbox-fix-SG_IO delete mode 100644 patches.drivers/mpt-fusion-4.22.00.00-update delete mode 100644 patches.drivers/ppc64-adb delete mode 100644 patches.drivers/qla4xxx-5.01.00-k9-5.01.00.00.11.01-k10.patch delete mode 100644 patches.drivers/staging-samsung-laptop-add-support-for-lots-of-laptops.patch delete mode 100644 patches.drivers/tg3-5785-and-57780-asic-revs-not-working.patch delete mode 100644 patches.drivers/tg3-entropy-source.patch delete mode 100644 patches.fixes/acpi_ec_sys_access_user_space_with_get_user.patch delete mode 100644 patches.fixes/aggressive-zone-reclaim.patch delete mode 100644 patches.fixes/bonding-Incorrect-TX-queue-offset.patch delete mode 100644 patches.fixes/bridge-module-get-put.patch delete mode 100644 patches.fixes/cdc-phonet-handle-empty-phonet-header.patch delete mode 100644 patches.fixes/cpufreq_ondemand_performance_optimise_default_settings.patch delete mode 100644 patches.fixes/dm-mpath-reattach-dh delete mode 100644 patches.fixes/dm-release-map_lock-before-set_disk_ro delete mode 100644 patches.fixes/dm-table-switch-to-readonly delete mode 100644 patches.fixes/fix-nf_conntrack_slp delete mode 100644 patches.fixes/flexcop-fix-registering-braindead-stupid-names delete mode 100644 patches.fixes/fs-partitions-efi-c-corrupted-guid-partition-tables-can-cause-kernel-oops delete mode 100644 patches.fixes/grab-swap-token-oops delete mode 100644 patches.fixes/hfs-avoid-crash-in-hfs_bnode_create delete mode 100644 patches.fixes/hid-add-support-for-Skycable-0x3f07-wireless-present.patch delete mode 100644 patches.fixes/ia64-configure-HAVE_UNSTABLE_SCHED_CLOCK-for-SGI_SN.patch delete mode 100644 patches.fixes/ia64-sparse-fixes.diff delete mode 100644 patches.fixes/input-add-acer-aspire-5710-to-nomux.patch delete mode 
100644 patches.fixes/iwlwifi-fix-tx-power-configuration-on-3945-and-4965-devices delete mode 100644 patches.fixes/kvm-ioapic.patch delete mode 100644 patches.fixes/kvm-macos.patch delete mode 100644 patches.fixes/media-video-sn9c102-world-wirtable-sysfs-files delete mode 100644 patches.fixes/misdn-add-support-for-group-membership-check delete mode 100644 patches.fixes/netfilter-implement-rfc-1123-for-ftp-conntrack delete mode 100644 patches.fixes/nfs-acl-caching.diff delete mode 100644 patches.fixes/nfs-adaptive-readdir-plus delete mode 100644 patches.fixes/nfs-slot-table-alloc delete mode 100644 patches.fixes/nfsd-06-sunrpc-cache-retry-cache-lookups-that-return-ETIMEDO.patch delete mode 100644 patches.fixes/novfs-LFS-initialization delete mode 100644 patches.fixes/novfs-bdi-init.diff delete mode 100644 patches.fixes/novfs-dentry-cache-limit.patch delete mode 100644 patches.fixes/novfs-err_ptr-fix.diff delete mode 100644 patches.fixes/novfs-fix-inode-uid delete mode 100644 patches.fixes/novfs-fix-oops-in-scope-finding delete mode 100644 patches.fixes/novfs-fragment-size-fix.patch delete mode 100644 patches.fixes/novfs-incorrect-filesize-fix delete mode 100644 patches.fixes/novfs-lindent delete mode 100644 patches.fixes/novfs-overflow-fixes delete mode 100644 patches.fixes/novfs-return-ENOTEMPTY-when-deleting-nonempty-dir delete mode 100644 patches.fixes/novfs-truncate-fix delete mode 100644 patches.fixes/novfs-unlink-oops delete mode 100644 patches.fixes/novfs-xattr-errcode-cleanup delete mode 100644 patches.fixes/novfs-xattr-errcode-cleanup2 delete mode 100644 patches.fixes/novfs-xattr-memleak delete mode 100644 patches.fixes/oom-warning delete mode 100644 patches.fixes/oprofile_bios_ctr.patch delete mode 100644 patches.fixes/parport-mutex delete mode 100644 patches.fixes/proc-scsi-scsi-fix.diff delete mode 100644 patches.fixes/ptrace-getsiginfo delete mode 100644 patches.fixes/reiserfs-force-inode-evictions-before-umount-to-avoid-crash delete mode 100644 
patches.fixes/reiserfs-remove-2-tb-file-size-limit delete mode 100644 patches.fixes/remount-no-shrink-dcache delete mode 100644 patches.fixes/scsi-add-tgps-setting delete mode 100644 patches.fixes/scsi-aic94xx-world-writable-sysfs-update_bios-file delete mode 100644 patches.fixes/scsi-check-host-lookup-failure delete mode 100644 patches.fixes/scsi-dh-alua-retry-UA delete mode 100644 patches.fixes/scsi-dh-alua-send-stpg delete mode 100644 patches.fixes/scsi-dh-queuedata-accessors delete mode 100644 patches.fixes/scsi-dh-rdac-add-stk delete mode 100644 patches.fixes/scsi-ibmvscsi-module_alias.patch delete mode 100644 patches.fixes/scsi-ibmvscsi-show-config.patch delete mode 100644 patches.fixes/scsi-inquiry-too-short-ratelimit delete mode 100644 patches.fixes/scsi-retry-alua-transition-in-progress delete mode 100644 patches.fixes/scsi-scan-blist-update delete mode 100644 patches.fixes/sd_liberal_28_sense_invalid.diff delete mode 100644 patches.fixes/seccomp-disable-tsc-option delete mode 100644 patches.fixes/tg3-fix-default-wol.patch delete mode 100644 patches.fixes/tulip-quad-NIC-ifdown delete mode 100644 patches.fixes/ubifs-restrict-world-writable-debugfs-files delete mode 100644 patches.fixes/xen-blkfront-connect-overflow.patch delete mode 100644 patches.fixes/xen-disable-cdrom-dbgprints.diff delete mode 100644 patches.fixes/xencons_close_deadlock.patch delete mode 100644 patches.kernel.org/patch-2.6.38.1 delete mode 100644 patches.kernel.org/patch-2.6.38.1-2 delete mode 100644 patches.kernel.org/patch-2.6.38.2-3 delete mode 100644 patches.qubes/nuke_balloon_minimum_target.patch delete mode 100644 patches.rpmify/buildhost delete mode 100644 patches.rpmify/cloneconfig.diff delete mode 100644 patches.rpmify/dw_spi-fix-PPC-build.patch delete mode 100644 patches.rpmify/firmware-path delete mode 100644 patches.rpmify/ia64-mca-fix-cast-from-integer-to-pointer-warning delete mode 100644 patches.rpmify/qla4xx-missing-readq-definition delete mode 100644 
patches.rpmify/rpm-kernel-config delete mode 100644 patches.rpmify/split-package delete mode 100644 patches.rpmify/tioca-fix-assignment-from-incompatible-pointer-warnings delete mode 100644 patches.suse/0001-vfs-Hooks-for-more-fine-grained-directory-permission.patch delete mode 100644 patches.suse/0002-vfs-Add-generic-IS_ACL-test-for-acl-support.patch delete mode 100644 patches.suse/0003-vfs-Add-IS_RICHACL-test-for-richacl-support.patch delete mode 100644 patches.suse/0004-richacl-In-memory-representation-and-helper-function.patch delete mode 100644 patches.suse/0005-richacl-Permission-mapping-functions.patch delete mode 100644 patches.suse/0006-richacl-Compute-maximum-file-masks-from-an-acl.patch delete mode 100644 patches.suse/0007-richacl-Update-the-file-masks-in-chmod.patch delete mode 100644 patches.suse/0008-richacl-Permission-check-algorithm.patch delete mode 100644 patches.suse/0009-richacl-Helper-functions-for-implementing-richacl-in.patch delete mode 100644 patches.suse/0010-richacl-Create-time-inheritance.patch delete mode 100644 patches.suse/0011-richacl-Check-if-an-acl-is-equivalent-to-a-file-mode.patch delete mode 100644 patches.suse/0012-richacl-Automatic-Inheritance.patch delete mode 100644 patches.suse/0013-richacl-Restrict-access-check-algorithm.patch delete mode 100644 patches.suse/0014-richacl-xattr-mapping-functions.patch delete mode 100644 patches.suse/0015-ext4-Use-IS_POSIXACL-to-check-for-POSIX-ACL-support.patch delete mode 100644 patches.suse/0016-ext4-Implement-richacl-support-in-ext4.patch delete mode 100644 patches.suse/8250-sysrq-ctrl_o.patch delete mode 100644 patches.suse/Cleanup-and-make-boot-splash-work-with-KMS.patch delete mode 100644 patches.suse/SoN-01-mm-setup_per_zone_wmarks.patch delete mode 100644 patches.suse/SoN-02-doc.patch delete mode 100644 patches.suse/SoN-03-mm-gfp-to-alloc_flags-expose.patch delete mode 100644 patches.suse/SoN-04-page_alloc-reserve.patch delete mode 100644 patches.suse/SoN-05-reserve-slub.patch 
delete mode 100644 patches.suse/SoN-06-mm-kmem_estimate_pages.patch delete mode 100644 patches.suse/SoN-07-mm-PF_MEMALLOC-softirq.patch delete mode 100644 patches.suse/SoN-08-mm-page_alloc-emerg.patch delete mode 100644 patches.suse/SoN-08a-mm-page_alloc-emerg.patch delete mode 100644 patches.suse/SoN-09-global-ALLOC_NO_WATERMARKS.patch delete mode 100644 patches.suse/SoN-10-mm-page_alloc-GFP_EMERGENCY.patch delete mode 100644 patches.suse/SoN-11-mm-reserve.patch delete mode 100644 patches.suse/SoN-12-mm-selinux-emergency.patch delete mode 100644 patches.suse/SoN-13-net-ps_rx.patch delete mode 100644 patches.suse/SoN-14-net-sk_allocation.patch delete mode 100644 patches.suse/SoN-15-netvm-reserve.patch delete mode 100644 patches.suse/SoN-16-netvm-reserve-inet.patch delete mode 100644 patches.suse/SoN-16a-netvm-reserve-inet.patch delete mode 100644 patches.suse/SoN-17-netvm-reserve-inet.patch-fix delete mode 100644 patches.suse/SoN-18-netvm-skbuff-reserve.patch delete mode 100644 patches.suse/SoN-19-netvm-sk_filter.patch delete mode 100644 patches.suse/SoN-20-netvm-tcp-deadlock.patch delete mode 100644 patches.suse/SoN-21-emergency-nf_queue.patch delete mode 100644 patches.suse/SoN-22-netvm.patch delete mode 100644 patches.suse/SoN-23-mm-swapfile.patch delete mode 100644 patches.suse/SoN-24-mm-page_file_methods.patch delete mode 100644 patches.suse/SoN-25-nfs-swapcache.patch delete mode 100644 patches.suse/SoN-25a-nfs-swapcache.patch delete mode 100644 patches.suse/SoN-25b-nfs-swapcache.patch delete mode 100644 patches.suse/SoN-26-nfs-swapper.patch delete mode 100644 patches.suse/SoN-27-nfs-swap_ops.patch delete mode 100644 patches.suse/SoN-27a-nfs-swap_ops.patch delete mode 100644 patches.suse/SoN-28-nfs-alloc-recursions.patch delete mode 100644 patches.suse/SoN-29-fix-swap_sync_page-race delete mode 100644 patches.suse/SoN-30-fix-uninitialized-var.patch delete mode 100644 patches.suse/SoN-31-fix-null-pointer-dereference delete mode 100644 
patches.suse/SoN-32-fix-kernel-bug-with-multiple-swapfiles delete mode 100644 patches.suse/SoN-33-slab-leak-fix.patch delete mode 100644 patches.suse/SoN-fix delete mode 100644 patches.suse/acpi-don-t-preempt-until-the-system-is-up delete mode 100644 patches.suse/acpi-dsdt-initrd-v0.9a-2.6.25.patch delete mode 100644 patches.suse/acpi-generic-initramfs-table-override-support delete mode 100644 patches.suse/acpi_osi_sle11_ident.patch delete mode 100644 patches.suse/add-initramfs-file_read_write delete mode 100644 patches.suse/audit-export-logging.patch delete mode 100644 patches.suse/b43-missing-firmware-info.patch delete mode 100644 patches.suse/bootsplash delete mode 100644 patches.suse/bootsplash-console-fix delete mode 100644 patches.suse/bootsplash-keep-multiple-data delete mode 100644 patches.suse/bootsplash-scaler delete mode 100644 patches.suse/connector-read-mostly delete mode 100644 patches.suse/crasher-26.diff delete mode 100644 patches.suse/dm-emulate-blkrrpart-ioctl delete mode 100644 patches.suse/dm-mpath-accept-failed-paths delete mode 100644 patches.suse/dm-mpath-detach-existing-hardware-handler delete mode 100644 patches.suse/dm-mpath-evaluate-request-result-and-sense delete mode 100644 patches.suse/dm-mpath-leastpending-path-update delete mode 100644 patches.suse/dm-mpath-no-activate-for-offlined-paths delete mode 100644 patches.suse/dm-mpath-no-partitions-feature delete mode 100644 patches.suse/dm-mpath-null-pgs delete mode 100644 patches.suse/dm-raid45-26-Nov-2009.patch delete mode 100644 patches.suse/dm-raid45-api-update-no-barriers delete mode 100644 patches.suse/dm-raid45-api-update-remove-dm_put-after-dm_table_get_md delete mode 100644 patches.suse/dmraid45-dm_dirty_log_create-api-fix delete mode 100644 patches.suse/dmraid45-dm_get_device-takes-fewer-arguments delete mode 100644 patches.suse/elousb-2.6.35-api-changes delete mode 100644 patches.suse/export-release_open_intent delete mode 100644 patches.suse/export-security_inode_permission 
delete mode 100644 patches.suse/ext3-barrier-default delete mode 100644 patches.suse/file-capabilities-disable-by-default.diff delete mode 100644 patches.suse/files-slab-rcu.patch delete mode 100644 patches.suse/genksyms-add-override-flag.diff delete mode 100644 patches.suse/hung_task_timeout-configurable-default delete mode 100644 patches.suse/init-move-populate_rootfs-back-to-start_kernel delete mode 100644 patches.suse/kbd-ignore-gfx.patch delete mode 100644 patches.suse/kconfig-automate-kernel-desktop delete mode 100644 patches.suse/kdump-dump_after_notifier.patch delete mode 100644 patches.suse/led_classdev.sysfs-name.patch delete mode 100644 patches.suse/linux-2.6.29-dont-wait-for-mouse.patch delete mode 100644 patches.suse/linux-2.6.29-even-faster-kms.patch delete mode 100644 patches.suse/linux-2.6.29-jbd-longer-commit-interval.patch delete mode 100644 patches.suse/linux-2.6.29-kms-after-sata.patch delete mode 100644 patches.suse/linux-2.6.29-touchkit.patch delete mode 100644 patches.suse/mm-devzero-optimisation.patch delete mode 100644 patches.suse/mm-increase-dirty-limits.patch delete mode 100644 patches.suse/mm-tune-dirty-limits.patch delete mode 100644 patches.suse/mpath-fix delete mode 100644 patches.suse/nameif-track-rename.patch delete mode 100644 patches.suse/netfilter-ip_conntrack_slp.patch delete mode 100644 patches.suse/nfsacl-client-cache-CHECK.diff delete mode 100644 patches.suse/no-frame-pointer-select delete mode 100644 patches.suse/no-partition-scan delete mode 100644 patches.suse/novfs-2.6.35-api-changes delete mode 100644 patches.suse/novfs-2.6.37-api-changes delete mode 100644 patches.suse/novfs-build-fix delete mode 100644 patches.suse/novfs-client-module delete mode 100644 patches.suse/novfs-fix-debug-message.patch delete mode 100644 patches.suse/novfs-fix-ioctl-usage delete mode 100644 patches.suse/novfs-use-evict_inode delete mode 100644 patches.suse/osync-error delete mode 100644 patches.suse/panic-on-io-nmi-SLE11-user-space-api.patch 
delete mode 100644 patches.suse/ppc-no-LDFLAGS_MODULE.patch delete mode 100644 patches.suse/ppc-powerbook-usb-fn-key-default.patch delete mode 100644 patches.suse/radeon-monitor-jsxx-quirk.patch delete mode 100644 patches.suse/raw_device_max_minors_param.diff delete mode 100644 patches.suse/readahead-request-tunables.patch delete mode 100644 patches.suse/reiser4-dependencies delete mode 100644 patches.suse/reiserfs-barrier-default delete mode 100644 patches.suse/richacl-fix delete mode 100644 patches.suse/s390-Kerntypes.diff delete mode 100644 patches.suse/s390-System.map.diff delete mode 100644 patches.suse/sched-revert-latency-defaults delete mode 100644 patches.suse/scsi-error-test-unit-ready-timeout delete mode 100644 patches.suse/scsi-netlink-ml delete mode 100644 patches.suse/setuid-dumpable-wrongdir delete mode 100644 patches.suse/shmall-bigger delete mode 100644 patches.suse/slab-handle-memoryless-nodes-v2a.patch delete mode 100644 patches.suse/stack-unwind delete mode 100644 patches.suse/supported-flag delete mode 100644 patches.suse/supported-flag-enterprise delete mode 100644 patches.suse/suse-ppc64-branding delete mode 100644 patches.suse/unmap_vmas-lat delete mode 100644 patches.suse/uvcvideo-ignore-hue-control-for-5986-0241.patch delete mode 100644 patches.suse/wireless-no-aes-select delete mode 100644 patches.suse/x86-mark_rodata_rw.patch delete mode 100644 patches.trace/utrace-core delete mode 100644 patches.xen/add-console-use-vt delete mode 100644 patches.xen/ipv6-no-autoconf delete mode 100644 patches.xen/linux-2.6.19-rc1-kexec-move_segment_code-i386.patch delete mode 100644 patches.xen/linux-2.6.19-rc1-kexec-move_segment_code-x86_64.patch delete mode 100644 patches.xen/pci-guestdev delete mode 100644 patches.xen/pci-reserve delete mode 100644 patches.xen/sfc-driverlink delete mode 100644 patches.xen/sfc-driverlink-conditional delete mode 100644 patches.xen/sfc-endianness delete mode 100644 patches.xen/sfc-external-sram delete mode 100644 
patches.xen/sfc-resource-driver delete mode 100644 patches.xen/sfc-set-arch delete mode 100644 patches.xen/tmem delete mode 100644 patches.xen/xen-balloon-max-target delete mode 100644 patches.xen/xen-blkback-bimodal-suse delete mode 100644 patches.xen/xen-blkback-cdrom delete mode 100644 patches.xen/xen-blkback-disable-barriers.patch delete mode 100644 patches.xen/xen-blkfront-cdrom delete mode 100644 patches.xen/xen-blkif-protocol-fallback-hack delete mode 100644 patches.xen/xen-blktap-modular delete mode 100644 patches.xen/xen-blktap-write-barriers delete mode 100644 patches.xen/xen-blktap2-use-after-free delete mode 100644 patches.xen/xen-block-backends-cleanup delete mode 100644 patches.xen/xen-clockevents delete mode 100644 patches.xen/xen-configurable-guest-devices delete mode 100644 patches.xen/xen-cpufreq-report delete mode 100644 patches.xen/xen-cxgb3 delete mode 100644 patches.xen/xen-dcdbas delete mode 100644 patches.xen/xen-floppy delete mode 100644 patches.xen/xen-ipi-per-cpu-irq delete mode 100644 patches.xen/xen-kconfig-compat delete mode 100644 patches.xen/xen-kzalloc delete mode 100644 patches.xen/xen-mem-hotplug delete mode 100644 patches.xen/xen-netback-generalize delete mode 100644 patches.xen/xen-netback-kernel-threads delete mode 100644 patches.xen/xen-netback-multiple-tasklets delete mode 100644 patches.xen/xen-netback-notify-multi delete mode 100644 patches.xen/xen-netback-nr-irqs delete mode 100644 patches.xen/xen-op-packet delete mode 100644 patches.xen/xen-pcpu-hotplug delete mode 100644 patches.xen/xen-sections delete mode 100644 patches.xen/xen-setup-gsi delete mode 100644 patches.xen/xen-spinlock-poll-early delete mode 100644 patches.xen/xen-swiotlb-heuristics delete mode 100644 patches.xen/xen-sysdev-suspend delete mode 100644 patches.xen/xen-tmem-v1 delete mode 100644 patches.xen/xen-unpriv-build delete mode 100644 patches.xen/xen-virq-per-cpu-irq delete mode 100644 patches.xen/xen-watchdog delete mode 100644 
patches.xen/xen-x86-bigmem delete mode 100644 patches.xen/xen-x86-dcr-fallback delete mode 100644 patches.xen/xen-x86-exit-mmap delete mode 100644 patches.xen/xen-x86-msr-on-pcpu delete mode 100644 patches.xen/xen-x86-no-lapic delete mode 100644 patches.xen/xen-x86-panic-no-reboot delete mode 100644 patches.xen/xen-x86-per-cpu-vcpu-info delete mode 100644 patches.xen/xen-x86-pmd-handling delete mode 100644 patches.xen/xen-x86_64-dump-user-pgt delete mode 100644 patches.xen/xen-x86_64-note-init-p2m delete mode 100644 patches.xen/xen-x86_64-pgd-alloc-order delete mode 100644 patches.xen/xen-x86_64-pgd-pin delete mode 100644 patches.xen/xen-x86_64-unmapped-initrd delete mode 100644 patches.xen/xen3-auto-arch-i386.diff delete mode 100644 patches.xen/xen3-auto-arch-x86.diff delete mode 100644 patches.xen/xen3-auto-arch-x86_64.diff delete mode 100644 patches.xen/xen3-auto-blktap2-pvops.diff delete mode 100644 patches.xen/xen3-auto-common.diff delete mode 100644 patches.xen/xen3-auto-include-xen-interface.diff delete mode 100644 patches.xen/xen3-auto-xen-arch.diff delete mode 100644 patches.xen/xen3-auto-xen-drivers.diff delete mode 100644 patches.xen/xen3-auto-xen-kconfig.diff delete mode 100644 patches.xen/xen3-fixup-arch-x86 delete mode 100644 patches.xen/xen3-fixup-blktap2-pvops delete mode 100644 patches.xen/xen3-fixup-common delete mode 100644 patches.xen/xen3-fixup-kconfig delete mode 100644 patches.xen/xen3-fixup-xen delete mode 100644 patches.xen/xen3-patch-2.6.18 delete mode 100644 patches.xen/xen3-patch-2.6.19 delete mode 100644 patches.xen/xen3-patch-2.6.20 delete mode 100644 patches.xen/xen3-patch-2.6.21 delete mode 100644 patches.xen/xen3-patch-2.6.22 delete mode 100644 patches.xen/xen3-patch-2.6.23 delete mode 100644 patches.xen/xen3-patch-2.6.24 delete mode 100644 patches.xen/xen3-patch-2.6.25 delete mode 100644 patches.xen/xen3-patch-2.6.26 delete mode 100644 patches.xen/xen3-patch-2.6.27 delete mode 100644 patches.xen/xen3-patch-2.6.28 delete mode 100644 
patches.xen/xen3-patch-2.6.29 delete mode 100644 patches.xen/xen3-patch-2.6.30 delete mode 100644 patches.xen/xen3-patch-2.6.31 delete mode 100644 patches.xen/xen3-patch-2.6.32 delete mode 100644 patches.xen/xen3-patch-2.6.33 delete mode 100644 patches.xen/xen3-patch-2.6.34 delete mode 100644 patches.xen/xen3-patch-2.6.35 delete mode 100644 patches.xen/xen3-patch-2.6.36 delete mode 100644 patches.xen/xen3-patch-2.6.37 delete mode 100644 patches.xen/xen3-patch-2.6.38 delete mode 100644 patches.xen/xen3-seccomp-disable-tsc-option delete mode 100644 patches.xen/xen3-stack-unwind delete mode 100644 patches.xen/xen3-x86-mark_rodata_rw.patch delete mode 100644 patches.xen/xen3-x86-mcp51-no-dac delete mode 100644 patches.xen/xen3-x86_64-unwind-annotations delete mode 100644 rel-xenlinux delete mode 100644 series-xenlinux.conf delete mode 100644 version-xenlinux diff --git a/Makefile b/Makefile index 935dafd..216cd75 100644 --- a/Makefile +++ b/Makefile @@ -11,9 +11,7 @@ SOURCEDIR := $(WORKDIR) NO_OF_CPUS := $(shell grep -c ^processor /proc/cpuinfo) -ifndef BUILD_FLAVOR -$(error "Add BUILD_FLAVOR=pvops or BUILD_FLAVOR=xenlinux to make cmdline") -endif +BUILD_FLAVOR := pvops RPM_DEFINES := --define "_sourcedir $(SOURCEDIR)" \ --define "_specdir $(SPECDIR)" \ diff --git a/config-xenlinux b/config-xenlinux deleted file mode 100644 index 375ea39..0000000 --- a/config-xenlinux +++ /dev/null @@ -1,5371 +0,0 @@ -# -# Automatically generated make config: don't edit -# Linux/x86_64 2.6.38.3 Kernel Configuration -# Sun Apr 17 01:37:01 2011 -# -CONFIG_64BIT=y -# CONFIG_X86_32 is not set -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -CONFIG_GENERIC_CMOS_UPDATE=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_HAVE_LATENCYTOP_SUPPORT=y -CONFIG_MMU=y -CONFIG_ZONE_DMA=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEED_SG_DMA_LENGTH=y 
-CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_IOMAP=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_GPIO=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -# CONFIG_RWSEM_GENERIC_SPINLOCK is not set -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_DEFAULT_IDLE=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_HAVE_CPUMASK_OF_CPU_MAP=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ZONE_DMA32=y -CONFIG_ARCH_POPULATES_NODE_MAP=y -CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_X86_64_SMP=y -CONFIG_X86_NO_TSS=y -CONFIG_X86_NO_IDT=y -CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" -# CONFIG_KTIME_SCALAR is not set -CONFIG_SUSE_KERNEL=y -# CONFIG_ENTERPRISE_SUPPORT is not set -# CONFIG_SPLIT_PACKAGE is not set -# CONFIG_KERNEL_DESKTOP is not set -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -CONFIG_CONSTRUCTORS=y -CONFIG_HAVE_IRQ_WORK=y -CONFIG_IRQ_WORK=y - -# -# General setup -# -CONFIG_EXPERIMENTAL=y -CONFIG_LOCK_KERNEL=y -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="" -CONFIG_LOCALVERSION="-12.xenlinux.qubes.x86_64" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_KERNEL_GZIP=y -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_AUDIT=y -CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y -CONFIG_HAVE_GENERIC_HARDIRQS=y - -# -# IRQ subsystem -# 
-CONFIG_GENERIC_HARDIRQS=y -# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set -CONFIG_HAVE_SPARSE_IRQ=y -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_PENDING_IRQ=y -# CONFIG_AUTO_IRQ_AFFINITY is not set -CONFIG_IRQ_PER_CPU=y -# CONFIG_HARDIRQS_SW_RESEND is not set -CONFIG_SPARSE_IRQ=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_PREEMPT_RCU is not set -# CONFIG_RCU_TRACE is not set -CONFIG_RCU_FANOUT=64 -# CONFIG_RCU_FANOUT_EXACT is not set -CONFIG_RCU_FAST_NO_HZ=y -# CONFIG_TREE_RCU_TRACE is not set -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=18 -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_CGROUP_NS=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_RESOURCE_COUNTERS=y -CONFIG_CGROUP_MEM_RES_CTLR=y -# CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not set -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -# CONFIG_SCHED_AUTOGROUP is not set -CONFIG_MM_OWNER=y -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -# CONFIG_EXPERT is not set -# CONFIG_EMBEDDED is not set -CONFIG_UID16=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y -# CONFIG_KALLSYMS_EXTRA_PASS is not set -CONFIG_HOTPLUG=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_HAVE_PERF_EVENTS=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# 
CONFIG_PERF_COUNTERS is not set -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_PCI_QUIRKS=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y -# CONFIG_SLUB is not set -CONFIG_DEFAULT_VM_DIRTY_RATIO=40 -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y -CONFIG_OPROFILE=m -# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set -CONFIG_HAVE_OPROFILE=y -CONFIG_KPROBES=y -# CONFIG_JUMP_LABEL is not set -CONFIG_OPTPROBES=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_KRETPROBES=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_ATTRS=y -CONFIG_USE_GENERIC_SMP_HELPERS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_STOP_MACHINE=y -# CONFIG_UTRACE is not set -CONFIG_BLOCK=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_INTEGRITY=y -# CONFIG_BLK_DEV_THROTTLING is not set -CONFIG_BLOCK_COMPAT=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_PADATA=y -# CONFIG_INLINE_SPIN_TRYLOCK is not set -# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK is not set -# CONFIG_INLINE_SPIN_LOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK_IRQ is not set -# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set -CONFIG_INLINE_SPIN_UNLOCK=y -# CONFIG_INLINE_SPIN_UNLOCK_BH is not set 
-CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_READ_TRYLOCK is not set -# CONFIG_INLINE_READ_LOCK is not set -# CONFIG_INLINE_READ_LOCK_BH is not set -# CONFIG_INLINE_READ_LOCK_IRQ is not set -# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set -CONFIG_INLINE_READ_UNLOCK=y -# CONFIG_INLINE_READ_UNLOCK_BH is not set -CONFIG_INLINE_READ_UNLOCK_IRQ=y -# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_WRITE_TRYLOCK is not set -# CONFIG_INLINE_WRITE_LOCK is not set -# CONFIG_INLINE_WRITE_LOCK_BH is not set -# CONFIG_INLINE_WRITE_LOCK_IRQ is not set -# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set -CONFIG_INLINE_WRITE_UNLOCK=y -# CONFIG_INLINE_WRITE_UNLOCK_BH is not set -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_FREEZER=y - -# -# Processor type and features -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -CONFIG_SMP=y -CONFIG_X86_MPPARSE=y -CONFIG_X86_64_XEN=y -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -CONFIG_NO_BOOTMEM=y -# CONFIG_MK8 is not set -# CONFIG_MPSC is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_GENERIC_CPU=y -CONFIG_X86_CPU=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=6 -CONFIG_X86_CMPXCHG=y -CONFIG_CMPXCHG_LOCAL=y -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_XADD=y -CONFIG_X86_WP_WORKS_OK=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=64 -CONFIG_X86_DEBUGCTLMSR=y -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_DMI=y -CONFIG_SWIOTLB=y -CONFIG_IOMMU_HELPER=y -# CONFIG_IOMMU_API is not set -# CONFIG_MAXSMP is not set -CONFIG_NR_CPUS=512 -# CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_MCE=y -CONFIG_X86_MCE_INJECT=m -CONFIG_X86_XEN_MCE=y -CONFIG_I8K=m -CONFIG_MICROCODE=m 
-CONFIG_MICROCODE_OLD_INTERFACE=y -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=m -CONFIG_ARCH_PHYS_ADDR_T_64BIT=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y -CONFIG_FLATMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_COMPACTION is not set -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -# CONFIG_KSM is not set -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -CONFIG_HWPOISON_INJECT=m -CONFIG_TMEM=y -CONFIG_PRECACHE=y -CONFIG_PRESWAP=y -CONFIG_MTRR=y -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_SECCOMP=y -# CONFIG_SECCOMP_DISABLE_TSC is not set -# CONFIG_CC_STACKPROTECTOR is not set -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=250 -CONFIG_SCHED_HRTICK=y -CONFIG_KEXEC=y -CONFIG_PHYSICAL_START=0x2000 -CONFIG_PHYSICAL_ALIGN=0x2000 -CONFIG_HOTPLUG_CPU=y -CONFIG_COMPAT_VDSO=y -# CONFIG_CMDLINE_BOOL is not set - -# -# Power management and ACPI options -# -CONFIG_PM=y -CONFIG_PM_DEBUG=y -CONFIG_PM_ADVANCED_DEBUG=y -# CONFIG_PM_VERBOSE is not set -CONFIG_CAN_PM_TRACE=y -CONFIG_PM_TRACE=y -CONFIG_PM_TRACE_RTC=y -CONFIG_PM_SLEEP_SMP=y -CONFIG_PM_SLEEP=y -# CONFIG_PM_SLEEP_ADVANCED_DEBUG is not set -CONFIG_SUSPEND=y -# CONFIG_PM_TEST_SUSPEND is not set -CONFIG_SUSPEND_FREEZER=y -CONFIG_PM_RUNTIME=y -CONFIG_PM_OPS=y -CONFIG_ACPI=y -CONFIG_ACPI_SLEEP=y -CONFIG_ACPI_PROCFS=y -CONFIG_ACPI_PROCFS_POWER=y -CONFIG_ACPI_POWER_METER=m -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_PROC_EVENT=y -CONFIG_ACPI_AC=m -CONFIG_ACPI_BATTERY=m -CONFIG_ACPI_BUTTON=m -CONFIG_ACPI_VIDEO=m -CONFIG_ACPI_FAN=m -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_PROCESSOR=m -# CONFIG_ACPI_IPMI is not set -CONFIG_ACPI_HOTPLUG_CPU=y 
-CONFIG_ACPI_PROCESSOR_AGGREGATOR=m -CONFIG_ACPI_THERMAL=m -CONFIG_ACPI_CUSTOM_DSDT_FILE="" -# CONFIG_ACPI_CUSTOM_DSDT is not set -CONFIG_ACPI_BLACKLIST_YEAR=0 -CONFIG_ACPI_DEBUG=y -# CONFIG_ACPI_DEBUG_FUNC_TRACE is not set -CONFIG_ACPI_PCI_SLOT=m -CONFIG_ACPI_CONTAINER=m -CONFIG_ACPI_HOTPLUG_MEMORY=m -CONFIG_ACPI_SBS=m -# CONFIG_ACPI_HED is not set -# CONFIG_ACPI_APEI is not set -CONFIG_ACPI_PV_SLEEP=y -CONFIG_PROCESSOR_EXTERNAL_CONTROL=y -CONFIG_SFI=y - -# -# CPU Frequency scaling -# - -# -# Bus options (PCI etc.) -# -CONFIG_PCI=y -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_DOMAINS=y -# CONFIG_PCI_CNB20LE_QUIRK is not set -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=m -CONFIG_PCIEAER=y -# CONFIG_PCIE_ECRC is not set -CONFIG_PCIEAER_INJECT=m -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIE_PME=y -CONFIG_ARCH_SUPPORTS_MSI=y -CONFIG_PCI_MSI=y -# CONFIG_PCI_DEBUG is not set -CONFIG_PCI_GUESTDEV=y -CONFIG_PCI_IOMULTI=y -CONFIG_PCI_RESERVE=y -CONFIG_PCI_STUB=y -CONFIG_XEN_PCIDEV_FRONTEND=y -# CONFIG_XEN_PCIDEV_FE_DEBUG is not set -CONFIG_PCI_IOV=y -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -CONFIG_PCCARD=m -CONFIG_PCMCIA=m -CONFIG_PCMCIA_LOAD_CIS=y -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -CONFIG_PD6729=m -CONFIG_I82092=m -CONFIG_PCCARD_NONSTATIC=y -CONFIG_HOTPLUG_PCI=m -CONFIG_HOTPLUG_PCI_FAKE=m -CONFIG_HOTPLUG_PCI_ACPI=m -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -CONFIG_HOTPLUG_PCI_CPCI=y -CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m -CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m -CONFIG_HOTPLUG_PCI_SHPC=m - -# -# Executable file formats / Emulations -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -# CONFIG_HAVE_AOUT is not set -CONFIG_BINFMT_MISC=m -CONFIG_IA32_EMULATION=y -CONFIG_IA32_AOUT=m -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_HAVE_TEXT_POKE_SMP=y 
-CONFIG_NET=y -CONFIG_COMPAT_NETLINK_MESSAGES=y - -# -# Networking options -# -CONFIG_PACKET=m -CONFIG_UNIX=y -CONFIG_XFRM=y -CONFIG_XFRM_USER=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -# CONFIG_XFRM_STATISTICS is not set -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_ASK_IP_FIB_HASH=y -# CONFIG_IP_FIB_TRIE is not set -CONFIG_IP_FIB_HASH=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y -CONFIG_NET_IPIP=m -# CONFIG_NET_IPGRE_DEMUX is not set -CONFIG_IP_MROUTE=y -# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -# CONFIG_ARPD is not set -CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_LRO=y -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_TCP_MD5SIG is not set -CONFIG_IPV6=y -CONFIG_IPV6_PRIVACY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -# CONFIG_IPV6_OPTIMISTIC_DAD is not set -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m -CONFIG_IPV6_SIT=m -# CONFIG_IPV6_SIT_6RD 
is not set -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -# CONFIG_IPV6_MROUTE is not set -CONFIG_NETWORK_SECMARK=y -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -CONFIG_NETFILTER=y -# CONFIG_NETFILTER_DEBUG is not set -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_DCCP=m -CONFIG_NF_CT_PROTO_GRE=m -CONFIG_NF_CT_PROTO_SCTP=m -CONFIG_NF_CT_PROTO_UDPLITE=m -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CONNTRACK_SLP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_TPROXY=m -CONFIG_NETFILTER_XTABLES=m - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m - -# -# Xtables targets -# -# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m -# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -# CONFIG_NETFILTER_XT_TARGET_TEE is not set -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables 
matches -# -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -# CONFIG_NETFILTER_XT_MATCH_CPU is not set -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -# CONFIG_NETFILTER_XT_MATCH_IPVS is not set -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -# CONFIG_IP_VS_PE_SIP is not 
set - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_CONNTRACK_IPV4=m -# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set -CONFIG_IP_NF_QUEUE=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT=m -CONFIG_NF_NAT_NEEDED=y -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PROTO_DCCP=m -CONFIG_NF_NAT_PROTO_GRE=m -CONFIG_NF_NAT_PROTO_UDPLITE=m -CONFIG_NF_NAT_PROTO_SCTP=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_NF_NAT_SIP=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_IP6_NF_QUEUE=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_TARGET_LOG=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m - -# -# DECnet: Netfilter Configuration -# -# CONFIG_DECNET_NF_GRABULATOR is not set -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m 
-CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -CONFIG_IP_DCCP=m -CONFIG_INET_DCCP_DIAG=m - -# -# DCCP CCIDs Configuration (EXPERIMENTAL) -# -# CONFIG_IP_DCCP_CCID2_DEBUG is not set -CONFIG_IP_DCCP_CCID3=y -# CONFIG_IP_DCCP_CCID3_DEBUG is not set -CONFIG_IP_DCCP_TFRC_LIB=y - -# -# DCCP Kernel Hacking -# -# CONFIG_IP_DCCP_DEBUG is not set -# CONFIG_NET_DCCPPROBE is not set -CONFIG_IP_SCTP=m -# CONFIG_NET_SCTPPROBE is not set -# CONFIG_SCTP_DBG_MSG is not set -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_HMAC_NONE is not set -# CONFIG_SCTP_HMAC_SHA1 is not set -CONFIG_SCTP_HMAC_MD5=y -CONFIG_RDS=m -CONFIG_RDS_RDMA=m -CONFIG_RDS_TCP=m -# CONFIG_RDS_DEBUG is not set -# CONFIG_TIPC is not set -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -# CONFIG_ATM_CLIP_NO_ICMP is not set -CONFIG_ATM_LANE=m -CONFIG_ATM_MPOA=m -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -# CONFIG_L2TP is not set -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_NET_DSA=y -CONFIG_NET_DSA_TAG_DSA=y -CONFIG_NET_DSA_TAG_EDSA=y -CONFIG_NET_DSA_TAG_TRAILER=y -CONFIG_NET_DSA_MV88E6XXX=y -CONFIG_NET_DSA_MV88E6060=y -CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y -CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123_61_65=y -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_DECNET=m -# CONFIG_DECNET_ROUTER is not set -CONFIG_LLC=m -CONFIG_LLC2=m -CONFIG_IPX=m -CONFIG_IPX_INTERN=y -CONFIG_ATALK=m -CONFIG_DEV_APPLETALK=m -CONFIG_IPDDP=m -CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y -CONFIG_X25=m -CONFIG_LAPB=m -# CONFIG_ECONET is not set -CONFIG_WAN_ROUTER=m -CONFIG_PHONET=m -# CONFIG_PHONET_PIPECTRLR is not set -CONFIG_IEEE802154=m -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m 
-CONFIG_NET_SCH_ATM=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_INGRESS=m - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_ROUTE=y -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -# CONFIG_NET_ACT_CSUM is not set -CONFIG_NET_CLS_IND=y -CONFIG_NET_SCH_FIFO=y -CONFIG_DCB=y -CONFIG_DNS_RESOLVER=y -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_DEBUG=y -CONFIG_RPS=y -CONFIG_XPS=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=m -CONFIG_NET_TCPPROBE=m -# CONFIG_NET_DROP_MONITOR is not set -CONFIG_HAMRADIO=y - -# -# Packet Radio protocols -# -CONFIG_AX25=m -CONFIG_AX25_DAMA_SLAVE=y -CONFIG_NETROM=m -CONFIG_ROSE=m - -# -# AX.25 network device drivers -# -CONFIG_MKISS=m -CONFIG_6PACK=m -CONFIG_BPQETHER=m -CONFIG_BAYCOM_SER_FDX=m -CONFIG_BAYCOM_SER_HDX=m -CONFIG_BAYCOM_PAR=m -CONFIG_YAM=m -CONFIG_CAN=m -CONFIG_CAN_RAW=m -CONFIG_CAN_BCM=m - -# -# CAN Device Drivers -# -CONFIG_CAN_VCAN=m -# CONFIG_CAN_SLCAN is not set -CONFIG_CAN_DEV=m -CONFIG_CAN_CALC_BITTIMING=y -CONFIG_CAN_MCP251X=m -CONFIG_CAN_JANZ_ICAN3=m -# CONFIG_PCH_CAN is not set -CONFIG_CAN_SJA1000=m -CONFIG_CAN_SJA1000_PLATFORM=m -CONFIG_CAN_EMS_PCI=m -CONFIG_CAN_KVASER_PCI=m -CONFIG_CAN_PLX_PCI=m - -# 
-# CAN USB interfaces -# -CONFIG_CAN_EMS_USB=m -# CONFIG_CAN_ESD_USB2 is not set -# CONFIG_CAN_SOFTING is not set -# CONFIG_CAN_DEBUG_DEVICES is not set -CONFIG_IRDA=m - -# -# IrDA protocols -# -CONFIG_IRLAN=m -CONFIG_IRNET=m -CONFIG_IRCOMM=m -CONFIG_IRDA_ULTRA=y - -# -# IrDA options -# -CONFIG_IRDA_CACHE_LAST_LSAP=y -# CONFIG_IRDA_FAST_RR is not set -# CONFIG_IRDA_DEBUG is not set - -# -# Infrared-port device drivers -# - -# -# SIR device drivers -# -CONFIG_IRTTY_SIR=m - -# -# Dongle support -# -CONFIG_DONGLE=y -CONFIG_ESI_DONGLE=m -CONFIG_ACTISYS_DONGLE=m -CONFIG_TEKRAM_DONGLE=m -CONFIG_TOIM3232_DONGLE=m -CONFIG_LITELINK_DONGLE=m -CONFIG_MA600_DONGLE=m -CONFIG_GIRBIL_DONGLE=m -CONFIG_MCP2120_DONGLE=m -CONFIG_OLD_BELKIN_DONGLE=m -CONFIG_ACT200L_DONGLE=m -CONFIG_KINGSUN_DONGLE=m -CONFIG_KSDAZZLE_DONGLE=m -CONFIG_KS959_DONGLE=m - -# -# FIR device drivers -# -CONFIG_USB_IRDA=m -CONFIG_SIGMATEL_FIR=m -CONFIG_NSC_FIR=m -CONFIG_WINBOND_FIR=m -CONFIG_SMC_IRCC_FIR=m -CONFIG_ALI_FIR=m -CONFIG_VLSI_FIR=m -CONFIG_VIA_FIR=m -CONFIG_MCS_FIR=m -CONFIG_BT=m -CONFIG_BT_L2CAP=m -CONFIG_BT_SCO=m -CONFIG_BT_RFCOMM=m -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=m -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_CMTP=m -CONFIG_BT_HIDP=m - -# -# Bluetooth device drivers -# -CONFIG_BT_HCIBTUSB=m -CONFIG_BT_HCIBTSDIO=m -CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_H4=y -CONFIG_BT_HCIUART_BCSP=y -# CONFIG_BT_HCIUART_ATH3K is not set -CONFIG_BT_HCIUART_LL=y -CONFIG_BT_HCIBCM203X=m -CONFIG_BT_HCIBPA10X=m -CONFIG_BT_HCIBFUSB=m -CONFIG_BT_HCIDTL1=m -CONFIG_BT_HCIBT3C=m -CONFIG_BT_HCIBLUECARD=m -CONFIG_BT_HCIBTUART=m -CONFIG_BT_HCIVHCI=m -CONFIG_BT_MRVL=m -CONFIG_BT_MRVL_SDIO=m -CONFIG_BT_ATH3K=m -CONFIG_AF_RXRPC=m -# CONFIG_AF_RXRPC_DEBUG is not set -CONFIG_RXKAD=m -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -CONFIG_WIRELESS_EXT=y -CONFIG_WEXT_CORE=y -CONFIG_WEXT_PROC=y -CONFIG_WEXT_SPY=y -CONFIG_WEXT_PRIV=y -CONFIG_CFG80211=m -# CONFIG_NL80211_TESTMODE is not set -# 
CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -# CONFIG_CFG80211_REG_DEBUG is not set -CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set -# CONFIG_CFG80211_INTERNAL_REGDB is not set -CONFIG_CFG80211_WEXT=y -CONFIG_WIRELESS_EXT_SYSFS=y -CONFIG_LIB80211=m -CONFIG_LIB80211_CRYPT_WEP=m -CONFIG_LIB80211_CRYPT_CCMP=m -CONFIG_LIB80211_CRYPT_TKIP=m -# CONFIG_LIB80211_DEBUG is not set -CONFIG_MAC80211=m -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_MINSTREL_HT=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -CONFIG_MAC80211_MESH=y -CONFIG_MAC80211_LEDS=y -CONFIG_MAC80211_DEBUGFS=y -# CONFIG_MAC80211_DEBUG_MENU is not set -CONFIG_WIMAX=m -CONFIG_WIMAX_DEBUG_LEVEL=8 -CONFIG_RFKILL=m -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -CONFIG_NET_9P=m -CONFIG_NET_9P_RDMA=m -# CONFIG_NET_9P_DEBUG is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=m -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -CONFIG_NETVM=y - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER_PATH="" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_STANDALONE is not set -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -# CONFIG_FIRMWARE_IN_KERNEL is not set -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -CONFIG_SYS_HYPERVISOR=y -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y -CONFIG_MTD=m -# CONFIG_MTD_DEBUG is not set -CONFIG_MTD_TESTS=m -CONFIG_MTD_CONCAT=m -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_REDBOOT_PARTS=m -CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 -# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set -# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set -CONFIG_MTD_AR7_PARTS=m - -# -# User Modules And Translation Layers -# -CONFIG_MTD_CHAR=m -CONFIG_HAVE_MTD_OTP=y -CONFIG_MTD_BLKDEVS=m -CONFIG_MTD_BLOCK=m -CONFIG_MTD_BLOCK_RO=m -CONFIG_FTL=m -CONFIG_NFTL=m -CONFIG_NFTL_RW=y -CONFIG_INFTL=m -CONFIG_RFD_FTL=m -CONFIG_SSFDC=m -# CONFIG_SM_FTL is not set 
-CONFIG_MTD_OOPS=m - -# -# RAM/ROM/Flash chip drivers -# -CONFIG_MTD_CFI=m -CONFIG_MTD_JEDECPROBE=m -CONFIG_MTD_GEN_PROBE=m -CONFIG_MTD_CFI_ADV_OPTIONS=y -CONFIG_MTD_CFI_NOSWAP=y -# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set -# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set -CONFIG_MTD_CFI_GEOMETRY=y -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_MAP_BANK_WIDTH_8=y -CONFIG_MTD_MAP_BANK_WIDTH_16=y -CONFIG_MTD_MAP_BANK_WIDTH_32=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -CONFIG_MTD_CFI_I4=y -CONFIG_MTD_CFI_I8=y -CONFIG_MTD_OTP=y -CONFIG_MTD_CFI_INTELEXT=m -CONFIG_MTD_CFI_AMDSTD=m -CONFIG_MTD_CFI_STAA=m -CONFIG_MTD_CFI_UTIL=m -CONFIG_MTD_RAM=m -CONFIG_MTD_ROM=m -CONFIG_MTD_ABSENT=m - -# -# Mapping drivers for chip access -# -CONFIG_MTD_COMPLEX_MAPPINGS=y -CONFIG_MTD_PHYSMAP=m -CONFIG_MTD_PHYSMAP_COMPAT=y -CONFIG_MTD_PHYSMAP_START=0x8000000 -CONFIG_MTD_PHYSMAP_LEN=0 -CONFIG_MTD_PHYSMAP_BANKWIDTH=2 -CONFIG_MTD_SC520CDP=m -CONFIG_MTD_NETSC520=m -CONFIG_MTD_TS5500=m -CONFIG_MTD_SBC_GXX=m -CONFIG_MTD_AMD76XROM=m -CONFIG_MTD_ICHXROM=m -CONFIG_MTD_ESB2ROM=m -CONFIG_MTD_CK804XROM=m -CONFIG_MTD_SCB2_FLASH=m -CONFIG_MTD_NETtel=m -CONFIG_MTD_L440GX=m -CONFIG_MTD_PCI=m -# CONFIG_MTD_PCMCIA is not set -CONFIG_MTD_GPIO_ADDR=m -CONFIG_MTD_INTEL_VR_NOR=m -CONFIG_MTD_PLATRAM=m - -# -# Self-contained MTD device drivers -# -CONFIG_MTD_PMC551=m -CONFIG_MTD_PMC551_BUGFIX=y -# CONFIG_MTD_PMC551_DEBUG is not set -CONFIG_MTD_DATAFLASH=m -CONFIG_MTD_DATAFLASH_WRITE_VERIFY=y -CONFIG_MTD_DATAFLASH_OTP=y -CONFIG_MTD_M25P80=m -CONFIG_M25PXX_USE_FAST_READ=y -CONFIG_MTD_SST25L=m -CONFIG_MTD_SLRAM=m -CONFIG_MTD_PHRAM=m -CONFIG_MTD_MTDRAM=m -CONFIG_MTDRAM_TOTAL_SIZE=4096 -CONFIG_MTDRAM_ERASE_SIZE=128 -CONFIG_MTD_BLOCK2MTD=m - -# -# Disk-On-Chip Device Drivers -# -CONFIG_MTD_DOC2000=m -CONFIG_MTD_DOC2001=m -CONFIG_MTD_DOC2001PLUS=m -CONFIG_MTD_DOCPROBE=m -CONFIG_MTD_DOCECC=m -CONFIG_MTD_DOCPROBE_ADVANCED=y -CONFIG_MTD_DOCPROBE_ADDRESS=0x0000 
-CONFIG_MTD_DOCPROBE_HIGH=y -CONFIG_MTD_DOCPROBE_55AA=y -CONFIG_MTD_NAND_ECC=m -CONFIG_MTD_NAND_ECC_SMC=y -CONFIG_MTD_NAND=m -CONFIG_MTD_NAND_VERIFY_WRITE=y -# CONFIG_MTD_SM_COMMON is not set -CONFIG_MTD_NAND_MUSEUM_IDS=y -# CONFIG_MTD_NAND_DENALI is not set -CONFIG_MTD_NAND_IDS=m -# CONFIG_MTD_NAND_RICOH is not set -CONFIG_MTD_NAND_DISKONCHIP=m -CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y -CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0 -CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH=y -CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y -CONFIG_MTD_NAND_CAFE=m -CONFIG_MTD_NAND_NANDSIM=m -CONFIG_MTD_NAND_PLATFORM=m -CONFIG_MTD_ALAUDA=m -CONFIG_MTD_ONENAND=m -CONFIG_MTD_ONENAND_VERIFY_WRITE=y -CONFIG_MTD_ONENAND_GENERIC=m -CONFIG_MTD_ONENAND_OTP=y -CONFIG_MTD_ONENAND_2X_PROGRAM=y -CONFIG_MTD_ONENAND_SIM=m - -# -# LPDDR flash memory drivers -# -CONFIG_MTD_LPDDR=m -CONFIG_MTD_QINFO_PROBE=m -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MTD_UBI_BEB_RESERVE=1 -CONFIG_MTD_UBI_GLUEBI=m - -# -# UBI debugging options -# -# CONFIG_MTD_UBI_DEBUG is not set -CONFIG_PARPORT=m -CONFIG_PARPORT_PC=m -CONFIG_PARPORT_SERIAL=m -CONFIG_PARPORT_PC_FIFO=y -CONFIG_PARPORT_PC_SUPERIO=y -CONFIG_PARPORT_PC_PCMCIA=m -# CONFIG_PARPORT_GSC is not set -CONFIG_PARPORT_AX88796=m -CONFIG_PARPORT_1284=y -CONFIG_PARPORT_NOT_PC=y -CONFIG_PNP=y -# CONFIG_PNP_DEBUG_MESSAGES is not set - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_FD=m -CONFIG_PARIDE=m - -# -# Parallel IDE high-level drivers -# -CONFIG_PARIDE_PD=m -CONFIG_PARIDE_PCD=m -CONFIG_PARIDE_PF=m -CONFIG_PARIDE_PT=m -CONFIG_PARIDE_PG=m - -# -# Parallel IDE protocol modules -# -CONFIG_PARIDE_ATEN=m -CONFIG_PARIDE_BPCK=m -CONFIG_PARIDE_COMM=m -CONFIG_PARIDE_DSTR=m -CONFIG_PARIDE_FIT2=m -CONFIG_PARIDE_FIT3=m -CONFIG_PARIDE_EPAT=m -CONFIG_PARIDE_EPATC8=y -CONFIG_PARIDE_EPIA=m -CONFIG_PARIDE_FRIQ=m -CONFIG_PARIDE_FRPW=m -CONFIG_PARIDE_KBIC=m -CONFIG_PARIDE_KTTI=m -CONFIG_PARIDE_ON20=m -CONFIG_PARIDE_ON26=m -CONFIG_BLK_CPQ_DA=m 
-CONFIG_BLK_CPQ_CISS_DA=m -CONFIG_CISS_SCSI_TAPE=y -CONFIG_BLK_DEV_DAC960=m -CONFIG_BLK_DEV_UMEM=m -# CONFIG_BLK_DEV_COW_COMMON is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m -# CONFIG_DRBD_FAULT_INJECTION is not set -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_OSD=m -CONFIG_BLK_DEV_SX8=m -# CONFIG_BLK_DEV_UB is not set -CONFIG_BLK_DEV_RAM=m -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_BLK_DEV_XIP=y -CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -CONFIG_CDROM_PKTCDVD_WCACHE=y -CONFIG_ATA_OVER_ETH=m -# CONFIG_BLK_DEV_HD is not set -# CONFIG_BLK_DEV_RBD is not set -CONFIG_MISC_DEVICES=y -CONFIG_AD525X_DPOT=m -# CONFIG_AD525X_DPOT_I2C is not set -# CONFIG_AD525X_DPOT_SPI is not set -CONFIG_IBM_ASM=m -CONFIG_PHANTOM=m -CONFIG_SGI_IOC4=m -CONFIG_TIFM_CORE=m -CONFIG_TIFM_7XX1=m -CONFIG_ICS932S401=m -CONFIG_ENCLOSURE_SERVICES=m -CONFIG_CS5535_MFGPT=m -CONFIG_CS5535_MFGPT_DEFAULT_IRQ=7 -CONFIG_CS5535_CLOCK_EVENT_SRC=m -CONFIG_HP_ILO=m -CONFIG_APDS9802ALS=m -# CONFIG_ISL29003 is not set -CONFIG_ISL29020=m -CONFIG_SENSORS_TSL2550=m -CONFIG_SENSORS_BH1780=m -CONFIG_SENSORS_BH1770=m -CONFIG_SENSORS_APDS990X=m -CONFIG_HMC6352=m -CONFIG_DS1682=m -CONFIG_TI_DAC7512=m -CONFIG_BMP085=m -CONFIG_PCH_PHUB=m -CONFIG_C2PORT=m -CONFIG_C2PORT_DURAMAR_2150=m - -# -# EEPROM support -# -CONFIG_EEPROM_AT24=m -CONFIG_EEPROM_AT25=m -CONFIG_EEPROM_LEGACY=m -CONFIG_EEPROM_MAX6875=m -CONFIG_EEPROM_93CX6=m -CONFIG_CB710_CORE=m -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y -CONFIG_IWMC3200TOP=m -# CONFIG_IWMC3200TOP_DEBUG is not set -# CONFIG_IWMC3200TOP_DEBUGFS is not set - -# -# Texas Instruments shared transport line discipline -# -CONFIG_TI_ST=m -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=m -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=m -CONFIG_SCSI_DMA=y -CONFIG_SCSI_TGT=m -CONFIG_SCSI_NETLINK=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# 
-CONFIG_BLK_DEV_SD=m -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=m -# CONFIG_BLK_DEV_SR_VENDOR is not set -CONFIG_CHR_DEV_SG=m -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_MULTI_LUN=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -# CONFIG_SCSI_SCAN_ASYNC is not set -CONFIG_SCSI_WAIT_SCAN=m - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_FC_TGT_ATTRS=y -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set -CONFIG_SCSI_SRP_ATTRS=m -CONFIG_SCSI_SRP_TGT_ATTRS=y -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -CONFIG_SCSI_CXGB3_ISCSI=m -CONFIG_SCSI_CXGB4_ISCSI=m -CONFIG_SCSI_BNX2_ISCSI=m -CONFIG_BE2ISCSI=m -CONFIG_BLK_DEV_3W_XXXX_RAID=m -CONFIG_SCSI_HPSA=m -CONFIG_SCSI_3W_9XXX=m -CONFIG_SCSI_3W_SAS=m -CONFIG_SCSI_ACARD=m -CONFIG_SCSI_AACRAID=m -CONFIG_SCSI_AIC7XXX=m -CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 -CONFIG_AIC7XXX_RESET_DELAY_MS=15000 -# CONFIG_AIC7XXX_DEBUG_ENABLE is not set -CONFIG_AIC7XXX_DEBUG_MASK=0 -CONFIG_AIC7XXX_REG_PRETTY_PRINT=y -CONFIG_SCSI_AIC7XXX_OLD=m -CONFIG_SCSI_AIC79XX=m -CONFIG_AIC79XX_CMDS_PER_DEVICE=32 -CONFIG_AIC79XX_RESET_DELAY_MS=5000 -# CONFIG_AIC79XX_DEBUG_ENABLE is not set -CONFIG_AIC79XX_DEBUG_MASK=0 -CONFIG_AIC79XX_REG_PRETTY_PRINT=y -CONFIG_SCSI_AIC94XX=m -# CONFIG_AIC94XX_DEBUG is not set -CONFIG_SCSI_MVSAS=m -# CONFIG_SCSI_MVSAS_DEBUG is not set -CONFIG_SCSI_DPT_I2O=m -CONFIG_SCSI_ADVANSYS=m -CONFIG_SCSI_ARCMSR=m -CONFIG_SCSI_ARCMSR_AER=y -CONFIG_MEGARAID_NEWGEN=y -CONFIG_MEGARAID_MM=m -CONFIG_MEGARAID_MAILBOX=m -CONFIG_MEGARAID_LEGACY=m -CONFIG_MEGARAID_SAS=m -CONFIG_SCSI_MPT2SAS=m -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -# CONFIG_SCSI_MPT2SAS_LOGGING is not set -CONFIG_SCSI_HPTIOP=m -CONFIG_SCSI_BUSLOGIC=m -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -CONFIG_FCOE=m -CONFIG_FCOE_FNIC=m -CONFIG_SCSI_DMX3191D=m -CONFIG_SCSI_EATA=m 
-CONFIG_SCSI_EATA_TAGGED_QUEUE=y -CONFIG_SCSI_EATA_LINKED_COMMANDS=y -CONFIG_SCSI_EATA_MAX_TAGS=16 -CONFIG_SCSI_FUTURE_DOMAIN=m -CONFIG_SCSI_GDTH=m -CONFIG_SCSI_IPS=m -CONFIG_SCSI_INITIO=m -CONFIG_SCSI_INIA100=m -CONFIG_SCSI_PPA=m -CONFIG_SCSI_IMM=m -# CONFIG_SCSI_IZIP_EPP16 is not set -# CONFIG_SCSI_IZIP_SLOW_CTR is not set -CONFIG_SCSI_STEX=m -CONFIG_SCSI_SYM53C8XX_2=m -CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 -CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 -CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 -CONFIG_SCSI_SYM53C8XX_MMIO=y -CONFIG_SCSI_IPR=m -CONFIG_SCSI_IPR_TRACE=y -CONFIG_SCSI_IPR_DUMP=y -CONFIG_SCSI_QLOGIC_1280=m -CONFIG_SCSI_QLA_FC=m -CONFIG_SCSI_QLA_ISCSI=m -CONFIG_SCSI_LPFC=m -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -CONFIG_SCSI_DC395x=m -CONFIG_SCSI_DC390T=m -CONFIG_SCSI_DEBUG=m -CONFIG_SCSI_PMCRAID=m -CONFIG_SCSI_PM8001=m -CONFIG_SCSI_SRP=m -CONFIG_SCSI_BFA_FC=m -CONFIG_SCSI_LOWLEVEL_PCMCIA=y -CONFIG_PCMCIA_AHA152X=m -CONFIG_PCMCIA_FDOMAIN=m -CONFIG_PCMCIA_QLOGIC=m -CONFIG_PCMCIA_SYM53C500=m -CONFIG_SCSI_DH=m -CONFIG_SCSI_DH_RDAC=m -CONFIG_SCSI_DH_HP_SW=m -CONFIG_SCSI_DH_EMC=m -CONFIG_SCSI_DH_ALUA=m -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m -CONFIG_SCSI_OSD_DPRINT_SENSE=1 -# CONFIG_SCSI_OSD_DEBUG is not set -CONFIG_ATA=m -# CONFIG_ATA_NONSTANDARD is not set -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_ACPI=y -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=m -CONFIG_SATA_AHCI_PLATFORM=m -CONFIG_SATA_INIC162X=m -CONFIG_SATA_ACARD_AHCI=m -CONFIG_SATA_SIL24=m -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -CONFIG_PDC_ADMA=m -CONFIG_SATA_QSTOR=m -CONFIG_SATA_SX4=m -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=m -CONFIG_SATA_MV=m -CONFIG_SATA_NV=m -CONFIG_SATA_PROMISE=m -CONFIG_SATA_SIL=m -CONFIG_SATA_SIS=m -CONFIG_SATA_SVW=m -CONFIG_SATA_ULI=m -CONFIG_SATA_VIA=m -CONFIG_SATA_VITESSE=m - -# -# PATA SFF controllers with BMDMA -# -CONFIG_PATA_ALI=m -CONFIG_PATA_AMD=m 
-CONFIG_PATA_ARTOP=m -CONFIG_PATA_ATIIXP=m -CONFIG_PATA_ATP867X=m -CONFIG_PATA_CMD64X=m -CONFIG_PATA_CS5520=m -CONFIG_PATA_CS5530=m -CONFIG_PATA_CS5536=m -CONFIG_PATA_CYPRESS=m -CONFIG_PATA_EFAR=m -CONFIG_PATA_HPT366=m -CONFIG_PATA_HPT37X=m -CONFIG_PATA_HPT3X2N=m -CONFIG_PATA_HPT3X3=m -# CONFIG_PATA_HPT3X3_DMA is not set -CONFIG_PATA_IT8213=m -CONFIG_PATA_IT821X=m -CONFIG_PATA_JMICRON=m -CONFIG_PATA_MARVELL=m -CONFIG_PATA_NETCELL=m -CONFIG_PATA_NINJA32=m -CONFIG_PATA_NS87415=m -CONFIG_PATA_OLDPIIX=m -CONFIG_PATA_OPTIDMA=m -CONFIG_PATA_PDC2027X=m -CONFIG_PATA_PDC_OLD=m -CONFIG_PATA_RADISYS=m -CONFIG_PATA_RDC=m -CONFIG_PATA_SC1200=m -CONFIG_PATA_SCH=m -CONFIG_PATA_SERVERWORKS=m -CONFIG_PATA_SIL680=m -CONFIG_PATA_SIS=m -CONFIG_PATA_TOSHIBA=m -CONFIG_PATA_TRIFLEX=m -CONFIG_PATA_VIA=m -CONFIG_PATA_WINBOND=m - -# -# PIO-only SFF controllers -# -CONFIG_PATA_CMD640_PCI=m -CONFIG_PATA_MPIIX=m -CONFIG_PATA_NS87410=m -CONFIG_PATA_OPTI=m -CONFIG_PATA_PCMCIA=m -CONFIG_PATA_RZ1000=m - -# -# Generic fallback / legacy drivers -# -CONFIG_PATA_ACPI=m -CONFIG_ATA_GENERIC=m -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -# CONFIG_MULTICORE_RAID456 is not set -CONFIG_MD_MULTIPATH=m -CONFIG_MD_FAULTY=m -CONFIG_BLK_DEV_DM=y -# CONFIG_DM_DEBUG is not set -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m -CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -CONFIG_DM_DELAY=m -CONFIG_DM_RAID45=m -CONFIG_DM_UEVENT=y -# CONFIG_TARGET_CORE is not set -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -CONFIG_FUSION_FC=m -CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=128 -CONFIG_FUSION_CTL=m -CONFIG_FUSION_LAN=m -# CONFIG_FUSION_LOGGING is not set - -# -# IEEE 1394 (FireWire) support -# -CONFIG_FIREWIRE=m -CONFIG_FIREWIRE_OHCI=m -CONFIG_FIREWIRE_OHCI_DEBUG=y 
-CONFIG_FIREWIRE_SBP2=m -CONFIG_FIREWIRE_NET=m -# CONFIG_FIREWIRE_NOSY is not set -CONFIG_I2O=m -CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y -CONFIG_I2O_EXT_ADAPTEC=y -CONFIG_I2O_EXT_ADAPTEC_DMA64=y -CONFIG_I2O_CONFIG=m -CONFIG_I2O_CONFIG_OLD_IOCTL=y -CONFIG_I2O_BUS=m -CONFIG_I2O_BLOCK=m -CONFIG_I2O_SCSI=m -CONFIG_I2O_PROC=m -# CONFIG_MACINTOSH_DRIVERS is not set -CONFIG_NETDEVICES=y -CONFIG_IFB=m -CONFIG_DUMMY=m -CONFIG_BONDING=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -CONFIG_EQUALIZER=m -CONFIG_TUN=m -CONFIG_VETH=m -# CONFIG_NET_SB1000 is not set -CONFIG_ARCNET=m -CONFIG_ARCNET_1201=m -CONFIG_ARCNET_1051=m -CONFIG_ARCNET_RAW=m -CONFIG_ARCNET_CAP=m -CONFIG_ARCNET_COM90xx=m -CONFIG_ARCNET_COM90xxIO=m -CONFIG_ARCNET_RIM_I=m -# CONFIG_ARCNET_COM20020 is not set -CONFIG_MII=y -CONFIG_PHYLIB=y - -# -# MII PHY device drivers -# -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_BROADCOM_PHY=m -CONFIG_BCM63XX_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_NATIONAL_PHY=m -CONFIG_STE10XP=m -CONFIG_LSI_ET1011C_PHY=m -CONFIG_MICREL_PHY=m -CONFIG_FIXED_PHY=y -CONFIG_MDIO_BITBANG=m -CONFIG_MDIO_GPIO=m -CONFIG_NET_ETHERNET=y -CONFIG_HAPPYMEAL=m -CONFIG_SUNGEM=m -CONFIG_CASSINI=m -CONFIG_NET_VENDOR_3COM=y -CONFIG_VORTEX=m -CONFIG_TYPHOON=m -CONFIG_ENC28J60=m -# CONFIG_ENC28J60_WRITEVERIFY is not set -CONFIG_ETHOC=m -CONFIG_DNET=m -CONFIG_NET_TULIP=y -CONFIG_DE2104X=m -CONFIG_DE2104X_DSL=0 -CONFIG_TULIP=m -# CONFIG_TULIP_MWI is not set -# CONFIG_TULIP_MMIO is not set -CONFIG_TULIP_NAPI=y -CONFIG_TULIP_NAPI_HW_MITIGATION=y -CONFIG_DE4X5=m -CONFIG_WINBOND_840=m -CONFIG_DM9102=m -CONFIG_ULI526X=m -CONFIG_PCMCIA_XIRCOM=m -CONFIG_HP100=m -# CONFIG_IBM_NEW_EMAC_ZMII is not set -# CONFIG_IBM_NEW_EMAC_RGMII is not set -# CONFIG_IBM_NEW_EMAC_TAH is not set -# CONFIG_IBM_NEW_EMAC_EMAC4 is not set -# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set -# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not 
set -# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set -CONFIG_NET_PCI=y -CONFIG_PCNET32=m -CONFIG_AMD8111_ETH=m -CONFIG_ADAPTEC_STARFIRE=m -CONFIG_KSZ884X_PCI=m -CONFIG_B44=m -CONFIG_B44_PCI_AUTOSELECT=y -CONFIG_B44_PCICORE_AUTOSELECT=y -CONFIG_B44_PCI=y -CONFIG_FORCEDETH=m -CONFIG_E100=m -CONFIG_FEALNX=m -CONFIG_NATSEMI=m -CONFIG_NE2K_PCI=m -CONFIG_8139CP=m -CONFIG_8139TOO=m -# CONFIG_8139TOO_PIO is not set -# CONFIG_8139TOO_TUNE_TWISTER is not set -CONFIG_8139TOO_8129=y -# CONFIG_8139_OLD_RX_RESET is not set -CONFIG_R6040=m -CONFIG_SIS900=m -CONFIG_EPIC100=m -CONFIG_SMSC9420=m -CONFIG_SUNDANCE=m -# CONFIG_SUNDANCE_MMIO is not set -CONFIG_TLAN=m -CONFIG_KS8842=m -CONFIG_KS8851=m -CONFIG_KS8851_MLL=m -CONFIG_VIA_RHINE=m -CONFIG_VIA_RHINE_MMIO=y -CONFIG_SC92031=m -CONFIG_NET_POCKET=y -CONFIG_ATP=m -CONFIG_DE600=m -CONFIG_DE620=m -CONFIG_ATL2=m -CONFIG_NETDEV_1000=y -CONFIG_ACENIC=m -# CONFIG_ACENIC_OMIT_TIGON_I is not set -CONFIG_DL2K=m -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_IP1000=m -CONFIG_IGB=m -CONFIG_IGBVF=m -CONFIG_NS83820=m -CONFIG_HAMACHI=m -CONFIG_YELLOWFIN=m -CONFIG_R8169=m -CONFIG_R8169_VLAN=y -CONFIG_SIS190=m -CONFIG_SKGE=m -# CONFIG_SKGE_DEBUG is not set -CONFIG_SKY2=m -# CONFIG_SKY2_DEBUG is not set -CONFIG_VIA_VELOCITY=m -CONFIG_TIGON3=m -CONFIG_BNX2=m -CONFIG_CNIC=m -CONFIG_QLA3XXX=m -CONFIG_ATL1=m -CONFIG_ATL1E=m -CONFIG_ATL1C=m -CONFIG_JME=m -CONFIG_STMMAC_ETH=m -# CONFIG_STMMAC_DA is not set -# CONFIG_STMMAC_DUAL_MAC is not set -CONFIG_PCH_GBE=m -CONFIG_NETDEV_10000=y -CONFIG_MDIO=m -CONFIG_CHELSIO_T1=m -CONFIG_CHELSIO_T1_1G=y -CONFIG_CHELSIO_T3_DEPENDS=y -CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4_DEPENDS=y -CONFIG_CHELSIO_T4=m -CONFIG_CHELSIO_T4VF_DEPENDS=y -CONFIG_CHELSIO_T4VF=m -CONFIG_ENIC=m -CONFIG_IXGBE=m -CONFIG_IXGBE_DCB=y -CONFIG_IXGBEVF=m -CONFIG_IXGB=m -CONFIG_S2IO=m -CONFIG_VXGE=m -# CONFIG_VXGE_DEBUG_TRACE_ALL is not set -CONFIG_MYRI10GE=m -CONFIG_NETXEN_NIC=m -CONFIG_NIU=m -CONFIG_MLX4_EN=m -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y 
-CONFIG_TEHUTI=m -CONFIG_BNX2X=m -CONFIG_QLCNIC=m -CONFIG_QLGE=m -CONFIG_BNA=m -CONFIG_SFC=m -CONFIG_SFC_MTD=y -CONFIG_BE2NET=m -CONFIG_TR=m -CONFIG_IBMOL=m -CONFIG_3C359=m -CONFIG_TMS380TR=m -CONFIG_TMSPCI=m -CONFIG_ABYSS=m -CONFIG_WLAN=y -CONFIG_PCMCIA_RAYCS=m -CONFIG_LIBERTAS_THINFIRM=m -# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set -CONFIG_LIBERTAS_THINFIRM_USB=m -CONFIG_AIRO=m -CONFIG_ATMEL=m -CONFIG_PCI_ATMEL=m -CONFIG_PCMCIA_ATMEL=m -CONFIG_AT76C50X_USB=m -CONFIG_AIRO_CS=m -CONFIG_PCMCIA_WL3501=m -CONFIG_PRISM54=m -CONFIG_USB_ZD1201=m -CONFIG_USB_NET_RNDIS_WLAN=m -CONFIG_RTL8180=m -CONFIG_RTL8187=m -CONFIG_RTL8187_LEDS=y -CONFIG_ADM8211=m -CONFIG_MAC80211_HWSIM=m -CONFIG_MWL8K=m -CONFIG_ATH_COMMON=m -# CONFIG_ATH_DEBUG is not set -CONFIG_ATH5K=m -# CONFIG_ATH5K_DEBUG is not set -CONFIG_ATH5K_PCI=y -CONFIG_ATH9K_HW=m -CONFIG_ATH9K_COMMON=m -CONFIG_ATH9K=m -# CONFIG_ATH9K_DEBUGFS is not set -CONFIG_ATH9K_RATE_CONTROL=y -CONFIG_ATH9K_HTC=m -# CONFIG_ATH9K_HTC_DEBUGFS is not set -CONFIG_AR9170_USB=m -CONFIG_AR9170_LEDS=y -CONFIG_CARL9170=m -CONFIG_CARL9170_LEDS=y -# CONFIG_CARL9170_DEBUGFS is not set -CONFIG_CARL9170_WPC=y -CONFIG_B43=m -CONFIG_B43_PCI_AUTOSELECT=y -CONFIG_B43_PCICORE_AUTOSELECT=y -CONFIG_B43_PCMCIA=y -CONFIG_B43_SDIO=y -CONFIG_B43_PIO=y -CONFIG_B43_PHY_N=y -CONFIG_B43_PHY_LP=y -CONFIG_B43_LEDS=y -CONFIG_B43_HWRNG=y -# CONFIG_B43_DEBUG is not set -CONFIG_B43LEGACY=m -CONFIG_B43LEGACY_PCI_AUTOSELECT=y -CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y -CONFIG_B43LEGACY_LEDS=y -CONFIG_B43LEGACY_HWRNG=y -# CONFIG_B43LEGACY_DEBUG is not set -CONFIG_B43LEGACY_DMA=y -CONFIG_B43LEGACY_PIO=y -CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y -# CONFIG_B43LEGACY_DMA_MODE is not set -# CONFIG_B43LEGACY_PIO_MODE is not set -CONFIG_HOSTAP=m -CONFIG_HOSTAP_FIRMWARE=y -CONFIG_HOSTAP_FIRMWARE_NVRAM=y -CONFIG_HOSTAP_PLX=m -CONFIG_HOSTAP_PCI=m -CONFIG_HOSTAP_CS=m -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y -CONFIG_IPW2100_DEBUG=y -CONFIG_IPW2200=m -CONFIG_IPW2200_MONITOR=y 
-CONFIG_IPW2200_RADIOTAP=y -CONFIG_IPW2200_PROMISCUOUS=y -CONFIG_IPW2200_QOS=y -CONFIG_IPW2200_DEBUG=y -CONFIG_LIBIPW=m -CONFIG_LIBIPW_DEBUG=y -CONFIG_IWLWIFI=m - -# -# Debugging Options -# -CONFIG_IWLWIFI_DEBUG=y -CONFIG_IWLWIFI_DEBUGFS=y -# CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE is not set -# CONFIG_IWLWIFI_DEVICE_TRACING is not set -CONFIG_IWLAGN=m -CONFIG_IWL4965=y -CONFIG_IWL5000=y -CONFIG_IWL3945=m -CONFIG_IWM=m -# CONFIG_IWM_DEBUG is not set -# CONFIG_IWM_TRACING is not set -CONFIG_LIBERTAS=m -CONFIG_LIBERTAS_USB=m -CONFIG_LIBERTAS_CS=m -CONFIG_LIBERTAS_SDIO=m -CONFIG_LIBERTAS_SPI=m -# CONFIG_LIBERTAS_DEBUG is not set -CONFIG_LIBERTAS_MESH=y -CONFIG_HERMES=m -# CONFIG_HERMES_PRISM is not set -CONFIG_HERMES_CACHE_FW_ON_INIT=y -CONFIG_PLX_HERMES=m -CONFIG_TMD_HERMES=m -CONFIG_NORTEL_HERMES=m -CONFIG_PCMCIA_HERMES=m -CONFIG_PCMCIA_SPECTRUM=m -CONFIG_ORINOCO_USB=m -CONFIG_P54_COMMON=m -CONFIG_P54_USB=m -CONFIG_P54_PCI=m -CONFIG_P54_SPI=m -# CONFIG_P54_SPI_DEFAULT_EEPROM is not set -CONFIG_P54_LEDS=y -CONFIG_RT2X00=m -CONFIG_RT2400PCI=m -CONFIG_RT2500PCI=m -CONFIG_RT61PCI=m -# CONFIG_RT2800PCI is not set -CONFIG_RT2500USB=m -CONFIG_RT73USB=m -CONFIG_RT2800USB=m -CONFIG_RT2800USB_RT33XX=y -# CONFIG_RT2800USB_RT35XX is not set -# CONFIG_RT2800USB_UNKNOWN is not set -CONFIG_RT2800_LIB=m -CONFIG_RT2X00_LIB_PCI=m -CONFIG_RT2X00_LIB_USB=m -CONFIG_RT2X00_LIB=m -CONFIG_RT2X00_LIB_HT=y -CONFIG_RT2X00_LIB_FIRMWARE=y -CONFIG_RT2X00_LIB_CRYPTO=y -CONFIG_RT2X00_LIB_LEDS=y -# CONFIG_RT2X00_LIB_DEBUGFS is not set -# CONFIG_RT2X00_DEBUG is not set -CONFIG_RTL8192CE=m -CONFIG_RTLWIFI=m -CONFIG_WL1251=m -CONFIG_WL1251_SPI=m -CONFIG_WL1251_SDIO=m -CONFIG_WL12XX_MENU=m -CONFIG_WL12XX=m -# CONFIG_WL12XX_HT is not set -# CONFIG_WL12XX_SPI is not set -# CONFIG_WL12XX_SDIO is not set -# CONFIG_WL12XX_SDIO_TEST is not set -CONFIG_WL12XX_PLATFORM_DATA=y -CONFIG_ZD1211RW=m -# CONFIG_ZD1211RW_DEBUG is not set - -# -# WiMAX Wireless Broadband devices -# -CONFIG_WIMAX_I2400M=m 
-CONFIG_WIMAX_I2400M_USB=m -CONFIG_WIMAX_I2400M_SDIO=m -CONFIG_WIMAX_IWMC3200_SDIO=y -CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8 - -# -# USB Network Adapters -# -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=m -CONFIG_USB_USBNET=m -CONFIG_USB_NET_AX8817X=m -CONFIG_USB_NET_CDCETHER=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_DM9601=m -CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_GL620A=m -CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m -CONFIG_USB_NET_RNDIS_HOST=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_ALI_M5632=y -CONFIG_USB_AN2720=y -CONFIG_USB_BELKIN=y -CONFIG_USB_ARMLINUX=y -CONFIG_USB_EPSON2888=y -CONFIG_USB_KC2190=y -CONFIG_USB_NET_ZAURUS=m -CONFIG_USB_NET_CX82310_ETH=m -CONFIG_USB_HSO=m -CONFIG_USB_NET_INT51X1=m -CONFIG_USB_CDC_PHONET=m -CONFIG_USB_IPHETH=m -CONFIG_USB_SIERRA_NET=m -CONFIG_NET_PCMCIA=y -CONFIG_PCMCIA_3C589=m -CONFIG_PCMCIA_3C574=m -CONFIG_PCMCIA_FMVJ18X=m -CONFIG_PCMCIA_PCNET=m -CONFIG_PCMCIA_NMCLAN=m -CONFIG_PCMCIA_SMC91C92=m -CONFIG_PCMCIA_XIRC2PS=m -CONFIG_PCMCIA_AXNET=m -CONFIG_PCMCIA_IBMTR=m -CONFIG_WAN=y -CONFIG_LANMEDIA=m -CONFIG_HDLC=m -CONFIG_HDLC_RAW=m -CONFIG_HDLC_RAW_ETH=m -CONFIG_HDLC_CISCO=m -CONFIG_HDLC_FR=m -CONFIG_HDLC_PPP=m -CONFIG_HDLC_X25=m -CONFIG_PCI200SYN=m -CONFIG_WANXL=m -CONFIG_PC300TOO=m -CONFIG_FARSYNC=m -CONFIG_DSCC4=m -CONFIG_DSCC4_PCISYNC=y -CONFIG_DSCC4_PCI_RST=y -CONFIG_DLCI=m -CONFIG_DLCI_MAX=8 -# CONFIG_WAN_ROUTER_DRIVERS is not set -CONFIG_LAPBETHER=m -CONFIG_X25_ASY=m -CONFIG_SBNI=m -CONFIG_SBNI_MULTILINE=y -CONFIG_ATM_DRIVERS=y -CONFIG_ATM_DUMMY=m -CONFIG_ATM_TCP=m -CONFIG_ATM_LANAI=m -CONFIG_ATM_ENI=m -# CONFIG_ATM_ENI_DEBUG is not set -CONFIG_ATM_ENI_TUNE_BURST=y -CONFIG_ATM_ENI_BURST_TX_16W=y -CONFIG_ATM_ENI_BURST_TX_8W=y -CONFIG_ATM_ENI_BURST_TX_4W=y -CONFIG_ATM_ENI_BURST_TX_2W=y -CONFIG_ATM_ENI_BURST_RX_16W=y -CONFIG_ATM_ENI_BURST_RX_8W=y -CONFIG_ATM_ENI_BURST_RX_4W=y -CONFIG_ATM_ENI_BURST_RX_2W=y 
-CONFIG_ATM_FIRESTREAM=m -CONFIG_ATM_ZATM=m -# CONFIG_ATM_ZATM_DEBUG is not set -CONFIG_ATM_NICSTAR=m -# CONFIG_ATM_NICSTAR_USE_SUNI is not set -# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set -CONFIG_ATM_IDT77252=m -# CONFIG_ATM_IDT77252_DEBUG is not set -# CONFIG_ATM_IDT77252_RCV_ALL is not set -CONFIG_ATM_IDT77252_USE_SUNI=y -CONFIG_ATM_AMBASSADOR=m -# CONFIG_ATM_AMBASSADOR_DEBUG is not set -CONFIG_ATM_HORIZON=m -# CONFIG_ATM_HORIZON_DEBUG is not set -CONFIG_ATM_IA=m -# CONFIG_ATM_IA_DEBUG is not set -CONFIG_ATM_FORE200E=m -CONFIG_ATM_FORE200E_USE_TASKLET=y -CONFIG_ATM_FORE200E_TX_RETRY=16 -CONFIG_ATM_FORE200E_DEBUG=0 -CONFIG_ATM_HE=m -CONFIG_ATM_HE_USE_SUNI=y -CONFIG_ATM_SOLOS=m -CONFIG_IEEE802154_DRIVERS=m -CONFIG_IEEE802154_FAKEHARD=m - -# -# CAIF transport drivers -# -CONFIG_FDDI=m -CONFIG_DEFXX=m -CONFIG_DEFXX_MMIO=y -CONFIG_SKFP=m -CONFIG_HIPPI=y -CONFIG_ROADRUNNER=m -CONFIG_ROADRUNNER_LARGE_RINGS=y -CONFIG_PLIP=m -CONFIG_PPP=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPPOATM=m -CONFIG_SLIP=m -CONFIG_SLIP_COMPRESSED=y -CONFIG_SLHC=m -CONFIG_SLIP_SMART=y -CONFIG_SLIP_MODE_SLIP6=y -CONFIG_NET_FC=y -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL=y -CONFIG_NETPOLL_TRAP=y -CONFIG_NET_POLL_CONTROLLER=y -CONFIG_ISDN=y -CONFIG_ISDN_I4L=m -CONFIG_ISDN_PPP=y -CONFIG_ISDN_PPP_VJ=y -CONFIG_ISDN_MPP=y -CONFIG_IPPP_FILTER=y -CONFIG_ISDN_PPP_BSDCOMP=m -CONFIG_ISDN_AUDIO=y -CONFIG_ISDN_TTY_FAX=y -CONFIG_ISDN_X25=y - -# -# ISDN feature submodules -# -CONFIG_ISDN_DIVERSION=m - -# -# ISDN4Linux hardware drivers -# - -# -# Passive cards -# -CONFIG_ISDN_DRV_HISAX=m - -# -# D-channel protocol features -# -CONFIG_HISAX_EURO=y -CONFIG_DE_AOC=y -# CONFIG_HISAX_NO_SENDCOMPLETE is not set -# CONFIG_HISAX_NO_LLC is not set -# CONFIG_HISAX_NO_KEYPAD is not set -CONFIG_HISAX_1TR6=y -CONFIG_HISAX_NI1=y -CONFIG_HISAX_MAX_CARDS=8 - -# -# 
HiSax supported cards -# -CONFIG_HISAX_16_3=y -CONFIG_HISAX_TELESPCI=y -CONFIG_HISAX_S0BOX=y -CONFIG_HISAX_FRITZPCI=y -CONFIG_HISAX_AVM_A1_PCMCIA=y -CONFIG_HISAX_ELSA=y -CONFIG_HISAX_DIEHLDIVA=y -CONFIG_HISAX_SEDLBAUER=y -CONFIG_HISAX_NETJET=y -CONFIG_HISAX_NETJET_U=y -CONFIG_HISAX_NICCY=y -CONFIG_HISAX_BKM_A4T=y -CONFIG_HISAX_SCT_QUADRO=y -CONFIG_HISAX_GAZEL=y -CONFIG_HISAX_HFC_PCI=y -CONFIG_HISAX_W6692=y -CONFIG_HISAX_HFC_SX=y -CONFIG_HISAX_ENTERNOW_PCI=y -# CONFIG_HISAX_DEBUG is not set - -# -# HiSax PCMCIA card service modules -# -CONFIG_HISAX_SEDLBAUER_CS=m -CONFIG_HISAX_ELSA_CS=m -CONFIG_HISAX_AVM_A1_CS=m -CONFIG_HISAX_TELES_CS=m - -# -# HiSax sub driver modules -# -CONFIG_HISAX_ST5481=m -CONFIG_HISAX_HFCUSB=m -CONFIG_HISAX_HFC4S8S=m -CONFIG_HISAX_FRITZ_PCIPNP=m - -# -# Active cards -# -CONFIG_ISDN_CAPI=m -CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y -CONFIG_CAPI_TRACE=y -CONFIG_ISDN_CAPI_MIDDLEWARE=y -CONFIG_ISDN_CAPI_CAPI20=m -CONFIG_ISDN_CAPI_CAPIFS_BOOL=y -CONFIG_ISDN_CAPI_CAPIFS=m -CONFIG_ISDN_CAPI_CAPIDRV=m - -# -# CAPI hardware drivers -# -CONFIG_CAPI_AVM=y -CONFIG_ISDN_DRV_AVMB1_B1PCI=m -CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y -CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m -CONFIG_ISDN_DRV_AVMB1_AVM_CS=m -CONFIG_ISDN_DRV_AVMB1_T1PCI=m -CONFIG_ISDN_DRV_AVMB1_C4=m -CONFIG_CAPI_EICON=y -CONFIG_ISDN_DIVAS=m -CONFIG_ISDN_DIVAS_BRIPCI=y -CONFIG_ISDN_DIVAS_PRIPCI=y -CONFIG_ISDN_DIVAS_DIVACAPI=m -CONFIG_ISDN_DIVAS_USERIDI=m -CONFIG_ISDN_DIVAS_MAINT=m -CONFIG_ISDN_DRV_GIGASET=m -# CONFIG_GIGASET_CAPI is not set -CONFIG_GIGASET_I4L=y -# CONFIG_GIGASET_DUMMYLL is not set -CONFIG_GIGASET_BASE=m -CONFIG_GIGASET_M105=m -CONFIG_GIGASET_M101=m -# CONFIG_GIGASET_DEBUG is not set -CONFIG_HYSDN=m -CONFIG_HYSDN_CAPI=y -CONFIG_MISDN=m -CONFIG_MISDN_DSP=m -CONFIG_MISDN_L1OIP=m - -# -# mISDN hardware drivers -# -CONFIG_MISDN_HFCPCI=m -CONFIG_MISDN_HFCMULTI=m -CONFIG_MISDN_HFCUSB=m -CONFIG_MISDN_AVMFRITZ=m -CONFIG_MISDN_SPEEDFAX=m -CONFIG_MISDN_INFINEON=m -CONFIG_MISDN_W6692=m -CONFIG_MISDN_NETJET=m 
-CONFIG_MISDN_IPAC=m -CONFIG_MISDN_ISAR=m -CONFIG_ISDN_HDLC=m -CONFIG_PHONE=m -CONFIG_PHONE_IXJ=m -CONFIG_PHONE_IXJ_PCMCIA=m - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_FF_MEMLESS=m -CONFIG_INPUT_POLLDEV=m -CONFIG_INPUT_SPARSEKMAP=m - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -CONFIG_INPUT_JOYDEV=m -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -CONFIG_KEYBOARD_ADP5588=m -CONFIG_KEYBOARD_ATKBD=y -CONFIG_KEYBOARD_QT2160=m -# CONFIG_KEYBOARD_LKKBD is not set -CONFIG_KEYBOARD_GPIO=m -CONFIG_KEYBOARD_GPIO_POLLED=m -CONFIG_KEYBOARD_TCA6416=m -CONFIG_KEYBOARD_MATRIX=m -CONFIG_KEYBOARD_LM8323=m -CONFIG_KEYBOARD_MAX7359=m -CONFIG_KEYBOARD_MCS=m -CONFIG_KEYBOARD_NEWTON=m -CONFIG_KEYBOARD_OPENCORES=m -# CONFIG_KEYBOARD_STOWAWAY is not set -CONFIG_KEYBOARD_SUNKBD=m -CONFIG_KEYBOARD_XTKBD=m -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=y -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_SYNAPTICS_LED=y -CONFIG_MOUSE_PS2_LIFEBOOK=y -CONFIG_MOUSE_PS2_TRACKPOINT=y -CONFIG_MOUSE_PS2_ELANTECH=y -CONFIG_MOUSE_PS2_SENTELIC=y -CONFIG_MOUSE_PS2_TOUCHKIT=y -CONFIG_MOUSE_SERIAL=m -CONFIG_MOUSE_APPLETOUCH=m -CONFIG_MOUSE_BCM5974=m -CONFIG_MOUSE_VSXXXAA=m -CONFIG_MOUSE_GPIO=m -CONFIG_MOUSE_SYNAPTICS_I2C=m -CONFIG_INPUT_JOYSTICK=y -CONFIG_JOYSTICK_ANALOG=m -CONFIG_JOYSTICK_A3D=m -CONFIG_JOYSTICK_ADI=m -CONFIG_JOYSTICK_COBRA=m -CONFIG_JOYSTICK_GF2K=m -CONFIG_JOYSTICK_GRIP=m -CONFIG_JOYSTICK_GRIP_MP=m -CONFIG_JOYSTICK_GUILLEMOT=m -CONFIG_JOYSTICK_INTERACT=m -CONFIG_JOYSTICK_SIDEWINDER=m -CONFIG_JOYSTICK_TMDC=m -CONFIG_JOYSTICK_IFORCE=m -CONFIG_JOYSTICK_IFORCE_USB=y -CONFIG_JOYSTICK_IFORCE_232=y -CONFIG_JOYSTICK_WARRIOR=m -CONFIG_JOYSTICK_MAGELLAN=m -CONFIG_JOYSTICK_SPACEORB=m -CONFIG_JOYSTICK_SPACEBALL=m -CONFIG_JOYSTICK_STINGER=m 
-CONFIG_JOYSTICK_TWIDJOY=m -CONFIG_JOYSTICK_ZHENHUA=m -CONFIG_JOYSTICK_DB9=m -CONFIG_JOYSTICK_GAMECON=m -CONFIG_JOYSTICK_TURBOGRAFX=m -CONFIG_JOYSTICK_AS5011=m -CONFIG_JOYSTICK_JOYDUMP=m -CONFIG_JOYSTICK_XPAD=m -CONFIG_JOYSTICK_XPAD_FF=y -CONFIG_JOYSTICK_XPAD_LEDS=y -CONFIG_JOYSTICK_WALKERA0701=m -CONFIG_INPUT_TABLET=y -CONFIG_TABLET_USB_ACECAD=m -CONFIG_TABLET_USB_AIPTEK=m -CONFIG_TABLET_USB_GTCO=m -CONFIG_TABLET_USB_HANWANG=m -CONFIG_TABLET_USB_KBTAB=m -CONFIG_TABLET_USB_WACOM=m -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_ADS7846=m -CONFIG_TOUCHSCREEN_AD7877=m -CONFIG_TOUCHSCREEN_AD7879=m -CONFIG_TOUCHSCREEN_AD7879_I2C=m -CONFIG_TOUCHSCREEN_AD7879_SPI=m -CONFIG_TOUCHSCREEN_BU21013=m -CONFIG_TOUCHSCREEN_CY8CTMG110=m -CONFIG_TOUCHSCREEN_DYNAPRO=m -CONFIG_TOUCHSCREEN_HAMPSHIRE=m -CONFIG_TOUCHSCREEN_EETI=m -CONFIG_TOUCHSCREEN_FUJITSU=m -CONFIG_TOUCHSCREEN_GUNZE=m -CONFIG_TOUCHSCREEN_ELO=m -CONFIG_TOUCHSCREEN_ELOUSB=m -CONFIG_TOUCHSCREEN_WACOM_W8001=m -CONFIG_TOUCHSCREEN_MCS5000=m -CONFIG_TOUCHSCREEN_MTOUCH=m -CONFIG_TOUCHSCREEN_INEXIO=m -CONFIG_TOUCHSCREEN_MK712=m -CONFIG_TOUCHSCREEN_PENMOUNT=m -CONFIG_TOUCHSCREEN_QT602240=m -CONFIG_TOUCHSCREEN_TOUCHRIGHT=m -CONFIG_TOUCHSCREEN_TOUCHWIN=m -CONFIG_TOUCHSCREEN_UCB1400=m -CONFIG_TOUCHSCREEN_WM97XX=m -CONFIG_TOUCHSCREEN_WM9705=y -CONFIG_TOUCHSCREEN_WM9712=y -CONFIG_TOUCHSCREEN_WM9713=y -CONFIG_TOUCHSCREEN_USB_COMPOSITE=m -CONFIG_TOUCHSCREEN_MC13783=m -CONFIG_TOUCHSCREEN_USB_EGALAX=y -CONFIG_TOUCHSCREEN_USB_PANJIT=y -CONFIG_TOUCHSCREEN_USB_3M=y -CONFIG_TOUCHSCREEN_USB_ITM=y -CONFIG_TOUCHSCREEN_USB_ETURBO=y -CONFIG_TOUCHSCREEN_USB_GUNZE=y -CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y -CONFIG_TOUCHSCREEN_USB_IRTOUCH=y -CONFIG_TOUCHSCREEN_USB_IDEALTEK=y -CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y -CONFIG_TOUCHSCREEN_USB_GOTOP=y -CONFIG_TOUCHSCREEN_USB_JASTEC=y -CONFIG_TOUCHSCREEN_USB_E2I=y -CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y -CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y -CONFIG_TOUCHSCREEN_USB_NEXIO=y -CONFIG_TOUCHSCREEN_TOUCHIT213=m 
-CONFIG_TOUCHSCREEN_TSC2007=m -CONFIG_TOUCHSCREEN_PCAP=m -CONFIG_TOUCHSCREEN_ST1232=m -CONFIG_TOUCHSCREEN_TPS6507X=m -CONFIG_INPUT_MISC=y -CONFIG_INPUT_AD714X=m -CONFIG_INPUT_AD714X_I2C=m -CONFIG_INPUT_AD714X_SPI=m -CONFIG_INPUT_PCSPKR=m -CONFIG_INPUT_APANEL=m -CONFIG_INPUT_ATLAS_BTNS=m -CONFIG_INPUT_ATI_REMOTE=m -CONFIG_INPUT_ATI_REMOTE2=m -CONFIG_INPUT_KEYSPAN_REMOTE=m -CONFIG_INPUT_POWERMATE=m -CONFIG_INPUT_YEALINK=m -CONFIG_INPUT_CM109=m -CONFIG_INPUT_UINPUT=m -CONFIG_INPUT_PCF8574=m -CONFIG_INPUT_GPIO_ROTARY_ENCODER=m -CONFIG_INPUT_PCAP=m -CONFIG_INPUT_ADXL34X=m -CONFIG_INPUT_ADXL34X_I2C=m -CONFIG_INPUT_ADXL34X_SPI=m -CONFIG_INPUT_CMA3000=m -CONFIG_INPUT_CMA3000_I2C=m - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_SERIO_I8042=y -CONFIG_SERIO_SERPORT=m -CONFIG_SERIO_CT82C710=m -CONFIG_SERIO_PARKBD=m -CONFIG_SERIO_PCIPS2=m -CONFIG_SERIO_LIBPS2=y -CONFIG_SERIO_RAW=m -CONFIG_SERIO_ALTERA_PS2=m -CONFIG_SERIO_PS2MULT=m -CONFIG_GAMEPORT=m -CONFIG_GAMEPORT_NS558=m -CONFIG_GAMEPORT_L4=m -CONFIG_GAMEPORT_EMU10K1=m -CONFIG_GAMEPORT_FM801=m - -# -# Character devices -# -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_DEVKMEM=y -CONFIG_SERIAL_NONSTANDARD=y -CONFIG_COMPUTONE=m -CONFIG_ROCKETPORT=m -CONFIG_CYCLADES=m -# CONFIG_CYZ_INTR is not set -CONFIG_DIGIEPCA=m -CONFIG_MOXA_INTELLIO=m -CONFIG_MOXA_SMARTIO=m -CONFIG_ISI=m -CONFIG_SYNCLINK=m -CONFIG_SYNCLINKMP=m -CONFIG_SYNCLINK_GT=m -CONFIG_N_HDLC=m -CONFIG_N_GSM=m -CONFIG_RISCOM8=m -CONFIG_SPECIALIX=m -CONFIG_STALDRV=y -CONFIG_STALLION=m -CONFIG_ISTALLION=m -CONFIG_NOZOMI=m - -# -# Serial drivers -# -CONFIG_SERIAL_8250=m -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_SERIAL_8250_PCI=m -CONFIG_SERIAL_8250_PNP=m -CONFIG_SERIAL_8250_CS=m -CONFIG_SERIAL_8250_NR_UARTS=16 -CONFIG_SERIAL_8250_RUNTIME_UARTS=8 -# CONFIG_SERIAL_8250_EXTENDED is not set - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX3107 is not 
set -# CONFIG_SERIAL_MRST_MAX3110 is not set -# CONFIG_SERIAL_MFD_HSU is not set -CONFIG_SERIAL_UARTLITE=m -CONFIG_SERIAL_CORE=m -CONFIG_SERIAL_JSM=m -CONFIG_SERIAL_TIMBERDALE=m -CONFIG_SERIAL_ALTERA_JTAGUART=m -CONFIG_SERIAL_ALTERA_UART=m -CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4 -CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200 -CONFIG_SERIAL_IFX6X60=m -CONFIG_SERIAL_PCH_UART=m -CONFIG_UNIX98_PTYS=y -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y -CONFIG_LEGACY_PTYS=y -CONFIG_LEGACY_PTY_COUNT=0 -CONFIG_PRINTER=m -# CONFIG_LP_CONSOLE is not set -CONFIG_PPDEV=m -CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_PANIC_EVENT=y -# CONFIG_IPMI_PANIC_STRING is not set -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_IPMI_WATCHDOG=m -CONFIG_IPMI_POWEROFF=m -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=m -CONFIG_HW_RANDOM_INTEL=m -CONFIG_HW_RANDOM_AMD=m -CONFIG_HW_RANDOM_VIA=m -CONFIG_NVRAM=y -CONFIG_R3964=m -CONFIG_APPLICOM=m - -# -# PCMCIA character devices -# -CONFIG_SYNCLINK_CS=m -CONFIG_CARDMAN_4000=m -CONFIG_CARDMAN_4040=m -CONFIG_IPWIRELESS=m -CONFIG_MWAVE=m -CONFIG_RAW_DRIVER=m -CONFIG_MAX_RAW_DEVS=4096 -CONFIG_HANGCHECK_TIMER=m -CONFIG_TCG_TPM=m -CONFIG_TCG_TIS=m -CONFIG_TCG_NSC=m -CONFIG_TCG_ATMEL=m -CONFIG_TCG_INFINEON=m -CONFIG_TCG_XEN=m -CONFIG_TELCLOCK=m -CONFIG_DEVPORT=y -CONFIG_RAMOOPS=y -CONFIG_CRASHER=m -CONFIG_I2C=m -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=m -CONFIG_I2C_MUX=m - -# -# Multiplexer I2C Chip support -# -CONFIG_I2C_MUX_GPIO=m -CONFIG_I2C_MUX_PCA9541=m -CONFIG_I2C_MUX_PCA954x=m -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_SMBUS=m -CONFIG_I2C_ALGOBIT=m -CONFIG_I2C_ALGOPCA=m - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -CONFIG_I2C_ALI1535=m -CONFIG_I2C_ALI1563=m -CONFIG_I2C_ALI15X3=m -CONFIG_I2C_AMD756=m -CONFIG_I2C_AMD756_S4882=m -CONFIG_I2C_AMD8111=m -CONFIG_I2C_I801=m -CONFIG_I2C_ISCH=m -CONFIG_I2C_PIIX4=m -CONFIG_I2C_NFORCE2=m -CONFIG_I2C_NFORCE2_S4985=m -CONFIG_I2C_SIS5595=m -CONFIG_I2C_SIS630=m 
-CONFIG_I2C_SIS96X=m -CONFIG_I2C_VIA=m -CONFIG_I2C_VIAPRO=m - -# -# ACPI drivers -# -CONFIG_I2C_SCMI=m - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -CONFIG_I2C_GPIO=m -CONFIG_I2C_INTEL_MID=m -CONFIG_I2C_OCORES=m -CONFIG_I2C_PCA_PLATFORM=m -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_XILINX is not set -CONFIG_I2C_EG20T=m - -# -# External I2C/SMBus adapter drivers -# -CONFIG_I2C_PARPORT=m -CONFIG_I2C_PARPORT_LIGHT=m -CONFIG_I2C_TAOS_EVM=m -CONFIG_I2C_TINY_USB=m - -# -# Other I2C/SMBus bus drivers -# -CONFIG_I2C_STUB=m -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y - -# -# SPI Master Controller Drivers -# -CONFIG_SPI_BITBANG=m -CONFIG_SPI_BUTTERFLY=m -CONFIG_SPI_GPIO=m -CONFIG_SPI_LM70_LLP=m -# CONFIG_SPI_PXA2XX_PCI is not set -CONFIG_SPI_TOPCLIFF_PCH=m -CONFIG_SPI_XILINX=m -CONFIG_SPI_DESIGNWARE=y -CONFIG_SPI_DW_PCI=m -# CONFIG_SPI_DW_MID_DMA is not set - -# -# SPI Protocol Masters -# -CONFIG_SPI_SPIDEV=m -CONFIG_SPI_TLE62X0=m - -# -# PPS support -# -CONFIG_PPS=m -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_PARPORT=m - -# -# PPS generators support -# -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_GPIOLIB=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_MAX730X=m - -# -# Memory mapped GPIO expanders: -# -CONFIG_GPIO_BASIC_MMIO=m -CONFIG_GPIO_IT8761E=m -CONFIG_GPIO_SCH=m -CONFIG_GPIO_VX855=m - -# -# I2C GPIO expanders: -# -CONFIG_GPIO_MAX7300=m -CONFIG_GPIO_MAX732X=m -CONFIG_GPIO_PCA953X=m -CONFIG_GPIO_PCF857X=m -CONFIG_GPIO_ADP5588=m - -# -# PCI GPIO expanders: -# -CONFIG_GPIO_CS5535=m -CONFIG_GPIO_LANGWELL=y -CONFIG_GPIO_PCH=m -CONFIG_GPIO_ML_IOH=m -# CONFIG_GPIO_TIMBERDALE is not set -CONFIG_GPIO_RDC321X=m - -# -# SPI GPIO expanders: -# -CONFIG_GPIO_MAX7301=m -CONFIG_GPIO_MCP23S08=m 
-CONFIG_GPIO_MC33880=m -CONFIG_GPIO_74X164=m - -# -# AC97 GPIO expanders: -# -CONFIG_GPIO_UCB1400=y - -# -# MODULbus GPIO expanders: -# -CONFIG_GPIO_JANZ_TTL=m -CONFIG_W1=m -CONFIG_W1_CON=y - -# -# 1-wire Bus Masters -# -CONFIG_W1_MASTER_MATROX=m -CONFIG_W1_MASTER_DS2490=m -CONFIG_W1_MASTER_DS2482=m -CONFIG_W1_MASTER_GPIO=m - -# -# 1-wire Slaves -# -CONFIG_W1_SLAVE_THERM=m -CONFIG_W1_SLAVE_SMEM=m -CONFIG_W1_SLAVE_DS2423=m -CONFIG_W1_SLAVE_DS2431=m -CONFIG_W1_SLAVE_DS2433=m -CONFIG_W1_SLAVE_DS2433_CRC=y -CONFIG_W1_SLAVE_DS2760=m -CONFIG_W1_SLAVE_BQ27000=m -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -CONFIG_PDA_POWER=m -# CONFIG_TEST_POWER is not set -CONFIG_BATTERY_DS2760=m -CONFIG_BATTERY_DS2782=m -CONFIG_BATTERY_BQ20Z75=m -CONFIG_BATTERY_BQ27x00=m -CONFIG_BATTERY_MAX17040=m -CONFIG_BATTERY_MAX17042=m -CONFIG_CHARGER_GPIO=m -CONFIG_HWMON=m -CONFIG_HWMON_VID=m -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -CONFIG_SENSORS_ABITUGURU=m -CONFIG_SENSORS_ABITUGURU3=m -CONFIG_SENSORS_AD7414=m -CONFIG_SENSORS_AD7418=m -CONFIG_SENSORS_ADCXX=m -CONFIG_SENSORS_ADM1021=m -CONFIG_SENSORS_ADM1025=m -CONFIG_SENSORS_ADM1026=m -CONFIG_SENSORS_ADM1029=m -CONFIG_SENSORS_ADM1031=m -CONFIG_SENSORS_ADM9240=m -CONFIG_SENSORS_ADT7411=m -CONFIG_SENSORS_ADT7462=m -CONFIG_SENSORS_ADT7470=m -CONFIG_SENSORS_ADT7475=m -CONFIG_SENSORS_ASC7621=m -CONFIG_SENSORS_K8TEMP=m -CONFIG_SENSORS_K10TEMP=m -CONFIG_SENSORS_ASB100=m -CONFIG_SENSORS_ATXP1=m -CONFIG_SENSORS_DS620=m -CONFIG_SENSORS_DS1621=m -CONFIG_SENSORS_I5K_AMB=m -CONFIG_SENSORS_F71805F=m -CONFIG_SENSORS_F71882FG=m -CONFIG_SENSORS_F75375S=m -CONFIG_SENSORS_FSCHMD=m -CONFIG_SENSORS_G760A=m -CONFIG_SENSORS_GL518SM=m -CONFIG_SENSORS_GL520SM=m -CONFIG_SENSORS_GPIO_FAN=m -CONFIG_SENSORS_CORETEMP=m -CONFIG_SENSORS_PKGTEMP=m -CONFIG_SENSORS_IBMAEM=m -CONFIG_SENSORS_IBMPEX=m -CONFIG_SENSORS_IT87=m -CONFIG_SENSORS_JC42=m -CONFIG_SENSORS_LM63=m -CONFIG_SENSORS_LM70=m -CONFIG_SENSORS_LM73=m -CONFIG_SENSORS_LM75=m 
-CONFIG_SENSORS_LM77=m -CONFIG_SENSORS_LM78=m -CONFIG_SENSORS_LM80=m -CONFIG_SENSORS_LM83=m -CONFIG_SENSORS_LM85=m -CONFIG_SENSORS_LM87=m -CONFIG_SENSORS_LM90=m -CONFIG_SENSORS_LM92=m -CONFIG_SENSORS_LM93=m -CONFIG_SENSORS_LTC4215=m -CONFIG_SENSORS_LTC4245=m -CONFIG_SENSORS_LTC4261=m -CONFIG_SENSORS_LM95241=m -CONFIG_SENSORS_MAX1111=m -CONFIG_SENSORS_MAX1619=m -CONFIG_SENSORS_MAX6650=m -CONFIG_SENSORS_PC87360=m -CONFIG_SENSORS_PC87427=m -CONFIG_SENSORS_PCF8591=m -CONFIG_SENSORS_SHT15=m -CONFIG_SENSORS_SHT21=m -CONFIG_SENSORS_SIS5595=m -CONFIG_SENSORS_SMM665=m -CONFIG_SENSORS_DME1737=m -CONFIG_SENSORS_EMC1403=m -CONFIG_SENSORS_EMC2103=m -CONFIG_SENSORS_SMSC47M1=m -CONFIG_SENSORS_SMSC47M192=m -CONFIG_SENSORS_SMSC47B397=m -CONFIG_SENSORS_ADS7828=m -CONFIG_SENSORS_ADS7871=m -CONFIG_SENSORS_AMC6821=m -CONFIG_SENSORS_THMC50=m -CONFIG_SENSORS_TMP102=m -CONFIG_SENSORS_TMP401=m -CONFIG_SENSORS_TMP421=m -CONFIG_SENSORS_VIA_CPUTEMP=m -CONFIG_SENSORS_VIA686A=m -CONFIG_SENSORS_VT1211=m -CONFIG_SENSORS_VT8231=m -CONFIG_SENSORS_W83781D=m -CONFIG_SENSORS_W83791D=m -CONFIG_SENSORS_W83792D=m -CONFIG_SENSORS_W83793=m -CONFIG_SENSORS_W83795=m -CONFIG_SENSORS_W83795_FANCTRL=y -CONFIG_SENSORS_W83L785TS=m -CONFIG_SENSORS_W83L786NG=m -CONFIG_SENSORS_W83627HF=m -CONFIG_SENSORS_W83627EHF=m -CONFIG_SENSORS_LIS3_I2C=m -CONFIG_SENSORS_APPLESMC=m -CONFIG_SENSORS_MC13783_ADC=m - -# -# ACPI drivers -# -CONFIG_SENSORS_ATK0110=m -CONFIG_SENSORS_LIS3LV02D=m -CONFIG_THERMAL=m -CONFIG_THERMAL_HWMON=y -CONFIG_WATCHDOG=y -# CONFIG_WATCHDOG_NOWAYOUT is not set - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -CONFIG_ACQUIRE_WDT=m -CONFIG_ADVANTECH_WDT=m -CONFIG_ALIM1535_WDT=m -CONFIG_ALIM7101_WDT=m -CONFIG_F71808E_WDT=m -CONFIG_SP5100_TCO=m -CONFIG_GEODE_WDT=m -CONFIG_SC520_WDT=m -CONFIG_SBC_FITPC2_WATCHDOG=m -CONFIG_EUROTECH_WDT=m -CONFIG_IB700_WDT=m -CONFIG_IBMASR=m -CONFIG_WAFER_WDT=m -CONFIG_I6300ESB_WDT=m -CONFIG_ITCO_WDT=m -CONFIG_ITCO_VENDOR_SUPPORT=y -CONFIG_IT8712F_WDT=m 
-CONFIG_IT87_WDT=m -CONFIG_HP_WATCHDOG=m -# CONFIG_HPWDT_NMI_DECODING is not set -CONFIG_SC1200_WDT=m -CONFIG_PC87413_WDT=m -CONFIG_NV_TCO=m -CONFIG_60XX_WDT=m -CONFIG_SBC8360_WDT=m -CONFIG_CPU5_WDT=m -CONFIG_SMSC_SCH311X_WDT=m -CONFIG_SMSC37B787_WDT=m -CONFIG_W83627HF_WDT=m -CONFIG_W83697HF_WDT=m -CONFIG_W83697UG_WDT=m -CONFIG_W83877F_WDT=m -CONFIG_W83977F_WDT=m -CONFIG_MACHZ_WDT=m -CONFIG_SBC_EPX_C3_WATCHDOG=m -CONFIG_XEN_WDT=m - -# -# PCI-based Watchdog Cards -# -CONFIG_PCIPCWATCHDOG=m -CONFIG_WDTPCI=m - -# -# USB-based Watchdog Cards -# -CONFIG_USBPCWATCHDOG=m -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -CONFIG_SSB=m -CONFIG_SSB_SPROM=y -CONFIG_SSB_BLOCKIO=y -CONFIG_SSB_PCIHOST_POSSIBLE=y -CONFIG_SSB_PCIHOST=y -CONFIG_SSB_B43_PCI_BRIDGE=y -CONFIG_SSB_PCMCIAHOST_POSSIBLE=y -CONFIG_SSB_PCMCIAHOST=y -CONFIG_SSB_SDIOHOST_POSSIBLE=y -CONFIG_SSB_SDIOHOST=y -# CONFIG_SSB_DEBUG is not set -CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y -CONFIG_SSB_DRIVER_PCICORE=y -CONFIG_MFD_SUPPORT=y -CONFIG_MFD_CORE=m -CONFIG_MFD_SM501=m -CONFIG_MFD_SM501_GPIO=y -CONFIG_HTC_PASIC3=m -CONFIG_UCB1400_CORE=m -CONFIG_TPS65010=m -CONFIG_TPS6507X=m -# CONFIG_MFD_TMIO is not set -CONFIG_MFD_WM8400=m -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_PCF50633 is not set -CONFIG_MFD_MC13783=m -CONFIG_MFD_MC13XXX=m -# CONFIG_ABX500_CORE is not set -CONFIG_EZX_PCAP=y -CONFIG_MFD_CS5535=m -CONFIG_MFD_TIMBERDALE=m -CONFIG_LPC_SCH=m -CONFIG_MFD_RDC321X=m -CONFIG_MFD_JANZ_CMODIO=m -CONFIG_MFD_VX855=m -CONFIG_MFD_WL1273_CORE=m -CONFIG_REGULATOR=y -# CONFIG_REGULATOR_DEBUG is not set -CONFIG_REGULATOR_DUMMY=y -# CONFIG_REGULATOR_FIXED_VOLTAGE is not set -CONFIG_REGULATOR_VIRTUAL_CONSUMER=m -CONFIG_REGULATOR_USERSPACE_CONSUMER=m -CONFIG_REGULATOR_BQ24022=m -CONFIG_REGULATOR_MAX1586=m -CONFIG_REGULATOR_MAX8649=m -CONFIG_REGULATOR_MAX8660=m -CONFIG_REGULATOR_MAX8952=m -CONFIG_REGULATOR_WM8400=m -CONFIG_REGULATOR_LP3971=m -CONFIG_REGULATOR_LP3972=m -CONFIG_REGULATOR_PCAP=m 
-CONFIG_REGULATOR_MC13XXX_CORE=m -CONFIG_REGULATOR_MC13783=m -CONFIG_REGULATOR_MC13892=m -CONFIG_REGULATOR_TPS65023=m -CONFIG_REGULATOR_TPS6507X=m -CONFIG_REGULATOR_ISL6271A=m -CONFIG_REGULATOR_AD5398=m -CONFIG_REGULATOR_TPS6524X=m -CONFIG_MEDIA_SUPPORT=m - -# -# Multimedia core support -# -CONFIG_VIDEO_DEV=m -CONFIG_VIDEO_V4L2_COMMON=m -CONFIG_DVB_CORE=m -CONFIG_VIDEO_MEDIA=m - -# -# Multimedia drivers -# -CONFIG_VIDEO_SAA7146=m -CONFIG_VIDEO_SAA7146_VV=m -CONFIG_RC_CORE=m -CONFIG_LIRC=m -CONFIG_RC_MAP=m -CONFIG_IR_NEC_DECODER=m -CONFIG_IR_RC5_DECODER=m -CONFIG_IR_RC6_DECODER=m -CONFIG_IR_JVC_DECODER=m -CONFIG_IR_SONY_DECODER=m -CONFIG_IR_RC5_SZ_DECODER=m -CONFIG_IR_LIRC_CODEC=m -# CONFIG_IR_ENE is not set -CONFIG_IR_IMON=m -CONFIG_IR_MCEUSB=m -CONFIG_IR_NUVOTON=m -CONFIG_IR_STREAMZAP=m -CONFIG_IR_WINBOND_CIR=m -CONFIG_RC_LOOPBACK=m -CONFIG_MEDIA_ATTACH=y -CONFIG_MEDIA_TUNER=m -# CONFIG_MEDIA_TUNER_CUSTOMISE is not set -CONFIG_MEDIA_TUNER_SIMPLE=m -CONFIG_MEDIA_TUNER_TDA8290=m -CONFIG_MEDIA_TUNER_TDA827X=m -CONFIG_MEDIA_TUNER_TDA18271=m -CONFIG_MEDIA_TUNER_TDA9887=m -CONFIG_MEDIA_TUNER_TEA5761=m -CONFIG_MEDIA_TUNER_TEA5767=m -CONFIG_MEDIA_TUNER_MT20XX=m -CONFIG_MEDIA_TUNER_MT2060=m -CONFIG_MEDIA_TUNER_MT2266=m -CONFIG_MEDIA_TUNER_MT2131=m -CONFIG_MEDIA_TUNER_QT1010=m -CONFIG_MEDIA_TUNER_XC2028=m -CONFIG_MEDIA_TUNER_XC5000=m -CONFIG_MEDIA_TUNER_MXL5005S=m -CONFIG_MEDIA_TUNER_MXL5007T=m -CONFIG_MEDIA_TUNER_MC44S803=m -CONFIG_MEDIA_TUNER_MAX2165=m -CONFIG_MEDIA_TUNER_TDA18218=m -CONFIG_VIDEO_V4L2=m -CONFIG_VIDEOBUF_GEN=m -CONFIG_VIDEOBUF_DMA_SG=m -CONFIG_VIDEOBUF_VMALLOC=m -CONFIG_VIDEOBUF_DMA_CONTIG=m -CONFIG_VIDEOBUF_DVB=m -CONFIG_VIDEO_BTCX=m -CONFIG_VIDEO_TVEEPROM=m -CONFIG_VIDEO_TUNER=m -CONFIG_VIDEO_CAPTURE_DRIVERS=y -# CONFIG_VIDEO_ADV_DEBUG is not set -# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set -CONFIG_VIDEO_HELPER_CHIPS_AUTO=y -CONFIG_VIDEO_IR_I2C=m - -# -# Audio decoders -# -CONFIG_VIDEO_TVAUDIO=m -CONFIG_VIDEO_TDA7432=m -CONFIG_VIDEO_TDA9840=m 
-CONFIG_VIDEO_TEA6415C=m -CONFIG_VIDEO_TEA6420=m -CONFIG_VIDEO_MSP3400=m -CONFIG_VIDEO_CS5345=m -CONFIG_VIDEO_CS53L32A=m -CONFIG_VIDEO_M52790=m -CONFIG_VIDEO_WM8775=m -CONFIG_VIDEO_WM8739=m -CONFIG_VIDEO_VP27SMPX=m - -# -# RDS decoders -# -CONFIG_VIDEO_SAA6588=m - -# -# Video decoders -# -CONFIG_VIDEO_ADV7180=m -CONFIG_VIDEO_BT819=m -CONFIG_VIDEO_BT856=m -CONFIG_VIDEO_BT866=m -CONFIG_VIDEO_KS0127=m -CONFIG_VIDEO_OV7670=m -CONFIG_VIDEO_MT9V011=m -CONFIG_VIDEO_SAA7110=m -CONFIG_VIDEO_SAA711X=m -CONFIG_VIDEO_SAA717X=m -CONFIG_VIDEO_TVP5150=m -CONFIG_VIDEO_VPX3220=m - -# -# Video and audio decoders -# -CONFIG_VIDEO_CX25840=m - -# -# MPEG video encoders -# -CONFIG_VIDEO_CX2341X=m - -# -# Video encoders -# -CONFIG_VIDEO_SAA7127=m -CONFIG_VIDEO_SAA7185=m -CONFIG_VIDEO_ADV7170=m -CONFIG_VIDEO_ADV7175=m - -# -# Video improvement chips -# -CONFIG_VIDEO_UPD64031A=m -CONFIG_VIDEO_UPD64083=m -CONFIG_VIDEO_VIVI=m -CONFIG_VIDEO_BT848=m -CONFIG_VIDEO_BT848_DVB=y -CONFIG_VIDEO_BWQCAM=m -CONFIG_VIDEO_CQCAM=m -CONFIG_VIDEO_W9966=m -CONFIG_VIDEO_CPIA2=m -CONFIG_VIDEO_ZORAN=m -CONFIG_VIDEO_ZORAN_DC30=m -CONFIG_VIDEO_ZORAN_ZR36060=m -CONFIG_VIDEO_ZORAN_BUZ=m -CONFIG_VIDEO_ZORAN_DC10=m -CONFIG_VIDEO_ZORAN_LML33=m -CONFIG_VIDEO_ZORAN_LML33R10=m -CONFIG_VIDEO_ZORAN_AVS6EYES=m -CONFIG_VIDEO_MEYE=m -CONFIG_VIDEO_SAA7134=m -CONFIG_VIDEO_SAA7134_ALSA=m -CONFIG_VIDEO_SAA7134_RC=y -CONFIG_VIDEO_SAA7134_DVB=m -CONFIG_VIDEO_MXB=m -CONFIG_VIDEO_HEXIUM_ORION=m -CONFIG_VIDEO_HEXIUM_GEMINI=m -CONFIG_VIDEO_TIMBERDALE=m -CONFIG_VIDEO_CX88=m -CONFIG_VIDEO_CX88_ALSA=m -CONFIG_VIDEO_CX88_BLACKBIRD=m -CONFIG_VIDEO_CX88_DVB=m -CONFIG_VIDEO_CX88_MPEG=m -CONFIG_VIDEO_CX88_VP3054=m -CONFIG_VIDEO_CX23885=m -CONFIG_VIDEO_AU0828=m -CONFIG_VIDEO_IVTV=m -CONFIG_VIDEO_FB_IVTV=m -CONFIG_VIDEO_CX18=m -CONFIG_VIDEO_CX18_ALSA=m -CONFIG_VIDEO_SAA7164=m -CONFIG_VIDEO_CAFE_CCIC=m -CONFIG_VIDEO_SR030PC30=m -CONFIG_VIDEO_VIA_CAMERA=m -CONFIG_SOC_CAMERA=m -CONFIG_SOC_CAMERA_IMX074=m -CONFIG_SOC_CAMERA_MT9M001=m 
-CONFIG_SOC_CAMERA_MT9M111=m -CONFIG_SOC_CAMERA_MT9T031=m -CONFIG_SOC_CAMERA_MT9T112=m -CONFIG_SOC_CAMERA_MT9V022=m -CONFIG_SOC_CAMERA_RJ54N1=m -CONFIG_SOC_CAMERA_TW9910=m -CONFIG_SOC_CAMERA_PLATFORM=m -CONFIG_SOC_CAMERA_OV2640=m -CONFIG_SOC_CAMERA_OV6650=m -CONFIG_SOC_CAMERA_OV772X=m -CONFIG_SOC_CAMERA_OV9640=m -CONFIG_V4L_USB_DRIVERS=y -CONFIG_USB_VIDEO_CLASS=m -CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y -CONFIG_USB_GSPCA=m -CONFIG_USB_M5602=m -CONFIG_USB_STV06XX=m -CONFIG_USB_GL860=m -CONFIG_USB_GSPCA_BENQ=m -CONFIG_USB_GSPCA_CONEX=m -CONFIG_USB_GSPCA_CPIA1=m -CONFIG_USB_GSPCA_ETOMS=m -CONFIG_USB_GSPCA_FINEPIX=m -CONFIG_USB_GSPCA_JEILINJ=m -CONFIG_USB_GSPCA_KONICA=m -CONFIG_USB_GSPCA_MARS=m -CONFIG_USB_GSPCA_MR97310A=m -CONFIG_USB_GSPCA_OV519=m -CONFIG_USB_GSPCA_OV534=m -CONFIG_USB_GSPCA_OV534_9=m -CONFIG_USB_GSPCA_PAC207=m -CONFIG_USB_GSPCA_PAC7302=m -CONFIG_USB_GSPCA_PAC7311=m -CONFIG_USB_GSPCA_SN9C2028=m -CONFIG_USB_GSPCA_SN9C20X=m -CONFIG_USB_GSPCA_SONIXB=m -CONFIG_USB_GSPCA_SONIXJ=m -CONFIG_USB_GSPCA_SPCA500=m -CONFIG_USB_GSPCA_SPCA501=m -CONFIG_USB_GSPCA_SPCA505=m -CONFIG_USB_GSPCA_SPCA506=m -CONFIG_USB_GSPCA_SPCA508=m -CONFIG_USB_GSPCA_SPCA561=m -CONFIG_USB_GSPCA_SPCA1528=m -CONFIG_USB_GSPCA_SQ905=m -CONFIG_USB_GSPCA_SQ905C=m -CONFIG_USB_GSPCA_SQ930X=m -CONFIG_USB_GSPCA_STK014=m -CONFIG_USB_GSPCA_STV0680=m -CONFIG_USB_GSPCA_SUNPLUS=m -CONFIG_USB_GSPCA_T613=m -CONFIG_USB_GSPCA_TV8532=m -CONFIG_USB_GSPCA_VC032X=m -CONFIG_USB_GSPCA_XIRLINK_CIT=m -CONFIG_USB_GSPCA_ZC3XX=m -CONFIG_VIDEO_PVRUSB2=m -CONFIG_VIDEO_PVRUSB2_SYSFS=y -CONFIG_VIDEO_PVRUSB2_DVB=y -# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set -CONFIG_VIDEO_HDPVR=m -CONFIG_VIDEO_EM28XX=m -CONFIG_VIDEO_EM28XX_ALSA=m -CONFIG_VIDEO_EM28XX_DVB=m -CONFIG_VIDEO_TLG2300=m -CONFIG_VIDEO_CX231XX=m -CONFIG_VIDEO_CX231XX_RC=y -CONFIG_VIDEO_CX231XX_ALSA=m -CONFIG_VIDEO_CX231XX_DVB=m -CONFIG_VIDEO_USBVISION=m -CONFIG_USB_ET61X251=m -CONFIG_USB_SN9C102=m -CONFIG_USB_PWC=m -# CONFIG_USB_PWC_DEBUG is not set 
-CONFIG_USB_PWC_INPUT_EVDEV=y -CONFIG_USB_ZR364XX=m -CONFIG_USB_STKWEBCAM=m -CONFIG_USB_S2255=m -# CONFIG_V4L_MEM2MEM_DRIVERS is not set -CONFIG_RADIO_ADAPTERS=y -CONFIG_RADIO_MAXIRADIO=m -CONFIG_RADIO_MAESTRO=m -CONFIG_I2C_SI4713=m -CONFIG_RADIO_SI4713=m -CONFIG_USB_DSBR=m -CONFIG_RADIO_SI470X=y -CONFIG_USB_SI470X=m -CONFIG_I2C_SI470X=m -CONFIG_USB_MR800=m -CONFIG_RADIO_TEA5764=m -CONFIG_RADIO_SAA7706H=m -CONFIG_RADIO_TEF6862=m -CONFIG_RADIO_TIMBERDALE=m -CONFIG_RADIO_WL1273=m -CONFIG_DVB_MAX_ADAPTERS=8 -CONFIG_DVB_DYNAMIC_MINORS=y -CONFIG_DVB_CAPTURE_DRIVERS=y - -# -# Supported SAA7146 based PCI Adapters -# -CONFIG_TTPCI_EEPROM=m -CONFIG_DVB_AV7110=m -CONFIG_DVB_AV7110_OSD=y -CONFIG_DVB_BUDGET_CORE=m -CONFIG_DVB_BUDGET=m -CONFIG_DVB_BUDGET_CI=m -CONFIG_DVB_BUDGET_AV=m -CONFIG_DVB_BUDGET_PATCH=m - -# -# Supported USB Adapters -# -CONFIG_DVB_USB=m -# CONFIG_DVB_USB_DEBUG is not set -CONFIG_DVB_USB_A800=m -CONFIG_DVB_USB_DIBUSB_MB=m -# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set -CONFIG_DVB_USB_DIBUSB_MC=m -CONFIG_DVB_USB_DIB0700=m -CONFIG_DVB_USB_UMT_010=m -CONFIG_DVB_USB_CXUSB=m -CONFIG_DVB_USB_M920X=m -CONFIG_DVB_USB_GL861=m -CONFIG_DVB_USB_AU6610=m -CONFIG_DVB_USB_DIGITV=m -CONFIG_DVB_USB_VP7045=m -CONFIG_DVB_USB_VP702X=m -CONFIG_DVB_USB_GP8PSK=m -CONFIG_DVB_USB_NOVA_T_USB2=m -CONFIG_DVB_USB_TTUSB2=m -CONFIG_DVB_USB_DTT200U=m -CONFIG_DVB_USB_OPERA1=m -CONFIG_DVB_USB_AF9005=m -CONFIG_DVB_USB_AF9005_REMOTE=m -CONFIG_DVB_USB_DW2102=m -CONFIG_DVB_USB_CINERGY_T2=m -CONFIG_DVB_USB_ANYSEE=m -CONFIG_DVB_USB_DTV5100=m -CONFIG_DVB_USB_AF9015=m -CONFIG_DVB_USB_CE6230=m -CONFIG_DVB_USB_FRIIO=m -CONFIG_DVB_USB_EC168=m -CONFIG_DVB_USB_AZ6027=m -CONFIG_DVB_USB_LME2510=m -CONFIG_DVB_TTUSB_BUDGET=m -CONFIG_DVB_TTUSB_DEC=m -CONFIG_SMS_SIANO_MDTV=m - -# -# Siano module components -# -CONFIG_SMS_USB_DRV=m -CONFIG_SMS_SDIO_DRV=m - -# -# Supported FlexCopII (B2C2) Adapters -# -CONFIG_DVB_B2C2_FLEXCOP=m -CONFIG_DVB_B2C2_FLEXCOP_PCI=m -CONFIG_DVB_B2C2_FLEXCOP_USB=m -# 
CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set - -# -# Supported BT878 Adapters -# -CONFIG_DVB_BT8XX=m - -# -# Supported Pluto2 Adapters -# -CONFIG_DVB_PLUTO2=m - -# -# Supported SDMC DM1105 Adapters -# -CONFIG_DVB_DM1105=m -CONFIG_DVB_FIREDTV=m -CONFIG_DVB_FIREDTV_FIREWIRE=y -# CONFIG_DVB_FIREDTV_IEEE1394 is not set -CONFIG_DVB_FIREDTV_INPUT=y - -# -# Supported Earthsoft PT1 Adapters -# -CONFIG_DVB_PT1=m - -# -# Supported Mantis Adapters -# -CONFIG_MANTIS_CORE=m -CONFIG_DVB_MANTIS=m -CONFIG_DVB_HOPPER=m - -# -# Supported nGene Adapters -# -CONFIG_DVB_NGENE=m - -# -# Supported DVB Frontends -# -# CONFIG_DVB_FE_CUSTOMISE is not set - -# -# Multistandard (satellite) frontends -# -CONFIG_DVB_STB0899=m -CONFIG_DVB_STB6100=m -CONFIG_DVB_STV090x=m -CONFIG_DVB_STV6110x=m - -# -# DVB-S (satellite) frontends -# -CONFIG_DVB_CX24110=m -CONFIG_DVB_CX24123=m -CONFIG_DVB_MT312=m -CONFIG_DVB_ZL10036=m -CONFIG_DVB_ZL10039=m -CONFIG_DVB_S5H1420=m -CONFIG_DVB_STV0288=m -CONFIG_DVB_STB6000=m -CONFIG_DVB_STV0299=m -CONFIG_DVB_STV6110=m -CONFIG_DVB_STV0900=m -CONFIG_DVB_TDA8083=m -CONFIG_DVB_TDA10086=m -CONFIG_DVB_TDA8261=m -CONFIG_DVB_VES1X93=m -CONFIG_DVB_TUNER_ITD1000=m -CONFIG_DVB_TUNER_CX24113=m -CONFIG_DVB_TDA826X=m -CONFIG_DVB_TUA6100=m -CONFIG_DVB_CX24116=m -CONFIG_DVB_SI21XX=m -CONFIG_DVB_DS3000=m -CONFIG_DVB_MB86A16=m - -# -# DVB-T (terrestrial) frontends -# -CONFIG_DVB_SP8870=m -CONFIG_DVB_SP887X=m -CONFIG_DVB_CX22700=m -CONFIG_DVB_CX22702=m -CONFIG_DVB_L64781=m -CONFIG_DVB_TDA1004X=m -CONFIG_DVB_NXT6000=m -CONFIG_DVB_MT352=m -CONFIG_DVB_ZL10353=m -CONFIG_DVB_DIB3000MB=m -CONFIG_DVB_DIB3000MC=m -CONFIG_DVB_DIB7000M=m -CONFIG_DVB_DIB7000P=m -CONFIG_DVB_TDA10048=m -CONFIG_DVB_AF9013=m -CONFIG_DVB_EC100=m - -# -# DVB-C (cable) frontends -# -CONFIG_DVB_VES1820=m -CONFIG_DVB_TDA10021=m -CONFIG_DVB_TDA10023=m -CONFIG_DVB_STV0297=m - -# -# ATSC (North American/Korean Terrestrial/Cable DTV) frontends -# -CONFIG_DVB_NXT200X=m -CONFIG_DVB_OR51211=m -CONFIG_DVB_OR51132=m 
-CONFIG_DVB_BCM3510=m -CONFIG_DVB_LGDT330X=m -CONFIG_DVB_LGDT3305=m -CONFIG_DVB_S5H1409=m -CONFIG_DVB_AU8522=m -CONFIG_DVB_S5H1411=m - -# -# ISDB-T (terrestrial) frontends -# -CONFIG_DVB_S921=m -CONFIG_DVB_DIB8000=m -CONFIG_DVB_MB86A20S=m - -# -# Digital terrestrial only tuners/PLL -# -CONFIG_DVB_PLL=m -CONFIG_DVB_TUNER_DIB0070=m -CONFIG_DVB_TUNER_DIB0090=m - -# -# SEC control devices for DVB-S -# -CONFIG_DVB_LNBP21=m -CONFIG_DVB_ISL6405=m -CONFIG_DVB_ISL6421=m -CONFIG_DVB_ISL6423=m -CONFIG_DVB_LGS8GXX=m -CONFIG_DVB_ATBM8830=m -CONFIG_DVB_TDA665x=m -CONFIG_DVB_IX2505V=m - -# -# Tools to develop new frontends -# -# CONFIG_DVB_DUMMY_FE is not set - -# -# Graphics support -# -CONFIG_AGP=m -CONFIG_AGP_AMD64=m -CONFIG_AGP_INTEL=m -CONFIG_AGP_SIS=m -CONFIG_AGP_VIA=m -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 -CONFIG_VGA_SWITCHEROO=y -CONFIG_DRM=m -CONFIG_DRM_KMS_HELPER=m -CONFIG_DRM_TTM=m -CONFIG_DRM_TDFX=m -CONFIG_DRM_R128=m -CONFIG_DRM_RADEON=m -CONFIG_DRM_RADEON_KMS=y -CONFIG_DRM_I810=m -CONFIG_DRM_I830=m -CONFIG_DRM_I915=m -CONFIG_DRM_I915_KMS=y -CONFIG_DRM_MGA=m -CONFIG_DRM_SIS=m -CONFIG_DRM_VIA=m -CONFIG_DRM_SAVAGE=m -CONFIG_STUB_POULSBO=m -CONFIG_VGASTATE=m -CONFIG_VIDEO_OUTPUT_CONTROL=m -CONFIG_FB=y -CONFIG_FIRMWARE_EDID=y -CONFIG_FB_DDC=m -CONFIG_FB_BOOT_VESA_SUPPORT=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -CONFIG_FB_SYS_FILLRECT=m -CONFIG_FB_SYS_COPYAREA=m -CONFIG_FB_SYS_IMAGEBLIT=m -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=m -# CONFIG_FB_WMT_GE_ROPS is not set -CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_HECUBA=m -CONFIG_FB_SVGALIB=m -# CONFIG_FB_MACMODES is not set -CONFIG_FB_BACKLIGHT=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y - -# -# Frame buffer hardware drivers -# -CONFIG_FB_CIRRUS=m -CONFIG_FB_PM2=m -CONFIG_FB_PM2_FIFO_DISCONNECT=y -CONFIG_FB_CYBER2000=m -CONFIG_FB_ARC=m -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set 
-CONFIG_FB_VGA16=m -CONFIG_FB_UVESA=m -CONFIG_FB_VESA=y -CONFIG_FB_N411=m -CONFIG_FB_HGA=m -CONFIG_FB_S1D13XXX=m -CONFIG_FB_NVIDIA=m -CONFIG_FB_NVIDIA_I2C=y -# CONFIG_FB_NVIDIA_DEBUG is not set -CONFIG_FB_NVIDIA_BACKLIGHT=y -CONFIG_FB_RIVA=m -CONFIG_FB_RIVA_I2C=y -# CONFIG_FB_RIVA_DEBUG is not set -CONFIG_FB_RIVA_BACKLIGHT=y -CONFIG_FB_LE80578=m -CONFIG_FB_CARILLO_RANCH=m -CONFIG_FB_MATROX=m -CONFIG_FB_MATROX_MILLENIUM=y -CONFIG_FB_MATROX_MYSTIQUE=y -CONFIG_FB_MATROX_G=y -CONFIG_FB_MATROX_I2C=m -CONFIG_FB_MATROX_MAVEN=m -CONFIG_FB_RADEON=m -CONFIG_FB_RADEON_I2C=y -CONFIG_FB_RADEON_BACKLIGHT=y -# CONFIG_FB_RADEON_DEBUG is not set -CONFIG_FB_ATY128=m -CONFIG_FB_ATY128_BACKLIGHT=y -CONFIG_FB_ATY=m -CONFIG_FB_ATY_CT=y -CONFIG_FB_ATY_GENERIC_LCD=y -CONFIG_FB_ATY_GX=y -CONFIG_FB_ATY_BACKLIGHT=y -CONFIG_FB_S3=m -CONFIG_FB_SAVAGE=m -CONFIG_FB_SAVAGE_I2C=y -CONFIG_FB_SAVAGE_ACCEL=y -CONFIG_FB_SIS=m -CONFIG_FB_SIS_300=y -CONFIG_FB_SIS_315=y -CONFIG_FB_VIA=m -# CONFIG_FB_VIA_DIRECT_PROCFS is not set -CONFIG_FB_NEOMAGIC=m -CONFIG_FB_KYRO=m -CONFIG_FB_3DFX=m -CONFIG_FB_3DFX_ACCEL=y -CONFIG_FB_3DFX_I2C=y -CONFIG_FB_VOODOO1=m -CONFIG_FB_VT8623=m -CONFIG_FB_TRIDENT=m -CONFIG_FB_ARK=m -CONFIG_FB_PM3=m -CONFIG_FB_CARMINE=m -CONFIG_FB_CARMINE_DRAM_EVAL=y -# CONFIG_CARMINE_DRAM_CUSTOM is not set -CONFIG_FB_GEODE=y -CONFIG_FB_GEODE_LX=m -CONFIG_FB_GEODE_GX=m -CONFIG_FB_GEODE_GX1=m -CONFIG_FB_TMIO=m -CONFIG_FB_TMIO_ACCELL=y -CONFIG_FB_SM501=m -CONFIG_FB_UDL=m -CONFIG_FB_VIRTUAL=m -CONFIG_FB_METRONOME=m -CONFIG_FB_MB862XX=m -CONFIG_FB_MB862XX_PCI_GDC=y -CONFIG_FB_BROADSHEET=m -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_LCD_CLASS_DEVICE=m -CONFIG_LCD_L4F00242T03=m -CONFIG_LCD_LMS283GF05=m -CONFIG_LCD_LTV350QV=m -CONFIG_LCD_ILI9320=m -CONFIG_LCD_TDO24M=m -CONFIG_LCD_VGG2432A4=m -CONFIG_LCD_PLATFORM=m -CONFIG_LCD_S6E63M0=m -CONFIG_BACKLIGHT_CLASS_DEVICE=y -CONFIG_BACKLIGHT_GENERIC=m -CONFIG_BACKLIGHT_PROGEAR=m -CONFIG_BACKLIGHT_CARILLO_RANCH=m -CONFIG_BACKLIGHT_MBP_NVIDIA=m 
-CONFIG_BACKLIGHT_SAHARA=m -CONFIG_BACKLIGHT_ADP8860=m - -# -# Display device support -# -CONFIG_DISPLAY_SUPPORT=m - -# -# Display hardware drivers -# - -# -# Console display driver support -# -CONFIG_VGA_CONSOLE=y -CONFIG_VGACON_SOFT_SCROLLBACK=y -CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 -CONFIG_DUMMY_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set -# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -# CONFIG_LOGO is not set - -# -# Bootsplash configuration -# -CONFIG_BOOTSPLASH=y -CONFIG_SOUND=m -CONFIG_SOUND_OSS_CORE=y -# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set -CONFIG_SND=m -CONFIG_SND_TIMER=m -CONFIG_SND_PCM=m -CONFIG_SND_HWDEP=m -CONFIG_SND_RAWMIDI=m -CONFIG_SND_JACK=y -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m -CONFIG_SND_OSSEMUL=y -CONFIG_SND_MIXER_OSS=m -CONFIG_SND_PCM_OSS=m -CONFIG_SND_PCM_OSS_PLUGINS=y -CONFIG_SND_SEQUENCER_OSS=y -CONFIG_SND_HRTIMER=m -CONFIG_SND_SEQ_HRTIMER_DEFAULT=y -CONFIG_SND_DYNAMIC_MINORS=y -CONFIG_SND_SUPPORT_OLD_API=y -CONFIG_SND_VERBOSE_PROCFS=y -CONFIG_SND_VERBOSE_PRINTK=y -CONFIG_SND_DEBUG=y -# CONFIG_SND_DEBUG_VERBOSE is not set -CONFIG_SND_PCM_XRUN_DEBUG=y -CONFIG_SND_VMASTER=y -CONFIG_SND_DMA_SGBUF=y -CONFIG_SND_RAWMIDI_SEQ=m -CONFIG_SND_OPL3_LIB_SEQ=m -# CONFIG_SND_OPL4_LIB_SEQ is not set -# CONFIG_SND_SBAWE_SEQ is not set -CONFIG_SND_EMU10K1_SEQ=m -CONFIG_SND_MPU401_UART=m -CONFIG_SND_OPL3_LIB=m -CONFIG_SND_VX_LIB=m -CONFIG_SND_AC97_CODEC=m -CONFIG_SND_DRIVERS=y -# CONFIG_SND_PCSP is not set -CONFIG_SND_DUMMY=m -CONFIG_SND_ALOOP=m -CONFIG_SND_VIRMIDI=m -CONFIG_SND_MTPAV=m -CONFIG_SND_MTS64=m -CONFIG_SND_SERIAL_U16550=m -CONFIG_SND_MPU401=m -CONFIG_SND_PORTMAN2X4=m -CONFIG_SND_AC97_POWER_SAVE=y -CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0 -CONFIG_SND_SB_COMMON=m -CONFIG_SND_SB16_DSP=m -CONFIG_SND_PCI=y -CONFIG_SND_AD1889=m -CONFIG_SND_ALS300=m -CONFIG_SND_ALS4000=m -CONFIG_SND_ALI5451=m -CONFIG_SND_ASIHPI=m 
-CONFIG_SND_ATIIXP=m -CONFIG_SND_ATIIXP_MODEM=m -CONFIG_SND_AU8810=m -CONFIG_SND_AU8820=m -CONFIG_SND_AU8830=m -CONFIG_SND_AW2=m -CONFIG_SND_AZT3328=m -CONFIG_SND_BT87X=m -# CONFIG_SND_BT87X_OVERCLOCK is not set -CONFIG_SND_CA0106=m -CONFIG_SND_CMIPCI=m -CONFIG_SND_OXYGEN_LIB=m -CONFIG_SND_OXYGEN=m -CONFIG_SND_CS4281=m -CONFIG_SND_CS46XX=m -CONFIG_SND_CS46XX_NEW_DSP=y -CONFIG_SND_CS5530=m -CONFIG_SND_CS5535AUDIO=m -CONFIG_SND_CTXFI=m -CONFIG_SND_DARLA20=m -CONFIG_SND_GINA20=m -CONFIG_SND_LAYLA20=m -CONFIG_SND_DARLA24=m -CONFIG_SND_GINA24=m -CONFIG_SND_LAYLA24=m -CONFIG_SND_MONA=m -CONFIG_SND_MIA=m -CONFIG_SND_ECHO3G=m -CONFIG_SND_INDIGO=m -CONFIG_SND_INDIGOIO=m -CONFIG_SND_INDIGODJ=m -CONFIG_SND_INDIGOIOX=m -CONFIG_SND_INDIGODJX=m -CONFIG_SND_EMU10K1=m -CONFIG_SND_EMU10K1X=m -CONFIG_SND_ENS1370=m -CONFIG_SND_ENS1371=m -CONFIG_SND_ES1938=m -CONFIG_SND_ES1968=m -CONFIG_SND_ES1968_INPUT=y -CONFIG_SND_FM801=m -CONFIG_SND_FM801_TEA575X_BOOL=y -CONFIG_SND_FM801_TEA575X=m -CONFIG_SND_HDA_INTEL=m -CONFIG_SND_HDA_HWDEP=y -CONFIG_SND_HDA_RECONFIG=y -CONFIG_SND_HDA_INPUT_BEEP=y -CONFIG_SND_HDA_INPUT_BEEP_MODE=2 -CONFIG_SND_HDA_INPUT_JACK=y -CONFIG_SND_HDA_PATCH_LOADER=y -CONFIG_SND_HDA_CODEC_REALTEK=y -CONFIG_SND_HDA_CODEC_ANALOG=y -CONFIG_SND_HDA_CODEC_SIGMATEL=y -CONFIG_SND_HDA_CODEC_VIA=y -CONFIG_SND_HDA_CODEC_HDMI=y -CONFIG_SND_HDA_CODEC_CIRRUS=y -CONFIG_SND_HDA_CODEC_CONEXANT=y -CONFIG_SND_HDA_CODEC_CA0110=y -CONFIG_SND_HDA_CODEC_CMEDIA=y -CONFIG_SND_HDA_CODEC_SI3054=y -CONFIG_SND_HDA_GENERIC=y -CONFIG_SND_HDA_POWER_SAVE=y -CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 -CONFIG_SND_HDSP=m -CONFIG_SND_HDSPM=m -CONFIG_SND_ICE1712=m -CONFIG_SND_ICE1724=m -CONFIG_SND_INTEL8X0=m -CONFIG_SND_INTEL8X0M=m -CONFIG_SND_KORG1212=m -CONFIG_SND_LX6464ES=m -CONFIG_SND_MAESTRO3=m -CONFIG_SND_MAESTRO3_INPUT=y -CONFIG_SND_MIXART=m -CONFIG_SND_NM256=m -CONFIG_SND_PCXHR=m -CONFIG_SND_RIPTIDE=m -CONFIG_SND_RME32=m -CONFIG_SND_RME96=m -CONFIG_SND_RME9652=m -CONFIG_SND_SONICVIBES=m -CONFIG_SND_TRIDENT=m 
-CONFIG_SND_VIA82XX=m -CONFIG_SND_VIA82XX_MODEM=m -CONFIG_SND_VIRTUOSO=m -CONFIG_SND_VX222=m -CONFIG_SND_YMFPCI=m -CONFIG_SND_SPI=y -CONFIG_SND_USB=y -CONFIG_SND_USB_AUDIO=m -CONFIG_SND_USB_UA101=m -CONFIG_SND_USB_USX2Y=m -CONFIG_SND_USB_CAIAQ=m -CONFIG_SND_USB_CAIAQ_INPUT=y -CONFIG_SND_USB_US122L=m -CONFIG_SND_PCMCIA=y -CONFIG_SND_VXPOCKET=m -CONFIG_SND_PDAUDIOCF=m -# CONFIG_SND_SOC is not set -CONFIG_SOUND_PRIME=m -CONFIG_SOUND_OSS=m -CONFIG_SOUND_TRACEINIT=y -CONFIG_SOUND_DMAP=y -CONFIG_SOUND_VMIDI=m -CONFIG_SOUND_TRIX=m -CONFIG_SOUND_MSS=m -CONFIG_SOUND_MPU401=m -CONFIG_SOUND_PAS=m -CONFIG_SOUND_PSS=m -CONFIG_PSS_MIXER=y -# CONFIG_PSS_HAVE_BOOT is not set -# CONFIG_SOUND_SB is not set -CONFIG_SOUND_YM3812=m -CONFIG_SOUND_UART6850=m -CONFIG_SOUND_AEDSP16=m -CONFIG_SC6600=y -CONFIG_SC6600_JOY=y -CONFIG_SC6600_CDROM=4 -CONFIG_SC6600_CDROMBASE=0 -CONFIG_AC97_BUS=m -CONFIG_HID_SUPPORT=y -CONFIG_HID=m -CONFIG_HIDRAW=y - -# -# USB Input Devices -# -CONFIG_USB_HID=m -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y - -# -# Special HID drivers -# -CONFIG_HID_3M_PCT=m -CONFIG_HID_A4TECH=m -CONFIG_HID_ACRUX=m -CONFIG_HID_ACRUX_FF=m -CONFIG_HID_APPLE=m -CONFIG_HID_BELKIN=m -CONFIG_HID_CANDO=m -CONFIG_HID_CHERRY=m -CONFIG_HID_CHICONY=m -CONFIG_HID_PRODIKEYS=m -CONFIG_HID_CYPRESS=m -CONFIG_HID_DRAGONRISE=m -CONFIG_DRAGONRISE_FF=y -CONFIG_HID_EMS_FF=m -CONFIG_HID_EGALAX=m -CONFIG_HID_ELECOM=m -CONFIG_HID_EZKEY=m -CONFIG_HID_KYE=m -CONFIG_HID_UCLOGIC=m -CONFIG_HID_WALTOP=m -CONFIG_HID_GYRATION=m -CONFIG_HID_TWINHAN=m -CONFIG_HID_KENSINGTON=m -CONFIG_HID_LOGITECH=m -CONFIG_LOGITECH_FF=y -CONFIG_LOGIRUMBLEPAD2_FF=y -CONFIG_LOGIG940_FF=y -CONFIG_LOGIWII_FF=y -CONFIG_HID_MAGICMOUSE=m -CONFIG_HID_MICROSOFT=m -CONFIG_HID_MOSART=m -CONFIG_HID_MONTEREY=m -CONFIG_HID_MULTITOUCH=m -CONFIG_HID_NTRIG=m -CONFIG_HID_ORTEK=m -CONFIG_HID_PANTHERLORD=m -CONFIG_PANTHERLORD_FF=y -CONFIG_HID_PETALYNX=m -CONFIG_HID_PICOLCD=m -CONFIG_HID_PICOLCD_FB=y -CONFIG_HID_PICOLCD_BACKLIGHT=y -CONFIG_HID_PICOLCD_LCD=y 
-CONFIG_HID_PICOLCD_LEDS=y -CONFIG_HID_QUANTA=m -CONFIG_HID_ROCCAT=m -CONFIG_HID_ROCCAT_KONE=m -CONFIG_HID_ROCCAT_KONEPLUS=m -CONFIG_HID_ROCCAT_PYRA=m -CONFIG_HID_SAMSUNG=m -CONFIG_HID_SONY=m -CONFIG_HID_STANTUM=m -CONFIG_HID_SUNPLUS=m -CONFIG_HID_GREENASIA=m -CONFIG_GREENASIA_FF=y -CONFIG_HID_SMARTJOYPLUS=m -CONFIG_SMARTJOYPLUS_FF=y -CONFIG_HID_TOPSEED=m -CONFIG_HID_THRUSTMASTER=m -CONFIG_THRUSTMASTER_FF=y -CONFIG_HID_WACOM=m -CONFIG_HID_WACOM_POWER_SUPPLY=y -CONFIG_HID_ZEROPLUS=m -CONFIG_ZEROPLUS_FF=y -CONFIG_HID_ZYDACRON=m -CONFIG_USB_SUPPORT=y -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB_ARCH_HAS_OHCI=y -CONFIG_USB_ARCH_HAS_EHCI=y -CONFIG_USB=m -# CONFIG_USB_DEBUG is not set -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEVICEFS=y -# CONFIG_USB_DEVICE_CLASS is not set -# CONFIG_USB_DYNAMIC_MINORS is not set -CONFIG_USB_SUSPEND=y -# CONFIG_USB_OTG is not set -CONFIG_USB_MON=m -CONFIG_USB_WUSB=m -CONFIG_USB_WUSB_CBAF=m -# CONFIG_USB_WUSB_CBAF_DEBUG is not set - -# -# USB Host Controller Drivers -# -CONFIG_USB_C67X00_HCD=m -CONFIG_USB_XHCI_HCD=m -# CONFIG_USB_XHCI_HCD_DEBUGGING is not set -CONFIG_USB_EHCI_HCD=m -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_OXU210HP_HCD=m -CONFIG_USB_ISP116X_HCD=m -CONFIG_USB_ISP1760_HCD=m -CONFIG_USB_ISP1362_HCD=m -CONFIG_USB_OHCI_HCD=m -# CONFIG_USB_OHCI_HCD_SSB is not set -# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set -# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_UHCI_HCD=m -CONFIG_USB_U132_HCD=m -CONFIG_USB_SL811_HCD=m -CONFIG_USB_SL811_CS=m -CONFIG_USB_R8A66597_HCD=m -CONFIG_USB_WHCI_HCD=m -CONFIG_USB_HWA_HCD=m - -# -# Enable Host or Gadget support to see Inventra options -# - -# -# USB Device Class drivers -# -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m -CONFIG_USB_TMC=m - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# 
-CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m -CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_STORAGE_ALAUDA=m -CONFIG_USB_STORAGE_ONETOUCH=m -CONFIG_USB_STORAGE_KARMA=m -CONFIG_USB_STORAGE_CYPRESS_ATACB=m -CONFIG_USB_UAS=m -# CONFIG_USB_LIBUSUAL is not set - -# -# USB Imaging devices -# -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m - -# -# USB port drivers -# -CONFIG_USB_USS720=m -CONFIG_USB_SERIAL=m -CONFIG_USB_EZUSB=y -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_AIRCABLE=m -CONFIG_USB_SERIAL_ARK3116=m -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_CH341=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_CP210X=m -CONFIG_USB_SERIAL_CYPRESS_M8=m -CONFIG_USB_SERIAL_EMPEG=m -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_FUNSOFT=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -CONFIG_USB_SERIAL_GARMIN=m -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_IUU=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -CONFIG_USB_SERIAL_KEYSPAN=m -CONFIG_USB_SERIAL_KLSI=m -CONFIG_USB_SERIAL_KOBIL_SCT=m -CONFIG_USB_SERIAL_MCT_U232=m -CONFIG_USB_SERIAL_MOS7720=m -CONFIG_USB_SERIAL_MOS7715_PARPORT=y -CONFIG_USB_SERIAL_MOS7840=m -CONFIG_USB_SERIAL_MOTOROLA=m -CONFIG_USB_SERIAL_NAVMAN=m -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_OTI6858=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_QUALCOMM=m -CONFIG_USB_SERIAL_SPCP8X5=m -CONFIG_USB_SERIAL_HP4X=m -CONFIG_USB_SERIAL_SAFE=m -CONFIG_USB_SERIAL_SAFE_PADDED=y -CONFIG_USB_SERIAL_SAMBA=m -CONFIG_USB_SERIAL_SIEMENS_MPI=m -CONFIG_USB_SERIAL_SIERRAWIRELESS=m -CONFIG_USB_SERIAL_SYMBOL=m -CONFIG_USB_SERIAL_TI=m -CONFIG_USB_SERIAL_CYBERJACK=m -CONFIG_USB_SERIAL_XIRCOM=m -CONFIG_USB_SERIAL_WWAN=m -CONFIG_USB_SERIAL_OPTION=m 
-CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_SERIAL_OPTICON=m -CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m -CONFIG_USB_SERIAL_ZIO=m -CONFIG_USB_SERIAL_SSU100=m -CONFIG_USB_SERIAL_DEBUG=m - -# -# USB Miscellaneous drivers -# -CONFIG_USB_EMI62=m -CONFIG_USB_EMI26=m -CONFIG_USB_ADUTUX=m -CONFIG_USB_SEVSEG=m -CONFIG_USB_RIO500=m -CONFIG_USB_LEGOTOWER=m -CONFIG_USB_LCD=m -CONFIG_USB_LED=m -CONFIG_USB_CYPRESS_CY7C63=m -CONFIG_USB_CYTHERM=m -CONFIG_USB_IDMOUSE=m -CONFIG_USB_FTDI_ELAN=m -CONFIG_USB_APPLEDISPLAY=m -CONFIG_USB_SISUSBVGA=m -CONFIG_USB_SISUSBVGA_CON=y -CONFIG_USB_LD=m -CONFIG_USB_TRANCEVIBRATOR=m -CONFIG_USB_IOWARRIOR=m -# CONFIG_USB_TEST is not set -CONFIG_USB_ISIGHTFW=m -CONFIG_USB_YUREX=m -CONFIG_USB_ATM=m -CONFIG_USB_SPEEDTOUCH=m -CONFIG_USB_CXACRU=m -CONFIG_USB_UEAGLEATM=m -CONFIG_USB_XUSBATM=m -# CONFIG_USB_GADGET is not set - -# -# OTG and related infrastructure -# -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_NOP_USB_XCEIV is not set -CONFIG_UWB=m -CONFIG_UWB_HWA=m -CONFIG_UWB_WHCI=m -CONFIG_UWB_I1480U=m -CONFIG_MMC=m -# CONFIG_MMC_DEBUG is not set -# CONFIG_MMC_UNSAFE_RESUME is not set -# CONFIG_MMC_CLKGATE is not set - -# -# MMC/SD/SDIO Card Drivers -# -CONFIG_MMC_BLOCK=m -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_MMC_BLOCK_BOUNCE=y -CONFIG_SDIO_UART=m -CONFIG_MMC_TEST=m - -# -# MMC/SD/SDIO Host Controller Drivers -# -CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_PCI=m -CONFIG_MMC_RICOH_MMC=y -CONFIG_MMC_SDHCI_PLTFM=m -CONFIG_MMC_WBSD=m -CONFIG_MMC_TIFM_SD=m -CONFIG_MMC_SPI=m -CONFIG_MMC_SDRICOH_CS=m -CONFIG_MMC_CB710=m -CONFIG_MMC_VIA_SDMMC=m -CONFIG_MMC_USHC=m -CONFIG_MEMSTICK=m -# CONFIG_MEMSTICK_DEBUG is not set - -# -# MemoryStick drivers -# -# CONFIG_MEMSTICK_UNSAFE_RESUME is not set -CONFIG_MSPRO_BLOCK=m - -# -# MemoryStick Host Controller Drivers -# -CONFIG_MEMSTICK_TIFM_MS=m -CONFIG_MEMSTICK_JMICRON_38X=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y - -# -# LED drivers -# -CONFIG_LEDS_NET5501=m -CONFIG_LEDS_ALIX2=m -CONFIG_LEDS_PCA9532=m -CONFIG_LEDS_GPIO=m 
-CONFIG_LEDS_GPIO_PLATFORM=y -CONFIG_LEDS_LP3944=m -CONFIG_LEDS_LP5521=m -CONFIG_LEDS_LP5523=m -CONFIG_LEDS_CLEVO_MAIL=m -CONFIG_LEDS_PCA955X=m -CONFIG_LEDS_DAC124S085=m -CONFIG_LEDS_REGULATOR=m -CONFIG_LEDS_BD2802=m -CONFIG_LEDS_INTEL_SS4200=m -CONFIG_LEDS_LT3593=m -CONFIG_LEDS_DELL_NETBOOKS=m -CONFIG_LEDS_MC13783=m -CONFIG_LEDS_TRIGGERS=y - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -CONFIG_LEDS_TRIGGER_GPIO=m -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m - -# -# iptables trigger is under Netfilter config (LED target) -# -CONFIG_NFC_DEVICES=y -CONFIG_PN544_NFC=m -# CONFIG_ACCESSIBILITY is not set -CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_MAD=m -CONFIG_INFINIBAND_USER_ACCESS=m -CONFIG_INFINIBAND_USER_MEM=y -CONFIG_INFINIBAND_ADDR_TRANS=y -CONFIG_INFINIBAND_MTHCA=m -CONFIG_INFINIBAND_MTHCA_DEBUG=y -CONFIG_INFINIBAND_QIB=m -CONFIG_INFINIBAND_AMSO1100=m -# CONFIG_INFINIBAND_AMSO1100_DEBUG is not set -CONFIG_INFINIBAND_CXGB3=m -# CONFIG_INFINIBAND_CXGB3_DEBUG is not set -CONFIG_INFINIBAND_CXGB4=m -CONFIG_MLX4_INFINIBAND=m -CONFIG_INFINIBAND_NES=m -# CONFIG_INFINIBAND_NES_DEBUG is not set -CONFIG_INFINIBAND_IPOIB=m -CONFIG_INFINIBAND_IPOIB_CM=y -CONFIG_INFINIBAND_IPOIB_DEBUG=y -# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set -CONFIG_INFINIBAND_SRP=m -CONFIG_INFINIBAND_ISER=m -CONFIG_EDAC=y - -# -# Reporting subsystems -# -# CONFIG_EDAC_DEBUG is not set -CONFIG_EDAC_DECODE_MCE=m -# CONFIG_EDAC_MCE_INJ is not set -CONFIG_EDAC_MM_EDAC=m -CONFIG_EDAC_MCE=y -CONFIG_EDAC_E752X=m -CONFIG_EDAC_I82975X=m -CONFIG_EDAC_I3000=m -CONFIG_EDAC_I3200=m -CONFIG_EDAC_X38=m -CONFIG_EDAC_I5400=m -CONFIG_EDAC_I7CORE=m -CONFIG_EDAC_I5000=m -CONFIG_EDAC_I5100=m -CONFIG_EDAC_I7300=m -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y 
-CONFIG_RTC_INTF_DEV_UIE_EMUL=y -CONFIG_RTC_DRV_TEST=m - -# -# I2C RTC drivers -# -CONFIG_RTC_DRV_DS1307=m -CONFIG_RTC_DRV_DS1374=m -CONFIG_RTC_DRV_DS1672=m -CONFIG_RTC_DRV_DS3232=m -CONFIG_RTC_DRV_MAX6900=m -CONFIG_RTC_DRV_RS5C372=m -CONFIG_RTC_DRV_ISL1208=m -CONFIG_RTC_DRV_ISL12022=m -CONFIG_RTC_DRV_X1205=m -CONFIG_RTC_DRV_PCF8563=m -CONFIG_RTC_DRV_PCF8583=m -CONFIG_RTC_DRV_M41T80=m -CONFIG_RTC_DRV_M41T80_WDT=y -CONFIG_RTC_DRV_BQ32K=m -CONFIG_RTC_DRV_S35390A=m -CONFIG_RTC_DRV_FM3130=m -CONFIG_RTC_DRV_RX8581=m -CONFIG_RTC_DRV_RX8025=m - -# -# SPI RTC drivers -# -CONFIG_RTC_DRV_M41T94=m -CONFIG_RTC_DRV_DS1305=m -CONFIG_RTC_DRV_DS1390=m -CONFIG_RTC_DRV_MAX6902=m -CONFIG_RTC_DRV_R9701=m -CONFIG_RTC_DRV_RS5C348=m -CONFIG_RTC_DRV_DS3234=m -CONFIG_RTC_DRV_PCF2123=m - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -CONFIG_RTC_DRV_DS1286=m -CONFIG_RTC_DRV_DS1511=m -CONFIG_RTC_DRV_DS1553=m -CONFIG_RTC_DRV_DS1742=m -CONFIG_RTC_DRV_STK17TA8=m -CONFIG_RTC_DRV_M48T86=m -CONFIG_RTC_DRV_M48T35=m -CONFIG_RTC_DRV_M48T59=m -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_BQ4802=m -CONFIG_RTC_DRV_RP5C01=m -CONFIG_RTC_DRV_V3020=m - -# -# on-CPU RTC drivers -# -CONFIG_RTC_DRV_PCAP=m -CONFIG_RTC_DRV_MC13XXX=m -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_INTEL_MID_DMAC=m -CONFIG_INTEL_IOATDMA=m -CONFIG_TIMB_DMA=m -CONFIG_PCH_DMA=m -CONFIG_DMA_ENGINE=y - -# -# DMA Clients -# -CONFIG_NET_DMA=y -CONFIG_ASYNC_TX_DMA=y -CONFIG_DMATEST=m -CONFIG_AUXDISPLAY=y -CONFIG_KS0108=m -CONFIG_KS0108_PORT=0x378 -CONFIG_KS0108_DELAY=2 -CONFIG_CFAG12864B=m -CONFIG_CFAG12864B_RATE=20 -CONFIG_UIO=m -CONFIG_UIO_CIF=m -CONFIG_UIO_PDRV=m -CONFIG_UIO_PDRV_GENIRQ=m -CONFIG_UIO_AEC=m -CONFIG_UIO_SERCOS3=m -CONFIG_UIO_PCI_GENERIC=m -CONFIG_UIO_NETX=m -CONFIG_XEN=y -CONFIG_XEN_INTERFACE_VERSION=0x00030207 - -# -# XEN -# -CONFIG_XEN_PRIVILEGED_GUEST=y -CONFIG_XEN_PRIVCMD=y -CONFIG_XEN_DOMCTL=y -CONFIG_XEN_XENBUS_DEV=y -CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL=m 
-CONFIG_XEN_BACKEND=m -CONFIG_XEN_BLKDEV_BACKEND=m -CONFIG_XEN_BLKDEV_TAP=m -CONFIG_XEN_BLKDEV_TAP2=m -CONFIG_XEN_BLKBACK_PAGEMAP=m -CONFIG_XEN_NETDEV_BACKEND=m -CONFIG_XEN_NETDEV_TX_SHIFT=10 -# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set -CONFIG_XEN_NETDEV_LOOPBACK=m -CONFIG_XEN_PCIDEV_BACKEND=m -CONFIG_XEN_PCIDEV_BACKEND_VPCI=y -# CONFIG_XEN_PCIDEV_BACKEND_PASS is not set -# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set -# CONFIG_XEN_PCIDEV_BE_DEBUG is not set -CONFIG_XEN_TPMDEV_BACKEND=m -CONFIG_XEN_SCSI_BACKEND=m -CONFIG_XEN_USB_BACKEND=m -CONFIG_XEN_BLKDEV_FRONTEND=m -CONFIG_XEN_NETDEV_FRONTEND=m -CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND=m -CONFIG_XEN_SCSI_FRONTEND=m -CONFIG_XEN_USB_FRONTEND=m -# CONFIG_XEN_USB_FRONTEND_HCD_STATS is not set -# CONFIG_XEN_USB_FRONTEND_HCD_PM is not set -CONFIG_XEN_GRANT_DEV=m -CONFIG_XEN_FRAMEBUFFER=y -CONFIG_XEN_KEYBOARD=y -# CONFIG_XEN_DISABLE_SERIAL is not set -CONFIG_XEN_SYSFS=y -CONFIG_XEN_NR_GUEST_DEVICES=2048 -# CONFIG_XEN_COMPAT_030002_AND_LATER is not set -# CONFIG_XEN_COMPAT_030004_AND_LATER is not set -# CONFIG_XEN_COMPAT_030100_AND_LATER is not set -# CONFIG_XEN_COMPAT_030200_AND_LATER is not set -# CONFIG_XEN_COMPAT_030300_AND_LATER is not set -CONFIG_XEN_COMPAT_030400_AND_LATER=y -# CONFIG_XEN_COMPAT_040000_AND_LATER is not set -# CONFIG_XEN_COMPAT_040100_AND_LATER is not set -# CONFIG_XEN_COMPAT_LATEST_ONLY is not set -CONFIG_XEN_COMPAT=0x030400 -CONFIG_XEN_VCPU_INFO_PLACEMENT=y -CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y -CONFIG_ARCH_HAS_WALK_MEMORY=y -CONFIG_XEN_SMPBOOT=y -CONFIG_XEN_DEVMEM=y - -# -# Xen driver support -# -CONFIG_XEN_BALLOON=y -CONFIG_XEN_SCRUB_PAGES=y -CONFIG_XEN_DEV_EVTCHN=m -CONFIG_STAGING=y -# CONFIG_STAGING_EXCLUDE_BUILD is not set -CONFIG_ET131X=m -# CONFIG_ET131X_DEBUG is not set -CONFIG_SLICOSS=m -# CONFIG_VIDEO_GO7007 is not set -CONFIG_VIDEO_CX25821=m -CONFIG_VIDEO_CX25821_ALSA=m -CONFIG_VIDEO_TM6000=m -CONFIG_VIDEO_TM6000_ALSA=m -CONFIG_VIDEO_TM6000_DVB=m -CONFIG_USB_DABUSB=m 
-CONFIG_USB_SE401=m -CONFIG_VIDEO_USBVIDEO=m -CONFIG_USB_VICAM=m -CONFIG_USB_IP_COMMON=m -CONFIG_USB_IP_VHCI_HCD=m -CONFIG_USB_IP_HOST=m -# CONFIG_USB_IP_DEBUG_ENABLE is not set -CONFIG_W35UND=m -CONFIG_PRISM2_USB=m -CONFIG_ECHO=m -CONFIG_BRCM80211=m -CONFIG_BRCM80211_PCI=y -# CONFIG_BRCMFMAC is not set -CONFIG_RT2860=m -CONFIG_RT2870=m -# CONFIG_COMEDI is not set -CONFIG_ASUS_OLED=m -CONFIG_PANEL=m -CONFIG_PANEL_PARPORT=0 -CONFIG_PANEL_PROFILE=5 -# CONFIG_PANEL_CHANGE_MESSAGE is not set -CONFIG_R8187SE=m -CONFIG_RTL8192U=m -CONFIG_RTL8192E=m -CONFIG_R8712U=m -CONFIG_R8712_AP=y -CONFIG_TRANZPORT=m -CONFIG_POHMELFS=m -# CONFIG_POHMELFS_DEBUG is not set -CONFIG_POHMELFS_CRYPTO=y -CONFIG_AUTOFS_FS=m -CONFIG_IDE_PHISON=m -CONFIG_LINE6_USB=m -# CONFIG_LINE6_USB_DEBUG is not set -# CONFIG_LINE6_USB_DUMP_CTRL is not set -# CONFIG_LINE6_USB_DUMP_MIDI is not set -# CONFIG_LINE6_USB_DUMP_PCM is not set -# CONFIG_LINE6_USB_RAW is not set -# CONFIG_LINE6_USB_IMPULSE_RESPONSE is not set -CONFIG_DRM_NOUVEAU=m -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -# CONFIG_DRM_NOUVEAU_DEBUG is not set - -# -# I2C encoder or helper chips -# -CONFIG_DRM_I2C_CH7006=m -CONFIG_DRM_I2C_SIL164=m -CONFIG_USB_SERIAL_QUATECH2=m -CONFIG_USB_SERIAL_QUATECH_USB2=m -CONFIG_VT6655=m -CONFIG_VT6656=m -CONFIG_VME_BUS=m - -# -# VME Bridge Drivers -# -CONFIG_VME_CA91CX42=m -CONFIG_VME_TSI148=m - -# -# VME Device Drivers -# -CONFIG_VME_USER=m - -# -# VME Board Drivers -# -CONFIG_VMIVME_7805=m -CONFIG_DX_SEP=m -CONFIG_IIO=m -CONFIG_IIO_RING_BUFFER=y -CONFIG_IIO_SW_RING=m -CONFIG_IIO_TRIGGER=y - -# -# Accelerometers -# -CONFIG_ADIS16201=m -CONFIG_ADIS16203=m -CONFIG_ADIS16204=m -CONFIG_ADIS16209=m -CONFIG_ADIS16220=m -CONFIG_ADIS16240=m -CONFIG_KXSD9=m -CONFIG_LIS3L02DQ=m -CONFIG_SCA3000=m - -# -# Analog to digital convertors -# -CONFIG_MAX1363=m -CONFIG_MAX1363_RING_BUFFER=y -CONFIG_AD7150=m -CONFIG_AD7152=m -CONFIG_AD7291=m -CONFIG_AD7298=m -CONFIG_AD7314=m -CONFIG_AD799X=m -CONFIG_AD799X_RING_BUFFER=y -CONFIG_AD7476=m 
-CONFIG_AD7887=m -CONFIG_AD7745=m -CONFIG_AD7816=m -CONFIG_ADT75=m -CONFIG_ADT7310=m -CONFIG_ADT7410=m - -# -# Analog digital bi-direction convertors -# -CONFIG_ADT7316=m -CONFIG_ADT7316_SPI=m -CONFIG_ADT7316_I2C=m - -# -# Digital to analog convertors -# -CONFIG_AD5624R_SPI=m -CONFIG_AD5446=m - -# -# Direct Digital Synthesis -# -CONFIG_AD5930=m -CONFIG_AD9832=m -CONFIG_AD9834=m -CONFIG_AD9850=m -CONFIG_AD9852=m -CONFIG_AD9910=m -CONFIG_AD9951=m - -# -# Digital gyroscope sensors -# -CONFIG_ADIS16060=m -CONFIG_ADIS16080=m -CONFIG_ADIS16130=m -CONFIG_ADIS16260=m -CONFIG_ADIS16251=m - -# -# Inertial measurement units -# -CONFIG_ADIS16300=m -CONFIG_ADIS16350=m -CONFIG_ADIS16400=m - -# -# Light sensors -# -CONFIG_SENSORS_TSL2563=m -CONFIG_SENSORS_ISL29018=m - -# -# Magnetometer sensors -# -CONFIG_SENSORS_AK8975=m -CONFIG_SENSORS_HMC5843=m - -# -# Active energy metering IC -# -CONFIG_ADE7753=m -CONFIG_ADE7754=m -CONFIG_ADE7758=m -CONFIG_ADE7759=m -CONFIG_ADE7854=m -CONFIG_ADE7854_I2C=m -CONFIG_ADE7854_SPI=m - -# -# Resolver to digital converters -# -CONFIG_AD2S90=m -CONFIG_AD2S120X=m -CONFIG_AD2S1210=m -# CONFIG_AD2S1210_GPIO_INPUT is not set -# CONFIG_AD2S1210_GPIO_OUTPUT is not set -CONFIG_AD2S1210_GPIO_NONE=y - -# -# Triggers - standalone -# -CONFIG_IIO_PERIODIC_RTC_TRIGGER=m -CONFIG_IIO_GPIO_TRIGGER=m -CONFIG_ZRAM=m -CONFIG_WLAGS49_H2=m -CONFIG_WLAGS49_H25=m -CONFIG_SAMSUNG_LAPTOP=m -CONFIG_FB_SM7XX=m -CONFIG_VIDEO_DT3155=m -CONFIG_DT3155_CCIR=y -CONFIG_CRYSTALHD=m -CONFIG_CXT1E1=m -# CONFIG_SBE_PMCC4_NCOMM is not set - -# -# Texas Instruments shared transport line discipline -# -CONFIG_ST_BT=m -CONFIG_FB_XGI=m -CONFIG_LIRC_STAGING=y -CONFIG_LIRC_BT829=m -CONFIG_LIRC_IGORPLUGUSB=m -CONFIG_LIRC_IMON=m -CONFIG_LIRC_IT87=m -CONFIG_LIRC_ITE8709=m -CONFIG_LIRC_PARALLEL=m -CONFIG_LIRC_SASEM=m -CONFIG_LIRC_SERIAL=m -CONFIG_LIRC_SERIAL_TRANSMITTER=y -CONFIG_LIRC_SIR=m -CONFIG_LIRC_TTUSBIR=m -CONFIG_LIRC_ZILOG=m -# CONFIG_SMB_FS is not set -CONFIG_EASYCAP=m -CONFIG_SOLO6X10=m 
-CONFIG_ACPI_QUICKSTART=m -CONFIG_MACH_NO_WESTBRIDGE=y -CONFIG_SBE_2T3E3=m -CONFIG_ATH6K_LEGACY=m -CONFIG_AR600x_SD31_XXX=y -# CONFIG_AR600x_WB31_XXX is not set -# CONFIG_AR600x_SD32_XXX is not set -# CONFIG_AR600x_CUSTOM_XXX is not set -CONFIG_ATH6KL_ENABLE_COEXISTENCE=y -CONFIG_AR600x_DUAL_ANTENNA=y -# CONFIG_AR600x_SINGLE_ANTENNA is not set -# CONFIG_AR600x_BT_QCOM is not set -# CONFIG_AR600x_BT_CSR is not set -CONFIG_AR600x_BT_AR3001=y -CONFIG_ATH6KL_HCI_BRIDGE=y -# CONFIG_ATH6KL_CONFIG_GPIO_BT_RESET is not set -CONFIG_ATH6KL_CFG80211=y -# CONFIG_ATH6KL_HTC_RAW_INTERFACE is not set -# CONFIG_ATH6KL_VIRTUAL_SCATTER_GATHER is not set -# CONFIG_ATH6KL_SKIP_ABI_VERSION_CHECK is not set -# CONFIG_ATH6KL_DEBUG is not set -CONFIG_USB_ENESTORAGE=m -CONFIG_BCM_WIMAX=m -CONFIG_FT1000=m -CONFIG_FT1000_USB=m - -# -# Speakup console speech -# -# CONFIG_SPEAKUP is not set -CONFIG_TOUCHSCREEN_CLEARPAD_TM1217=m -CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=m -CONFIG_X86_PLATFORM_DEVICES=y -CONFIG_ACER_WMI=m -CONFIG_ACERHDF=m -CONFIG_ASUS_LAPTOP=m -CONFIG_DELL_LAPTOP=m -CONFIG_DELL_WMI=m -CONFIG_FUJITSU_LAPTOP=m -# CONFIG_FUJITSU_LAPTOP_DEBUG is not set -CONFIG_HP_WMI=m -CONFIG_MSI_LAPTOP=m -CONFIG_PANASONIC_LAPTOP=m -CONFIG_COMPAL_LAPTOP=m -CONFIG_SONY_LAPTOP=m -CONFIG_SONYPI_COMPAT=y -CONFIG_IDEAPAD_LAPTOP=m -CONFIG_THINKPAD_ACPI=m -CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y -# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set -# CONFIG_THINKPAD_ACPI_DEBUG is not set -# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set -CONFIG_THINKPAD_ACPI_VIDEO=y -CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y -CONFIG_SENSORS_HDAPS=m -CONFIG_INTEL_MENLOW=m -CONFIG_EEEPC_LAPTOP=m -CONFIG_EEEPC_WMI=m -CONFIG_ACPI_WMI=m -CONFIG_MSI_WMI=m -# CONFIG_ACPI_ASUS is not set -CONFIG_TOPSTAR_LAPTOP=m -CONFIG_ACPI_TOSHIBA=m -CONFIG_TOSHIBA_BT_RFKILL=m -CONFIG_ACPI_CMPC=m -CONFIG_INTEL_IPS=m -CONFIG_IBM_RTL=m - -# -# Firmware Drivers -# -CONFIG_EDD=m -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DELL_RBU=m 
-CONFIG_DCDBAS=m -CONFIG_DMIID=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=m - -# -# File systems -# -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -# CONFIG_EXT2_FS_XIP is not set -CONFIG_EXT3_FS=y -CONFIG_EXT3_DEFAULTS_TO_ORDERED=y -CONFIG_EXT3_DEFAULTS_TO_BARRIERS_ENABLED=y -CONFIG_EXT3_FS_XATTR=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=m -CONFIG_EXT4_FS_XATTR=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_DEBUG is not set -# CONFIG_EXT4_FS_RICHACL is not set -CONFIG_JBD=y -# CONFIG_JBD_DEBUG is not set -CONFIG_JBD2=m -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -CONFIG_REISERFS_FS=m -# CONFIG_REISERFS_CHECK is not set -# CONFIG_REISERFS_PROC_INFO is not set -CONFIG_REISERFS_DEFAULTS_TO_BARRIERS_ENABLED=y -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y -CONFIG_JFS_FS=m -CONFIG_JFS_POSIX_ACL=y -CONFIG_JFS_SECURITY=y -# CONFIG_JFS_DEBUG is not set -CONFIG_JFS_STATISTICS=y -CONFIG_XFS_FS=m -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -CONFIG_XFS_RT=y -# CONFIG_XFS_DEBUG is not set -CONFIG_GFS2_FS=m -# CONFIG_GFS2_FS_LOCKING_DLM is not set -CONFIG_OCFS2_FS=m -CONFIG_OCFS2_FS_O2CB=m -CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m -CONFIG_OCFS2_FS_STATS=y -# CONFIG_OCFS2_DEBUG_MASKLOG is not set -# CONFIG_OCFS2_DEBUG_FS is not set -CONFIG_BTRFS_FS=m -CONFIG_BTRFS_FS_POSIX_ACL=y -CONFIG_NILFS2_FS=m -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=m -CONFIG_FILE_LOCKING=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_PRINT_QUOTA_WARNING=y -# CONFIG_QUOTA_DEBUG is not set -CONFIG_QUOTA_TREE=m -CONFIG_QFMT_V1=m -CONFIG_QFMT_V2=m -CONFIG_QUOTACTL=y -CONFIG_QUOTACTL_COMPAT=y -CONFIG_AUTOFS4_FS=m -CONFIG_FUSE_FS=m -CONFIG_CUSE=m -CONFIG_GENERIC_ACL=y - -# -# Caches -# -CONFIG_FSCACHE=m -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_HISTOGRAM is not set 
-# CONFIG_FSCACHE_DEBUG is not set -CONFIG_FSCACHE_OBJECT_LIST=y -CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_HISTOGRAM is not set - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_UDF_NLS=y - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -# CONFIG_HUGETLB_PAGE is not set -CONFIG_CONFIGFS_FS=m -CONFIG_MISC_FILESYSTEMS=y -CONFIG_ADFS_FS=m -CONFIG_ADFS_FS_RW=y -CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m -CONFIG_BEFS_FS=m -# CONFIG_BEFS_DEBUG is not set -CONFIG_BFS_FS=m -CONFIG_EFS_FS=m -CONFIG_JFFS2_FS=m -CONFIG_JFFS2_FS_DEBUG=0 -CONFIG_JFFS2_FS_WRITEBUFFER=y -# CONFIG_JFFS2_FS_WBUF_VERIFY is not set -CONFIG_JFFS2_SUMMARY=y -CONFIG_JFFS2_FS_XATTR=y -CONFIG_JFFS2_FS_POSIX_ACL=y -CONFIG_JFFS2_FS_SECURITY=y -CONFIG_JFFS2_COMPRESSION_OPTIONS=y -CONFIG_JFFS2_ZLIB=y -# CONFIG_JFFS2_LZO is not set -CONFIG_JFFS2_RTIME=y -# CONFIG_JFFS2_RUBIN is not set -# CONFIG_JFFS2_CMODE_NONE is not set -CONFIG_JFFS2_CMODE_PRIORITY=y -# CONFIG_JFFS2_CMODE_SIZE is not set -# CONFIG_JFFS2_CMODE_FAVOURLZO is not set -CONFIG_UBIFS_FS=m -CONFIG_UBIFS_FS_XATTR=y -CONFIG_UBIFS_FS_ADVANCED_COMPR=y -CONFIG_UBIFS_FS_LZO=y -CONFIG_UBIFS_FS_ZLIB=y -# CONFIG_UBIFS_FS_DEBUG is not set -CONFIG_LOGFS=m -CONFIG_CRAMFS=m -CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -CONFIG_VXFS_FS=m -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m -CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_ROMFS_FS=m -# CONFIG_ROMFS_BACKED_BY_BLOCK is not set -# 
CONFIG_ROMFS_BACKED_BY_MTD is not set -CONFIG_ROMFS_BACKED_BY_BOTH=y -CONFIG_ROMFS_ON_BLOCK=y -CONFIG_ROMFS_ON_MTD=y -CONFIG_SYSV_FS=m -CONFIG_UFS_FS=m -CONFIG_UFS_FS_WRITE=y -# CONFIG_UFS_DEBUG is not set -CONFIG_EXOFS_FS=m -# CONFIG_EXOFS_DEBUG is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y -# CONFIG_NFS_V4_1 is not set -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -# CONFIG_NFS_USE_NEW_IDMAPPER is not set -CONFIG_NFSD=m -CONFIG_NFSD_DEPRECATED=y -CONFIG_NFSD_V2_ACL=y -CONFIG_NFSD_V3=y -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_XPRT_RDMA=m -CONFIG_SUNRPC_SWAP=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_CEPH_FS=m -CONFIG_CIFS=m -CONFIG_CIFS_STATS=y -CONFIG_CIFS_STATS2=y -CONFIG_CIFS_WEAK_PW_HASH=y -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -# CONFIG_CIFS_DEBUG2 is not set -CONFIG_CIFS_DFS_UPCALL=y -# CONFIG_CIFS_FSCACHE is not set -CONFIG_CIFS_ACL=y -CONFIG_CIFS_EXPERIMENTAL=y -CONFIG_NCP_FS=m -CONFIG_NCPFS_PACKET_SIGNING=y -CONFIG_NCPFS_IOCTL_LOCKING=y -CONFIG_NCPFS_STRONG=y -CONFIG_NCPFS_NFS_NS=y -CONFIG_NCPFS_OS2_NS=y -CONFIG_NCPFS_SMALLDOS=y -CONFIG_NCPFS_NLS=y -CONFIG_NCPFS_EXTRAS=y -CONFIG_CODA_FS=m -CONFIG_AFS_FS=m -# CONFIG_AFS_DEBUG is not set -CONFIG_AFS_FSCACHE=y -CONFIG_9P_FS=m -# CONFIG_9P_FSCACHE is not set -CONFIG_9P_FS_POSIX_ACL=y -CONFIG_NOVFS=m - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -CONFIG_OSF_PARTITION=y -# CONFIG_AMIGA_PARTITION is not set -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -# CONFIG_MINIX_SUBPARTITION is not set -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set -CONFIG_SGI_PARTITION=y 
-CONFIG_ULTRIX_PARTITION=y -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_SYSV68_PARTITION=y -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_UTF8=m -CONFIG_DLM=m -CONFIG_DLM_DEBUG=y - -# -# Kernel hacking -# -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_PRINTK_TIME=y -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=2048 -CONFIG_MAGIC_SYSRQ=y -CONFIG_STRIP_ASM_SYMS=y -CONFIG_UNUSED_SYMBOLS=y -CONFIG_DEBUG_FS=y -CONFIG_HEADERS_CHECK=y -CONFIG_DEBUG_KERNEL=y -# CONFIG_DEBUG_SHIRQ is not set -# CONFIG_LOCKUP_DETECTOR is not set -# CONFIG_HARDLOCKUP_DETECTOR is not set -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=0 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 -CONFIG_SCHED_DEBUG=y -CONFIG_SCHEDSTATS=y -CONFIG_TIMER_STATS=y -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_DEBUG_SLAB is not set -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set -# 
CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -CONFIG_BKL=y -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_SPARSE_RCU_POINTER is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_SPINLOCK_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -CONFIG_STACKTRACE=y -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_INFO is not set -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_VIRTUAL is not set -# CONFIG_DEBUG_WRITECOUNT is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_LIST is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set -CONFIG_ARCH_WANT_FRAME_POINTERS=y -# CONFIG_FRAME_POINTER is not set -CONFIG_UNWIND_INFO=y -CONFIG_STACK_UNWIND=y -# CONFIG_BOOT_PRINTK_DELAY is not set -# CONFIG_RCU_TORTURE_TEST is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -# CONFIG_KPROBES_SANITY_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y -CONFIG_LKDTM=m -# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set -# CONFIG_FAULT_INJECTION is not set -CONFIG_LATENCYTOP=y -CONFIG_SYSCTL_SYSCALL_CHECK=y -# CONFIG_DEBUG_PAGEALLOC is not set -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y -CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_EVENT_POWER_TRACING_DEPRECATED=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_RING_BUFFER_ALLOW_SWAP=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_FUNCTION_TRACER is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_SCHED_TRACER is not set 
-# CONFIG_FTRACE_SYSCALLS is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set -# CONFIG_STACK_TRACER is not set -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_KPROBE_EVENT=y -# CONFIG_FTRACE_STARTUP_TEST is not set -CONFIG_RING_BUFFER_BENCHMARK=m -CONFIG_PROVIDE_OHCI1394_DMA_INIT=y -CONFIG_FIREWIRE_OHCI_REMOTE_DMA=y -CONFIG_BUILD_DOCSRC=y -CONFIG_DYNAMIC_DEBUG=y -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_ATOMIC64_SELFTEST is not set -CONFIG_ASYNC_RAID6_TEST=m -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KMEMCHECK=y -# CONFIG_KMEMCHECK is not set -# CONFIG_STRICT_DEVMEM is not set -CONFIG_EARLY_PRINTK=y -CONFIG_EARLY_PRINTK_DBGP=y -# CONFIG_DEBUG_STACKOVERFLOW is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_PER_CPU_MAPS is not set -# CONFIG_X86_PTDUMP is not set -CONFIG_DEBUG_RODATA=y -# CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_SET_MODULE_RONX is not set -# CONFIG_DEBUG_NX_TEST is not set -# CONFIG_IOMMU_STRESS is not set -# CONFIG_X86_DECODER_SELFTEST is not set -CONFIG_IO_DELAY_TYPE_0X80=0 -CONFIG_IO_DELAY_TYPE_0XED=1 -CONFIG_IO_DELAY_TYPE_UDELAY=2 -CONFIG_IO_DELAY_TYPE_NONE=3 -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEFAULT_IO_DELAY_TYPE=0 -# CONFIG_CPA_DEBUG is not set -CONFIG_OPTIMIZE_INLINING=y -# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set - -# -# Security options -# -CONFIG_KEYS=y -CONFIG_TRUSTED_KEYS=m -CONFIG_ENCRYPTED_KEYS=m -# CONFIG_KEYS_DEBUG_PROC_KEYS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -# CONFIG_SECURITY is not set -CONFIG_SECURITYFS=y -CONFIG_DEFAULT_SECURITY_DAC=y -CONFIG_DEFAULT_SECURITY="" -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y -CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y -CONFIG_CRYPTO=y - -# -# Crypto 
core or helper -# -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=m -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=m -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=m -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_PCOMP=m -CONFIG_CRYPTO_PCOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -CONFIG_CRYPTO_GF128MUL=m -CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m - -# -# Authenticated Encryption with Associated Data -# -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_SEQIV=m - -# -# Block modes -# -CONFIG_CRYPTO_CBC=m -CONFIG_CRYPTO_CTR=m -CONFIG_CRYPTO_CTS=m -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m -CONFIG_CRYPTO_FPU=m - -# -# Hash modes -# -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=m -CONFIG_CRYPTO_CRC32C_INTEL=m -CONFIG_CRYPTO_GHASH=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA1=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m - -# -# Ciphers -# -CONFIG_CRYPTO_AES=m -CONFIG_CRYPTO_AES_X86_64=m -CONFIG_CRYPTO_AES_NI_INTEL=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SALSA20_X86_64=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -# CONFIG_CRYPTO_TWOFISH is not set -CONFIG_CRYPTO_TWOFISH_COMMON=m -CONFIG_CRYPTO_TWOFISH_X86_64=m - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_ZLIB=m 
-CONFIG_CRYPTO_LZO=m - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -# CONFIG_CRYPTO_USER_API_HASH is not set -# CONFIG_CRYPTO_USER_API_SKCIPHER is not set -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_DEV_PADLOCK=m -CONFIG_CRYPTO_DEV_PADLOCK_AES=m -CONFIG_CRYPTO_DEV_PADLOCK_SHA=m -CONFIG_CRYPTO_DEV_HIFN_795X=m -CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_BITREVERSE=y -CONFIG_GENERIC_FIND_FIRST_BIT=y -CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_FIND_LAST_BIT=y -CONFIG_CRC_CCITT=m -CONFIG_CRC16=m -CONFIG_CRC_T10DIF=y -CONFIG_CRC_ITU_T=m -CONFIG_CRC32=y -CONFIG_CRC7=m -CONFIG_LIBCRC32C=m -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=m -CONFIG_LZO_COMPRESS=m -CONFIG_LZO_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_REED_SOLOMON=m -CONFIG_REED_SOLOMON_DEC16=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_BTREE=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_DMA=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_NLATTR=y -CONFIG_LRU_CACHE=m -CONFIG_AVERAGE=y diff --git a/linux-2.6.38.3.tar.bz2.sha1sum b/linux-2.6.38.3.tar.bz2.sha1sum deleted file mode 100644 index 4d7c4d6..0000000 --- a/linux-2.6.38.3.tar.bz2.sha1sum +++ /dev/null @@ -1 +0,0 @@ -fa296255b40c9191589c0e35c2e11606a57751d3 linux-2.6.38.3.tar.bz2 diff --git a/linux-2.6.38.3.tar.bz2.sign b/linux-2.6.38.3.tar.bz2.sign deleted file mode 100644 index b204915..0000000 --- a/linux-2.6.38.3.tar.bz2.sign +++ /dev/null @@ -1,8 +0,0 @@ ------BEGIN PGP SIGNATURE----- -Version: GnuPG v1.4.11 (GNU/Linux) -Comment: See http://www.kernel.org/signature.html for info - 
-iD8DBQBNp1wdyGugalF9Dw4RAg5cAJ4hXA7sV09J6xM9UhJ8iPz19FJQRQCgkHwx -G/W+0jNvIVjd11rG2GPYddc= -=kOWG ------END PGP SIGNATURE----- diff --git a/patches.apparmor/apparmor-allow-dfa-backward-compatibility-with-broken-userspace b/patches.apparmor/apparmor-allow-dfa-backward-compatibility-with-broken-userspace deleted file mode 100644 index 4f33e83..0000000 --- a/patches.apparmor/apparmor-allow-dfa-backward-compatibility-with-broken-userspace +++ /dev/null @@ -1,64 +0,0 @@ -From: John Johansen -Date: Tue, 20 Jul 2010 06:57:08 -0700 -Subject: AppArmor: Allow dfa backward compatibility with broken userspace -Patch-mainline: 2.6.37? - -The apparmor_parser when compiling policy could generate invalid dfas -that did not have sufficient padding to avoid invalid references, when -used by the kernel. The kernels check to verify the next/check table -size was broken meaning invalid dfas were being created by userspace -and not caught. - -To remain compatible with old tools that are not fixed, pad the loaded -dfas next/check table. The dfa's themselves are valid except for the -high padding for potentially invalid transitions (high bounds error), -which have a maximimum is 256 entries. So just allocate an extra null filled -256 entries for the next/check tables. This will guarentee all bounds -are good and invalid transitions go to the null (0) state. 
- -Signed-off-by: John Johansen -Acked-by: Jeff Mahoney ---- - security/apparmor/match.c | 17 +++++++++++++++++ - 1 file changed, 17 insertions(+) - ---- a/security/apparmor/match.c -+++ b/security/apparmor/match.c -@@ -57,8 +57,17 @@ static struct table_header *unpack_table - if (bsize < tsize) - goto out; - -+ /* Pad table allocation for next/check by 256 entries to remain -+ * backwards compatible with old (buggy) tools and remain safe without -+ * run time checks -+ */ -+ if (th.td_id == YYTD_ID_NXT || th.td_id == YYTD_ID_CHK) -+ tsize += 256 * th.td_flags; -+ - table = kvmalloc(tsize); - if (table) { -+ /* ensure the pad is clear, else there will be errors */ -+ memset(table, 0, tsize); - *table = th; - if (th.td_flags == YYTD_DATA8) - UNPACK_ARRAY(table->td_data, blob, th.td_lolen, -@@ -134,11 +143,19 @@ static int verify_dfa(struct aa_dfa *dfa - goto out; - - if (flags & DFA_FLAG_VERIFY_STATES) { -+ int warning = 0; - for (i = 0; i < state_count; i++) { - if (DEFAULT_TABLE(dfa)[i] >= state_count) - goto out; - /* TODO: do check that DEF state recursion terminates */ - if (BASE_TABLE(dfa)[i] + 255 >= trans_count) { -+ if (warning) -+ continue; -+ printk(KERN_WARNING "AppArmor DFA next/check " -+ "upper bounds error fixed, upgrade " -+ "user space tools \n"); -+ warning = 1; -+ } else if (BASE_TABLE(dfa)[i] >= trans_count) { - printk(KERN_ERR "AppArmor DFA next/check upper " - "bounds error\n"); - goto out; diff --git a/patches.apparmor/apparmor-compatibility-patch-for-v5-interface b/patches.apparmor/apparmor-compatibility-patch-for-v5-interface deleted file mode 100644 index 94ddec3..0000000 --- a/patches.apparmor/apparmor-compatibility-patch-for-v5-interface +++ /dev/null @@ -1,379 +0,0 @@ -From: John Johansen -Date: Thu, 22 Jul 2010 02:32:02 -0700 -Subject: AppArmor: compatibility patch for v5 interface -Patch-mainline: 2.6.37? 
- -Signed-off-by: John Johansen -Acked-by: Jeff Mahoney ---- - security/apparmor/Kconfig | 9 + - security/apparmor/Makefile | 2 - security/apparmor/apparmorfs-24.c | 287 +++++++++++++++++++++++++++++++++ - security/apparmor/apparmorfs.c | 18 +- - security/apparmor/include/apparmorfs.h | 6 - 5 files changed, 320 insertions(+), 2 deletions(-) - create mode 100644 security/apparmor/apparmorfs-24.c - ---- a/security/apparmor/Kconfig -+++ b/security/apparmor/Kconfig -@@ -29,3 +29,12 @@ config SECURITY_APPARMOR_BOOTPARAM_VALUE - boot. - - If you are unsure how to answer this question, answer 1. -+ -+config SECURITY_APPARMOR_COMPAT_24 -+ bool "Enable AppArmor 2.4 compatability" -+ depends on SECURITY_APPARMOR -+ default y -+ help -+ This option enables compatability with AppArmor 2.4. It is -+ recommended if compatability with older versions of AppArmor -+ is desired. ---- a/security/apparmor/Makefile -+++ b/security/apparmor/Makefile -@@ -6,6 +6,8 @@ apparmor-y := apparmorfs.o audit.o capab - path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ - resource.o sid.o file.o net.o - -+apparmor-$(CONFIG_SECURITY_APPARMOR_COMPAT_24) += apparmorfs-24.o -+ - clean-files: capability_names.h af_names.h - - quiet_cmd_make-caps = GEN $@ ---- /dev/null -+++ b/security/apparmor/apparmorfs-24.c -@@ -0,0 +1,287 @@ -+/* -+ * AppArmor security module -+ * -+ * This file contains AppArmor /sys/kernel/secrutiy/apparmor interface functions -+ * -+ * Copyright (C) 1998-2008 Novell/SUSE -+ * Copyright 2009-2010 Canonical Ltd. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation, version 2 of the -+ * License. -+ * -+ * -+ * This file contain functions providing an interface for <= AppArmor 2.4 -+ * compatibility. It is dependent on CONFIG_SECURITY_APPARMOR_COMPAT_24 -+ * being set (see Makefile). 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "include/apparmor.h" -+#include "include/audit.h" -+#include "include/context.h" -+#include "include/policy.h" -+ -+ -+/* apparmor/matching */ -+static ssize_t aa_matching_read(struct file *file, char __user *buf, -+ size_t size, loff_t *ppos) -+{ -+ const char matching[] = "pattern=aadfa audit perms=crwxamlk/ " -+ "user::other"; -+ -+ return simple_read_from_buffer(buf, size, ppos, matching, -+ sizeof(matching) - 1); -+} -+ -+const struct file_operations aa_fs_matching_fops = { -+ .read = aa_matching_read, -+}; -+ -+/* apparmor/features */ -+static ssize_t aa_features_read(struct file *file, char __user *buf, -+ size_t size, loff_t *ppos) -+{ -+ const char features[] = "file=3.1 capability=2.0 network=1.0 " -+ "change_hat=1.5 change_profile=1.1 " "aanamespaces=1.1 rlimit=1.1"; -+ -+ return simple_read_from_buffer(buf, size, ppos, features, -+ sizeof(features) - 1); -+} -+ -+const struct file_operations aa_fs_features_fops = { -+ .read = aa_features_read, -+}; -+ -+/** -+ * __next_namespace - find the next namespace to list -+ * @root: root namespace to stop search at (NOT NULL) -+ * @ns: current ns position (NOT NULL) -+ * -+ * Find the next namespace from @ns under @root and handle all locking needed -+ * while switching current namespace. -+ * -+ * Returns: next namespace or NULL if at last namespace under @root -+ * NOTE: will not unlock root->lock -+ */ -+static struct aa_namespace *__next_namespace(struct aa_namespace *root, -+ struct aa_namespace *ns) -+{ -+ struct aa_namespace *parent; -+ -+ /* is next namespace a child */ -+ if (!list_empty(&ns->sub_ns)) { -+ struct aa_namespace *next; -+ next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list); -+ read_lock(&next->lock); -+ return next; -+ } -+ -+ /* check if the next ns is a sibling, parent, gp, .. 
*/ -+ parent = ns->parent; -+ while (parent) { -+ read_unlock(&ns->lock); -+ list_for_each_entry_continue(ns, &parent->sub_ns, base.list) { -+ read_lock(&ns->lock); -+ return ns; -+ } -+ if (parent == root) -+ return NULL; -+ ns = parent; -+ parent = parent->parent; -+ } -+ -+ return NULL; -+} -+ -+/** -+ * __first_profile - find the first profile in a namespace -+ * @root: namespace that is root of profiles being displayed (NOT NULL) -+ * @ns: namespace to start in (NOT NULL) -+ * -+ * Returns: unrefcounted profile or NULL if no profile -+ */ -+static struct aa_profile *__first_profile(struct aa_namespace *root, -+ struct aa_namespace *ns) -+{ -+ for ( ; ns; ns = __next_namespace(root, ns)) { -+ if (!list_empty(&ns->base.profiles)) -+ return list_first_entry(&ns->base.profiles, -+ struct aa_profile, base.list); -+ } -+ return NULL; -+} -+ -+/** -+ * __next_profile - step to the next profile in a profile tree -+ * @profile: current profile in tree (NOT NULL) -+ * -+ * Perform a depth first taversal on the profile tree in a namespace -+ * -+ * Returns: next profile or NULL if done -+ * Requires: profile->ns.lock to be held -+ */ -+static struct aa_profile *__next_profile(struct aa_profile *p) -+{ -+ struct aa_profile *parent; -+ struct aa_namespace *ns = p->ns; -+ -+ /* is next profile a child */ -+ if (!list_empty(&p->base.profiles)) -+ return list_first_entry(&p->base.profiles, typeof(*p), -+ base.list); -+ -+ /* is next profile a sibling, parent sibling, gp, subling, .. 
*/ -+ parent = p->parent; -+ while (parent) { -+ list_for_each_entry_continue(p, &parent->base.profiles, -+ base.list) -+ return p; -+ p = parent; -+ parent = parent->parent; -+ } -+ -+ /* is next another profile in the namespace */ -+ list_for_each_entry_continue(p, &ns->base.profiles, base.list) -+ return p; -+ -+ return NULL; -+} -+ -+/** -+ * next_profile - step to the next profile in where ever it may be -+ * @root: root namespace (NOT NULL) -+ * @profile: current profile (NOT NULL) -+ * -+ * Returns: next profile or NULL if there isn't one -+ */ -+static struct aa_profile *next_profile(struct aa_namespace *root, -+ struct aa_profile *profile) -+{ -+ struct aa_profile *next = __next_profile(profile); -+ if (next) -+ return next; -+ -+ /* finished all profiles in namespace move to next namespace */ -+ return __first_profile(root, __next_namespace(root, profile->ns)); -+} -+ -+/** -+ * p_start - start a depth first traversal of profile tree -+ * @f: seq_file to fill -+ * @pos: current position -+ * -+ * Returns: first profile under current namespace or NULL if none found -+ * -+ * acquires first ns->lock -+ */ -+static void *p_start(struct seq_file *f, loff_t *pos) -+ __acquires(root->lock) -+{ -+ struct aa_profile *profile = NULL; -+ struct aa_namespace *root = aa_current_profile()->ns; -+ loff_t l = *pos; -+ f->private = aa_get_namespace(root); -+ -+ -+ /* find the first profile */ -+ read_lock(&root->lock); -+ profile = __first_profile(root, root); -+ -+ /* skip to position */ -+ for (; profile && l > 0; l--) -+ profile = next_profile(root, profile); -+ -+ return profile; -+} -+ -+/** -+ * p_next - read the next profile entry -+ * @f: seq_file to fill -+ * @p: profile previously returned -+ * @pos: current position -+ * -+ * Returns: next profile after @p or NULL if none -+ * -+ * may acquire/release locks in namespace tree as necessary -+ */ -+static void *p_next(struct seq_file *f, void *p, loff_t *pos) -+{ -+ struct aa_profile *profile = p; -+ struct 
aa_namespace *root = f->private; -+ (*pos)++; -+ -+ return next_profile(root, profile); -+} -+ -+/** -+ * p_stop - stop depth first traversal -+ * @f: seq_file we are filling -+ * @p: the last profile writen -+ * -+ * Release all locking done by p_start/p_next on namespace tree -+ */ -+static void p_stop(struct seq_file *f, void *p) -+ __releases(root->lock) -+{ -+ struct aa_profile *profile = p; -+ struct aa_namespace *root = f->private, *ns; -+ -+ if (profile) { -+ for (ns = profile->ns; ns && ns != root; ns = ns->parent) -+ read_unlock(&ns->lock); -+ } -+ read_unlock(&root->lock); -+ aa_put_namespace(root); -+} -+ -+/** -+ * seq_show_profile - show a profile entry -+ * @f: seq_file to file -+ * @p: current position (profile) (NOT NULL) -+ * -+ * Returns: error on failure -+ */ -+static int seq_show_profile(struct seq_file *f, void *p) -+{ -+ struct aa_profile *profile = (struct aa_profile *)p; -+ struct aa_namespace *root = f->private; -+ -+ if (profile->ns != root) -+ seq_printf(f, ":%s://", aa_ns_name(root, profile->ns)); -+ seq_printf(f, "%s (%s)\n", profile->base.hname, -+ COMPLAIN_MODE(profile) ? 
"complain" : "enforce"); -+ -+ return 0; -+} -+ -+static const struct seq_operations aa_fs_profiles_op = { -+ .start = p_start, -+ .next = p_next, -+ .stop = p_stop, -+ .show = seq_show_profile, -+}; -+ -+static int profiles_open(struct inode *inode, struct file *file) -+{ -+ return seq_open(file, &aa_fs_profiles_op); -+} -+ -+static int profiles_release(struct inode *inode, struct file *file) -+{ -+ return seq_release(inode, file); -+} -+ -+const struct file_operations aa_fs_profiles_fops = { -+ .open = profiles_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = profiles_release, -+}; ---- a/security/apparmor/apparmorfs.c -+++ b/security/apparmor/apparmorfs.c -@@ -182,7 +182,11 @@ void __init aa_destroy_aafs(void) - aafs_remove(".remove"); - aafs_remove(".replace"); - aafs_remove(".load"); -- -+#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24 -+ aafs_remove("profiles"); -+ aafs_remove("matching"); -+ aafs_remove("features"); -+#endif - securityfs_remove(aa_fs_dentry); - aa_fs_dentry = NULL; - } -@@ -213,7 +217,17 @@ int __init aa_create_aafs(void) - aa_fs_dentry = NULL; - goto error; - } -- -+#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24 -+ error = aafs_create("matching", 0444, &aa_fs_matching_fops); -+ if (error) -+ goto error; -+ error = aafs_create("features", 0444, &aa_fs_features_fops); -+ if (error) -+ goto error; -+#endif -+ error = aafs_create("profiles", 0440, &aa_fs_profiles_fops); -+ if (error) -+ goto error; - error = aafs_create(".load", 0640, &aa_fs_profile_load); - if (error) - goto error; ---- a/security/apparmor/include/apparmorfs.h -+++ b/security/apparmor/include/apparmorfs.h -@@ -17,4 +17,10 @@ - - extern void __init aa_destroy_aafs(void); - -+#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24 -+extern const struct file_operations aa_fs_matching_fops; -+extern const struct file_operations aa_fs_features_fops; -+extern const struct file_operations aa_fs_profiles_fops; -+#endif -+ - #endif /* __AA_APPARMORFS_H */ diff --git 
a/patches.apparmor/apparmor-compatibility-patch-for-v5-network-control b/patches.apparmor/apparmor-compatibility-patch-for-v5-network-control deleted file mode 100644 index 354a8bc..0000000 --- a/patches.apparmor/apparmor-compatibility-patch-for-v5-network-control +++ /dev/null @@ -1,518 +0,0 @@ -From: John Johansen -Date: Mon, 4 Oct 2010 15:03:36 -0700 -Subject: AppArmor: compatibility patch for v5 network control -Patch-mainline: 2.6.37? - -Add compatibility for v5 network rules. - -Signed-off-by: John Johansen -Acked-by: Jeff Mahoney ---- - include/linux/lsm_audit.h | 4 - security/apparmor/Makefile | 6 + - security/apparmor/include/net.h | 40 ++++++++ - security/apparmor/include/policy.h | 3 - security/apparmor/lsm.c | 112 ++++++++++++++++++++++ - security/apparmor/net.c | 170 ++++++++++++++++++++++++++++++++++ - security/apparmor/policy.c | 1 - security/apparmor/policy_unpack.c | 48 +++++++++ - 8 files changed, 381 insertions(+), 2 deletions(-) - create mode 100644 security/apparmor/include/net.h - create mode 100644 security/apparmor/net.c - ---- a/include/linux/lsm_audit.h -+++ b/include/linux/lsm_audit.h -@@ -123,6 +123,10 @@ struct common_audit_data { - u32 denied; - uid_t ouid; - } fs; -+ struct { -+ int type, protocol; -+ struct sock *sk; -+ } net; - }; - } apparmor_audit_data; - #endif ---- a/security/apparmor/Makefile -+++ b/security/apparmor/Makefile -@@ -4,17 +4,21 @@ obj-$(CONFIG_SECURITY_APPARMOR) += appar - - apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \ - path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ -- resource.o sid.o file.o -+ resource.o sid.o file.o net.o - - clean-files: capability_names.h af_names.h - - quiet_cmd_make-caps = GEN $@ - cmd_make-caps = echo "static const char *capability_names[] = {" > $@ ; sed -n -e "/CAP_FS_MASK/d" -e "s/^\#define[ \\t]\\+CAP_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z >> $@ ; echo "};" >> $@ - -+quiet_cmd_make-af = GEN $@ 
-+cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ; sed -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "s/^\#define[ \\t]\\+AF_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z >> $@ ; echo "};" >> $@ -+ - quiet_cmd_make-rlim = GEN $@ - cmd_make-rlim = echo "static const char *rlim_names[] = {" > $@ ; sed -n --e "/AF_MAX/d" -e "s/^\# \\?define[ \\t]\\+RLIMIT_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z >> $@ ; echo "};" >> $@ ; echo "static const int rlim_map[] = {" >> $@ ; sed -n -e "/AF_MAX/d" -e "s/^\# \\?define[ \\t]\\+\\(RLIMIT_[A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/\\1,/p" $< >> $@ ; echo "};" >> $@ - - $(obj)/capability.o : $(obj)/capability_names.h -+$(obj)/net.o : $(obj)/af_names.h - $(obj)/resource.o : $(obj)/rlim_names.h - $(obj)/capability_names.h : $(srctree)/include/linux/capability.h - $(call cmd,make-caps) ---- /dev/null -+++ b/security/apparmor/include/net.h -@@ -0,0 +1,40 @@ -+/* -+ * AppArmor security module -+ * -+ * This file contains AppArmor network mediation definitions. -+ * -+ * Copyright (C) 1998-2008 Novell/SUSE -+ * Copyright 2009-2010 Canonical Ltd. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation, version 2 of the -+ * License. 
-+ */ -+ -+#ifndef __AA_NET_H -+#define __AA_NET_H -+ -+#include -+ -+/* struct aa_net - network confinement data -+ * @allowed: basic network families permissions -+ * @audit_network: which network permissions to force audit -+ * @quiet_network: which network permissions to quiet rejects -+ */ -+struct aa_net { -+ u16 allow[AF_MAX]; -+ u16 audit[AF_MAX]; -+ u16 quiet[AF_MAX]; -+}; -+ -+extern int aa_net_perm(int op, struct aa_profile *profile, u16 family, -+ int type, int protocol, struct sock *sk); -+extern int aa_revalidate_sk(int op, struct sock *sk); -+ -+static inline void aa_free_net_rules(struct aa_net *new) -+{ -+ /* NOP */ -+} -+ -+#endif /* __AA_NET_H */ ---- a/security/apparmor/include/policy.h -+++ b/security/apparmor/include/policy.h -@@ -27,6 +27,7 @@ - #include "capability.h" - #include "domain.h" - #include "file.h" -+#include "net.h" - #include "resource.h" - - extern const char *profile_mode_names[]; -@@ -145,6 +146,7 @@ struct aa_namespace { - * @size: the memory consumed by this profiles rules - * @file: The set of rules governing basic file access and domain transitions - * @caps: capabilities for the profile -+ * @net: network controls for the profile - * @rlimits: rlimits for the profile - * - * The AppArmor profile contains the basic confinement data. 
Each profile -@@ -181,6 +183,7 @@ struct aa_profile { - - struct aa_file_rules file; - struct aa_caps caps; -+ struct aa_net net; - struct aa_rlimit rlimits; - }; - ---- a/security/apparmor/lsm.c -+++ b/security/apparmor/lsm.c -@@ -31,6 +31,7 @@ - #include "include/context.h" - #include "include/file.h" - #include "include/ipc.h" -+#include "include/net.h" - #include "include/path.h" - #include "include/policy.h" - #include "include/procattr.h" -@@ -619,6 +620,104 @@ static int apparmor_task_setrlimit(struc - return error; - } - -+static int apparmor_socket_create(int family, int type, int protocol, int kern) -+{ -+ struct aa_profile *profile; -+ int error = 0; -+ -+ if (kern) -+ return 0; -+ -+ profile = __aa_current_profile(); -+ if (!unconfined(profile)) -+ error = aa_net_perm(OP_CREATE, profile, family, type, protocol, -+ NULL); -+ return error; -+} -+ -+static int apparmor_socket_bind(struct socket *sock, -+ struct sockaddr *address, int addrlen) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_BIND, sk); -+} -+ -+static int apparmor_socket_connect(struct socket *sock, -+ struct sockaddr *address, int addrlen) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_CONNECT, sk); -+} -+ -+static int apparmor_socket_listen(struct socket *sock, int backlog) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_LISTEN, sk); -+} -+ -+static int apparmor_socket_accept(struct socket *sock, struct socket *newsock) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_ACCEPT, sk); -+} -+ -+static int apparmor_socket_sendmsg(struct socket *sock, -+ struct msghdr *msg, int size) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_SENDMSG, sk); -+} -+ -+static int apparmor_socket_recvmsg(struct socket *sock, -+ struct msghdr *msg, int size, int flags) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_RECVMSG, sk); -+} -+ -+static int apparmor_socket_getsockname(struct socket 
*sock) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_GETSOCKNAME, sk); -+} -+ -+static int apparmor_socket_getpeername(struct socket *sock) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_GETPEERNAME, sk); -+} -+ -+static int apparmor_socket_getsockopt(struct socket *sock, int level, -+ int optname) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_GETSOCKOPT, sk); -+} -+ -+static int apparmor_socket_setsockopt(struct socket *sock, int level, -+ int optname) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_SETSOCKOPT, sk); -+} -+ -+static int apparmor_socket_shutdown(struct socket *sock, int how) -+{ -+ struct sock *sk = sock->sk; -+ -+ return aa_revalidate_sk(OP_SOCK_SHUTDOWN, sk); -+} -+ - static struct security_operations apparmor_ops = { - .name = "apparmor", - -@@ -650,6 +749,19 @@ static struct security_operations apparm - .getprocattr = apparmor_getprocattr, - .setprocattr = apparmor_setprocattr, - -+ .socket_create = apparmor_socket_create, -+ .socket_bind = apparmor_socket_bind, -+ .socket_connect = apparmor_socket_connect, -+ .socket_listen = apparmor_socket_listen, -+ .socket_accept = apparmor_socket_accept, -+ .socket_sendmsg = apparmor_socket_sendmsg, -+ .socket_recvmsg = apparmor_socket_recvmsg, -+ .socket_getsockname = apparmor_socket_getsockname, -+ .socket_getpeername = apparmor_socket_getpeername, -+ .socket_getsockopt = apparmor_socket_getsockopt, -+ .socket_setsockopt = apparmor_socket_setsockopt, -+ .socket_shutdown = apparmor_socket_shutdown, -+ - .cred_alloc_blank = apparmor_cred_alloc_blank, - .cred_free = apparmor_cred_free, - .cred_prepare = apparmor_cred_prepare, ---- /dev/null -+++ b/security/apparmor/net.c -@@ -0,0 +1,170 @@ -+/* -+ * AppArmor security module -+ * -+ * This file contains AppArmor network mediation -+ * -+ * Copyright (C) 1998-2008 Novell/SUSE -+ * Copyright 2009-2010 Canonical Ltd. 
-+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation, version 2 of the -+ * License. -+ */ -+ -+#include "include/apparmor.h" -+#include "include/audit.h" -+#include "include/context.h" -+#include "include/net.h" -+#include "include/policy.h" -+ -+#include "af_names.h" -+ -+static const char *sock_type_names[] = { -+ "unknown(0)", -+ "stream", -+ "dgram", -+ "raw", -+ "rdm", -+ "seqpacket", -+ "dccp", -+ "unknown(7)", -+ "unknown(8)", -+ "unknown(9)", -+ "packet", -+}; -+ -+/* audit callback for net specific fields */ -+static void audit_cb(struct audit_buffer *ab, void *va) -+{ -+ struct common_audit_data *sa = va; -+ -+ audit_log_format(ab, " family="); -+ if (address_family_names[sa->u.net.family]) { -+ audit_log_string(ab, address_family_names[sa->u.net.family]); -+ } else { -+ audit_log_format(ab, " \"unknown(%d)\"", sa->u.net.family); -+ } -+ -+ audit_log_format(ab, " sock_type="); -+ if (sock_type_names[sa->aad.net.type]) { -+ audit_log_string(ab, sock_type_names[sa->aad.net.type]); -+ } else { -+ audit_log_format(ab, "\"unknown(%d)\"", sa->aad.net.type); -+ } -+ -+ audit_log_format(ab, " protocol=%d", sa->aad.net.protocol); -+} -+ -+/** -+ * audit_net - audit network access -+ * @profile: profile being enforced (NOT NULL) -+ * @op: operation being checked -+ * @family: network family -+ * @type: network type -+ * @protocol: network protocol -+ * @sk: socket auditing is being applied to -+ * @error: error code for failure else 0 -+ * -+ * Returns: %0 or sa->error else other errorcode on failure -+ */ -+static int audit_net(struct aa_profile *profile, int op, u16 family, int type, -+ int protocol, struct sock *sk, int error) -+{ -+ int audit_type = AUDIT_APPARMOR_AUTO; -+ struct common_audit_data sa; -+ if (sk) { -+ COMMON_AUDIT_DATA_INIT(&sa, NET); -+ } else { -+ COMMON_AUDIT_DATA_INIT(&sa, NONE); -+ } -+ /* todo fill in 
socket addr info */ -+ -+ sa.aad.op = op, -+ sa.u.net.family = family; -+ sa.u.net.sk = sk; -+ sa.aad.net.type = type; -+ sa.aad.net.protocol = protocol; -+ sa.aad.error = error; -+ -+ if (likely(!sa.aad.error)) { -+ u16 audit_mask = profile->net.audit[sa.u.net.family]; -+ if (likely((AUDIT_MODE(profile) != AUDIT_ALL) && -+ !(1 << sa.aad.net.type & audit_mask))) -+ return 0; -+ audit_type = AUDIT_APPARMOR_AUDIT; -+ } else { -+ u16 quiet_mask = profile->net.quiet[sa.u.net.family]; -+ u16 kill_mask = 0; -+ u16 denied = (1 << sa.aad.net.type) & ~quiet_mask; -+ -+ if (denied & kill_mask) -+ audit_type = AUDIT_APPARMOR_KILL; -+ -+ if ((denied & quiet_mask) && -+ AUDIT_MODE(profile) != AUDIT_NOQUIET && -+ AUDIT_MODE(profile) != AUDIT_ALL) -+ return COMPLAIN_MODE(profile) ? 0 : sa.aad.error; -+ } -+ -+ return aa_audit(audit_type, profile, GFP_KERNEL, &sa, audit_cb); -+} -+ -+/** -+ * aa_net_perm - very course network access check -+ * @op: operation being checked -+ * @profile: profile being enforced (NOT NULL) -+ * @family: network family -+ * @type: network type -+ * @protocol: network protocol -+ * -+ * Returns: %0 else error if permission denied -+ */ -+int aa_net_perm(int op, struct aa_profile *profile, u16 family, int type, -+ int protocol, struct sock *sk) -+{ -+ u16 family_mask; -+ int error; -+ -+ if ((family < 0) || (family >= AF_MAX)) -+ return -EINVAL; -+ -+ if ((type < 0) || (type >= SOCK_MAX)) -+ return -EINVAL; -+ -+ /* unix domain and netlink sockets are handled by ipc */ -+ if (family == AF_UNIX || family == AF_NETLINK) -+ return 0; -+ -+ family_mask = profile->net.allow[family]; -+ -+ error = (family_mask & (1 << type)) ? 
0 : -EACCES; -+ -+ return audit_net(profile, op, family, type, protocol, sk, error); -+} -+ -+/** -+ * aa_revalidate_sk - Revalidate access to a sock -+ * @op: operation being checked -+ * @sk: sock being revalidated (NOT NULL) -+ * -+ * Returns: %0 else error if permission denied -+ */ -+int aa_revalidate_sk(int op, struct sock *sk) -+{ -+ struct aa_profile *profile; -+ int error = 0; -+ -+ /* aa_revalidate_sk should not be called from interrupt context -+ * don't mediate these calls as they are not task related -+ */ -+ if (in_interrupt()) -+ return 0; -+ -+ profile = __aa_current_profile(); -+ if (!unconfined(profile)) -+ error = aa_net_perm(op, profile, sk->sk_family, sk->sk_type, -+ sk->sk_protocol, sk); -+ -+ return error; -+} ---- a/security/apparmor/policy.c -+++ b/security/apparmor/policy.c -@@ -745,6 +745,7 @@ static void free_profile(struct aa_profi - - aa_free_file_rules(&profile->file); - aa_free_cap_rules(&profile->caps); -+ aa_free_net_rules(&profile->net); - aa_free_rlimit_rules(&profile->rlimits); - - aa_free_sid(profile->sid); ---- a/security/apparmor/policy_unpack.c -+++ b/security/apparmor/policy_unpack.c -@@ -190,6 +190,19 @@ fail: - return 0; - } - -+static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name) -+{ -+ if (unpack_nameX(e, AA_U16, name)) { -+ if (!inbounds(e, sizeof(u16))) -+ return 0; -+ if (data) -+ *data = le16_to_cpu(get_unaligned((u16 *) e->pos)); -+ e->pos += sizeof(u16); -+ return 1; -+ } -+ return 0; -+} -+ - static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name) - { - if (unpack_nameX(e, AA_U32, name)) { -@@ -468,7 +481,8 @@ static struct aa_profile *unpack_profile - { - struct aa_profile *profile = NULL; - const char *name = NULL; -- int error = -EPROTO; -+ size_t size = 0; -+ int i, error = -EPROTO; - kernel_cap_t tmpcap; - u32 tmp; - -@@ -559,6 +573,38 @@ static struct aa_profile *unpack_profile - if (!unpack_rlimits(e, profile)) - goto fail; - -+ size = unpack_array(e, "net_allowed_af"); -+ if 
(size) { -+ -+ for (i = 0; i < size; i++) { -+ /* discard extraneous rules that this kernel will -+ * never request -+ */ -+ if (i > AF_MAX) { -+ u16 tmp; -+ if (!unpack_u16(e, &tmp, NULL) || -+ !unpack_u16(e, &tmp, NULL) || -+ !unpack_u16(e, &tmp, NULL)) -+ goto fail; -+ continue; -+ } -+ if (!unpack_u16(e, &profile->net.allow[i], NULL)) -+ goto fail; -+ if (!unpack_u16(e, &profile->net.audit[i], NULL)) -+ goto fail; -+ if (!unpack_u16(e, &profile->net.quiet[i], NULL)) -+ goto fail; -+ } -+ if (!unpack_nameX(e, AA_ARRAYEND, NULL)) -+ goto fail; -+ /* -+ * allow unix domain and netlink sockets they are handled -+ * by IPC -+ */ -+ } -+ profile->net.allow[AF_UNIX] = 0xffff; -+ profile->net.allow[AF_NETLINK] = 0xffff; -+ - /* get file rules */ - profile->file.dfa = unpack_dfa(e); - if (IS_ERR(profile->file.dfa)) { diff --git a/patches.arch/0001-x86-ioapic-Fix-potential-resume-deadlock.patch b/patches.arch/0001-x86-ioapic-Fix-potential-resume-deadlock.patch deleted file mode 100644 index 5ed5133..0000000 --- a/patches.arch/0001-x86-ioapic-Fix-potential-resume-deadlock.patch +++ /dev/null @@ -1,50 +0,0 @@ -From b134eac05adf33188616bf53ea38dc6c7ee487e8 Mon Sep 17 00:00:00 2001 -From: Daniel J Blueman -Date: Wed, 18 May 2011 16:31:31 -0700 -Subject: [PATCH] x86, ioapic: Fix potential resume deadlock - -commit b64ce24daffb634b5b3133a2e411bd4de50654e8 upstream. - -Fix a potential deadlock when resuming; here the calling -function has disabled interrupts, so we cannot sleep. - -Change the memory allocation flag from GFP_KERNEL to GFP_ATOMIC. - -TODO: We can do away with this memory allocation during resume - by reusing the ioapic suspend/resume code that uses boot time - allocated buffers, but we want to keep this -stable patch - simple. 
- -Signed-off-by: Daniel J Blueman -Signed-off-by: Suresh Siddha -Link: http://lkml.kernel.org/r/20110518233157.385970138@sbsiddha-MOBL3.sc.intel.com -Signed-off-by: Ingo Molnar -Signed-off-by: Greg Kroah-Hartman ---- - arch/x86/kernel/apic/io_apic.c | 4 ++-- - 1 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c -index ca9e2a35..e437778 100644 ---- a/arch/x86/kernel/apic/io_apic.c -+++ b/arch/x86/kernel/apic/io_apic.c -@@ -615,14 +615,14 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void) - struct IO_APIC_route_entry **ioapic_entries; - - ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, -- GFP_KERNEL); -+ GFP_ATOMIC); - if (!ioapic_entries) - return 0; - - for (apic = 0; apic < nr_ioapics; apic++) { - ioapic_entries[apic] = - kzalloc(sizeof(struct IO_APIC_route_entry) * -- nr_ioapic_registers[apic], GFP_KERNEL); -+ nr_ioapic_registers[apic], GFP_ATOMIC); - if (!ioapic_entries[apic]) - goto nomem; - } --- -1.7.6.5 - diff --git a/patches.arch/acpi-export-hotplug_execute b/patches.arch/acpi-export-hotplug_execute deleted file mode 100644 index bc27ba3..0000000 --- a/patches.arch/acpi-export-hotplug_execute +++ /dev/null @@ -1,24 +0,0 @@ -From: Jeff Mahoney -Subject: acpi: export acpi_os_hotplug_execute -Patch-mainline: not yet - - The ACPI dock driver changes require acpi_os_hotplug_execute, - which wasn't exported. - - This patch exports it. 
- -Signed-off-by: Jeff Mahoney ---- - drivers/acpi/osl.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/acpi/osl.c -+++ b/drivers/acpi/osl.c -@@ -941,6 +941,7 @@ acpi_status acpi_os_hotplug_execute(acpi - { - return __acpi_os_execute(0, function, context, 1); - } -+EXPORT_SYMBOL(acpi_os_hotplug_execute); - - void acpi_os_wait_events_complete(void *context) - { diff --git a/patches.arch/acpi_ec_provide_non_interrupt_mode_boot_param.patch b/patches.arch/acpi_ec_provide_non_interrupt_mode_boot_param.patch deleted file mode 100644 index 4888f5b..0000000 --- a/patches.arch/acpi_ec_provide_non_interrupt_mode_boot_param.patch +++ /dev/null @@ -1,67 +0,0 @@ -From: Alexey Starikovskiy -Subject: ACPI: EC: Don't degrade to poll mode at storm automatically. -References: bnc#446142 -Patch-Mainline: no - -Signed-off-by: Thomas Renninger - -Not all users of semi-broken EC devices want to degrade to poll mode, so -give them right to choose. - -Signed-off-by: Alexey Starikovskiy ---- - - Documentation/kernel-parameters.txt | 5 +++++ - drivers/acpi/ec.c | 15 +++++++++++++++ - 2 files changed, 20 insertions(+) - - ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -691,6 +691,11 @@ and is between 256 and 4096 characters. 
- - eata= [HW,SCSI] - -+ ec_intr= [HW,ACPI] ACPI Embedded Controller interrupt mode -+ Format: -+ 0: polling mode -+ non-0: interrupt mode (default) -+ - edd= [EDD] - Format: {"off" | "on" | "skip[mbr]"} - ---- a/drivers/acpi/ec.c -+++ b/drivers/acpi/ec.c -@@ -118,6 +118,8 @@ static struct acpi_ec { - spinlock_t curr_lock; - } *boot_ec, *first_ec; - -+int acpi_ec_intr = 1; /* Default is interrupt mode */ -+ - static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ - - /* -------------------------------------------------------------------------- -@@ -754,6 +756,8 @@ static int ec_install_handlers(struct ac - &acpi_ec_gpe_handler, ec); - if (ACPI_FAILURE(status)) - return -ENODEV; -+ if (!acpi_ec_intr) -+ set_bit(EC_FLAGS_NO_GPE, &ec->flags); - acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); - acpi_enable_gpe(NULL, ec->gpe); - status = acpi_install_address_space_handler(ec->handle, -@@ -1034,3 +1038,14 @@ static void __exit acpi_ec_exit(void) - return; - } - #endif /* 0 */ -+ -+static int __init acpi_ec_set_intr_mode(char *str) -+{ -+ if (!get_option(&str, &acpi_ec_intr)) { -+ acpi_ec_intr = 0; -+ return 0; -+ } -+ return 1; -+} -+ -+__setup("ec_intr=", acpi_ec_set_intr_mode); diff --git a/patches.arch/acpi_fix_fadt_32_bit_zero_length.patch b/patches.arch/acpi_fix_fadt_32_bit_zero_length.patch deleted file mode 100644 index c087474..0000000 --- a/patches.arch/acpi_fix_fadt_32_bit_zero_length.patch +++ /dev/null @@ -1,30 +0,0 @@ -From: Thomas Renninger -Subject: Only use 32 bit addresses if they have a valid length -References: bug#581644 -Patch-Mainline: not yet - -Also not sure whether it will help, but it's a fix. - -Please remove this patch again after a while also if it's not -mainline. 
- ---- - drivers/acpi/acpica/tbfadt.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/acpi/acpica/tbfadt.c -+++ b/drivers/acpi/acpica/tbfadt.c -@@ -550,11 +550,12 @@ static void acpi_tb_validate_fadt(void) - (!address64->address && length)) { - ACPI_WARNING((AE_INFO, - "Optional field %s has zero address or length: " -- "0x%8.8X%8.8X/0x%X", -+ "0x%8.8X%8.8X/0x%X - not using it", - name, - ACPI_FORMAT_UINT64(address64-> - address), - length)); -+ address64->address = 0; - } - } - } diff --git a/patches.arch/acpi_srat-pxm-rev-ia64.patch b/patches.arch/acpi_srat-pxm-rev-ia64.patch deleted file mode 100644 index deaebfd..0000000 --- a/patches.arch/acpi_srat-pxm-rev-ia64.patch +++ /dev/null @@ -1,59 +0,0 @@ -From: Kurt Garloff -Subject: Use SRAT table rev to use 8bit or 16/32bit PXM fields (ia64) -References: bnc#503038 -Patch-mainline: not yet - -In SRAT v1, we had 8bit proximity domain (PXM) fields; SRAT v2 provides -32bits for these. The new fields were reserved before. -According to the ACPI spec, the OS must disregrard reserved fields. - -ia64 did handle the PXM fields almost consistently, but depending on -sgi's sn2 platform. This patch leaves the sn2 logic in, but does also -use 16/32 bits for PXM if the SRAT has rev 2 or higher. - -The patch also adds __init to the two pxm accessor functions, as they -access __initdata now and are called from an __init function only anyway. - -Note that the code only uses 16 bits for the PXM field in the processor -proximity field; the patch does not address this as 16 bits are more than -enough. - -This is patch 3/3. 
- -Signed-off-by: Kurt Garloff - ---- - arch/ia64/kernel/acpi.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - ---- a/arch/ia64/kernel/acpi.c -+++ b/arch/ia64/kernel/acpi.c -@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FL - static struct acpi_table_slit __initdata *slit_table; - cpumask_t early_cpu_possible_map = CPU_MASK_NONE; - --static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa) -+static int __init -+get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa) - { - int pxm; - - pxm = pa->proximity_domain_lo; -- if (ia64_platform_is("sn2")) -+ if (ia64_platform_is("sn2") || acpi_srat_revision >= 2) - pxm += pa->proximity_domain_hi[0] << 8; - return pxm; - } - --static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma) -+static int __init -+get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma) - { - int pxm; - - pxm = ma->proximity_domain; -- if (!ia64_platform_is("sn2")) -+ if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1) - pxm &= 0xff; - - return pxm; diff --git a/patches.arch/acpi_srat-pxm-rev-store.patch b/patches.arch/acpi_srat-pxm-rev-store.patch deleted file mode 100644 index 69f1d70..0000000 --- a/patches.arch/acpi_srat-pxm-rev-store.patch +++ /dev/null @@ -1,52 +0,0 @@ -From: Kurt Garloff -Subject: Store SRAT table revision -References: bnc#503038 -Patch-mainline: not yet - -In SRAT v1, we had 8bit proximity domain (PXM) fields; SRAT v2 provides -32bits for these. The new fields were reserved before. -According to the ACPI spec, the OS must disregrard reserved fields. -In order to know whether or not, we must know what version the SRAT -table has. - -This patch stores the SRAT table revision for later consumption -by arch specific __init functions. - -This is patch 1/3. 
- -Signed-off-by: Kurt Garloff - ---- - drivers/acpi/numa.c | 3 +++ - include/acpi/acpi_numa.h | 1 + - 2 files changed, 4 insertions(+) - ---- a/drivers/acpi/numa.c -+++ b/drivers/acpi/numa.c -@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAI - static int node_to_pxm_map[MAX_NUMNODES] - = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; - -+unsigned char acpi_srat_revision __initdata; -+ - int pxm_to_node(int pxm) - { - if (pxm < 0) -@@ -259,6 +261,7 @@ static int __init acpi_parse_srat(struct - return -EINVAL; - - /* Real work done in acpi_table_parse_srat below. */ -+ acpi_srat_revision = table->revision; - - return 0; - } ---- a/include/acpi/acpi_numa.h -+++ b/include/acpi/acpi_numa.h -@@ -15,6 +15,7 @@ extern int pxm_to_node(int); - extern int node_to_pxm(int); - extern void __acpi_map_pxm_to_node(int, int); - extern int acpi_map_pxm_to_node(int); -+extern unsigned char acpi_srat_revision; - - #endif /* CONFIG_ACPI_NUMA */ - #endif /* __ACP_NUMA_H */ diff --git a/patches.arch/acpi_srat-pxm-rev-x86-64.patch b/patches.arch/acpi_srat-pxm-rev-x86-64.patch deleted file mode 100644 index a7a120b..0000000 --- a/patches.arch/acpi_srat-pxm-rev-x86-64.patch +++ /dev/null @@ -1,42 +0,0 @@ -From: Kurt Garloff -Subject: Use SRAT table rev to use 8bit or 32bit PXM fields (x86-64) -References: bnc#503038 -Patch-mainline: not yet - -In SRAT v1, we had 8bit proximity domain (PXM) fields; SRAT v2 provides -32bits for these. The new fields were reserved before. -According to the ACPI spec, the OS must disregrard reserved fields. - -x86-64 was rather inconsistent prior to this patch; it used 8 bits -for the pxm field in cpu_affinity, but 32 bits in mem_affinity. -This patch makes it consistent: Either use 8 bits consistently (SRAT -rev 1 or lower) or 32 bits (SRAT rev 2 or higher). - -This is patch 2/3. 
- -Signed-off-by: Kurt Garloff - ---- - arch/x86/mm/srat_64.c | 4 ++++ - 1 file changed, 4 insertions(+) - ---- a/arch/x86/mm/srat_64.c -+++ b/arch/x86/mm/srat_64.c -@@ -156,6 +156,8 @@ acpi_numa_processor_affinity_init(struct - if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) - return; - pxm = pa->proximity_domain_lo; -+ if (acpi_srat_revision >= 2) -+ pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8; - node = setup_node(pxm); - if (node < 0) { - printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm); -@@ -259,6 +261,8 @@ acpi_numa_memory_affinity_init(struct ac - start = ma->base_address; - end = start + ma->length; - pxm = ma->proximity_domain; -+ if (acpi_srat_revision <= 1) -+ pxm &= 0xff; - node = setup_node(pxm); - if (node < 0) { - printk(KERN_ERR "SRAT: Too many proximity domains.\n"); diff --git a/patches.arch/acpi_thermal_passive_blacklist.patch b/patches.arch/acpi_thermal_passive_blacklist.patch deleted file mode 100644 index 6084e87..0000000 --- a/patches.arch/acpi_thermal_passive_blacklist.patch +++ /dev/null @@ -1,125 +0,0 @@ -From: Thomas Renninger -Subject: Avoid critical temp shutdowns on specific ThinkPad T4x(p) and R40 -References: https://bugzilla.novell.com/show_bug.cgi?id=333043 -Patch-mainline: not yet - ---- - drivers/acpi/thermal.c | 93 +++++++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 93 insertions(+) - ---- a/drivers/acpi/thermal.c -+++ b/drivers/acpi/thermal.c -@@ -41,6 +41,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -984,6 +985,86 @@ static void acpi_thermal_guess_offset(st - tz->kelvin_offset = 2732; - } - -+static struct dmi_system_id thermal_psv_dmi_table[] = { -+ { -+ .ident = "IBM ThinkPad T41", -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), -+ DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T41"), -+ }, -+ }, -+ { -+ .ident = "IBM ThinkPad T42", -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), -+ DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T42"), -+ }, -+ }, -+ { 
-+ .ident = "IBM ThinkPad T43", -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), -+ DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T43"), -+ }, -+ }, -+ { -+ .ident = "IBM ThinkPad T41p", -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), -+ DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T41p"), -+ }, -+ }, -+ { -+ .ident = "IBM ThinkPad T42p", -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), -+ DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T42p"), -+ }, -+ }, -+ { -+ .ident = "IBM ThinkPad T43p", -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), -+ DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad T43p"), -+ }, -+ }, -+ { -+ .ident = "IBM ThinkPad R40", -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), -+ DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad R40"), -+ }, -+ }, -+ { -+ .ident = "IBM ThinkPad R50p", -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), -+ DMI_MATCH(DMI_PRODUCT_VERSION,"ThinkPad R50p"), -+ }, -+ }, -+ {}, -+}; -+ -+static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds) -+{ -+ if (!tz) -+ return -EINVAL; -+ -+ /* Convert value to deci-seconds */ -+ tz->polling_frequency = seconds * 10; -+ -+ tz->thermal_zone->polling_delay = seconds * 1000; -+ -+ if (tz->tz_enabled) -+ thermal_zone_device_update(tz->thermal_zone); -+ -+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, -+ "Polling frequency set to %lu seconds\n", -+ tz->polling_frequency/10)); -+ -+ return 0; -+} -+ - static int acpi_thermal_add(struct acpi_device *device) - { - int result = 0; -@@ -1015,6 +1096,18 @@ static int acpi_thermal_add(struct acpi_ - if (result) - goto free_memory; - -+ if (dmi_check_system(thermal_psv_dmi_table)) { -+ if (tz->trips.passive.flags.valid && -+ tz->trips.passive.temperature > CELSIUS_TO_KELVIN(85)) { -+ printk (KERN_INFO "Adjust passive trip point from %lu" -+ " to %lu\n", -+ KELVIN_TO_CELSIUS(tz->trips.passive.temperature), -+ KELVIN_TO_CELSIUS(tz->trips.passive.temperature - 150)); -+ tz->trips.passive.temperature -= 150; -+ acpi_thermal_set_polling(tz, 5); -+ } -+ } -+ 
- printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n", - acpi_device_name(device), acpi_device_bid(device), - KELVIN_TO_CELSIUS(tz->temperature)); diff --git a/patches.arch/acpi_thinkpad_introduce_acpi_root_table_boot_param.patch b/patches.arch/acpi_thinkpad_introduce_acpi_root_table_boot_param.patch deleted file mode 100644 index 9d2086b..0000000 --- a/patches.arch/acpi_thinkpad_introduce_acpi_root_table_boot_param.patch +++ /dev/null @@ -1,118 +0,0 @@ -From: Thomas Renninger -Subject: Introduce acpi_root_table=rsdt boot param and dmi list to force rsdt -Patch-mainline: not yet -References: http://bugzilla.kernel.org/show_bug.cgi?id=8246 - -This one is part of a patch series: -acpi_thinkpad_introduce_acpi_root_table_boot_param.patch -acpi_thinkpad_introduce_acpica_rsdt_global_variable.patch -acpi_thinkpad_remove_R40e_c-state_blacklist.patch - -Blacklist R40e, R51e and T40, T40p, T41, T41p, T42, T42p, R50 and R50p -ThinkPads to use the RSDT instead of the XSDT. - -Update: Jan 12 2009 jeffm -* 2.6.29-rc1 introduced acpi_rsdt_forced. I've updated the patch to issue - a warning that acpi=rsdt is the prefered method of forcing. -* Moved the dmi table stuff to the main dmi table in x86/kernel/acpi/boot. - -Update: Apr 10 2009 jeffm -* Removed documentation, since it's deprecated. 
- -Signed-off-by: Thomas Renninger -Tested-by: Mark Doughty -CC: Yakui Zhao - ---- - arch/x86/kernel/acpi/boot.c | 53 ++++++++++++++++++++++++++++++++++++++++++++ - drivers/acpi/tables.c | 3 ++ - 2 files changed, 56 insertions(+) - ---- a/arch/x86/kernel/acpi/boot.c -+++ b/arch/x86/kernel/acpi/boot.c -@@ -1350,6 +1350,21 @@ static int __init dmi_ignore_irq0_timer_ - return 0; - } - -+static int __init force_acpi_rsdt(const struct dmi_system_id *d) -+{ -+ if (!acpi_force) { -+ printk(KERN_NOTICE "%s detected: force use of acpi=rsdt\n", -+ d->ident); -+ acpi_rsdt_forced = 1; -+ } else { -+ printk(KERN_NOTICE -+ "Warning: acpi=force overrules DMI blacklist: " -+ "acpi=rsdt\n"); -+ } -+ return 0; -+ -+} -+ - /* - * If your system is blacklisted here, but you find that acpi=force - * works for you, please contact linux-acpi@vger.kernel.org -@@ -1425,6 +1440,32 @@ static struct dmi_system_id __initdata a - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), - }, - }, -+ -+ /* -+ * Boxes that need RSDT as ACPI root table -+ */ -+ { -+ .callback = force_acpi_rsdt, -+ .ident = "ThinkPad ", /* R40e, broken C-states */ -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR, "IBM"), -+ DMI_MATCH(DMI_BIOS_VERSION, "1SET")}, -+ }, -+ { -+ .callback = force_acpi_rsdt, -+ .ident = "ThinkPad ", /* R50e, slow booting */ -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR, "IBM"), -+ DMI_MATCH(DMI_BIOS_VERSION, "1WET")}, -+ }, -+ { -+ .callback = force_acpi_rsdt, -+ .ident = "ThinkPad ", /* T40, T40p, T41, T41p, T42, T42p -+ R50, R50p */ -+ .matches = { -+ DMI_MATCH(DMI_BIOS_VENDOR, "IBM"), -+ DMI_MATCH(DMI_BIOS_VERSION, "1RET")}, -+ }, - {} - }; - -@@ -1612,6 +1653,18 @@ static int __init parse_acpi(char *arg) - } - early_param("acpi", parse_acpi); - -+/* Alias for acpi=rsdt for compatibility with openSUSE 11.1 and SLE11 */ -+static int __init parse_acpi_root_table(char *opt) -+{ -+ if (!strcmp(opt, "rsdt")) { -+ acpi_rsdt_forced = 1; -+ printk(KERN_WARNING "acpi_root_table=rsdt is deprecated. 
" -+ "Please use acpi=rsdt instead.\n"); -+ } -+ return 0; -+} -+early_param("acpi_root_table", parse_acpi_root_table); -+ - /* FIXME: Using pci= for an ACPI parameter is a travesty. */ - static int __init parse_pci(char *arg) - { ---- a/drivers/acpi/tables.c -+++ b/drivers/acpi/tables.c -@@ -339,6 +339,9 @@ int __init acpi_table_init(void) - { - acpi_status status; - -+ if (acpi_rsdt_forced) -+ printk(KERN_INFO "Using RSDT as ACPI root table\n"); -+ - status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); - if (ACPI_FAILURE(status)) - return 1; diff --git a/patches.arch/i386-unwind-annotations b/patches.arch/i386-unwind-annotations deleted file mode 100644 index c6c168c..0000000 --- a/patches.arch/i386-unwind-annotations +++ /dev/null @@ -1,15 +0,0 @@ -From: jbeulich@novell.com -Subject: fix unwind annotations -Patch-mainline: queued for 2.6.39 - ---- head-2011-02-17.orig/arch/x86/lib/semaphore_32.S 2011-03-01 15:03:45.000000000 +0100 -+++ head-2011-02-17/arch/x86/lib/semaphore_32.S 2011-03-01 15:04:50.000000000 +0100 -@@ -36,7 +36,7 @@ - */ - #ifdef CONFIG_SMP - ENTRY(__write_lock_failed) -- CFI_STARTPROC simple -+ CFI_STARTPROC - FRAME - 2: LOCK_PREFIX - addl $ RW_LOCK_BIAS,(%eax) diff --git a/patches.arch/ia64-page-migration b/patches.arch/ia64-page-migration deleted file mode 100644 index 69386fc..0000000 --- a/patches.arch/ia64-page-migration +++ /dev/null @@ -1,603 +0,0 @@ -From: Russ Anderson -Subject: ia64: Call migration code on correctable errors v8 -References: 415829 -Acked-by: schwab@suse.de -Patch-mainline: not yet - -Migrate data off pages with correctable memory errors. This patch is the -ia64 specific piece. It connects the CPE handler to the page migration -code. It is implemented as a kernel loadable module, similar to the mca -recovery code (mca_recovery.ko). This allows the feature to be turned off -by uninstalling the module. 
- -Update Jan 19 2009 jeffm: -- isolate_lru_page doesn't put the page on a list anymore - - -Signed-off-by: Russ Anderson - ---- - arch/ia64/Kconfig | 9 - arch/ia64/include/asm/mca.h | 6 - arch/ia64/include/asm/page.h | 1 - arch/ia64/kernel/Makefile | 1 - arch/ia64/kernel/cpe_migrate.c | 434 +++++++++++++++++++++++++++++++++++++++++ - arch/ia64/kernel/mca.c | 37 +++ - 6 files changed, 487 insertions(+), 1 deletion(-) - ---- a/arch/ia64/Kconfig -+++ b/arch/ia64/Kconfig -@@ -511,6 +511,15 @@ config ARCH_PROC_KCORE_TEXT - config IA64_MCA_RECOVERY - tristate "MCA recovery from errors other than TLB." - -+config IA64_CPE_MIGRATE -+ tristate "Migrate data off pages with correctable errors" -+ default m -+ help -+ Migrate data off pages with correctable memory errors. Selecting -+ Y will build this functionality into the kernel. Selecting M will -+ build this functionality as a kernel loadable module. Installing -+ the module will turn on the functionality. -+ - config PERFMON - bool "Performance monitor support" - help ---- a/arch/ia64/include/asm/mca.h -+++ b/arch/ia64/include/asm/mca.h -@@ -142,6 +142,7 @@ extern unsigned long __per_cpu_mca[NR_CP - - extern int cpe_vector; - extern int ia64_cpe_irq; -+extern int cpe_poll_enabled; - extern void ia64_mca_init(void); - extern void ia64_mca_cpu_init(void *); - extern void ia64_os_mca_dispatch(void); -@@ -156,11 +157,16 @@ extern void ia64_slave_init_handler(void - extern void ia64_mca_cmc_vector_setup(void); - extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)); - extern void ia64_unreg_MCA_extension(void); -+extern int ia64_reg_CE_extension(int (*fn)(void *)); -+extern void ia64_unreg_CE_extension(void); - extern unsigned long ia64_get_rnat(unsigned long *); - extern void ia64_set_psr_mc(void); - extern void ia64_mca_printk(const char * fmt, ...) 
- __attribute__ ((format (printf, 1, 2))); - -+extern struct list_head badpagelist; -+extern unsigned int total_badpages; -+ - struct ia64_mca_notify_die { - struct ia64_sal_os_state *sos; - int *monarch_cpu; ---- a/arch/ia64/include/asm/page.h -+++ b/arch/ia64/include/asm/page.h -@@ -121,6 +121,7 @@ extern unsigned long max_low_pfn; - #endif - - #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) -+#define phys_to_page(kaddr) (pfn_to_page(kaddr >> PAGE_SHIFT)) - #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) - #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) - ---- a/arch/ia64/kernel/Makefile -+++ b/arch/ia64/kernel/Makefile -@@ -25,6 +25,7 @@ obj-$(CONFIG_PERFMON) += perfmon_defaul - obj-$(CONFIG_IA64_CYCLONE) += cyclone.o - obj-$(CONFIG_CPU_FREQ) += cpufreq/ - obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o -+obj-$(CONFIG_IA64_CPE_MIGRATE) += cpe_migrate.o - obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o - obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o - obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o ---- /dev/null -+++ b/arch/ia64/kernel/cpe_migrate.c -@@ -0,0 +1,434 @@ -+/* -+ * File: cpe_migrate.c -+ * Purpose: Migrate data from physical pages with excessive correctable -+ * errors to new physical pages. Keep the old pages on a discard -+ * list. -+ * -+ * Copyright (C) 2008 SGI - Silicon Graphics Inc. 
-+ * Copyright (C) 2008 Russ Anderson -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#define BADRAM_BASENAME "badram" -+#define CE_HISTORY_LENGTH 30 -+ -+struct cpe_info { -+ u64 paddr; -+ u16 node; -+}; -+static struct cpe_info cpe[CE_HISTORY_LENGTH]; -+ -+static int cpe_polling_enabled = 1; -+static int cpe_head; -+static int cpe_tail; -+static int work_scheduled; -+static int mstat_cannot_isolate; -+static int mstat_failed_to_discard; -+static int mstat_already_marked; -+static int mstat_already_on_list; -+ -+DEFINE_SPINLOCK(cpe_migrate_lock); -+ -+static void -+get_physical_address(void *buffer, u64 *paddr, u16 *node) -+{ -+ sal_log_record_header_t *rh; -+ sal_log_mem_dev_err_info_t *mdei; -+ ia64_err_rec_t *err_rec; -+ sal_log_platform_err_info_t *plat_err; -+ efi_guid_t guid; -+ -+ err_rec = buffer; -+ rh = &err_rec->sal_elog_header; -+ *paddr = 0; -+ *node = 0; -+ -+ /* -+ * Make sure it is a corrected error. 
-+ */ -+ if (rh->severity != sal_log_severity_corrected) -+ return; -+ -+ plat_err = (sal_log_platform_err_info_t *)&err_rec->proc_err; -+ -+ guid = plat_err->mem_dev_err.header.guid; -+ if (efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0) { -+ /* -+ * Memory cpe -+ */ -+ mdei = &plat_err->mem_dev_err; -+ if (mdei->valid.oem_data) { -+ if (mdei->valid.physical_addr) -+ *paddr = mdei->physical_addr; -+ -+ if (mdei->valid.node) { -+ if (ia64_platform_is("sn2")) -+ *node = nasid_to_cnodeid(mdei->node); -+ else -+ *node = mdei->node; -+ } -+ } -+ } -+} -+ -+static struct page * -+alloc_migrate_page(struct page *ignored, unsigned long node, int **x) -+{ -+ -+ return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0); -+} -+ -+static int -+validate_paddr_page(u64 paddr) -+{ -+ struct page *page; -+ -+ if (!paddr) -+ return -EINVAL; -+ -+ if (!ia64_phys_addr_valid(paddr)) -+ return -EINVAL; -+ -+ if (!pfn_valid(paddr >> PAGE_SHIFT)) -+ return -EINVAL; -+ -+ page = phys_to_page(paddr); -+ if (PageMemError(page)) -+ mstat_already_marked++; -+ return 0; -+} -+ -+extern int isolate_lru_page(struct page *); -+static int -+ia64_mca_cpe_move_page(u64 paddr, u32 node) -+{ -+ LIST_HEAD(pagelist); -+ struct page *page; -+ int ret; -+ -+ ret = validate_paddr_page(paddr); -+ if (ret < 0) -+ return ret; -+ -+ /* -+ * convert physical address to page number -+ */ -+ page = phys_to_page(paddr); -+ -+ migrate_prep(); -+ ret = isolate_lru_page(page); -+ if (ret) { -+ mstat_cannot_isolate++; -+ return ret; -+ } -+ -+ list_add(&page->lru, &pagelist); -+ ret = migrate_pages(&pagelist, alloc_migrate_page, node, 0, true); -+ if (ret == 0) { -+ total_badpages++; -+ list_add_tail(&page->lru, &badpagelist); -+ } else { -+ mstat_failed_to_discard++; -+ /* -+ * The page failed to migrate and is not on the bad page list. -+ * Clearing the error bit will allow another attempt to migrate -+ * if it gets another correctable error. 
-+ */ -+ ClearPageMemError(page); -+ } -+ -+ return 0; -+} -+ -+/* -+ * ia64_mca_cpe_migrate -+ * The worker that does the actual migration. It pulls a -+ * physical address off the list and calls the migration code. -+ */ -+static void -+ia64_mca_cpe_migrate(struct work_struct *unused) -+{ -+ int ret; -+ u64 paddr; -+ u16 node; -+ -+ do { -+ paddr = cpe[cpe_tail].paddr; -+ if (paddr) { -+ /* -+ * There is a valid entry that needs processing. -+ */ -+ node = cpe[cpe_tail].node; -+ -+ ret = ia64_mca_cpe_move_page(paddr, node); -+ if (ret <= 0) -+ /* -+ * Even though the return status is negative, -+ * clear the entry. If the same address has -+ * another CPE it will be re-added to the list. -+ */ -+ cpe[cpe_tail].paddr = 0; -+ -+ } -+ if (++cpe_tail >= CE_HISTORY_LENGTH) -+ cpe_tail = 0; -+ -+ } while (cpe_tail != cpe_head); -+ work_scheduled = 0; -+} -+ -+static DECLARE_WORK(cpe_enable_work, ia64_mca_cpe_migrate); -+DEFINE_SPINLOCK(cpe_list_lock); -+ -+/* -+ * cpe_setup_migrate -+ * Get the physical address out of the CPE record, add it -+ * to the list of addresses to migrate (if not already on), -+ * and schedule the back end worker task. This is called -+ * in interrupt context so cannot directly call the migration -+ * code. -+ * -+ * Inputs -+ * rec The CPE record -+ * Outputs -+ * 1 on Success, -1 on failure -+ */ -+static int -+cpe_setup_migrate(void *rec) -+{ -+ u64 paddr; -+ u16 node; -+ /* int head, tail; */ -+ int i, ret; -+ -+ if (!rec) -+ return -EINVAL; -+ -+ get_physical_address(rec, &paddr, &node); -+ ret = validate_paddr_page(paddr); -+ if (ret < 0) -+ return -EINVAL; -+ -+ if ((cpe_head != cpe_tail) || (cpe[cpe_head].paddr != 0)) -+ /* -+ * List not empty -+ */ -+ for (i = 0; i < CE_HISTORY_LENGTH; i++) { -+ if (PAGE_ALIGN(cpe[i].paddr) == PAGE_ALIGN(paddr)) { -+ mstat_already_on_list++; -+ return 1; /* already on the list */ -+ } -+ } -+ -+ if (!spin_trylock(&cpe_list_lock)) { -+ /* -+ * Someone else has the lock. 
To avoid spinning in interrupt -+ * handler context, bail. -+ */ -+ return 1; -+ } -+ -+ if (cpe[cpe_head].paddr == 0) { -+ cpe[cpe_head].node = node; -+ cpe[cpe_head].paddr = paddr; -+ -+ if (++cpe_head >= CE_HISTORY_LENGTH) -+ cpe_head = 0; -+ } -+ spin_unlock(&cpe_list_lock); -+ -+ if (!work_scheduled) { -+ work_scheduled = 1; -+ schedule_work(&cpe_enable_work); -+ } -+ -+ return 1; -+} -+ -+/* -+ * ============================================================================= -+ */ -+ -+/* -+ * free_one_bad_page -+ * Free one page from the list of bad pages. -+ */ -+static int -+free_one_bad_page(unsigned long paddr) -+{ -+ LIST_HEAD(pagelist); -+ struct page *page, *page2, *target; -+ -+ /* -+ * Verify page address -+ */ -+ target = phys_to_page(paddr); -+ list_for_each_entry_safe(page, page2, &badpagelist, lru) { -+ if (page != target) -+ continue; -+ -+ ClearPageMemError(page); /* Mark the page as good */ -+ total_badpages--; -+ list_move_tail(&page->lru, &pagelist); -+ putback_lru_pages(&pagelist); -+ break; -+ } -+ return 0; -+} -+ -+/* -+ * free_all_bad_pages -+ * Free all of the pages on the bad pages list. 
-+ */ -+static int -+free_all_bad_pages(void) -+{ -+ struct page *page, *page2; -+ -+ list_for_each_entry_safe(page, page2, &badpagelist, lru) { -+ ClearPageMemError(page); /* Mark the page as good */ -+ total_badpages--; -+ } -+ putback_lru_pages(&badpagelist); -+ return 0; -+} -+ -+#define OPT_LEN 16 -+ -+static ssize_t -+badpage_store(struct kobject *kobj, -+ struct kobj_attribute *attr, const char *buf, size_t count) -+{ -+ char optstr[OPT_LEN]; -+ unsigned long opt; -+ int len = OPT_LEN; -+ int err; -+ -+ if (count < len) -+ len = count; -+ -+ strlcpy(optstr, buf, len); -+ -+ err = strict_strtoul(optstr, 16, &opt); -+ if (err) -+ return err; -+ -+ if (opt == 0) -+ free_all_bad_pages(); -+ else -+ free_one_bad_page(opt); -+ -+ return count; -+} -+ -+/* -+ * badpage_show -+ * Display the number, size, and addresses of all the pages on the -+ * bad page list. -+ * -+ * Note that sysfs provides buf of PAGE_SIZE length. bufend tracks -+ * the remaining space in buf to avoid overflowing. 
-+ */ -+static ssize_t -+badpage_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+ -+{ -+ struct page *page, *page2; -+ int i = 0, cnt = 0; -+ char *bufend = buf + PAGE_SIZE; -+ -+ cnt = snprintf(buf, bufend - (buf + cnt), -+ "Memory marked bad: %d kB\n" -+ "Pages marked bad: %d\n" -+ "Unable to isolate on LRU: %d\n" -+ "Unable to migrate: %d\n" -+ "Already marked bad: %d\n" -+ "Already on list: %d\n" -+ "List of bad physical pages\n", -+ total_badpages << (PAGE_SHIFT - 10), total_badpages, -+ mstat_cannot_isolate, mstat_failed_to_discard, -+ mstat_already_marked, mstat_already_on_list -+ ); -+ -+ list_for_each_entry_safe(page, page2, &badpagelist, lru) { -+ if (bufend - (buf + cnt) < 20) -+ break; /* Avoid overflowing the buffer */ -+ cnt += snprintf(buf + cnt, bufend - (buf + cnt), -+ " 0x%011lx", page_to_phys(page)); -+ if (!(++i % 5)) -+ cnt += snprintf(buf + cnt, bufend - (buf + cnt), "\n"); -+ } -+ cnt += snprintf(buf + cnt, bufend - (buf + cnt), "\n"); -+ -+ return cnt; -+} -+ -+static struct kobj_attribute badram_attr = { -+ .attr = { -+ .name = "badram", -+ .mode = S_IWUSR | S_IRUGO, -+ }, -+ .show = badpage_show, -+ .store = badpage_store, -+}; -+ -+static int __init -+cpe_migrate_external_handler_init(void) -+{ -+ int error; -+ -+ error = sysfs_create_file(kernel_kobj, &badram_attr.attr); -+ if (error) -+ return -EINVAL; -+ -+ /* -+ * register external ce handler -+ */ -+ if (ia64_reg_CE_extension(cpe_setup_migrate)) { -+ printk(KERN_ERR "ia64_reg_CE_extension failed.\n"); -+ return -EFAULT; -+ } -+ cpe_poll_enabled = cpe_polling_enabled; -+ -+ printk(KERN_INFO "Registered badram Driver\n"); -+ return 0; -+} -+ -+static void __exit -+cpe_migrate_external_handler_exit(void) -+{ -+ /* unregister external mca handlers */ -+ ia64_unreg_CE_extension(); -+ -+ sysfs_remove_file(kernel_kobj, &badram_attr.attr); -+} -+ -+module_init(cpe_migrate_external_handler_init); -+module_exit(cpe_migrate_external_handler_exit); -+ 
-+module_param(cpe_polling_enabled, int, 0644); -+MODULE_PARM_DESC(cpe_polling_enabled, -+ "Enable polling with migration"); -+ -+MODULE_AUTHOR("Russ Anderson "); -+MODULE_DESCRIPTION("ia64 Corrected Error page migration driver"); ---- a/arch/ia64/kernel/mca.c -+++ b/arch/ia64/kernel/mca.c -@@ -68,6 +68,9 @@ - * - * 2007-04-27 Russ Anderson - * Support multiple cpus going through OS_MCA in the same event. -+ * -+ * 2008-04-22 Russ Anderson -+ * Migrate data off pages with correctable memory errors. - */ - #include - #include -@@ -164,7 +167,14 @@ static int cmc_polling_enabled = 1; - * but encounters problems retrieving CPE logs. This should only be - * necessary for debugging. - */ --static int cpe_poll_enabled = 1; -+int cpe_poll_enabled = 1; -+EXPORT_SYMBOL(cpe_poll_enabled); -+ -+unsigned int total_badpages; -+EXPORT_SYMBOL(total_badpages); -+ -+LIST_HEAD(badpagelist); -+EXPORT_SYMBOL(badpagelist); - - extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe); - -@@ -524,6 +534,28 @@ int mca_recover_range(unsigned long addr - } - EXPORT_SYMBOL_GPL(mca_recover_range); - -+/* Function pointer to Corrected Error memory migration driver */ -+int (*ia64_mca_ce_extension)(void *); -+ -+int -+ia64_reg_CE_extension(int (*fn)(void *)) -+{ -+ if (ia64_mca_ce_extension) -+ return 1; -+ -+ ia64_mca_ce_extension = fn; -+ return 0; -+} -+EXPORT_SYMBOL(ia64_reg_CE_extension); -+ -+void -+ia64_unreg_CE_extension(void) -+{ -+ if (ia64_mca_ce_extension) -+ ia64_mca_ce_extension = NULL; -+} -+EXPORT_SYMBOL(ia64_unreg_CE_extension); -+ - #ifdef CONFIG_ACPI - - int cpe_vector = -1; -@@ -535,6 +567,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, v - static unsigned long cpe_history[CPE_HISTORY_LENGTH]; - static int index; - static DEFINE_SPINLOCK(cpe_history_lock); -+ int recover; - - IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", - __func__, cpe_irq, smp_processor_id()); -@@ -581,6 +614,8 @@ ia64_mca_cpe_int_handler (int cpe_irq, v - out: - /* Get 
the CPE error record and log it */ - ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE); -+ recover = (ia64_mca_ce_extension && ia64_mca_ce_extension( -+ IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_CPE))); - - return IRQ_HANDLED; - } diff --git a/patches.arch/ia64-page-migration.fix b/patches.arch/ia64-page-migration.fix deleted file mode 100644 index fbc4357..0000000 --- a/patches.arch/ia64-page-migration.fix +++ /dev/null @@ -1,159 +0,0 @@ -From: Russ Anderson -Subject: ia64: cpe_migrate.ko causes deadlock. -References: bnc#464676 -Patch-mainline: not yet, depends on patches.arch/ia64-page-migration - -schedule_on_each_cpu() deadlocks when called from an event thread. -Change cpe_migrate to use a kthread to avoid the problem. - -Signed-off-by: Russ Anderson -Acked-by: Raymund Will - ---- - arch/ia64/kernel/cpe_migrate.c | 72 +++++++++++++++++++++++++++++++---------- - 1 file changed, 56 insertions(+), 16 deletions(-) - ---- a/arch/ia64/kernel/cpe_migrate.c -+++ b/arch/ia64/kernel/cpe_migrate.c -@@ -22,6 +22,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -40,12 +41,15 @@ static struct cpe_info cpe[CE_HISTORY_LE - static int cpe_polling_enabled = 1; - static int cpe_head; - static int cpe_tail; --static int work_scheduled; - static int mstat_cannot_isolate; - static int mstat_failed_to_discard; - static int mstat_already_marked; - static int mstat_already_on_list; - -+/* IRQ handler notifies this wait queue on receipt of an IRQ */ -+DECLARE_WAIT_QUEUE_HEAD(cpe_activate_IRQ_wq); -+static DECLARE_COMPLETION(kthread_cpe_migrated_exited); -+int cpe_active; - DEFINE_SPINLOCK(cpe_migrate_lock); - - static void -@@ -160,12 +164,12 @@ ia64_mca_cpe_move_page(u64 paddr, u32 no - } - - /* -- * ia64_mca_cpe_migrate -- * The worker that does the actual migration. It pulls a -- * physical address off the list and calls the migration code. -+ * cpe_process_queue -+ * Pulls the physical address off the list and calls the migration code. 
-+ * Will process all the addresses on the list. - */ --static void --ia64_mca_cpe_migrate(struct work_struct *unused) -+void -+cpe_process_queue(void) - { - int ret; - u64 paddr; -@@ -193,10 +197,36 @@ ia64_mca_cpe_migrate(struct work_struct - cpe_tail = 0; - - } while (cpe_tail != cpe_head); -- work_scheduled = 0; -+ return; -+} -+ -+inline int -+cpe_list_empty(void) -+{ -+ return (cpe_head == cpe_tail) && (!cpe[cpe_head].paddr); -+} -+ -+/* -+ * kthread_cpe_migrate -+ * kthread_cpe_migrate is created at module load time and lives -+ * until the module is removed. When not active, it will sleep. -+ */ -+static int -+kthread_cpe_migrate(void *ignore) -+{ -+ while (cpe_active) { -+ /* -+ * wait for work -+ */ -+ (void)wait_event_interruptible(cpe_activate_IRQ_wq, -+ (!cpe_list_empty() || -+ !cpe_active)); -+ cpe_process_queue(); /* process work */ -+ } -+ complete(&kthread_cpe_migrated_exited); -+ return 0; - } - --static DECLARE_WORK(cpe_enable_work, ia64_mca_cpe_migrate); - DEFINE_SPINLOCK(cpe_list_lock); - - /* -@@ -228,10 +258,7 @@ cpe_setup_migrate(void *rec) - if (ret < 0) - return -EINVAL; - -- if ((cpe_head != cpe_tail) || (cpe[cpe_head].paddr != 0)) -- /* -- * List not empty -- */ -+ if (!cpe_list_empty()) - for (i = 0; i < CE_HISTORY_LENGTH; i++) { - if (PAGE_ALIGN(cpe[i].paddr) == PAGE_ALIGN(paddr)) { - mstat_already_on_list++; -@@ -256,10 +283,7 @@ cpe_setup_migrate(void *rec) - } - spin_unlock(&cpe_list_lock); - -- if (!work_scheduled) { -- work_scheduled = 1; -- schedule_work(&cpe_enable_work); -- } -+ wake_up_interruptible(&cpe_activate_IRQ_wq); - - return 1; - } -@@ -396,12 +420,23 @@ static int __init - cpe_migrate_external_handler_init(void) - { - int error; -+ struct task_struct *kthread; - - error = sysfs_create_file(kernel_kobj, &badram_attr.attr); - if (error) - return -EINVAL; - - /* -+ * set up the kthread -+ */ -+ cpe_active = 1; -+ kthread = kthread_run(kthread_cpe_migrate, NULL, "cpe_migrate"); -+ if (IS_ERR(kthread)) { -+ 
complete(&kthread_cpe_migrated_exited); -+ return -EFAULT; -+ } -+ -+ /* - * register external ce handler - */ - if (ia64_reg_CE_extension(cpe_setup_migrate)) { -@@ -420,6 +455,11 @@ cpe_migrate_external_handler_exit(void) - /* unregister external mca handlers */ - ia64_unreg_CE_extension(); - -+ /* Stop kthread */ -+ cpe_active = 0; /* tell kthread_cpe_migrate to exit */ -+ wake_up_interruptible(&cpe_activate_IRQ_wq); -+ wait_for_completion(&kthread_cpe_migrated_exited); -+ - sysfs_remove_file(kernel_kobj, &badram_attr.attr); - } - diff --git a/patches.arch/kmsg-fix-parameter-limitations b/patches.arch/kmsg-fix-parameter-limitations deleted file mode 100644 index 35bcfe5..0000000 --- a/patches.arch/kmsg-fix-parameter-limitations +++ /dev/null @@ -1,54 +0,0 @@ -From: Jeff Mahoney -Subject: kmsg: Fix parameter limitations -Patch-mainline: Whenever kmsg is upstream - - The kmsg infrastructure, currently only employed on s/390, has limitations - with the parameters it can handle due to the way it assembles the - magic string for parsing with scripts/kmsg-doc. - - cpp expects the result to be a valid expression and exits with an error - if it is not. - - The netfilter ipvs code causes this error, though there are more examples: - error: pasting "_ARGS_" and "&" does not give a valid preprocessing token - - This stems from an otherwise valid expression: - pr_info("Registered protocols (%s)\n", &protocols[2]); - - It tries to concatenate _ARGS_ and &protocols[2] and fails. - - This patch fixes the issue by stringifying the entire parameter list - and allowing kmsg-doc to unquote the resultant expression. - - The dev_* expressions that evaluate to __KMSG_DEV are unaffected because - the insertion of the "dev, " between _ARGS_ and the parameter list ends - up creating a valid expression. 
- -Signed-off-by: Jeff Mahoney ---- - include/linux/printk.h | 2 +- - scripts/kmsg-doc | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - ---- a/include/linux/printk.h -+++ b/include/linux/printk.h -@@ -416,7 +416,7 @@ extern int hex_to_bin(char ch); - - /* generate magic string for scripts/kmsg-doc to parse */ - #define pr_printk_hash(level, format, ...) \ -- __KMSG_PRINT(level _FMT_ format _ARGS_ ##__VA_ARGS__ _END_) -+ __KMSG_PRINT(level _FMT_ format _ARGS_ #__VA_ARGS__ _END_) - - #elif defined(CONFIG_KMSG_IDS) && defined(KMSG_COMPONENT) - ---- a/scripts/kmsg-doc -+++ b/scripts/kmsg-doc -@@ -307,7 +307,7 @@ sub process_cpp_file($$$$) - - while () { - chomp; -- if (/.*__KMSG_PRINT\(\s*(\S*)\s*_FMT_(.*)_ARGS_\s*(.*)?_END_\s*\)/o) { -+ if (/.*__KMSG_PRINT\(\s*(\S*)\s*_FMT_(.*)_ARGS_\s*"(.*)"\s*_END_\s*\)/o) { - if ($component ne "") { - add_kmsg_print($component, $1, $2, $3); - } else { diff --git a/patches.arch/kvm-only-export-selected-pv-ops-feature-structs b/patches.arch/kvm-only-export-selected-pv-ops-feature-structs deleted file mode 100644 index 154fe80..0000000 --- a/patches.arch/kvm-only-export-selected-pv-ops-feature-structs +++ /dev/null @@ -1,139 +0,0 @@ -From: Alexander Graf -Date: Wed, 18 Nov 2009 00:39:12 +0100 -Subject: Only export selected pv-ops feature structs -References: bnc#556135, FATE#306453 -Patch-Mainline: Submitted to virtualization list - -To really check for sure that we're not using any pv-ops code by accident, -we should make sure that we don't even export the structures used to access -pv-ops exported functions. - -So let's surround the pv-ops structs by #ifdefs. 
- -Signed-off-by: Alexander Graf ---- - arch/x86/kernel/paravirt.c | 35 +++++++++++++++++++++++++++++------ - 1 file changed, 29 insertions(+), 6 deletions(-) - ---- a/arch/x86/kernel/paravirt.c -+++ b/arch/x86/kernel/paravirt.c -@@ -124,11 +124,21 @@ static void *get_call_destination(u8 typ - { - struct paravirt_patch_template tmpl = { - .pv_init_ops = pv_init_ops, -+#ifdef CONFIG_PARAVIRT_TIME - .pv_time_ops = pv_time_ops, -+#endif -+#ifdef CONFIG_PARAVIRT_CPU - .pv_cpu_ops = pv_cpu_ops, -+#endif -+#ifdef CONFIG_PARAVIRT_IRQ - .pv_irq_ops = pv_irq_ops, -+#endif -+#ifdef CONFIG_PARAVIRT_APIC - .pv_apic_ops = pv_apic_ops, -+#endif -+#ifdef CONFIG_PARAVIRT_MMU - .pv_mmu_ops = pv_mmu_ops, -+#endif - #ifdef CONFIG_PARAVIRT_SPINLOCKS - .pv_lock_ops = pv_lock_ops, - #endif -@@ -185,6 +195,7 @@ unsigned paravirt_patch_insns(void *insn - return insn_len; - } - -+#ifdef CONFIG_PARAVIRT_MMU - static void native_flush_tlb(void) - { - __native_flush_tlb(); -@@ -203,6 +214,7 @@ static void native_flush_tlb_single(unsi - { - __native_flush_tlb_single(addr); - } -+#endif /* CONFIG_PARAVIRT_MMU */ - - /* These are in entry.S */ - extern void native_iret(void); -@@ -284,6 +296,7 @@ enum paravirt_lazy_mode paravirt_get_laz - return percpu_read(paravirt_lazy_mode); - } - -+#ifdef CONFIG_PARAVIRT_MMU - void arch_flush_lazy_mmu_mode(void) - { - preempt_disable(); -@@ -295,6 +308,7 @@ void arch_flush_lazy_mmu_mode(void) - - preempt_enable(); - } -+#endif /* CONFIG_PARAVIRT_MMU */ - - struct pv_info pv_info = { - .name = "bare hardware", -@@ -306,11 +320,16 @@ struct pv_info pv_info = { - struct pv_init_ops pv_init_ops = { - .patch = native_patch, - }; -+EXPORT_SYMBOL_GPL(pv_info); - -+#ifdef CONFIG_PARAVIRT_TIME - struct pv_time_ops pv_time_ops = { - .sched_clock = native_sched_clock, - }; -+EXPORT_SYMBOL_GPL(pv_time_ops); -+#endif - -+#ifdef CONFIG_PARAVIRT_IRQ - struct pv_irq_ops pv_irq_ops = { - .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), - .restore_fl = 
__PV_IS_CALLEE_SAVE(native_restore_fl), -@@ -322,7 +341,10 @@ struct pv_irq_ops pv_irq_ops = { - .adjust_exception_frame = paravirt_nop, - #endif - }; -+EXPORT_SYMBOL (pv_irq_ops); -+#endif - -+#ifdef CONFIG_PARAVIRT_CPU - struct pv_cpu_ops pv_cpu_ops = { - .cpuid = native_cpuid, - .get_debugreg = native_get_debugreg, -@@ -383,12 +405,17 @@ struct pv_cpu_ops pv_cpu_ops = { - .start_context_switch = paravirt_nop, - .end_context_switch = paravirt_nop, - }; -+EXPORT_SYMBOL (pv_cpu_ops); -+#endif - -+#ifdef CONFIG_PARAVIRT_APIC - struct pv_apic_ops pv_apic_ops = { - #ifdef CONFIG_X86_LOCAL_APIC - .startup_ipi_hook = paravirt_nop, - #endif - }; -+EXPORT_SYMBOL_GPL(pv_apic_ops); -+#endif - - #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) - /* 32-bit pagetable entries */ -@@ -398,6 +425,7 @@ struct pv_apic_ops pv_apic_ops = { - #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) - #endif - -+#ifdef CONFIG_PARAVIRT_MMU - struct pv_mmu_ops pv_mmu_ops = { - - .read_cr2 = native_read_cr2, -@@ -466,10 +494,5 @@ struct pv_mmu_ops pv_mmu_ops = { - - .set_fixmap = native_set_fixmap, - }; -- --EXPORT_SYMBOL_GPL(pv_time_ops); --EXPORT_SYMBOL (pv_cpu_ops); - EXPORT_SYMBOL (pv_mmu_ops); --EXPORT_SYMBOL_GPL(pv_apic_ops); --EXPORT_SYMBOL_GPL(pv_info); --EXPORT_SYMBOL (pv_irq_ops); -+#endif diff --git a/patches.arch/kvm-replace-kvm-io-delay-pv-ops-with-linux-magic b/patches.arch/kvm-replace-kvm-io-delay-pv-ops-with-linux-magic deleted file mode 100644 index b63bf11..0000000 --- a/patches.arch/kvm-replace-kvm-io-delay-pv-ops-with-linux-magic +++ /dev/null @@ -1,80 +0,0 @@ -From: Alexander Graf -Date: Wed, 18 Nov 2009 12:58:00 +0100 -Subject: Replace kvm io delay pv-ops with linux magic -References: bnc#556135, FATE#306453 -Patch-Mainline: Submitted to virtualization list - -Currently we use pv-ops to tell linux not to do anything on io_delay. - -While the basic idea is good IMHO, I don't see why we would need pv-ops -for that. 
The io delay function already has a switch that can do nothing -if you're so inclined. - -So here's a patch (stacked on top of the previous pv-ops series) that -removes the io delay pv-ops hook and just sets the native io delay -variable instead. - -Signed-off-by: Alexander Graf ---- - arch/x86/Kconfig | 14 -------------- - arch/x86/kernel/kvm.c | 16 +++------------- - 2 files changed, 3 insertions(+), 27 deletions(-) - ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -548,20 +548,6 @@ config KVM_GUEST - This option enables various optimizations for running under the KVM - hypervisor. - --config KVM_IODELAY -- bool "KVM IO-delay support" -- depends on KVM_GUEST -- select PARAVIRT_CPU -- ---help--- -- Usually we wait for PIO access to complete. When inside KVM there's -- no need to do that, as we know that we're not going through a bus, -- but process PIO requests instantly. -- -- This option disables PIO waits, but drags in CPU-bound pv-ops. Thus -- you will probably get more speed loss than speedup using this option. -- -- If in doubt, say N. 
-- - config KVM_MMU - bool "KVM PV MMU support" - depends on KVM_GUEST ---- a/arch/x86/kernel/kvm.c -+++ b/arch/x86/kernel/kvm.c -@@ -29,15 +29,6 @@ - #include - #include - --#ifdef CONFIG_KVM_IODELAY --/* -- * No need for any "IO delay" on KVM -- */ --static void kvm_io_delay(void) --{ --} --#endif /* CONFIG_KVM_IODELAY */ -- - #ifdef CONFIG_KVM_MMU - #define MMU_QUEUE_SIZE 1024 - -@@ -201,13 +192,12 @@ static void kvm_leave_lazy_mmu(void) - - static void __init paravirt_ops_setup(void) - { -+ extern int io_delay_type; - pv_info.name = "KVM"; - pv_info.paravirt_enabled = 1; - --#ifdef CONFIG_KVM_IODELAY -- if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) -- pv_cpu_ops.io_delay = kvm_io_delay; --#endif -+ /* Disable IO delay */ -+ io_delay_type = CONFIG_IO_DELAY_TYPE_NONE; - - #ifdef CONFIG_KVM_MMU - if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) { diff --git a/patches.arch/kvm-split-paravirt-ops-by-functionality b/patches.arch/kvm-split-paravirt-ops-by-functionality deleted file mode 100644 index d1b9b78..0000000 --- a/patches.arch/kvm-split-paravirt-ops-by-functionality +++ /dev/null @@ -1,728 +0,0 @@ -From: Alexander Graf -Date: Wed, 18 Nov 2009 00:27:59 +0100 -Subject: Split paravirt ops by functionality -References: bnc#556135, FATE#306453 -Patch-Mainline: Submitted to virtualization list - -Currently when using paravirt ops it's an all-or-nothing option. We can either -use pv-ops for CPU, MMU, timing, etc. or not at all. - -Now there are some use cases where we don't need the full feature set, but only -a small chunk of it. KVM is a pretty prominent example for this. - -So let's make everything a bit more fine-grained. We already have a splitting -by function groups, namely "cpu", "mmu", "time", "irq", "apic" and "spinlock". - -Taking that existing splitting and extending it to only compile in the PV -capable bits sounded like a natural fit. 
That way we don't get performance hits -in MMU code from using the KVM PV clock which only needs the TIME parts of -pv-ops. - -We define a new CONFIG_PARAVIRT_ALL option that basically does the same thing -the CONFIG_PARAVIRT did before this splitting. We move all users of -CONFIG_PARAVIRT to CONFIG_PARAVIRT_ALL, so they behave the same way they did -before. - -So here it is - the splitting! I would have made the patch smaller, but this -was the closest I could get to atomic (for bisect) while staying sane. - -Signed-off-by: Alexander Graf ---- - arch/x86/Kconfig | 41 +++++++++++++++++++++++--- - arch/x86/include/asm/apic.h | 2 - - arch/x86/include/asm/desc.h | 4 +- - arch/x86/include/asm/fixmap.h | 2 - - arch/x86/include/asm/io.h | 2 - - arch/x86/include/asm/irqflags.h | 21 ++++++++++--- - arch/x86/include/asm/mmu_context.h | 4 +- - arch/x86/include/asm/msr.h | 4 +- - arch/x86/include/asm/paravirt.h | 44 ++++++++++++++++++++++++++-- - arch/x86/include/asm/paravirt_types.h | 12 +++++++ - arch/x86/include/asm/pgalloc.h | 2 - - arch/x86/include/asm/pgtable-3level_types.h | 2 - - arch/x86/include/asm/pgtable.h | 2 - - arch/x86/include/asm/processor.h | 2 - - arch/x86/include/asm/required-features.h | 2 - - arch/x86/include/asm/smp.h | 2 - - arch/x86/include/asm/system.h | 13 +++++--- - arch/x86/include/asm/tlbflush.h | 4 +- - arch/x86/kernel/head_64.S | 2 - - arch/x86/kernel/paravirt.c | 2 + - arch/x86/kernel/tsc.c | 2 - - arch/x86/kernel/vsmp_64.c | 2 - - arch/x86/kernel/x8664_ksyms_64.c | 2 - - arch/x86/xen/Kconfig | 2 - - 24 files changed, 140 insertions(+), 37 deletions(-) - ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -367,7 +367,7 @@ endif - config X86_VSMP - bool "ScaleMP vSMP" - select PARAVIRT_GUEST -- select PARAVIRT -+ select PARAVIRT_ALL - depends on X86_64 && PCI - depends on X86_EXTENDED_PLATFORM - ---help--- -@@ -533,7 +533,6 @@ config VMI - - config KVM_CLOCK - bool "KVM paravirtualized clock" -- select PARAVIRT - select PARAVIRT_CLOCK - 
---help--- - Turning on this option will allow you to run a paravirtualized clock -@@ -544,7 +543,7 @@ config KVM_CLOCK - - config KVM_GUEST - bool "KVM Guest support" -- select PARAVIRT -+ select PARAVIRT_ALL - ---help--- - This option enables various optimizations for running under the KVM - hypervisor. -@@ -572,8 +571,42 @@ config PARAVIRT_SPINLOCKS - - If you are unsure how to answer this question, answer N. - -+config PARAVIRT_CPU -+ bool -+ select PARAVIRT -+ -+config PARAVIRT_TIME -+ bool -+ select PARAVIRT -+ -+config PARAVIRT_IRQ -+ bool -+ select PARAVIRT -+ -+config PARAVIRT_APIC -+ bool -+ select PARAVIRT -+ -+config PARAVIRT_MMU -+ bool -+ select PARAVIRT -+ -+# -+# This is a placeholder to activate the old "include all pv-ops functionality" -+# behavior. If you're using this I'd recommend looking through your code to see -+# if you can be more specific. It probably saves you a few cycles! -+# -+config PARAVIRT_ALL -+ bool -+ select PARAVIRT_CPU -+ select PARAVIRT_TIME -+ select PARAVIRT_IRQ -+ select PARAVIRT_APIC -+ select PARAVIRT_MMU -+ - config PARAVIRT_CLOCK - bool -+ select PARAVIRT_TIME - - endif - ---- a/arch/x86/include/asm/apic.h -+++ b/arch/x86/include/asm/apic.h -@@ -81,7 +81,7 @@ static inline bool apic_from_smp_config( - /* - * Basic functions accessing APICs. 
- */ --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_APIC - #include - #endif - ---- a/arch/x86/include/asm/desc.h -+++ b/arch/x86/include/asm/desc.h -@@ -78,7 +78,7 @@ static inline int desc_empty(const void - return !(desc[0] | desc[1]); - } - --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_CPU - #include - #else - #define load_TR_desc() native_load_tr_desc() -@@ -108,7 +108,7 @@ static inline void paravirt_alloc_ldt(st - static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) - { - } --#endif /* CONFIG_PARAVIRT */ -+#endif /* CONFIG_PARAVIRT_CPU */ - - #define store_ldt(ldt) asm("sldt %0" : "=m"(ldt)) - ---- a/arch/x86/include/asm/fixmap.h -+++ b/arch/x86/include/asm/fixmap.h -@@ -162,7 +162,7 @@ void __native_set_fixmap(enum fixed_addr - void native_set_fixmap(enum fixed_addresses idx, - phys_addr_t phys, pgprot_t flags); - --#ifndef CONFIG_PARAVIRT -+#ifndef CONFIG_PARAVIRT_MMU - static inline void __set_fixmap(enum fixed_addresses idx, - phys_addr_t phys, pgprot_t flags) - { ---- a/arch/x86/include/asm/io.h -+++ b/arch/x86/include/asm/io.h -@@ -268,7 +268,7 @@ extern void native_io_delay(void); - extern int io_delay_type; - extern void io_delay_init(void); - --#if defined(CONFIG_PARAVIRT) -+#if defined(CONFIG_PARAVIRT_CPU) - #include - #else - ---- a/arch/x86/include/asm/irqflags.h -+++ b/arch/x86/include/asm/irqflags.h -@@ -58,9 +58,11 @@ static inline void native_halt(void) - - #ifdef CONFIG_PARAVIRT - #include --#else -+#endif -+ - #ifndef __ASSEMBLY__ - -+#ifndef CONFIG_PARAVIRT_IRQ - static inline unsigned long arch_local_save_flags(void) - { - return native_save_fl(); -@@ -110,12 +112,17 @@ static inline unsigned long __raw_local_ - arch_local_irq_disable(); - return flags; - } --#else -+#endif /* CONFIG_PARAVIRT_IRQ */ -+ -+#else /* __ASSEMBLY__ */ - -+#ifndef CONFIG_PARAVIRT_IRQ - #define ENABLE_INTERRUPTS(x) sti - #define DISABLE_INTERRUPTS(x) cli -+#endif /* !CONFIG_PARAVIRT_IRQ */ - - #ifdef CONFIG_X86_64 -+#ifndef 
CONFIG_PARAVIRT_CPU - #define SWAPGS swapgs - /* - * Currently paravirt can't handle swapgs nicely when we -@@ -128,8 +135,6 @@ static inline unsigned long __raw_local_ - */ - #define SWAPGS_UNSAFE_STACK swapgs - --#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */ -- - #define INTERRUPT_RETURN iretq - #define USERGS_SYSRET64 \ - swapgs; \ -@@ -141,16 +146,22 @@ static inline unsigned long __raw_local_ - swapgs; \ - sti; \ - sysexit -+#endif /* !CONFIG_PARAVIRT_CPU */ -+ -+#ifndef CONFIG_PARAVIRT_IRQ -+#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */ -+#endif /* !CONFIG_PARAVIRT_IRQ */ - - #else -+#ifndef CONFIG_PARAVIRT_CPU - #define INTERRUPT_RETURN iret - #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit - #define GET_CR0_INTO_EAX movl %cr0, %eax -+#endif /* !CONFIG_PARAVIRT_CPU */ - #endif - - - #endif /* __ASSEMBLY__ */ --#endif /* CONFIG_PARAVIRT */ - - #ifndef __ASSEMBLY__ - static inline int arch_irqs_disabled_flags(unsigned long flags) ---- a/arch/x86/include/asm/mmu_context.h -+++ b/arch/x86/include/asm/mmu_context.h -@@ -6,14 +6,14 @@ - #include - #include - #include --#ifndef CONFIG_PARAVIRT -+#ifndef CONFIG_PARAVIRT_MMU - #include - - static inline void paravirt_activate_mm(struct mm_struct *prev, - struct mm_struct *next) - { - } --#endif /* !CONFIG_PARAVIRT */ -+#endif /* !CONFIG_PARAVIRT_MMU */ - - /* - * Used for LDT copy/destruction. 
---- a/arch/x86/include/asm/msr.h -+++ b/arch/x86/include/asm/msr.h -@@ -135,7 +135,7 @@ static inline unsigned long long native_ - return EAX_EDX_VAL(val, low, high); - } - --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_CPU - #include - #else - #include -@@ -246,7 +246,7 @@ do { - - #define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) - --#endif /* !CONFIG_PARAVIRT */ -+#endif /* !CONFIG_PARAVIRT_CPU */ - - - #define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \ ---- a/arch/x86/include/asm/paravirt.h -+++ b/arch/x86/include/asm/paravirt.h -@@ -18,6 +18,7 @@ static inline int paravirt_enabled(void) - return pv_info.paravirt_enabled; - } - -+#ifdef CONFIG_PARAVIRT_CPU - static inline void load_sp0(struct tss_struct *tss, - struct thread_struct *thread) - { -@@ -58,7 +59,9 @@ static inline void write_cr0(unsigned lo - { - PVOP_VCALL1(pv_cpu_ops.write_cr0, x); - } -+#endif /* CONFIG_PARAVIRT_CPU */ - -+#ifdef CONFIG_PARAVIRT_MMU - static inline unsigned long read_cr2(void) - { - return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2); -@@ -78,7 +81,9 @@ static inline void write_cr3(unsigned lo - { - PVOP_VCALL1(pv_mmu_ops.write_cr3, x); - } -+#endif /* CONFIG_PARAVIRT_MMU */ - -+#ifdef CONFIG_PARAVIRT_CPU - static inline unsigned long read_cr4(void) - { - return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4); -@@ -92,8 +97,9 @@ static inline void write_cr4(unsigned lo - { - PVOP_VCALL1(pv_cpu_ops.write_cr4, x); - } -+#endif /* CONFIG_PARAVIRT_CPU */ - --#ifdef CONFIG_X86_64 -+#if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_CPU) - static inline unsigned long read_cr8(void) - { - return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8); -@@ -105,6 +111,7 @@ static inline void write_cr8(unsigned lo - } - #endif - -+#ifdef CONFIG_PARAVIRT_IRQ - static inline void arch_safe_halt(void) - { - PVOP_VCALL0(pv_irq_ops.safe_halt); -@@ -114,14 +121,18 @@ static inline void halt(void) - { - PVOP_VCALL0(pv_irq_ops.halt); - } -+#endif /* CONFIG_PARAVIRT_IRQ */ 
- -+#ifdef CONFIG_PARAVIRT_CPU - static inline void wbinvd(void) - { - PVOP_VCALL0(pv_cpu_ops.wbinvd); - } -+#endif - - #define get_kernel_rpl() (pv_info.kernel_rpl) - -+#ifdef CONFIG_PARAVIRT_CPU - static inline u64 paravirt_read_msr(unsigned msr, int *err) - { - return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); -@@ -224,12 +235,16 @@ do { \ - } while (0) - - #define rdtscll(val) (val = paravirt_read_tsc()) -+#endif /* CONFIG_PARAVIRT_CPU */ - -+#ifdef CONFIG_PARAVIRT_TIME - static inline unsigned long long paravirt_sched_clock(void) - { - return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); - } -+#endif /* CONFIG_PARAVIRT_TIME */ - -+#ifdef CONFIG_PARAVIRT_CPU - static inline unsigned long long paravirt_read_pmc(int counter) - { - return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); -@@ -345,8 +360,9 @@ static inline void slow_down_io(void) - pv_cpu_ops.io_delay(); - #endif - } -+#endif /* CONFIG_PARAVIRT_CPU */ - --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_APIC) - static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, - unsigned long start_esp) - { -@@ -355,6 +371,7 @@ static inline void startup_ipi_hook(int - } - #endif - -+#ifdef CONFIG_PARAVIRT_MMU - static inline void paravirt_activate_mm(struct mm_struct *prev, - struct mm_struct *next) - { -@@ -689,7 +706,9 @@ static inline void pmd_clear(pmd_t *pmdp - set_pmd(pmdp, __pmd(0)); - } - #endif /* CONFIG_X86_PAE */ -+#endif /* CONFIG_PARAVIRT_MMU */ - -+#ifdef CONFIG_PARAVIRT_CPU - #define __HAVE_ARCH_START_CONTEXT_SWITCH - static inline void arch_start_context_switch(struct task_struct *prev) - { -@@ -700,7 +719,9 @@ static inline void arch_end_context_swit - { - PVOP_VCALL1(pv_cpu_ops.end_context_switch, next); - } -+#endif /* CONFIG_PARAVIRT_CPU */ - -+#ifdef CONFIG_PARAVIRT_MMU - #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE - static inline void arch_enter_lazy_mmu_mode(void) - { -@@ -719,6 +740,7 @@ static inline void __set_fixmap(unsigned - { 
- pv_mmu_ops.set_fixmap(idx, phys, flags); - } -+#endif /* CONFIG_PARAVIRT_MMU */ - - #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) - -@@ -829,6 +851,7 @@ static __always_inline void arch_spin_un - #define __PV_IS_CALLEE_SAVE(func) \ - ((struct paravirt_callee_save) { func }) - -+#ifdef CONFIG_PARAVIRT_IRQ - static inline notrace unsigned long arch_local_save_flags(void) - { - return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); -@@ -857,6 +880,7 @@ static inline unsigned long __raw_local_ - arch_local_irq_disable(); - return f; - } -+#endif /* CONFIG_PARAVIRT_IRQ */ - - - /* Make sure as little as possible of this mess escapes. */ -@@ -939,10 +963,13 @@ extern void default_banner(void); - #define PARA_INDIRECT(addr) *%cs:addr - #endif - -+#ifdef CONFIG_PARAVIRT_CPU - #define INTERRUPT_RETURN \ - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret)) -+#endif /* CONFIG_PARAVIRT_CPU */ - -+#ifdef CONFIG_PARAVIRT_IRQ - #define DISABLE_INTERRUPTS(clobbers) \ - PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ - PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ -@@ -954,13 +981,17 @@ extern void default_banner(void); - PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ - call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ - PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) -+#endif /* CONFIG_PARAVIRT_IRQ */ - -+#ifdef CONFIG_PARAVIRT_CPU - #define USERGS_SYSRET32 \ - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \ - CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32)) -+#endif /* CONFIG_PARAVIRT_CPU */ - - #ifdef CONFIG_X86_32 -+#ifdef CONFIG_PARAVIRT_CPU - #define GET_CR0_INTO_EAX \ - push %ecx; push %edx; \ - call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ -@@ -970,10 +1001,12 @@ extern void default_banner(void); - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ - CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) -+#endif /* 
CONFIG_PARAVIRT_CPU */ - - - #else /* !CONFIG_X86_32 */ - -+#ifdef CONFIG_PARAVIRT_CPU - /* - * If swapgs is used while the userspace stack is still current, - * there's no way to call a pvop. The PV replacement *must* be -@@ -993,17 +1026,23 @@ extern void default_banner(void); - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ - call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ - ) -+#endif /* CONFIG_PARAVIRT_CPU */ - -+#ifdef CONFIG_PARAVIRT_MMU - #define GET_CR2_INTO_RCX \ - call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \ - movq %rax, %rcx; \ - xorq %rax, %rax; -+#endif /* CONFIG_PARAVIRT_MMU */ - -+#ifdef CONFIG_PARAVIRT_IRQ - #define PARAVIRT_ADJUST_EXCEPTION_FRAME \ - PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \ - CLBR_NONE, \ - call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame)) -+#endif /* CONFIG_PARAVIRT_IRQ */ - -+#ifdef CONFIG_PARAVIRT_CPU - #define USERGS_SYSRET64 \ - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ - CLBR_NONE, \ -@@ -1013,6 +1052,7 @@ extern void default_banner(void); - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ - CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) -+#endif /* CONFIG_PARAVIRT_CPU */ - #endif /* CONFIG_X86_32 */ - - #endif /* __ASSEMBLY__ */ ---- a/arch/x86/include/asm/paravirt_types.h -+++ b/arch/x86/include/asm/paravirt_types.h -@@ -339,12 +339,24 @@ struct paravirt_patch_template { - - extern struct pv_info pv_info; - extern struct pv_init_ops pv_init_ops; -+#ifdef CONFIG_PARAVIRT_TIME - extern struct pv_time_ops pv_time_ops; -+#endif -+#ifdef CONFIG_PARAVIRT_CPU - extern struct pv_cpu_ops pv_cpu_ops; -+#endif -+#ifdef CONFIG_PARAVIRT_IRQ - extern struct pv_irq_ops pv_irq_ops; -+#endif -+#ifdef CONFIG_PARAVIRT_APIC - extern struct pv_apic_ops pv_apic_ops; -+#endif -+#ifdef CONFIG_PARAVIRT_MMU - extern struct pv_mmu_ops pv_mmu_ops; -+#endif -+#ifdef CONFIG_PARAVIRT_SPINLOCKS - extern struct pv_lock_ops pv_lock_ops; 
-+#endif - - #define PARAVIRT_PATCH(x) \ - (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) ---- a/arch/x86/include/asm/pgalloc.h -+++ b/arch/x86/include/asm/pgalloc.h -@@ -7,7 +7,7 @@ - - static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } - --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_MMU - #include - #else - #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm) ---- a/arch/x86/include/asm/pgtable-3level_types.h -+++ b/arch/x86/include/asm/pgtable-3level_types.h -@@ -18,7 +18,7 @@ typedef union { - } pte_t; - #endif /* !__ASSEMBLY__ */ - --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_MMU - #define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd) - #else - #define SHARED_KERNEL_PMD 1 ---- a/arch/x86/include/asm/pgtable.h -+++ b/arch/x86/include/asm/pgtable.h -@@ -28,7 +28,7 @@ extern unsigned long empty_zero_page[PAG - - extern struct mm_struct *pgd_page_get_mm(struct page *page); - --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_MMU - #include - #else /* !CONFIG_PARAVIRT */ - #define set_pte(ptep, pte) native_set_pte(ptep, pte) ---- a/arch/x86/include/asm/processor.h -+++ b/arch/x86/include/asm/processor.h -@@ -569,7 +569,7 @@ static inline void native_swapgs(void) - #endif - } - --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_CPU - #include - #else - #define __cpuid native_cpuid ---- a/arch/x86/include/asm/required-features.h -+++ b/arch/x86/include/asm/required-features.h -@@ -48,7 +48,7 @@ - #endif - - #ifdef CONFIG_X86_64 --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_MMU - /* Paravirtualized systems may not have PSE or PGE available */ - #define NEED_PSE 0 - #define NEED_PGE 0 ---- a/arch/x86/include/asm/smp.h -+++ b/arch/x86/include/asm/smp.h -@@ -66,7 +66,7 @@ struct smp_ops { - extern void set_cpu_sibling_map(int cpu); - - #ifdef CONFIG_SMP --#ifndef CONFIG_PARAVIRT -+#ifndef CONFIG_PARAVIRT_APIC - #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) - #endif - extern struct 
smp_ops smp_ops; ---- a/arch/x86/include/asm/system.h -+++ b/arch/x86/include/asm/system.h -@@ -304,13 +304,18 @@ static inline void native_wbinvd(void) - - #ifdef CONFIG_PARAVIRT - #include --#else --#define read_cr0() (native_read_cr0()) --#define write_cr0(x) (native_write_cr0(x)) -+#endif/* CONFIG_PARAVIRT */ -+ -+#ifndef CONFIG_PARAVIRT_MMU - #define read_cr2() (native_read_cr2()) - #define write_cr2(x) (native_write_cr2(x)) - #define read_cr3() (native_read_cr3()) - #define write_cr3(x) (native_write_cr3(x)) -+#endif /* CONFIG_PARAVIRT_MMU */ -+ -+#ifndef CONFIG_PARAVIRT_CPU -+#define read_cr0() (native_read_cr0()) -+#define write_cr0(x) (native_write_cr0(x)) - #define read_cr4() (native_read_cr4()) - #define read_cr4_safe() (native_read_cr4_safe()) - #define write_cr4(x) (native_write_cr4(x)) -@@ -324,7 +329,7 @@ static inline void native_wbinvd(void) - /* Clear the 'TS' bit */ - #define clts() (native_clts()) - --#endif/* CONFIG_PARAVIRT */ -+#endif /* CONFIG_PARAVIRT_CPU */ - - #define stts() write_cr0(read_cr0() | X86_CR0_TS) - ---- a/arch/x86/include/asm/tlbflush.h -+++ b/arch/x86/include/asm/tlbflush.h -@@ -7,7 +7,7 @@ - #include - #include - --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_MMU - #include - #else - #define __flush_tlb() __native_flush_tlb() -@@ -162,7 +162,7 @@ static inline void reset_lazy_tlbstate(v - - #endif /* SMP */ - --#ifndef CONFIG_PARAVIRT -+#ifndef CONFIG_PARAVIRT_MMU - #define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va) - #endif - ---- a/arch/x86/kernel/head_64.S -+++ b/arch/x86/kernel/head_64.S -@@ -20,7 +20,7 @@ - #include - #include - --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_MMU - #include - #include - #else ---- a/arch/x86/kernel/paravirt.c -+++ b/arch/x86/kernel/paravirt.c -@@ -155,12 +155,14 @@ unsigned paravirt_patch_default(u8 type, - else if (opfunc == _paravirt_ident_64) - ret = paravirt_patch_ident_64(insnbuf, len); - -+#ifdef CONFIG_PARAVIRT_CPU - else if (type == 
PARAVIRT_PATCH(pv_cpu_ops.iret) || - type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || - type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) || - type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64)) - /* If operation requires a jmp, then jmp */ - ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len); -+#endif - else - /* Otherwise call the function; assume target could - clobber any caller-save reg */ ---- a/arch/x86/kernel/tsc.c -+++ b/arch/x86/kernel/tsc.c -@@ -66,7 +66,7 @@ u64 native_sched_clock(void) - - /* We need to define a real function for sched_clock, to override the - weak default version */ --#ifdef CONFIG_PARAVIRT -+#ifdef CONFIG_PARAVIRT_TIME - unsigned long long sched_clock(void) - { - return paravirt_sched_clock(); ---- a/arch/x86/kernel/vsmp_64.c -+++ b/arch/x86/kernel/vsmp_64.c -@@ -22,7 +22,7 @@ - #include - #include - --#if defined CONFIG_PCI && defined CONFIG_PARAVIRT -+#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_IRQ - /* - * Interrupt control on vSMPowered systems: - * ~AC is a shadow of IF. 
If IF is 'on' AC should be 'off' ---- a/arch/x86/kernel/x8664_ksyms_64.c -+++ b/arch/x86/kernel/x8664_ksyms_64.c -@@ -54,6 +54,6 @@ EXPORT_SYMBOL(memcpy); - EXPORT_SYMBOL(__memcpy); - - EXPORT_SYMBOL(empty_zero_page); --#ifndef CONFIG_PARAVIRT -+#ifndef CONFIG_PARAVIRT_CPU - EXPORT_SYMBOL(native_load_gs_index); - #endif ---- a/arch/x86/xen/Kconfig -+++ b/arch/x86/xen/Kconfig -@@ -4,7 +4,7 @@ - - config XEN - bool "Xen guest support" -- select PARAVIRT -+ select PARAVIRT_ALL - select PARAVIRT_CLOCK - depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS) - depends on X86_CMPXCHG && X86_TSC diff --git a/patches.arch/kvm-split-the-KVM-pv-ops-support-by-feature b/patches.arch/kvm-split-the-KVM-pv-ops-support-by-feature deleted file mode 100644 index 27ce090..0000000 --- a/patches.arch/kvm-split-the-KVM-pv-ops-support-by-feature +++ /dev/null @@ -1,125 +0,0 @@ -From: Alexander Graf -Date: Wed, 18 Nov 2009 00:45:10 +0100 -Subject: Split the KVM pv-ops support by feature -References: bnc#556135, FATE#306453 -Patch-Mainline: Submitted to virtualization list - -Currently selecting KVM guest support enabled multiple features at once that -not everyone necessarily wants to have, namely: - - - PV MMU - - zero io delay - - apic detection workaround - -Let's split them off so we don't drag in the full pv-ops framework just to -detect we're running on KVM. That gives us more chances to tweak performance! - -Signed-off-by: Alexander Graf ---- - arch/x86/Kconfig | 29 ++++++++++++++++++++++++++++- - arch/x86/kernel/kvm.c | 22 +++++++++++++++------- - 2 files changed, 43 insertions(+), 8 deletions(-) - ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -543,11 +543,38 @@ config KVM_CLOCK - - config KVM_GUEST - bool "KVM Guest support" -- select PARAVIRT_ALL -+ select PARAVIRT - ---help--- - This option enables various optimizations for running under the KVM - hypervisor. 
- -+config KVM_IODELAY -+ bool "KVM IO-delay support" -+ depends on KVM_GUEST -+ select PARAVIRT_CPU -+ ---help--- -+ Usually we wait for PIO access to complete. When inside KVM there's -+ no need to do that, as we know that we're not going through a bus, -+ but process PIO requests instantly. -+ -+ This option disables PIO waits, but drags in CPU-bound pv-ops. Thus -+ you will probably get more speed loss than speedup using this option. -+ -+ If in doubt, say N. -+ -+config KVM_MMU -+ bool "KVM PV MMU support" -+ depends on KVM_GUEST -+ select PARAVIRT_MMU -+ ---help--- -+ This option enables the paravirtualized MMU for KVM. In most cases -+ it's pretty useless and shouldn't be used. -+ -+ It will only cost you performance, because it drags in pv-ops for -+ memory management. -+ -+ If in doubt, say N. -+ - source "arch/x86/lguest/Kconfig" - - config PARAVIRT ---- a/arch/x86/kernel/kvm.c -+++ b/arch/x86/kernel/kvm.c -@@ -29,6 +29,16 @@ - #include - #include - -+#ifdef CONFIG_KVM_IODELAY -+/* -+ * No need for any "IO delay" on KVM -+ */ -+static void kvm_io_delay(void) -+{ -+} -+#endif /* CONFIG_KVM_IODELAY */ -+ -+#ifdef CONFIG_KVM_MMU - #define MMU_QUEUE_SIZE 1024 - - static int kvmapf = 1; -@@ -43,13 +53,6 @@ static struct kvm_para_state *kvm_para_s - return &per_cpu(para_state, raw_smp_processor_id()); - } - --/* -- * No need for any "IO delay" on KVM -- */ --static void kvm_io_delay(void) --{ --} -- - #define KVM_TASK_SLEEP_HASHBITS 8 - #define KVM_TASK_SLEEP_HASHSIZE (1< -Subject: mm: Avoid putting a bad page back on the LRU v8 -References: 415829 -Acked-by: schwab@suse.de -Patch-mainline: not yet - -Prevent a page with a physical memory error from being placed back -on the LRU. A new page flag (PG_memerror) is added if -CONFIG_PAGEFLAGS_EXTENDED is defined. - -Version 8 change: Removed hot path check for pages with memory -errors on the free list. 
- -Signed-off-by: Russ Anderson -Reviewed-by: Christoph Lameter - ---- - include/linux/page-flags.h | 16 +++++++++++++++- - mm/migrate.c | 33 +++++++++++++++++++++++++++++++++ - mm/vmscan.c | 1 + - 3 files changed, 49 insertions(+), 1 deletion(-) - ---- a/include/linux/page-flags.h -+++ b/include/linux/page-flags.h -@@ -88,6 +88,7 @@ enum pageflags { - PG_private_2, /* If pagecache, has fs aux data */ - PG_writeback, /* Page is under writeback */ - #ifdef CONFIG_PAGEFLAGS_EXTENDED -+ PG_memerror, /* Page has a physical memory error */ - PG_head, /* A head page */ - PG_tail, /* A tail page */ - #else -@@ -167,14 +168,21 @@ static inline int TestClearPage##uname(s - static inline int __TestClearPage##uname(struct page *page) \ - { return __test_and_clear_bit(PG_##lname, &page->flags); } - -+#define PAGEFLAGMASK(uname, lname) \ -+static inline int PAGEMASK_##uname(void) \ -+ { return (1 << PG_##lname); } -+ - #define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ -- SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname) -+ SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname) \ -+ PAGEFLAGMASK(uname, lname) - - #define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ - __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname) - - #define PAGEFLAG_FALSE(uname) \ - static inline int Page##uname(struct page *page) \ -+ { return 0; } \ -+static inline int PAGEMASK_##uname(void) \ - { return 0; } - - #define TESTSCFLAG(uname, lname) \ -@@ -391,6 +399,12 @@ static inline void __ClearPageTail(struc - - #endif /* !PAGEFLAGS_EXTENDED */ - -+#ifdef CONFIG_PAGEFLAGS_EXTENDED -+PAGEFLAG(MemError, memerror) -+#else -+PAGEFLAG_FALSE(MemError) -+#endif -+ - #ifdef CONFIG_TRANSPARENT_HUGEPAGE - /* - * PageHuge() only returns true for hugetlbfs pages, but not for ---- a/mm/migrate.c -+++ b/mm/migrate.c -@@ -63,6 +63,7 @@ int migrate_prep_local(void) - - return 0; - } -+EXPORT_SYMBOL(migrate_prep); - - /* - * Add isolated pages on the list back to the LRU under page lock -@@ 
-80,6 +81,7 @@ void putback_lru_pages(struct list_head - putback_lru_page(page); - } - } -+EXPORT_SYMBOL(putback_lru_pages); - - /* - * Restore a potential migration pte to a working pte entry -@@ -701,6 +703,25 @@ unlock: - * restored. - */ - list_del(&page->lru); -+ if (PageMemError(page)) { -+ if (rc == 0) -+ /* -+ * A page with a memory error that has -+ * been migrated will not be moved to -+ * the LRU. -+ */ -+ goto move_newpage; -+ else -+ /* -+ * The page failed to migrate and will not -+ * be added to the bad page list. Clearing -+ * the error bit will allow another attempt -+ * to migrate if it gets another correctable -+ * error. -+ */ -+ ClearPageMemError(page); -+ } -+ - dec_zone_page_state(page, NR_ISOLATED_ANON + - page_is_file_cache(page)); - putback_lru_page(page); -@@ -775,6 +796,17 @@ int migrate_pages(struct list_head *from - } - } - } -+ -+ if (rc != 0) -+ list_for_each_entry_safe(page, page2, from, lru) -+ if (PageMemError(page)) -+ /* -+ * The page failed to migrate. Clearing -+ * the error bit will allow another attempt -+ * to migrate if it gets another correctable -+ * error. -+ */ -+ ClearPageMemError(page); - rc = 0; - out: - if (!swapwrite) -@@ -787,6 +819,7 @@ out: - - return nr_failed + retry; - } -+EXPORT_SYMBOL(migrate_pages); - - #ifdef CONFIG_NUMA - /* ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -1127,6 +1127,7 @@ int isolate_lru_page(struct page *page) - } - return ret; - } -+EXPORT_SYMBOL(isolate_lru_page); - - /* - * Are there way too many processes in the direct reclaim path already? 
diff --git a/patches.arch/perf_timechart_fix_zero_timestamps.patch b/patches.arch/perf_timechart_fix_zero_timestamps.patch deleted file mode 100644 index 75bb39c..0000000 --- a/patches.arch/perf_timechart_fix_zero_timestamps.patch +++ /dev/null @@ -1,32 +0,0 @@ -From: Thomas Renninger -Subject: Fix huge and wronge C-state drawings due to uninitialized start/end timestamps -Patch-Mainline: not yet -References: none - -Signed-off-by: Thomas Renninger - ---- - tools/perf/builtin-timechart.c | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -Index: linux-2.6.37-master/tools/perf/builtin-timechart.c -=================================================================== ---- linux-2.6.37-master.orig/tools/perf/builtin-timechart.c -+++ linux-2.6.37-master/tools/perf/builtin-timechart.c -@@ -654,8 +654,15 @@ static void draw_c_p_states(void) - * two pass drawing so that the P state bars are on top of the C state blocks - */ - while (pwr) { -- if (pwr->type == CSTATE) -+ if (pwr->type == CSTATE) { -+ /* If the first event is an _end event, start timestamp is zero -+ -> ignore these */ -+ if (pwr->start_time == 0 || pwr->end_time == 0) { -+ pwr = pwr->next; -+ continue; -+ } - svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state); -+ } - pwr = pwr->next; - } - diff --git a/patches.arch/ppc-ipic-suspend-without-83xx-fix b/patches.arch/ppc-ipic-suspend-without-83xx-fix deleted file mode 100644 index 611e6ee..0000000 --- a/patches.arch/ppc-ipic-suspend-without-83xx-fix +++ /dev/null @@ -1,33 +0,0 @@ -From: Takashi Iwai -Subject: [PATCH] Fix build_error without CONFIG_PPC_83xx -Patch-mainline: -References: - -fsl_deep_sleep() is defined only with CONFIG_PPC_83xx although -CONFIG_IPIC is set for CONFIG_PPC_MPC512x, too. 
- -Signed-off-by: Takashi Iwai - ---- ---- - arch/powerpc/sysdev/ipic.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/arch/powerpc/sysdev/ipic.c -+++ b/arch/powerpc/sysdev/ipic.c -@@ -921,6 +921,7 @@ static int ipic_suspend(struct sys_devic - ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR); - ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR); - -+#ifdef CONFIG_PPC_83xx - if (fsl_deep_sleep()) { - /* In deep sleep, make sure there can be no - * pending interrupts, as this can cause -@@ -931,6 +932,7 @@ static int ipic_suspend(struct sys_devic - ipic_write(ipic->regs, IPIC_SEMSR, 0); - ipic_write(ipic->regs, IPIC_SERMR, 0); - } -+#endif - - return 0; - } diff --git a/patches.arch/ppc-pegasos-console-autodetection.patch b/patches.arch/ppc-pegasos-console-autodetection.patch deleted file mode 100644 index 82da310..0000000 --- a/patches.arch/ppc-pegasos-console-autodetection.patch +++ /dev/null @@ -1,19 +0,0 @@ -From: olh@suse.de -Subject: force speed to fix autodetection on pegasos2 -Patch-mainline: never - ---- - arch/powerpc/platforms/chrp/setup.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/powerpc/platforms/chrp/setup.c -+++ b/arch/powerpc/platforms/chrp/setup.c -@@ -293,7 +293,7 @@ static void chrp_init_early(void) - if (!property) - goto out_put; - if (!strcmp(property, "failsafe") || !strcmp(property, "serial")) -- add_preferred_console("ttyS", 0, NULL); -+ add_preferred_console("ttyS", 0, "115200"); - out_put: - of_node_put(node); - } diff --git a/patches.arch/ppc-prom-nodisplay.patch b/patches.arch/ppc-prom-nodisplay.patch deleted file mode 100644 index 18916f6..0000000 --- a/patches.arch/ppc-prom-nodisplay.patch +++ /dev/null @@ -1,77 +0,0 @@ -From: Olaf Hering -Subject: new prom=nodisplay option to avoid crash in firmware on B50 -Patch-mainline: not yet - -add prom=nodisplay -avoid crash in firmware on IBM B50 when OF stdout is on serial. 
- - 0 > boot scsi/sd@4:1,yaboot | -yaboot starting: loaded at 00200000 00222530 (0/0/00c1a078; sp: 00efffd0) -brokenfirmware did not claim executable memory, fixed it myself -Config file 'yaboot.cnf' read, 213 bytes - -Welcome to yaboot version 10.1.22-r945.SuSE -booted from '/pci@80000000/scsi@10/sd@4:1,yaboot' -Enter "help" to get some basic usage information -boot: -* linux -boot: linux 3 -Please wait, loading kernel... -Allocated 00600000 bytes for executable @ 02000000 - Elf32 kernel loaded... -Loading ramdisk... -ramdisk loaded 0030e057 @ 04100000 -OF stdout device is: /pci@80000000/isa@b/serial@i3f8 -command line: root=/dev/system/root xmon=on sysrq=1 quiet panic=12 3 -memory layout at init: - memory_limit : 00000000 (16 MB aligned) - alloc_bottom : 0440f000 - alloc_top : 30000000 - alloc_top_hi : 40000000 - rmo_top : 30000000 - ram_top : 40000000 -Looking for displays -found display : /pci@80000000/display@16, opening ... -Unexpected Firmware Error: -DEFAULT CATCH!, code=fff00300 at %SRR0: 00c18ccc %SRR1: 00003030 - ok - 0 > reset-all - - ---- - arch/powerpc/kernel/prom_init.c | 10 ++++++++++ - 1 file changed, 10 insertions(+) - ---- a/arch/powerpc/kernel/prom_init.c -+++ b/arch/powerpc/kernel/prom_init.c -@@ -169,6 +169,7 @@ static unsigned long __initdata dt_strin - - static unsigned long __initdata prom_initrd_start, prom_initrd_end; - -+static int __initdata prom_no_display; - #ifdef CONFIG_PPC64 - static int __initdata prom_iommu_force_on; - static int __initdata prom_iommu_off; -@@ -596,6 +597,14 @@ static void __init early_cmdline_parse(v - #endif /* CONFIG_CMDLINE */ - prom_printf("command line: %s\n", RELOC(prom_cmd_line)); - -+ opt = strstr(RELOC(prom_cmd_line), RELOC("prom=")); -+ if (opt) { -+ opt += 5; -+ while (*opt && *opt == ' ') -+ opt++; -+ if (!strncmp(opt, RELOC("nodisplay"), 9)) -+ RELOC(prom_no_display) = 1; -+ } - #ifdef CONFIG_PPC64 - opt = strstr(RELOC(prom_cmd_line), RELOC("iommu=")); - if (opt) { -@@ -2570,6 +2579,7 @@ unsigned 
long __init prom_init(unsigned - /* - * Initialize display devices - */ -+ if (RELOC(prom_no_display) == 0) - prom_check_displays(); - - #ifdef CONFIG_PPC64 diff --git a/patches.arch/ppc64-xmon-dmesg-printing.patch b/patches.arch/ppc64-xmon-dmesg-printing.patch deleted file mode 100644 index 7cb2e6d..0000000 --- a/patches.arch/ppc64-xmon-dmesg-printing.patch +++ /dev/null @@ -1,119 +0,0 @@ -Subject: [PATCH] add syslog printing to xmon debugger. -From: Linas Vepstas -Patch-mainline: Not yet - - -This patch 'dmesg'/printk log buffer printing to xmon. I find this -useful because crashes are almost always preceeded by interesting -printk's. This patch is simple & straightforward, except for one -possibly controversial aspect: it embeds a small snippet in -kernel/printk.c to return the location of the syslog. This is -needed because kallsyms and even CONFIG_KALLSYMS_ALL is not enough -to reveal the location of log_buf. This code is about 90% -cut-n-paste of earlier code from Keith Owens. - -Signed-off-by: Olaf Hering ---- - - arch/powerpc/xmon/xmon.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++ - kernel/printk.c | 2 - - 2 files changed, 58 insertions(+), 1 deletion(-) - ---- a/arch/powerpc/xmon/xmon.c -+++ b/arch/powerpc/xmon/xmon.c -@@ -138,6 +138,7 @@ static struct bpt *in_breakpoint_table(u - static int do_step(struct pt_regs *); - static void bpt_cmds(void); - static void cacheflush(void); -+static void xmon_show_dmesg(void); - static int cpu_cmd(void); - static void csum(void); - static void bootcmds(void); -@@ -197,6 +198,7 @@ Commands:\n\ - #endif - "\ - C checksum\n\ -+ D show dmesg (printk) buffer\n\ - d dump bytes\n\ - di dump instructions\n\ - df dump float values\n\ -@@ -831,6 +833,9 @@ cmds(struct pt_regs *excp) - case 'd': - dump(); - break; -+ case 'D': -+ xmon_show_dmesg(); -+ break; - case 'l': - symbol_lookup(); - break; -@@ -2607,6 +2612,58 @@ static void xmon_print_symbol(unsigned l - printf("%s", after); - } - -+extern void 
kdb_syslog_data(char *syslog_data[]); -+#define SYSLOG_WRAP(p) if (p < syslog_data[0]) p = syslog_data[1]-1; \ -+ else if (p >= syslog_data[1]) p = syslog_data[0]; -+ -+static void xmon_show_dmesg(void) -+{ -+ char *syslog_data[4], *start, *end, c; -+ int logsize; -+ -+ /* syslog_data[0,1] physical start, end+1. -+ * syslog_data[2,3] logical start, end+1. -+ */ -+ kdb_syslog_data(syslog_data); -+ if (syslog_data[2] == syslog_data[3]) -+ return; -+ logsize = syslog_data[1] - syslog_data[0]; -+ start = syslog_data[0] + (syslog_data[2] - syslog_data[0]) % logsize; -+ end = syslog_data[0] + (syslog_data[3] - syslog_data[0]) % logsize; -+ -+ /* Do a line at a time (max 200 chars) to reduce overhead */ -+ c = '\0'; -+ while(1) { -+ char *p; -+ int chars = 0; -+ if (!*start) { -+ while (!*start) { -+ ++start; -+ SYSLOG_WRAP(start); -+ if (start == end) -+ break; -+ } -+ if (start == end) -+ break; -+ } -+ p = start; -+ while (*start && chars < 200) { -+ c = *start; -+ ++chars; -+ ++start; -+ SYSLOG_WRAP(start); -+ if (start == end || c == '\n') -+ break; -+ } -+ if (chars) -+ printf("%.*s", chars, p); -+ if (start == end) -+ break; -+ } -+ if (c != '\n') -+ printf("\n"); -+} -+ - #ifdef CONFIG_PPC_BOOK3S_64 - static void dump_slb(void) - { ---- a/kernel/printk.c -+++ b/kernel/printk.c -@@ -416,7 +416,7 @@ SYSCALL_DEFINE3(syslog, int, type, char - return do_syslog(type, buf, len, SYSLOG_FROM_CALL); - } - --#ifdef CONFIG_KGDB_KDB -+#if defined(CONFIG_KGDB_KDB) || defined(CONFIG_DEBUG_KERNEL) - /* kdb dmesg command needs access to the syslog buffer. do_syslog() - * uses locks so it cannot be used during debugging. Just tell kdb - * where the start and end of the physical and logical logs are. 
This diff --git a/patches.arch/s390-add-FREE_PTE_NR b/patches.arch/s390-add-FREE_PTE_NR deleted file mode 100644 index e9f66e2..0000000 --- a/patches.arch/s390-add-FREE_PTE_NR +++ /dev/null @@ -1,43 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH] s390: Define FREE_PTE_NR -Patch-mainline: Never, unless FREE_PTE_NR is used in generic code - - Commit ba8a9229ab9e80278c28ad68b15053f65b2b0a7c from - Martin Schwidefsky removed the - #include from asm-s390/tlb.h when he defined the - s390-specific TLB operations. - - FREE_PTR_NR is generally an internal-only value, but our unmap_vmas-lat - patch uses it to make smarter decisions about dumping PTEs in chunks. - - This patch restores the generic value in asm-s390/tlb.h. Since it's only - used for an optimization, this should be safe. - -Signed-off-by: Jeff Mahoney - ---- - arch/s390/include/asm/tlb.h | 13 +++++++++++++ - 1 file changed, 13 insertions(+) - ---- a/arch/s390/include/asm/tlb.h -+++ b/arch/s390/include/asm/tlb.h -@@ -34,6 +34,19 @@ - #define TLB_NR_PTRS 508 - #endif - -+/* Lifted from asm-generic/tlb.h; Is used by patches.suse/unmap_vmas-lat */ -+/* -+ * For UP we don't need to worry about TLB flush -+ * and page free order so much.. -+ */ -+#ifdef CONFIG_SMP -+ #define FREE_PTE_NR 506 -+ #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) -+#else -+ #define FREE_PTE_NR 1 -+ #define tlb_fast_mode(tlb) 1 -+#endif -+ - struct mmu_gather { - struct mm_struct *mm; - unsigned int fullmm; diff --git a/patches.arch/s390-message-catalog-fix.diff b/patches.arch/s390-message-catalog-fix.diff deleted file mode 100644 index 2a62770..0000000 --- a/patches.arch/s390-message-catalog-fix.diff +++ /dev/null @@ -1,23 +0,0 @@ -From: Jiri Slaby -Subject: fix build on s390 as of 2.6.36-rc4 -Patch-mainline: never - -This fixes patches.arch/s390-message-catalog.diff build. 
- -Signed-off-by: Jiri Slaby - ---- - include/linux/device.h | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/include/linux/device.h -+++ b/include/linux/device.h -@@ -660,6 +660,8 @@ int printk_dev_hash(const char *, const - - #endif - -+#define dev_printk(level, dev, format, arg...) \ -+ dev_printk_hash(level , dev, format, ## arg) - #define dev_emerg(dev, format, arg...) \ - dev_printk_hash(KERN_EMERG , dev , format , ## arg) - #define dev_alert(dev, format, arg...) \ diff --git a/patches.arch/s390-message-catalog.diff b/patches.arch/s390-message-catalog.diff deleted file mode 100644 index fcfcbd6..0000000 --- a/patches.arch/s390-message-catalog.diff +++ /dev/null @@ -1,8630 +0,0 @@ -From: Gerald Schaefer -Subject: Kernel message catalog. -Patch-mainline: Probably never -References: bnc#549193,FATE#306999,LTC#57210 -Patch-mainline: not yet - -Description: Add support for automatic message tags to the printk macro - families dev_xyz and pr_xyz. The message tag consists of a - component name and a 24 bit hash of the message text. For - each message that is documented in the included kernel message - catalog a man page can be created with a script (which is - included in the patch). The generated man pages contain - explanatory text that is intended to help understand the - messages. - - Note that only s390 specific messages are prepared - appropriately and included in the generated message catalog. 
- - This patch is optional as it is very unlikely to be accepted - in upstream kernel, but is recommended for all distributions - which are built based on the 'Development stream' - -Acked-by: John Jolly ---- - - Documentation/kmsg/s390/aes_s390 | 30 - Documentation/kmsg/s390/af_iucv | 33 - Documentation/kmsg/s390/ap | 47 - Documentation/kmsg/s390/appldata | 88 + - Documentation/kmsg/s390/cio | 145 ++ - Documentation/kmsg/s390/claw | 731 +++++++++++++ - Documentation/kmsg/s390/cpcmd | 17 - Documentation/kmsg/s390/cpu | 69 + - Documentation/kmsg/s390/ctcm | 199 +++ - Documentation/kmsg/s390/dasd | 466 ++++++++ - Documentation/kmsg/s390/dasd-diag | 118 ++ - Documentation/kmsg/s390/dasd-eckd | 1901 ++++++++++++++++++++++++++++++++++++ - Documentation/kmsg/s390/dasd-fba | 30 - Documentation/kmsg/s390/dcssblk | 192 +++ - Documentation/kmsg/s390/extmem | 290 +++++ - Documentation/kmsg/s390/hvc_iucv | 122 ++ - Documentation/kmsg/s390/hypfs | 56 + - Documentation/kmsg/s390/iucv | 33 - Documentation/kmsg/s390/lcs | 161 +++ - Documentation/kmsg/s390/monreader | 127 ++ - Documentation/kmsg/s390/monwriter | 16 - Documentation/kmsg/s390/netiucv | 139 ++ - Documentation/kmsg/s390/qeth | 606 +++++++++++ - Documentation/kmsg/s390/s390dbf | 83 + - Documentation/kmsg/s390/sclp_cmd | 16 - Documentation/kmsg/s390/sclp_config | 3 - Documentation/kmsg/s390/sclp_cpi | 2 - Documentation/kmsg/s390/sclp_sdias | 4 - Documentation/kmsg/s390/setup | 181 +++ - Documentation/kmsg/s390/tape | 104 + - Documentation/kmsg/s390/tape_34xx | 418 +++++++ - Documentation/kmsg/s390/tape_3590 | 184 +++ - Documentation/kmsg/s390/time | 36 - Documentation/kmsg/s390/vmcp | 13 - Documentation/kmsg/s390/vmlogrdr | 18 - Documentation/kmsg/s390/vmur | 47 - Documentation/kmsg/s390/vmwatchdog | 26 - Documentation/kmsg/s390/xpram | 73 + - Documentation/kmsg/s390/zdump | 12 - Documentation/kmsg/s390/zfcp | 865 ++++++++++++++++ - Makefile | 16 - arch/s390/Kconfig | 8 - include/linux/device.h | 34 - 
include/linux/kernel.h | 35 - kernel/printk.c | 45 - scripts/Makefile.build | 14 - scripts/kmsg-doc | 479 +++++++++ - 47 files changed, 8323 insertions(+), 9 deletions(-) - ---- /dev/null -+++ b/Documentation/kmsg/s390/aes_s390 -@@ -0,0 +1,30 @@ -+/*? -+ * Text: "Allocating AES fallback algorithm %s failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: algorithm name -+ * Description: -+ * The advanced encryption standard (AES) algorithm includes three modes with -+ * 128-bit, 192-bit, and 256-bit keys. Your hardware system only provides -+ * hardware acceleration for the 128-bit mode. The aes_s390 module failed to -+ * allocate a software fallback for the AES modes that are not supported by the -+ * hardware. A possible reason for this problem is that the aes_generic module -+ * that provides the fallback algorithms is not available. -+ * User action: -+ * Use the 128-bit mode only or ensure that the aes_generic module is available -+ * and loaded and reload the aes_s390 module. -+ */ -+ -+/*? -+ * Text: "AES hardware acceleration is only available for 128-bit keys\n" -+ * Severity: Informational -+ * Description: -+ * The advanced encryption standard (AES) algorithm includes three modes with -+ * 128-bit, 192-bit, and 256-bit keys. Your hardware system only provides -+ * hardware acceleration for the 128-bit key mode. The aes_s390 module -+ * will use the less performant software fallback algorithm for the 192-bit -+ * and 256-bit key modes. -+ * User action: -+ * None. -+ */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/af_iucv -@@ -0,0 +1,33 @@ -+/*? -+ * Text: "Application %s on z/VM guest %s exceeds message limit\n" -+ * Severity: Error -+ * Parameter: -+ * @1: application name -+ * @2: z/VM user ID -+ * Description: -+ * Messages or packets destined for the application have accumulated and -+ * reached the maximum value. The default for the message limit is 65535. 
-+ * You can specify a different limit as the value for MSGLIMIT within -+ * the IUCV statement of the z/VM virtual machine on which the application -+ * runs. -+ * User action: -+ * Ensure that you do not send data faster than the application retrieves -+ * them. Ensure that the message limit on the z/VM guest virtual machine -+ * on which the application runs is high enough. -+ */ -+ -+/*? -+ * Text: "The af_iucv module cannot be loaded without z/VM\n" -+ * Severity: Error -+ * Description: -+ * The AF_IUCV protocol connects socket applications running in Linux -+ * kernels on different z/VM virtual machines, or it connects a Linux -+ * application to another sockets application running in a z/VM virtual -+ * machine. On Linux instances that run in environments other than the -+ * z/VM hypervisor, the AF_IUCV protocol does not provide any useful -+ * function and the corresponding af_iucv module cannot be loaded. -+ * User action: -+ * Load the af_iucv module only on Linux instances that run as guest -+ * operating systems of the z/VM hypervisor. If the module has been -+ * compiled into the kernel, ignore this message. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/ap -@@ -0,0 +1,47 @@ -+/*? -+ * Text: "%d is not a valid cryptographic domain\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: AP domain index -+ * Description: -+ * The cryptographic domain specified for the 'domain=' module or kernel -+ * parameter must be an integer in the range 0 to 15. -+ * User action: -+ * Reload the cryptographic device driver with a correct module parameter. -+ * If the device driver has been compiled into the kernel, correct the value -+ * in the kernel parameter line and reboot Linux. -+ */ -+ -+/*? -+ * Text: "The hardware system does not support AP instructions\n" -+ * Severity: Warning -+ * Description: -+ * The ap module addresses AP adapters through AP instructions. The hardware -+ * system on which the Linux instance runs does not support AP instructions. 
-+ * The ap module cannot detect any AP adapters. -+ * User action: -+ * Load the ap module only if your Linux instance runs on hardware that -+ * supports AP instructions. If the ap module has been compiled into the kernel, -+ * ignore this message. -+ */ -+ -+/*? -+ * Text: "Registering adapter interrupts for AP %d failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: AP device ID -+ * Description: -+ * The hardware system supports AP adapter interrupts but failed to enable -+ * an adapter for interrupts. Possible causes for this error are: -+ * i) The AP adapter firmware does not support AP interrupts. -+ * ii) An AP adapter firmware update to a firmware level that supports AP -+ * adapter interrupts failed. -+ * iii) The AP adapter firmware has been successfully updated to a level that -+ * supports AP interrupts but the new firmware has not been activated. -+ * User action: -+ * Ensure that the firmware on your AP adapters support AP interrupts and that -+ * any firmware updates have completed successfully. If necessary, deconfigure -+ * your cryptographic adapters and reconfigure them to ensure that any firmware -+ * updates become active, then reload the ap module. If the ap module has been -+ * compiled into the kernel, reboot Linux. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/appldata -@@ -0,0 +1,88 @@ -+/*? -+ * Text: "Starting the data collection for %s failed with rc=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: appldata module -+ * @2: return code -+ * Description: -+ * The specified data collection module used the z/VM diagnose call -+ * DIAG 0xDC to start writing data. z/VM returned an error and the data -+ * collection could not start. If the return code is 5, your z/VM guest -+ * virtual machine is not authorized to write data records. -+ * User action: -+ * If the return code is 5, ensure that your z/VM guest virtual machine's -+ * entry in the z/VM directory includes the OPTION APPLMON statement. 
-+ * For other return codes see the section about DIAGNOSE Code X'DC' -+ * in "z/VM CP Programming Services". -+ */ -+ -+/*? -+ * Text: "Stopping the data collection for %s failed with rc=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: appldata module -+ * @2: return code -+ * Description: -+ * The specified data collection module used the z/VM diagnose call DIAG 0xDC -+ * to stop writing data. z/VM returned an error and the data collection -+ * continues. -+ * User action: -+ * See the section about DIAGNOSE Code X'DC' in "z/VM CP Programming Services". -+ */ -+ -+/*? -+ * Text: "Starting a new OS data collection failed with rc=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: return code -+ * Description: -+ * After a CPU hotplug event, the record size for the running operating -+ * system data collection is no longer correct. The appldata_os module tried -+ * to start a new data collection with the correct record size but received -+ * an error from the z/VM diagnose call DIAG 0xDC. Any data collected with -+ * the current record size might be faulty. -+ * User action: -+ * Start a new data collection with the cappldata_os module. For information -+ * about starting data collections see "Device Drivers, Features, and -+ * Commands". For information about the return codes see the section about -+ * DIAGNOSE Code X'DC' in "z/VM CP Programming Services". -+ */ -+ -+/*? -+ * Text: "Stopping a faulty OS data collection failed with rc=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: return code -+ * Description: -+ * After a CPU hotplug event, the record size for the running operating -+ * system data collection is no longer correct. The appldata_os module tried -+ * to stop the faulty data collection but received an error from the z/VM -+ * diagnose call DIAG 0xDC. Any data collected with the current record size -+ * might be faulty. -+ * User action: -+ * Try to restart appldata_os monitoring. 
For information about stopping -+ * and starting data collections see "Device Drivers, Features, and -+ * Commands". For information about the return codes see the section about -+ * DIAGNOSE Code X'DC' in "z/VM CP Programming Services". -+ */ -+ -+/*? -+ * Text: "Maximum OS record size %i exceeds the maximum record size %i\n" -+ * Severity: Error -+ * Parameter: -+ * @1: no of bytes -+ * @2: no of bytes -+ * Description: -+ * The OS record size grows with the number of CPUs and is adjusted by the -+ * appldata_os module in response to CPU hotplug events. For more than 110 -+ * CPUs the record size would exceed the maximum record size of 4024 bytes -+ * that is supported by the z/VM hypervisor. To prevent the maximum supported -+ * record size from being exceeded while data collection is in progress, -+ * you cannot load the appldata_os module on Linux instances that are -+ * configured for a maximum of more than 110 CPUs. -+ * User action: -+ * If you do not want to collect operating system data, you can ignore this -+ * message. If you want to collect operating system data, reconfigure your -+ * Linux instance to support less than 110 CPUs. -+ */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/cio -@@ -0,0 +1,145 @@ -+/*? -+ * Text: "%s is not a valid device for the cio_ignore kernel parameter\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: device bus-ID -+ * Description: -+ * The device specification for the cio_ignore kernel parameter is -+ * syntactically incorrect or specifies an unknown device. This device is not -+ * excluded from being sensed and analyzed. -+ * User action: -+ * Correct your device specification in the kernel parameter line to have the -+ * device excluded when you next reboot Linux. You can write the correct -+ * device specification to /proc/cio_ignore to add the device to the list of -+ * devices to be excluded. 
This does not immediately make the device -+ * inaccessible but the device is ignored if it disappears and later reappears. -+ */ -+ -+/*? -+ * Text: "0.%x.%04x to 0.%x.%04x is not a valid range for cio_ignore\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: from subchannel set ID -+ * @2: from device number -+ * @3: to subchannel set ID -+ * @4: to device number -+ * Description: -+ * The device range specified for the cio_ignore kernel parameter is -+ * syntactically incorrect. No devices specified with this range are -+ * excluded from being sensed and analyzed. -+ * User action: -+ * Correct your range specification in the kernel parameter line to have the -+ * range of devices excluded when you next reboot Linux. You can write the -+ * correct range specification to /proc/cio_ignore to add the range of devices -+ * to the list of devices to be excluded. This does not immediately make the -+ * devices in the range inaccessible but any of these devices are ignored if -+ * they disappear and later reappear. -+ */ -+ -+/*? -+ * Text: "Processing %s for channel path %x.%02x\n" -+ * Severity: Notice -+ * Parameter: -+ * @1: configuration change -+ * @2: channel subsystem ID -+ * @3: CHPID -+ * Description: -+ * A configuration change is in progress for the given channel path. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "No CCW console was found\n" -+ * Severity: Warning -+ * Description: -+ * Linux did not find the expected CCW console and tries to use an alternative -+ * console. A possible reason why the console was not found is that the console -+ * has been specified in the cio_ignore list. -+ * User action: -+ * None, if an appropriate alternative console has been found, and you want -+ * to use this alternative console. If you want to use the CCW console, ensure -+ * that is not specified in the cio_ignore list, explicitly specify the console -+ * with the 'condev=' kernel parameter, and reboot Linux. -+ */ -+ -+/*? 
-+ * Text: "Channel measurement facility initialized using format %s (mode %s)\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: format -+ * @2: mode -+ * Description: -+ * The channel measurement facility has been initialized successfully. -+ * Format 'extended' should be used for z990 and later mainframe systems. -+ * Format 'basic' is intended for earlier mainframes. Mode 'autodetected' means -+ * that the format has been set automatically. Mode 'parameter' means that the -+ * format has been set according to the 'format=' kernel parameter. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "The CSS device driver initialization failed with errno=%d\n" -+ * Severity: Alert -+ * Parameter: -+ * @1: Return code -+ * Description: -+ * The channel subsystem bus could not be established. -+ * User action: -+ * See the errno man page to find out what caused the problem. -+ */ -+ /*? Text: "%s: Got subchannel machine check but no sch_event handler provided.\n" */ -+ -+/*? -+ * Text: "%s: Setting the device online failed because it is boxed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: Device bus-ID -+ * Description: -+ * Initialization of a device did not complete because it did not respond in -+ * time or it was reserved by another operating system. -+ * User action: -+ * Make sure that the device is working correctly, then try again to set it -+ * online. For devices that support the reserve/release mechanism (for example -+ * DASDs), you can try to override the reservation of the other system by -+ * writing 'force' to the 'online' sysfs attribute of the affected device. -+ */ -+ -+/*? -+ * Text: "%s: Setting the device online failed because it is not operational\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: Device bus-ID -+ * Description: -+ * Initialization of a device did not complete because it is not present or -+ * not operational. 
-+ * User action: -+ * Make sure that the device is present and working correctly, then try again -+ * to set it online. -+ */ -+ -+/*? -+ * Text: "%s: The device stopped operating while being set offline\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: Device bus-ID -+ * Description: -+ * While the device was set offline, it was not present or not operational. -+ * The device is now inactive, but setting it online again might fail. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: The device entered boxed state while being set offline\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: Device bus-ID -+ * Description: -+ * While the device was set offline, it did not respond in time or it was -+ * reserved by another operating system. The device is now inactive, but -+ * setting it online again might fail. -+ * User action: -+ * None. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/claw -@@ -0,0 +1,731 @@ -+/*? -+ * Text: "%s: Creating the /proc files for a new CLAW device failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the failed CLAW device -+ * Description: -+ * For each Common Link Access to Workstation (CLAW) device the CLAW device -+ * driver maintains files in the proc file system. The CLAW device driver -+ * failed to create a new CLAW device because it could not create these /proc -+ * files for the new device. You cannot create CLAW devices for Linux kernels -+ * that do not include a proc file system. -+ * User action: -+ * Ensure that your Linux kernel provides a proc file system. Reboot Linux. -+ * If your kernel provides a proc file system and the problem persists, contact -+ * your support organization. -+ */ -+ -+/*? 
-+ * Text: "%s: An uninitialized CLAW device received an IRQ, c-%02x d-%02x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: subchannel status -+ * @3: device status -+ * Description: -+ * A Common Link Access to Workstation (CLAW) device was not initialized when -+ * it received a channel interrupt (IRQ). The IRQ is ignored. This might be a -+ * temporary condition while the device comes online or is taken offline. -+ * User action: -+ * If this problem occurs frequently, use the status information from the -+ * message and the channel and device traces to analyze the problem. See -+ * "Principles of Operation" for details about of the status information. -+ */ -+ -+/*? -+ * Text: "%s: The device is not a CLAW device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the device -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver received a -+ * channel interrupt (IRQ) for a subchannel that is not a CLAW read or write -+ * subchannel. A CLAW subchannel must be configured for a 3088 device of -+ * type x'61' and have an even bus ID. -+ * User action: -+ * Assure that the subchannels have been defined correctly to the real or -+ * virtual hardware, for example, in your IOCDS or in your z/VM configuration. -+ */ -+ -+/*? -+ * Text: "%s: The CLAW device received an unexpected IRQ, c-%02x d-%02x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: subchannel status -+ * @3: device status -+ * Description: -+ * A Common Link Access to Workstation (CLAW) device received a channel -+ * interrupt (IRQ) while it was in a state in which it cannot process IRQs. -+ * The IRQ is ignored. This might be a temporary condition. -+ * User action: -+ * If this problem occurs frequently, use the status information from the -+ * message and the channel and device traces to analyze the problem. See -+ * "Principles of Operation" for details about the status information. 
-+ */ -+ -+/*? -+ * Text: "%s: The CLAW device for %s received an unexpected IRQ\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * A Common Link Access to Workstation (CLAW) device received a channel -+ * interrupt (IRQ) while the CLAW device driver had assigned a status to the -+ * device in which it cannot process IRQs. The IRQ is ignored. -+ * User action: -+ * Restart the remote channel adapter. If the problem persists, use s390dbf -+ * traces and CCW traces to diagnose the problem. -+ */ -+ -+/*? -+ * Text: "%s: Deactivating %s completed with incorrect subchannel status (read %02x, write %02x)\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * @3: read subchannel status -+ * @4: write subchannel status -+ * Description: -+ * When the Common Link Access to Workstation (CLAW) device driver closes a -+ * CLAW device, the device driver frees all storage that is used for the -+ * device. A successful closing operation results in status DEVICE END and -+ * CHANNEL END for both the read and write subchannel. At least one of these -+ * statuses is missing for a subchannel. Data might have been lost and there -+ * might be problems when the network interface is activated again. -+ * User action: -+ * If the network interface cannot be activated, vary the subchannels for the -+ * device offline and back online, for example, with chchp. If this does not -+ * resolve the problem, reset the remote channel adapter. -+ */ -+ -+/*? -+ * Text: "%s: The remote channel adapter is not available\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * Description: -+ * During an operation, the Common Link Access to Workstation (CLAW) device -+ * driver received errno ENODEV from the common I/O layer. This means that -+ * the remote channel adapter was not operational or offline. 
-+ * User action: -+ * Check the remote channel adapter and, if necessary, restart it. -+ */ -+ -+/*? -+ * Text: "%s: The status of the remote channel adapter is not valid\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * Description: -+ * During an operation, the Common Link Access to Workstation (CLAW) device -+ * driver received errno EINVAL from the common I/O layer. This indicates -+ * that the remote channel adapter was offline or not operational. -+ * User action: -+ * Check for related error messages to find the cause of the problem. If -+ * necessary, restart the remote channel adapter. -+ */ -+ -+/*? -+ * Text: "%s: The common device layer returned error code %d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: errno -+ * Description: -+ * During an I/O operation, the Common Link Access to Workstation (CLAW) device -+ * driver received an errno from the common I/O layer. This indicates a problem -+ * with the remote channel adapter. -+ * User action: -+ * See the errno man page to find out what the error code means. Check for -+ * related messages. Restart the remote channel adapter. If the problem -+ * persists, examine the subchannel trace for further diagnostic information. -+ */ -+ -+/*? -+ * Text: "%s: The communication peer of %s disconnected\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver received a device -+ * status word DEV_STAT_UNIT_CHECK and sense code 0x41. This indicates that the -+ * remote network interface is no longer available. -+ * User action: -+ * Ensure that the remote channel adapter is operational and activate the -+ * remote interface. For information about the sense code see -+ * /Documentation/s390/cds.txt in the Linux source tree. Search for 'SNS0' to -+ * locate the information. -+ */ -+ -+/*? 
-+ * Text: "%s: The remote channel adapter for %s has been reset\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver received a device -+ * status word DEV_STAT_UNIT_CHECK and sense code 0x40. This indicates that the -+ * remote channel adapter has been reset. -+ * User action: -+ * When the remote channel adapter is operational again, activate the remote -+ * interface. For information about the sense code see -+ * /Documentation/s390/cds.txt in the Linux source tree. Search for 'SNS0' to -+ * locate the information. -+ */ -+ -+/*? -+ * Text: "%s: A data streaming timeout occurred for %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver received a device -+ * status word DEV_STAT_UNIT_CHECK and sense code 0x24. This indicates a data -+ * streaming timeout. The remote channel adapter or the channel might be -+ * faulty. -+ * User action: -+ * Restart the remote channel adapter and activate the remote interface. If the -+ * problem persists, examine the subchannel trace for further diagnostic -+ * information. For information about the sense code see -+ * /Documentation/s390/cds.txt in the Linux source tree. Search for 'SNS0' to -+ * locate the information. -+ */ -+ -+/*? -+ * Text: "%s: A data transfer parity error occurred for %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver received a device -+ * status word DEV_STAT_UNIT_CHECK and sense code 0x20. This indicates a data -+ * parity error. The remote channel adapter or the channel might be faulty. -+ * User action: -+ * Ensure that all cables are securely plugged. 
Restart the remote channel -+ * adapter and activate the remote interface. If the problem persists, examine -+ * the subchannel trace for further diagnostic information. For information -+ * about the sense code see /Documentation/s390/cds.txt in the Linux source -+ * tree. Search for 'SNS0' to locate the information. -+ */ -+ -+/*? -+ * Text: "%s: The remote channel adapter for %s is faulty\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver received a device -+ * status word DEV_STAT_UNIT_CHECK and sense code 0x30. This indicates that the -+ * remote channel adapter is faulty. -+ * User action: -+ * Check and restart the remote channel adapter and activate the remote -+ * interface. If the problem persists, perform device diagnosis for the remote -+ * channel adapter and examine the subchannel trace for further diagnostic -+ * information. For information about the sense code see -+ * /Documentation/s390/cds.txt in the Linux source tree. Search for 'SNS0' to -+ * locate the information. -+ */ -+ -+/*? -+ * Text: "%s: A read data parity error occurred for %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver received a device -+ * status word DEV_STAT_UNIT_CHECK and sense code 0x10. This indicates a read -+ * data parity error. The remote channel adapter might be faulty. -+ * User action: -+ * Ensure that all cables are securely plugged. Check and restart the remote -+ * channel adapter and activate the remote interface. If the problem persists, -+ * perform device diagnosis for the remote channel adapter and examine the -+ * subchannel trace for further diagnostic information. For information about -+ * the sense code see /Documentation/s390/cds.txt in the Linux source tree. 
-+ * Search for 'SNS0' to locate the information. -+ */ -+ -+/*? -+ * Text: "%s: The communication peer of %s uses an incorrect API version %d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * @3: CLAW API version -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver received a -+ * SYSTEM_VALIDATE_REQUEST packet from the remote channel adapter. The packet -+ * included an unexpected version ID for the CLAW API. The version ID must -+ * be '2' for all packets. -+ * User action: -+ * Ensure that the remote channel adapter is at the latest firmware level. -+ * Restart the remote channel adapter and activate the remote interface. If the -+ * problem persists, examine the subchannel trace for further diagnostic -+ * information. -+ */ -+ -+/*? -+ * Text: "%s: Host name %s for %s does not match the remote adapter name %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: host name in the local CLAW device settings -+ * @3: network interface name -+ * @4: adapter name in the remote CLAW device settings -+ * Description: -+ * The host name in the local Common Link Access to Workstation (CLAW) device -+ * settings must match the adapter name in the CLAW device settings of the -+ * communication peer. The CLAW device driver discovered a mismatch between -+ * these settings. The connection cannot be established. -+ * User action: -+ * Check the configuration of the CLAW device and of its communication peer. -+ * Correct the erroneous setting and restart the CLAW device, local or remote, -+ * for which you have made corrections. -+ */ -+ -+/*? 
-+ * Text: "%s: Adapter name %s for %s does not match the remote host name %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: adapter name in the local CLAW device settings -+ * @3: network interface name -+ * @4: host name in the remote CLAW device settings -+ * Description: -+ * The adapter name in the local Common Link Access to Workstation (CLAW) device -+ * settings must match the host name in the CLAW device settings of the -+ * communication peer. The CLAW device driver discovered a mismatch between -+ * these settings. The connection cannot be established. -+ * User action: -+ * Check the configuration of the CLAW device and of its communication peer. -+ * Correct the erroneous setting and restart the CLAW device, local or remote, -+ * for which you have made corrections. -+ */ -+ -+/*? -+ * Text: "%s: The local write buffer is smaller than the remote read buffer\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * Description: -+ * You set the buffer size for the local Common Link Access to Workstation -+ * (CLAW) device implicitly by setting the connection type. For connection -+ * type 'packed' the buffer size is 32 KB, for the other connection types the -+ * buffer size is 4 KB. The connection cannot be established because the -+ * write buffer size of the local CLAW device does not match the read buffer -+ * size of the communication peer. -+ * User action: -+ * Confirm that you are using the correct connection type for the local CLAW -+ * device. Ensure that the read buffer size of the remote CLAW device is set -+ * accordingly. Restart the CLAW device, local or remote, for which you have -+ * made corrections. -+ */ -+ -+/*? 
-+ * Text: "%s: The local read buffer is smaller than the remote write buffer\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * Description: -+ * You set the buffer size for the local Common Link Access to Workstation -+ * (CLAW) device implicitly by setting the connection type. For connection -+ * type 'packed' the buffer size is 32 KB, for the other connection types the -+ * buffer size is 4 KB. The connection cannot be established because the -+ * read buffer size of the local CLAW device does not match the write buffer -+ * size of the communication peer. -+ * User action: -+ * Confirm that you are using the correct connection type for the local CLAW -+ * device. Ensure that the write buffer size of the remote CLAW device is set -+ * accordingly. Restart the CLAW device, local or remote, for which you have -+ * made corrections. -+ */ -+ -+/*? -+ * Text: "%s: Settings for %s validated (version=%d, remote device=%d, rc=%d, adapter name=%.8s, host name=%.8s)\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * @3: CLAW API version -+ * @4: identifier for the remote CLAW device -+ * @5: return code received from the remote CLAW device -+ * @6: adapter name -+ * @7: host name -+ * Description: -+ * The settings of the local Common Link Access to Workstation (CLAW) device -+ * have been validated by the communication peer. The message summarizes the -+ * content of the response. If the return code is zero, the validation was -+ * successful and the connection is activated. -+ * User action: -+ * If the return code is not equal to zero, look for related warning messages. -+ */ -+ -+/*? 
-+ * Text: "%s: Validating %s failed because of a host or adapter name mismatch\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) network interface cannot be -+ * activated because there is a mismatch between a host name and the -+ * corresponding adapter name. The local host name must match the remote -+ * adapter name and the local adapter name must match the remote host name. -+ * User action: -+ * Correct the erroneous setting and restart the CLAW device, local or remote, -+ * for which you have made corrections. -+ */ -+ -+/*? -+ * Text: "%s: Validating %s failed because of a version conflict\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) network interface cannot be -+ * activated because the remote CLAW device does not support CLAW version 2. -+ * The CLAW device driver requires CLAW version 2. -+ * User action: -+ * Ensure that the remote channel adapter supports CLAW version 2 and that the -+ * remote CLAW device is configured for CLAW version 2. -+ */ -+ -+/*? -+ * Text: "%s: Validating %s failed because of a frame size conflict\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * You set the frame size for the local Common Link Access to Workstation -+ * (CLAW) device implicitly by setting the connection type. For connection -+ * type 'packed' the frame size is 32 KB, for the other connection types the -+ * frame size is 4 KB. The connection cannot be activated because the -+ * frame size of the local CLAW device does not match the frame size of the -+ * communication peer. -+ * User action: -+ * Confirm that you are using the correct connection type for the local CLAW -+ * device. 
Ensure that the frame size of the remote CLAW device is set -+ * accordingly. Restart the CLAW device, local or remote, for which you -+ * have made corrections. -+ */ -+ -+/*? -+ * Text: "%s: The communication peer of %s rejected the connection\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The remote CLAW device rejected the connection because of a mismatch between -+ * the settings of the local CLAW device and the remote CLAW device. -+ * User action: -+ * Check the settings of both the local and the remote CLAW device and ensure -+ * that the settings are consistent. Restart the CLAW device, local or remote, -+ * for which you have made the correction. -+ */ -+ -+/*? -+ * Text: "%s: %s rejected a connection request because it is already active\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device rejected a connection -+ * request by its communication peer because the connection is already active. -+ * The CLAW device driver only supports a single connection for each CLAW -+ * device. This might be a runtime problem. -+ * User action: -+ * None if there is an active connection. If no connection can be established, -+ * restart the remote channel adapter. -+ */ -+ -+/*? -+ * Text: "%s: %s rejected a request to open multiple connections\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device rejected a request by -+ * its communication peer to open more than one connection. The CLAW device -+ * driver only supports a single connection for each CLAW device. -+ * User action: -+ * Reconfigure the remote CLAW device to only use one connection. Restart the -+ * remote CLAW device. -+ */ -+ -+/*? 
-+ * Text: "%s: %s rejected a connection request because of a type mismatch\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device rejected a request by -+ * its communication peer to open a connection. A connection can only be opened -+ * if the same connection type has been set for both the local and the remote -+ * CLAW device. -+ * User action: -+ * Ensure that the connection types for the local and remote CLAW device match. -+ * Restart the CLAW device, local or remote, for which you have changed the -+ * connection type. -+ */ -+ -+/*? -+ * Text: "%s: The communication peer of %s rejected a connection request\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The remote CLAW device detected an inconsistency in the configurations of the -+ * local and the remote CLAW device and rejected a connection request. -+ * User action: -+ * Examine the settings of your local and remote CLAW device. Correct the -+ * erroneous setting and restart the CLAW device, local or remote, for which -+ * you have made corrections. -+ */ -+ -+/*? -+ * Text: "%s: The communication peer of %s rejected a connection request because of a type mismatch\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The remote Common Link Access to Workstation (CLAW) device rejected a -+ * request to open a connection. A connection can only be opened if the same -+ * connection type has been set for both the local and the remote CLAW device. -+ * User action: -+ * Ensure that the connection types for the local and remote CLAW device match. -+ * Restart the CLAW device, local or remote, for which you have changed the -+ * connection type. -+ */ -+ -+/*? 
-+ * Text: "%s: Activating %s failed because of an incorrect link ID=%d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * @3: link ID returned from the remote CLAW device -+ * Description: -+ * The remote Common Link Access to Workstation (CLAW) device accepted a -+ * connection request but returned an incorrect link ID. The CLAW device driver -+ * only supports a single connection at a time (link ID=1) for each network -+ * interface. -+ * User action: -+ * Restart the remote CLAW device and try again to activate the network -+ * interface. -+ */ -+ -+/*? -+ * Text: "%s: The communication peer of %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The remote Common Link Access to Workstation (CLAW) device reported an -+ * error condition that cannot be recovered automatically. -+ * User action: -+ * Restart the remote CLAW device. If this does not resolve the error, gather -+ * logs and traces from the remote CLAW device to obtain further -+ * diagnostic data. -+ */ -+ -+/*? -+ * Text: "%s: The communication peer of %s sent an unknown command code\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * Description: -+ * The remote Common Link Access to Workstation (CLAW) device sent a command -+ * code that is not defined. This might indicate that the remote CLAW device is -+ * malfunctioning. The connection remains operational. -+ * User action: -+ * If this problem occurs frequently, restart the remote CLAW device. If this -+ * does not resolve the error, gather logs and traces from the remote CLAW -+ * device to obtain further diagnostic data. -+ */ -+ -+/*? 
-+ * Text: "%s: The communication peer of %s sent a faulty frame of length %02x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: network interface name -+ * @3: incorrect frame length value -+ * Description: -+ * The remote Common Link Access to Workstation (CLAW) device sent a frame -+ * with an incorrect value in the length field. This problem might result from -+ * data errors or incorrect packing. The connection remains operational. -+ * User action: -+ * If this problem occurs frequently, restart the remote CLAW device. If this -+ * does not resolve the error, gather logs and traces from the remote CLAW -+ * device to obtain further diagnostic data. -+ */ -+ -+/*? -+ * Text: "%s: Allocating a buffer for incoming data failed\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * Description: -+ * A Common Link Access to Workstation (CLAW) data packet was received but -+ * the CLAW device driver could not allocate a receive buffer. A possible cause -+ * of this problem is memory constraints. The data packet is dropped but the -+ * connection remains operational. -+ * User action: -+ * Ensure that sufficient memory is available. If this problem occurs -+ * frequently, restart the remote CLAW device. If this does not resolve the -+ * error, gather logs and traces from the remote CLAW device to obtain further -+ * diagnostic data. -+ */ -+ -+/*? -+ * Text: "%s: Creating a CLAW group device failed with error code %d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: errno -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver failed to create -+ * a CLAW group device. A possible cause of this problem is memory constraints. -+ * User action: -+ * Ensure that there is sufficient free memory. See the errno man page and look -+ * for related messages to find out what caused the problem. 
If you cannot -+ * resolve the problem, contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Setting the read subchannel online failed with error code %d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: errno -+ * Description: -+ * Setting the Common Link Access to Workstation (CLAW) device online failed -+ * with an error for the read subchannel. This problem occurs, for example, if -+ * the read subchannel used to create the CLAW group device is not defined as a -+ * CLAW read subchannel in the hardware definitions. The CLAW read subchannel -+ * must be for a 3088 device of type x'61' and have an even bus ID. The bus ID -+ * of the read subchannel matches the bus ID of the CLAW device. -+ * User action: -+ * Confirm that you are using the correct bus ID for the read subchannel. If -+ * necessary, ungroup the device and recreate it with the correct bus ID. -+ * Assure that the read subchannel has been defined correctly to the real or -+ * virtual hardware, for example, in your IOCDS or in your z/VM configuration. -+ * Assure that a valid number of read buffers has been assigned to the device. -+ * See 'Device Drivers, Features, and Commands' for details about the read -+ * buffers. See the errno man page for information about the error code. -+ */ -+ -+/*? -+ * Text: "%s: Setting the write subchannel online failed with error code %d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * @2: errno -+ * Description: -+ * Setting the Common Link Access to Workstation (CLAW) device online failed -+ * with an error for the write subchannel. This problem occurs, for example, if -+ * the write subchannel used to create the CLAW group device is not defined as a -+ * CLAW write subchannel in the hardware definitions. The CLAW write subchannel -+ * must be for a 3088 device of type x'61' and have an uneven bus ID. 
The -+ * bus ID of the write subchannel can be found from the symbolic link -+ * /sys/bus/ccwgroup/drivers/claw//cdev1 where -+ * is the bus ID of the CLAW device. -+ * User action: -+ * Confirm that you are using the correct bus ID for the write subchannel. If -+ * necessary, ungroup the device and recreate it with the correct bus ID. -+ * Assure that the write subchannel has been defined correctly to the real or -+ * virtual hardware, for example, in your IOCDS or in your z/VM configuration. -+ * Assure that a valid number of write buffers has been assigned to the device. -+ * See 'Device Drivers, Features, and Commands' for details about the read -+ * buffers. See the errno man page for information about the error code. -+ */ -+ -+/*? -+ * Text: "%s: Activating the CLAW device failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CLAW device -+ * Description: -+ * Activating the Common Link Access to Workstation (CLAW) device failed. A -+ * possible cause of this problem is memory constraints. -+ * User action: -+ * Free some memory and try again to activate the CLAW device. If the problem -+ * persists, contact your support organization. -+ */ -+ -+/*? -+ * Text: "Registering with the S/390 debug feature failed with error code %d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: errno -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver failed to register -+ * with the S/390 debug feature. No debug traces will be available for CLAW. -+ * User action: -+ * Enter 'lsmod | grep dbf' or an equivalent command to check if the S/390 debug -+ * feature loaded. If the output does not show the dbf module, the S/390 debug -+ * feature has not been loaded, unload the CLAW device driver, load the debug -+ * feature, then reload the CLAW device driver. See the errno man page for -+ * information about the error code. -+ */ -+ -+/*? 
-+ * Text: "Registering with the cu3088 device driver failed with error code %d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: errno -+ * Description: -+ * The Common Link Access to Workstation (CLAW) device driver failed to register -+ * with the cu3088 channel subsystem device driver. The CLAW device driver -+ * requires the cu3088 device driver. -+ * User action: -+ * Enter 'lsmod | grep cu3088' or an equivalent command to check if the cu3088 -+ * device driver is loaded. If the output does not show the cu3088 module, -+ * unload the CLAW device driver, load the cu3088 device driver, then reload -+ * the CLAW device driver. See the errno man page for information about the -+ * error code. -+ */ -+ -+/*? Text: "%s: %s: CLAW device %.8s: Received Control Packet\n" */ -+/*? Text: "%s: %s: CLAW device %.8s: System validate completed.\n" */ -+/*? Text: "%s: %s: CLAW device %.8s: Connection completed link_id=%d.\n" */ -+/*? Text: "%s: %s: remote side is not ready\n" */ -+/*? Text: "%s: %s: write connection restarting\n" */ -+/*? Text: "%s: %s: subchannel check for device: %04x - Sch Stat %02x Dev Stat %02x CPA - %04x\n" */ -+/*? Text: "%s: %s: Unit Exception occurred in write channel\n" */ -+/*? Text: "%s: %s: Resetting Event occurred:\n" */ -+/*? Text: "%s: %s: Recv Conn Confirm:Vers=%d,link_id=%d,Corr=%d,Host appl=%.8s,WS appl=%.8s\n" */ -+/*? Text: "%s: %s: Recv Conn Req: Vers=%d,link_id=%d,Corr=%d,HOST appl=%.8s,WS appl=%.8s\n" */ -+/*? Text: "%s: %s: Recv Sys Validate Request: Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,Host name=%.8s\n" */ -+/*? Text: "%s: %s: Confirmed Now packing\n" */ -+/*? Text: "%s: %s: Unit Check Occured in write channel\n" */ -+/*? Text: "%s: %s: Restart is required after remote side recovers \n" */ -+/*? Text: "%s: %s: sys Validate Rsize:%d Wsize:%d\n" */ -+/*? Text: "%s: %s:readsize=%d writesize=%d readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n" */ -+/*? Text: "%s: %s:host_name:%.8s, adapter_name :%.8s api_type: %.8s\n" */ -+/*? 
Text: "Driver unloaded\n" */ -+/*? Text: "Loading %s\n" */ -+/*? Text: "%s: will be removed.\n" */ -+/*? Text: "%s: add for %s\n" */ -+/*? Text: "%s: %s: shutting down \n" */ -+/*? Text: "%s: CLAW device %.8s: System validate completed.\n" */ -+/*? Text: "%s: %s: Disconnect: Vers=%d,link_id=%d,Corr=%d\n" */ -+/*? Text: "%s: %s: Recv Conn Resp: Vers=%d,link_id=%d,Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n" */ ---- /dev/null -+++ b/Documentation/kmsg/s390/cpcmd -@@ -0,0 +1,17 @@ -+/*? -+ * Text: "The cpcmd kernel function failed to allocate a response buffer\n" -+ * Severity: Warning -+ * Description: -+ * IPL code, console detection, and device drivers like vmcp or vmlogrdr use -+ * the cpcmd kernel function to send commands to the z/VM control program (CP). -+ * If a program that uses the cpcmd function does not allocate a contiguous -+ * response buffer below 2 GB guest real storage, cpcmd creates a bounce buffer -+ * to be used as the response buffer. Because of low memory or memory -+ * fragmentation, cpcmd could not create the bounce buffer. -+ * User action: -+ * Look for related page allocation failure messages and at the stack trace to -+ * find out which program or operation failed. Free some memory and retry the -+ * failed operation. Consider allocating more memory to your z/VM guest virtual -+ * machine. -+ */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/cpu -@@ -0,0 +1,69 @@ -+/*? -+ * Text: "Processor %d started, address %d, identification %06X\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: logical CPU number -+ * @2: CPU address -+ * @3: CPU identification number -+ * Description: -+ * The kernel detected a CPU with the given characteristics. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Processor %d stopped\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: logical CPU number -+ * Description: -+ * A logical CPU has been set offline. -+ * User action: -+ * None. -+ */ -+ -+/*? 
-+ * Text: "%d configured CPUs, %d standby CPUs\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: number of configured CPUs -+ * @2: number of standby CPUs -+ * Description: -+ * The kernel detected the given number of configured and standby CPUs. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "The CPU configuration topology of the machine is:" -+ * Severity: Informational -+ * Description: -+ * The first six values of the topology information represent fields Mag6 to -+ * Mag1 of system-information block (SYSIB) 15.1.2. These fields specify the -+ * maximum numbers of topology-list entries (TLE) at successive topology nesting -+ * levels. The last value represents the MNest value of SYSIB 15.1.2 which -+ * specifies the maximum possible nesting that can be configured through -+ * dynamic changes. For details see the SYSIB 15.1.2 information in the -+ * "Principles of Operation." -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "CPU %i exceeds the maximum %i and is excluded from the dump\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: CPU number -+ * @2: maximum CPU number -+ * Description: -+ * The Linux kernel is used as a system dumper but it runs on more CPUs than -+ * it has been compiled for with the CONFIG_NR_CPUS kernel configuration -+ * option. The system dump will be created but information on one or more -+ * CPUs will be missing. -+ * User action: -+ * Update the system dump kernel to a newer version that supports more -+ * CPUs or reduce the number of installed CPUs and reproduce the problem -+ * that should be analyzed. If you send the system dump that prompted this -+ * message to a support organization, be sure to communicate that the dump -+ * does not include all CPU information. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/ctcm -@@ -0,0 +1,199 @@ -+/*? 
-+ * Text: "%s: An I/O-error occurred on the CTCM device\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the CTCM device -+ * Description: -+ * An I/O error was detected on one of the subchannels of the CTCM device. -+ * Depending on the error, the CTCM device driver might attempt an automatic -+ * recovery. -+ * User action: -+ * Check the status of the CTCM device, for example, with ifconfig. If the -+ * device is not operational, perform a manual recovery. See "Device Drivers, -+ * Features, and Commands" for details about how to recover a CTCM device. -+ */ -+ -+/*? -+ * Text: "%s: An adapter hardware operation timed out\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the CTCM device -+ * Description: -+ * The CTCM device uses an adapter to physically connect to its communication -+ * peer. An operation on this adapter timed out. -+ * User action: -+ * Check the status of the CTCM device, for example, with ifconfig. If the -+ * device is not operational, perform a manual recovery. See "Device Drivers, -+ * Features, and Commands" for details about how to recover a CTCM device. -+ */ -+ -+/*? -+ * Text: "%s: An error occurred on the adapter hardware\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the CTCM device -+ * Description: -+ * The CTCM device uses an adapter to physically connect to its communication -+ * peer. An operation on this adapter returned an error. -+ * User action: -+ * Check the status of the CTCM device, for example, with ifconfig. If the -+ * device is not operational, perform a manual recovery. See "Device Drivers, -+ * Features, and Commands" for details about how to recover a CTCM device. -+ */ -+ -+/*? -+ * Text: "%s: The communication peer has disconnected\n" -+ * Severity: Notice -+ * Parameter: -+ * @1: channel ID -+ * Description: -+ * The remote device has disconnected. 
Possible reasons are that the remote -+ * interface has been closed or that the operating system instance with the -+ * communication peer has been rebooted or shut down. -+ * User action: -+ * Check the status of the peer device. Ensure that the peer operating system -+ * instance is running and that the peer interface is operational. -+ */ -+ -+/*? -+ * Text: "%s: The remote operating system is not available\n" -+ * Severity: Notice -+ * Parameter: -+ * @1: channel ID -+ * Description: -+ * The operating system instance with the communication peer has disconnected. -+ * Possible reasons are that the operating system instance has been rebooted -+ * or shut down. -+ * User action: -+ * Ensure that the peer operating system instance is running and that the peer -+ * interface is operational. -+ */ -+ -+/*? -+ * Text: "%s: The adapter received a non-specific IRQ\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CTCM device -+ * Description: -+ * The adapter hardware used by the CTCM device received an IRQ that cannot -+ * be mapped to a particular device. This is a hardware problem. -+ * User action: -+ * Check the status of the CTCM device, for example, with ifconfig. Check if -+ * the connection to the remote device still works. If the CTCM device is not -+ * operational, set it offline and back online. If this does not resolve the -+ * problem, perform a manual recovery. See "Device Drivers, Features, and -+ * Commands" for details about how to recover a CTCM device. If this problem -+ * persists, gather Linux debug data, collect the hardware logs, and report the -+ * problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: A check occurred on the subchannel\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CTCM device -+ * Description: -+ * A check condition has been detected on the subchannel. -+ * User action: -+ * Check if the connection to the remote device still works. 
If the CTCM device -+ * is not operational, set it offline and back online. If this does not resolve -+ * the problem, perform a manual recovery. See "Device Drivers, Features, and -+ * Commands" for details about how to recover a CTCM device. If this problem -+ * persists, gather Linux debug data and report the problem to your support -+ * organization. -+ */ -+ -+/*? -+ * Text: "%s: The communication peer is busy\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: channel ID -+ * Description: -+ * A busy target device was reported. This might be a temporary problem. -+ * User action: -+ * If this problem persists or is reported frequently ensure that the target -+ * device is working properly. -+ */ -+ -+/*? -+ * Text: "%s: The specified target device is not valid\n" -+ * Severity: Error -+ * Parameter: -+ * @1: channel ID -+ * Description: -+ * A target device was called with a faulty device specification. This is an -+ * adapter hardware problem. -+ * User action: -+ * Gather Linux debug data, collect the hardware logs, and contact IBM support. -+ */ -+ -+/*? -+ * Text: "An I/O operation resulted in error %04x\n" -+ * Severity: Error -+ * Parameter: -+ * @1: channel ID -+ * @2: error information -+ * Description: -+ * A hardware operation ended with an error. -+ * User action: -+ * Check the status of the CTCM device, for example, with ifconfig. If the -+ * device is not operational, perform a manual recovery. See "Device Drivers, -+ * Features, and Commands" for details about how to recover a CTCM device. -+ * If this problem persists, gather Linux debug data, collect the hardware logs, -+ * and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Initialization failed with RX/TX init handshake error %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CTCM device -+ * @2: error information -+ * Description: -+ * A problem occurred during the initialization of the connection. 
If the -+ * connection can be established after an automatic recovery, a success message -+ * is issued. -+ * User action: -+ * If the problem is not resolved by the automatic recovery process, check the -+ * local and remote device. If this problem persists, gather Linux debug data -+ * and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: The network backlog for %s is exceeded, package dropped\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CTCM device -+ * @2: calling function -+ * Description: -+ * There is more network traffic than can be handled by the device. The device -+ * is closed and some data has not been transmitted. The device might be -+ * recovered automatically. -+ * User action: -+ * Investigate and resolve the congestion. If necessary, set the device -+ * online to make it operational. -+ */ -+ -+/*? -+ * Text: "%s: The XID used in the MPC protocol is not valid, rc = %d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the CTCM device -+ * @2: return code -+ * Description: -+ * The exchange identification (XID) used by the CTCM device driver when -+ * in MPC mode is not valid. -+ * User action: -+ * Note the error information provided with this message and contact your -+ * support organization. -+ */ -+ -+/*? Text: "CTCM driver unloaded\n" */ -+/*? Text: "%s: %s Internal error: net_device is NULL, ch = 0x%p\n" */ -+/*? Text: "%s / register_cu3088_discipline failed, ret = %d\n" */ -+/*? Text: "%s: %s: Internal error: Can't determine channel for interrupt device %s\n" */ -+/*? Text: "CTCM driver initialized\n" */ -+/*? Text: "%s: setup OK : r/w = %s/%s, protocol : %d\n" */ -+/*? Text: "%s: Connected with remote side\n" */ -+/*? Text: "%s: Restarting device\n" */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/dasd -@@ -0,0 +1,466 @@ -+/* dasd_ioctl */ -+ -+/*? 
-+ * Text: "%s: The DASD has been put in the quiesce state\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * No I/O operation is possible on this device. -+ * User action: -+ * Resume the DASD to enable I/O operations. -+ */ -+ -+/*? -+ * Text: "%s: I/O operations have been resumed on the DASD\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD is no longer in state quiesce and I/O operations can be performed -+ * on the device. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: The DASD cannot be formatted while it is enabled\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD you try to format is enabled. Enabled devices cannot be formatted. -+ * User action: -+ * Contact the owner of the formatting tool. -+ */ -+ -+/*? -+ * Text: "%s: The specified DASD is a partition and cannot be formatted\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD you try to format is a partition. Partitions cannot be formatted -+ * separately. You can only format a complete DASD including all its partitions. -+ * User action: -+ * Format the complete DASD. -+ * ATTENTION: Formatting irreversibly destroys all data on all partitions -+ * of the DASD. -+ */ -+ -+/*? -+ * Text: "%s: Formatting unit %d failed with rc=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: start track -+ * @3: return code -+ * Description: -+ * The formatting process might have been interrupted by a signal, for example, -+ * CTRL+C. If the process was not interrupted intentionally, an I/O error -+ * might have occurred. -+ * User action: -+ * Retry to format the device. If the error persists, check the log file for -+ * related error messages. If you cannot resolve the error, note the return -+ * code and contact your support organization. 
-+ */ -+ -+ -+/* dasd */ -+ -+/*? -+ * Text: "%s: start_IO run out of retries and failed with request %s\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: pointer to request -+ * Description: -+ * The start IO function tried to start an IO request but the number -+ * of retries for the I/O was exceeded before the request could be started. -+ * User action: -+ * Check for related previous error messages. -+ */ -+ -+/*? -+ * Text: "%s: Cancelling request %p failed with rc=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: pointer to request -+ * @3: return code of previous function -+ * Description: -+ * In response to a user action, the DASD device driver tried but failed to -+ * cancel a previously started I/O operation. -+ * User action: -+ * Try the action again. -+ */ -+ -+/*? -+ * Text: "%s: Flushing the DASD request queue failed for request %p\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: pointer to request -+ * Description: -+ * As part of the unloading process, the DASD device driver flushes the -+ * request queue. This failed because a previously started I/O operation -+ * could not be canceled. -+ * User action: -+ * Try again to unload the DASD device driver or to shut down Linux. -+ */ -+ -+/*? -+ * Text: "The DASD device driver could not be initialized\n" -+ * Severity: Informational -+ * Description: -+ * The initialization of the DASD device driver failed because of previous -+ * errors. -+ * User action: -+ * Check for related previous error messages. -+ */ -+ -+/*? -+ * Text: "%s: Accessing the DASD failed because it is in probeonly mode\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The dasd= module or kernel parameter specified the probeonly attribute for -+ * the DASD you are trying to access. The DASD device driver cannot access -+ * DASDs that are in probeonly mode. 
-+ * User action: -+ * Change the dasd= parameter as to omit probeonly for the DASD and reload -+ * the DASD device driver. If the DASD device driver has been compiled into -+ * the kernel, reboot Linux. -+ */ -+ -+/*? -+ * Text: "%s: cqr %p timed out (%is), %i retries remaining\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: pointer to request -+ * @3: timeout value -+ * @4: number of retries left -+ * Description: -+ * One try of the error recovery procedure (ERP) for the channel queued request -+ * (cqr) timed out and failed to recover the error. ERP continues for the DASD. -+ * User action: -+ * Ignore this message if it occurs infrequently and if the recovery succeeds -+ * during one of the retries. If this error persists, check for related -+ * previous error messages and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: cqr %p timed out (%is) but cannot be ended, retrying in 5 s\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: pointer to request -+ * @3: timeout value -+ * Description: -+ * A try of the error recovery procedure (ERP) for the channel queued request -+ * (cqr) timed out and failed to recover the error. The I/O request submitted -+ * during the try could not be canceled. The ERP waits for 5 seconds before -+ * trying again. -+ * User action: -+ * Ignore this message if it occurs infrequently and if the recovery succeeds -+ * during one of the retries. If this error persists, check for related -+ * previous error messages and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: The DASD cannot be set offline while it is in use\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD cannot be set offline because it is in use by an internal process. -+ * An action to free the DASD might not have completed yet. -+ * User action: -+ * Wait some time and set the DASD offline later. 
-+ */ -+ -+/*? -+ * Text: "%s: The DASD cannot be set offline with open count %i\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: count -+ * Description: -+ * The DASD is being used by one or more processes and cannot be set offline. -+ * User action: -+ * Ensure that the DASD is not in use anymore, for example, unmount all -+ * partitions. Then try again to set the DASD offline. -+ */ -+ -+/*? -+ * Text: "%s: Setting the DASD online failed with rc=%d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: return code -+ * Description: -+ * The DASD could not be set online because of previous errors. -+ * User action: -+ * Look for previous error messages. If you cannot resolve the error, note -+ * the return code and contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s Setting the DASD online with discipline %s failed with rc=%i\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: discipline -+ * @3: return code -+ * Description: -+ * The DASD could not be set online because of previous errors. -+ * User action: -+ * Look for previous error messages. If you cannot resolve the error, note the -+ * return code and contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s Setting the DASD online failed because of missing DIAG discipline\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD was to be set online with discipline DIAG but this discipline of -+ * the DASD device driver is not available. -+ * User action: -+ * Ensure that the dasd_diag_mod module is loaded. If your Linux system does -+ * not include this module, you cannot set DASDs online with the DIAG -+ * discipline. -+ */ -+ -+/*? 
-+ * Text: "%s Setting the DASD online failed because of a missing discipline\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD was to be set online with a DASD device driver discipline that -+ * is not available. -+ * User action: -+ * Ensure that all DASD modules are loaded correctly. -+ */ -+ -+--------------------------- -+ -+/*? -+ * Text: "The statistics feature has been switched off\n" -+ * Severity: Informational -+ * Description: -+ * The statistics feature of the DASD device driver has been switched off. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "The statistics feature has been switched on\n" -+ * Severity: Informational -+ * Description: -+ * The statistics feature of the DASD device driver has been switched on. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "The statistics have been reset\n" -+ * Severity: Informational -+ * Description: -+ * The DASD statistics data have been reset. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s is not a supported value for /proc/dasd/statistics\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: value -+ * Description: -+ * An incorrect value has been written to /proc/dasd/statistics. -+ * The supported values are: 'set on', 'set off', and 'reset'. -+ * User action: -+ * Write a supported value to /proc/dasd/statistics. -+ */ -+ -+/*? -+ * Text: "%s is not a valid device range\n" -+ * Severity: Error -+ * Parameter: -+ * @1: range -+ * Description: -+ * A device range specified with the dasd= parameter is not valid. -+ * User action: -+ * Examine the dasd= parameter and correct the device range. -+ */ -+ -+/*? -+ * Text: "The probeonly mode has been activated\n" -+ * Severity: Informational -+ * Description: -+ * The probeonly mode of the DASD device driver has been activated. In this -+ * mode the device driver rejects any 'open' syscalls with EPERM. -+ * User action: -+ * None. -+ */ -+ -+/*? 
-+ * Text: "The IPL device is not a CCW device\n" -+ * Severity: Error -+ * Description: -+ * The value for the dasd= parameter contains the 'ipldev' keyword. During -+ * the boot process this keyword is replaced with the device from which the -+ * IPL was performed. The 'ipldev' keyword is not valid if the IPL device is -+ * not a CCW device. -+ * User action: -+ * Do not specify the 'ipldev' keyword when performing an IPL from a device -+ * other than a CCW device. -+ */ -+ -+/*? -+ * Text: "A closing parenthesis ')' is missing in the dasd= parameter\n" -+ * Severity: Warning -+ * Description: -+ * The specification for the dasd= kernel or module parameter has an opening -+ * parenthesis '(' * without a matching closing parenthesis ')'. -+ * User action: -+ * Correct the parameter value. -+ */ -+ -+/*? -+ * Text: "The autodetection mode has been activated\n" -+ * Severity: Informational -+ * Description: -+ * The autodetection mode of the DASD device driver has been activated. In -+ * this mode the DASD device driver sets all detected DASDs online. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%*s is not a supported device option\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: length of option code -+ * @2: option code -+ * Description: -+ * The dasd= parameter includes an unknown option for a DASD or a device range. -+ * Options are specified in parenthesis and immediately follow a device or -+ * device range. -+ * User action: -+ * Check the dasd= syntax and remove any unsupported options from the dasd= -+ * parameter specification. -+ */ -+ -+/*? -+ * Text: "PAV support has be deactivated\n" -+ * Severity: Informational -+ * Description: -+ * The 'nopav' keyword has been specified with the dasd= kernel or module -+ * parameter. The Parallel Access Volume (PAV) support of the DASD device -+ * driver has been deactivated. -+ * User action: -+ * None. -+ */ -+ -+/*? 
-+ * Text: "'nopav' is not supported on z/VM\n" -+ * Severity: Informational -+ * Description: -+ * For Linux instances that run as guest operating systems of the z/VM -+ * hypervisor Parallel Access Volume (PAV) support is controlled by z/VM not -+ * by Linux. -+ * User action: -+ * Remove 'nopav' from the dasd= module or kernel parameter specification. -+ */ -+ -+/*? -+ * Text: "High Performance FICON support has been deactivated\n" -+ * Severity: Informational -+ * Description: -+ * The 'nofcx' keyword has been specified with the dasd= kernel or module -+ * parameter. The High Performance FICON (transport mode) support of the DASD -+ * device driver has been deactivated. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "The dasd= parameter value %s has an invalid ending\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: parameter value -+ * Description: -+ * The specified value for the dasd= kernel or module parameter is not correct. -+ * User action: -+ * Check the module or the kernel parameter. -+ */ -+ -+/*? -+ * Text: "Registering the device driver with major number %d failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: DASD major -+ * Description: -+ * Major number 94 is reserved for the DASD device driver. The DASD device -+ * driver failed to register with this major number. Another device driver -+ * might have used major number 94. -+ * User action: -+ * Determine which device driver uses major number 94 instead of the DASD -+ * device driver and unload this device driver. Then try again to load the -+ * DASD device driver. -+ */ -+ -+/*? -+ * Text: "%s: default ERP has run out of retries and failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The error recovery procedure (ERP) tried to recover an error but the number -+ * of retries for the I/O was exceeded before the error could be resolved. -+ * User action: -+ * Check for related previous error messages. -+ */ -+ -+/*? 
-+ * Text: "%s: Unable to terminate request %p on suspend\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: pointer to request -+ * Description: -+ * As part of the suspend process, the DASD device driver terminates requests -+ * on the request queue. This failed because a previously started I/O operation -+ * could not be canceled. The suspend process will be stopped. -+ * User action: -+ * Try again to suspend the system. -+ */ -+ -+/*? -+ * Text: "%s: ERP failed for the DASD\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * An error recovery procedure (ERP) was performed for the DASD but failed. -+ * User action: -+ * Check the message log for previous related error messages. -+ */ -+ -+/*? -+ * Text: "%s: An error occurred in the DASD device driver, reason=%s\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: reason code -+ * Description: -+ * This problem indicates a program error in the DASD device driver. -+ * User action: -+ * Note the reason code and contact your support organization. -+*/ ---- /dev/null -+++ b/Documentation/kmsg/s390/dasd-diag -@@ -0,0 +1,118 @@ -+/* dasd_diag */ -+ -+/*? -+ * Text: "%s: A 64-bit DIAG call failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * 64-bit DIAG calls require a 64-bit z/VM version. -+ * User action: -+ * Use z/VM 5.2 or later or set the sysfs 'use_diag' attribute of the DASD to 0 -+ * to switch off DIAG. -+ */ -+ -+/*? -+ * Text: "%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: return code -+ * Description: -+ * The format of the DASD is not correct. -+ * User action: -+ * Check the device format. For details about the return code see the -+ * section about the INITIALIZE function for DIAGNOSE Code X'250' -+ * in "z/VM CP Programming Services". 
If you cannot resolve the error, note -+ * the return code and contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s: New DASD with %ld byte/block, total size %ld KB%s\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: bytes per block -+ * @3: size -+ * @4: access mode -+ * Description: -+ * A DASD with the indicated block size and total size has been set online. -+ * If the DASD is configured as read-only to the real or virtual hardware, -+ * the message includes an indication of this hardware access mode. The -+ * hardware access mode is independent from the 'readonly' attribute of -+ * the device in sysfs. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: DIAG ERP failed with rc=%d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: return code -+ * Description: -+ * An error in the DIAG processing could not be recovered by the error -+ * recovery procedure (ERP) of the DIAG discipline. -+ * User action: -+ * Note the return code, check for related I/O errors, and report this problem -+ * to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: DIAG initialization failed with rc=%d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: return code -+ * Description: -+ * Initializing the DASD with the DIAG discipline failed. Possible reasons for -+ * this problem are that the device has a device type other than FBA or ECKD, -+ * or has a block size other than one of the supported sizes: -+ * 512 byte, 1024 byte, 2048 byte, or 4096 byte. -+ * User action: -+ * Ensure that the device can be written to and has a supported device type -+ * and block size. For details about the return code see the section about -+ * the INITIALIZE function for DIAGNOSE Code X'250' in "z/VM CP Programming -+ * Services". If you cannot resolve the error, note the error code and contact -+ * your support organization. -+ */ -+ -+/*? 
-+ * Text: "%s: Device type %d is not supported in DIAG mode\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: device type -+ * Description: -+ * Only DASD of type FBA and ECKD are supported in DIAG mode. -+ * User action: -+ * Set the sysfs 'use_diag' attribute of the DASD to 0 and try again to access -+ * the DASD. -+ */ -+ -+/*? -+ * Text: "Discipline %s cannot be used without z/VM\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: discipline name -+ * Description: -+ * The discipline that is specified with the dasd= kernel or module parameter -+ * is only available for Linux instances that run as guest operating -+ * systems of the z/VM hypervisor. -+ * User action: -+ * Remove the unsupported discipline from the parameter string. -+ */ -+ -+/*? -+ * Text: "%s: The access mode of a DIAG device changed to read-only" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A device changed its access mode from writeable to -+ * read-only while in use. -+ * User action: -+ * Set the device offline, ensure that the device is configured correctly in -+ * z/VM, then set the device online again. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/dasd-eckd -@@ -0,0 +1,1901 @@ -+/* dasd_eckd */ -+ -+/*? -+ * Text: "%s: ERP failed for the DASD\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * An error recovery procedure (ERP) was performed for the DASD but failed. -+ * User action: -+ * Check the message log for previous related error messages. -+ */ -+ -+/*? -+ * Text: "%s: An error occurred in the DASD device driver, reason=%s\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: reason code -+ * Description: -+ * This problem indicates a program error in the DASD device driver. -+ * User action: -+ * Note the reason code and contact your support organization. -+*/ -+ -+/*? 
-+ * Text: "%s: Allocating memory for private DASD data failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD device driver maintains data structures for each DASD it manages. -+ * There is not enough memory to allocate these data structures for one or -+ * more DASD. -+ * User action: -+ * Free some memory and try the operation again. -+ */ -+ -+/*? -+ * Text: "%s: DASD with %d KB/block, %d KB total size, %d KB/track, %s\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: block size -+ * @3: DASD size -+ * @4: track size -+ * @5: disc layout -+ * Description: -+ * A DASD with the shown characteristics has been set online. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: Start track number %d used in formatting is too big\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: track number -+ * Description: -+ * The DASD format I/O control was used incorrectly by a formatting tool. -+ * User action: -+ * Contact the owner of the formatting tool. -+ */ -+ -+/*? -+ * Text: "%s: The DASD is not formatted\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A DASD has been set online but it has not been formatted yet. You must -+ * format the DASD before you can use it. -+ * User action: -+ * Format the DASD, for example, with dasdfmt. -+ */ -+ -+/*? -+ * Text: "%s: 0x%x is not a known command\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: command -+ * Description: -+ * This problem is likely to be caused by a programming error. -+ * User action: -+ * Contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Track 0 has no records following the VTOC\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * Linux has identified a volume table of contents (VTOC) on the DASD but -+ * cannot read any data records following the VTOC. 
A possible cause of this -+ * problem is that the DASD has been used with another System z operating -+ * system. -+ * User action: -+ * Format the DASD for usage with Linux, for example, with dasdfmt. -+ * ATTENTION: Formatting irreversibly destroys all data on the DASD. -+ */ -+ -+/*? -+ * Text: "%s: An I/O control call used incorrect flags 0x%x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: flags -+ * Description: -+ * The DASD format I/O control was used incorrectly. -+ * User action: -+ * Contact the owner of the formatting tool. -+ */ -+ -+/*? -+ * Text: "%s: New DASD %04X/%02X (CU %04X/%02X) with %d cylinders, %d heads, %d sectors\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: device type -+ * @3: device model -+ * @4: control unit type -+ * @5: control unit model -+ * @6: number of cylinders -+ * @7: tracks per cylinder -+ * @8: sectors per track -+ * Description: -+ * A DASD with the shown characteristics has been set online. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: The disk layout of the DASD is not supported\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD device driver only supports the following disk layouts: CDL, LDL, -+ * FBA, CMS, and CMS RESERVED. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: Start track %d used in formatting exceeds end track\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: track number -+ * Description: -+ * The DASD format I/O control was used incorrectly by a formatting tool. -+ * User action: -+ * Contact the owner of the formatting tool. -+ */ -+ -+/*? -+ * Text: "%s: The DASD cache mode was set to %x (%i cylinder prestage)\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: operation mode -+ * @3: number of cylinders -+ * Description: -+ * The DASD cache mode has been changed. 
See the storage system documentation -+ * for information about the different cache operation modes. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: The DASD cannot be formatted with block size %d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: block size -+ * Description: -+ * The block size specified for a format instruction is not valid. The block -+ * size must be between 512 and 4096 byte and must be a power of 2. -+ * User action: -+ * Call the format command with a supported block size. -+ */ -+ -+/*? -+ * Text: "%s: The UID of the DASD has changed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The Unique Identifier (UID) of a DASD that is currently in use has changed. -+ * This indicates that the physical disk has been replaced. -+ * User action: -+ * None if the replacement was intentional. -+ * If the disk change is not expected, stop using the disk to prevent possible -+ * data loss. -+*/ -+ -+ -+/* dasd_3990_erp */ -+ -+/*? -+ * Text: "%s: is offline or not installed - INTERVENTION REQUIRED!!\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD to be accessed is not in an accessible state. The I/O operation -+ * will wait until the device is operational again. This is an operating system -+ * independent message that is issued by the storage system. -+ * User action: -+ * Make the DASD accessible again. For details see the storage system -+ * documentation. -+ */ -+ -+/*? -+ * Text: "%s: The DASD cannot be reached on any path (lpum=%x/opm=%x)\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: last path used mask -+ * @3: online path mask -+ * Description: -+ * After a path to the DASD failed, the error recovery procedure of the DASD -+ * device driver tried but failed to reconnect the DASD through an alternative -+ * path. 
-+ * User action: -+ * Ensure that the cabling between the storage server and the mainframe -+ * system is securely in place. Check the file systems on the DASD when it is -+ * accessible again. -+ */ -+ -+/*? -+ * Text: "%s: Unable to allocate DCTL-CQR\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an internal error. -+ * User action: -+ * Contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Invalid Parameter\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A data argument of a command is not valid. This is an operating system -+ * independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - DPS Installation Check\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This operating system independent message is issued by the storage system -+ * for one of the following reasons: -+ * - A 3380 Model D or E DASD does not have the Dynamic Path Selection (DPS) -+ * feature in the DASD A-unit. -+ * - The device type of an attached DASD is not supported by the firmware. -+ * - A type 3390 DASD is attached to a 3 MB channel. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 2 - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Drive motor switch is off\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. 
-+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - CCW Count less than required\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The CCW count of a command is less than required. This is an operating -+ * system independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Channel requested ... %02x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: reason code -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. The possible reason codes indicate the following problems: -+ * 00 No Message. -+ * 01 The channel has requested unit check sense data. -+ * 02 The channel has requested retry and retry is exhausted. -+ * 03 A SA Check-2 error has occurred. This sense is presented with -+ * Equipment Check. -+ * 04 The channel has requested retry and retry is not possible. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Status Not As Required: reason %02x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: reason code -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. There are several potential reasons for this message; -+ * byte 8 contains the reason code. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. 
-+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Device status 1 not valid\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Storage Path Restart\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * An operation for an active channel program was queued in a Storage Control -+ * when a warm start was received by the path. This is an operating system -+ * independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Reset Notification\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A system reset or its equivalent was received on an interface. The Unit -+ * Check that generates this sense is posted to the next channel initiated -+ * selection following the resetting event. This is an operating system -+ * independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Invalid Command Sequence\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * An incorrect sequence of commands has occurred. This is an operating system -+ * independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Missing device address bit\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. 
-+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT F - Subsystem Processing Error\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A firmware logic error has been detected. This is an operating system -+ * independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Seek incomplete\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Invalid Command\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A command was issued that is not in the 2107/1750 command set. -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Command Invalid on Secondary Address\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A command or order not allowed on a PPRC secondary device has been received -+ * by the secondary device. This is an operating system independent message -+ * that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. 
-+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Invalid Defective/Alternate Track Pointer\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A defective track has been accessed. The subsystem generates an invalid -+ * Defective/Alternate Track Pointer as a part of RAID Recovery. -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Channel Returned with Incorrect retry CCW\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A command portion of the CCW returned after a command retry sequence does -+ * not match the command for which retry was signaled. This is an operating -+ * system independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Diagnostic of Special Command Violates File Mask\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A command is not allowed under the Access Authorization specified by the -+ * File Mask. This is an operating system independent message that is issued -+ * by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Head address does not compare\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 1 - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Device did not respond to selection\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Device check-2 error or Set Sector is not complete\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Device Error Source\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The device has completed soft error logging. This is an operating system -+ * independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Data Pinned for Device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * Modified data in cache or in persistent storage exists for the DASD. The -+ * data cannot be destaged to the device. This track is the first track pinned -+ * for this device. This is an operating system independent message that is -+ * issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 6 - Overrun on channel C\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Device Status 1 not as expected\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 0 - Device Fenced - device = %02x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: sense data byte 4 -+ * Description: -+ * The device shown in sense byte 4 has been fenced. This is an operating -+ * system independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Interruption cannot be reset\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Index missing\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT F - DASD Fast Write inhibited\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * DASD Fast Write is not allowed because of a nonvolatile storage battery -+ * check condition. This is an operating system independent message that is -+ * issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - Invalid tag-in for an extended command sequence\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - Key area error; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - Count area error; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Track physical address did not compare\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 2 - 3990 check-2 error\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Offset active cannot be reset\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - RCC 1 and RCC 2 sequences not successful\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - No syn byte in count address area; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - Data area error\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 6 - Overrun on channel A\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. 
-+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - No sync byte in count address area\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 5 - Data Check in the key area\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT F - Caching status reset to default\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The storage director has assigned two new subsystem status devices and -+ * resets the status to its default value. This is an operating system -+ * independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 5 - Data Check in the data area; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 5 - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 1 - Device not ready\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - No sync byte in key area\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 8 - DASD controller failed to set or reset the long busy latch\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 1 - Cylinder address did not compare\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 3 - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - No syn byte in data area; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. 
-+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 2 - Support facility errors\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - Key area error\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 8 - End operation with transfer count not zero\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 2 - Microcode detected error %02x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: error code -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 5 - Data Check in the count area; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 3 - Allegiance terminated\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * Allegiance terminated because of a Reset Allegiance or an Unconditional -+ * Reserve command on another channel. This is an operating system independent -+ * message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - Home address area error\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - Count area error\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - Invalid tag-in during selection sequence\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - No sync byte in data area\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 4 - No sync byte in home address area; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - Home address area error; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - Data area error; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - No sync byte in home address area\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 5 - Data Check in the home address area; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 5 - Data Check in the home address area\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 5 - Data Check in the count area\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 4 - No sync byte in key area; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - Invalid DCC selection response or timeout\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 5 - Data Check in the data area\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT F - Operation Terminated\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The storage system ends an operation related to an active channel program -+ * when termination and redrive are required and logging is not desired. -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 6 - Overrun on channel B\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 5 - Data Check in the key area; offset active\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT F - Volume is suspended duplex\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The duplex pair volume has entered the suspended duplex state because of a -+ * failure. This is an operating system independent message that is issued by -+ * the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 6 - Overrun on channel D\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 7 - RCC 1 sequence not successful\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 6 - Overrun on channel E\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - 3990 microcode time out when stopping selection\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 6 - Overrun on channel F\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 6 - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - RCC initiated by a connection check alert\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. 
-+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 6 - Overrun on channel G\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - extra RCC required\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 6 - Overrun on channel H\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 8 - Unexpected end operation response code\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - Permanent path error (DASD controller not available)\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 7 - Missing end operation; device transfer incomplete\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT D - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT F - Cache or nonvolatile storage equipment failure\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * An equipment failure has occurred in the cache storage or nonvolatile -+ * storage of the storage system. This is an operating system independent -+ * message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 8 - DPS cannot be filled\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 8 - Error correction code hardware fault\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 7 - Missing end operation; device transfer complete\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - DASD controller not available on disconnected command chain\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 8 - No interruption from device during a command chain\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - No response to selection after a poll interruption\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 9 - Track physical address did not compare while oriented\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 9 - Head address did not compare\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - Invalid tag-in for an immediate command sequence\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 9 - Cylinder address did not compare\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 8 - DPS checks after a system reset or selective reset\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT F - Caching reinitiated\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * Caching has been automatically reinitiated following an error. -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT 8 - End operation with transfer count zero\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 7 - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 9 - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 8 - Short busy time-out during device selection\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT F - Caching terminated\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The storage system was unable to initiate caching or had to suspend caching -+ * for a 3990 control unit. If this problem is caused by a failure condition, -+ * an additional message will provide more information about the failure. -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * Check for additional messages that point out possible failures. For more -+ * information see the documentation of your storage system. 
-+ */ -+ -+/*? -+ * Text: "%s: FORMAT F - Subsystem status cannot be determined\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The status of a DASD Fast Write or PPRC volume cannot be determined. -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT F - Nonvolatile storage terminated\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The storage director has stopped using nonvolatile storage or cannot -+ * initiate nonvolatile storage. If this problem is caused by a failure, an -+ * additional message will provide more information about the failure. This is -+ * an operating system independent message that is issued by the storage system. -+ * User action: -+ * Check for additional messages that point out possible failures. For more -+ * information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 8 - Reserved\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: Write inhibited path encountered\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an informational message. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT 9 - Device check-2 error\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This is an operating system independent message that is issued by the -+ * storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? 
-+ * Text: "%s: FORMAT F - Track format incorrect\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A track format error occurred while data was being written to the DASD or -+ * while a duplex pair was being established. This is an operating system -+ * independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: FORMAT F - Cache fast write access not authorized\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A request for Cache Fast Write Data access cannot be satisfied because -+ * of missing access authorization for the storage system. This is an operating -+ * system independent message that is issued by the storage system. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: Data recovered during retry with PCI fetch mode active\n" -+ * Severity: Emerg -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * A data error has been recovered on the storage system but the Linux file -+ * system cannot be informed about the data mismatch. To prevent Linux from -+ * running with incorrect data, the DASD device driver will trigger a kernel -+ * panic. -+ * User action: -+ * Reset your real or virtual hardware and reboot Linux. -+ */ -+ -+/*? -+ * Text: "%s: The specified record was not found\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The record to be accessed does not exist. The DASD might be unformatted -+ * or defective. -+ * User action: -+ * Try to format the DASD or replace it. -+ * ATTENTION: Formatting irreversibly destroys all data on the DASD. -+ */ -+ -+/*?
-+ * Text: "%s: ERP %p (%02x) refers to %p\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: pointer to ERP -+ * @3: ERP status -+ * @4: cqr -+ * Description: -+ * This message provides debug information for the enhanced error recovery -+ * procedure (ERP). -+ * User action: -+ * If you do not need this information, you can suppress this message by -+ * switching off ERP logging, for example, by writing '1' to the 'erplog' -+ * sysfs attribute of the DASD. -+ */ -+ -+/*? -+ * Text: "%s: ERP chain at END of ERP-ACTION\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This message provides debug information for the enhanced error recovery -+ * procedure (ERP). -+ * User action: -+ * If you do not need this information, you can suppress this message by -+ * switching off ERP logging, for example, by writing '1' to the 'erplog' -+ * sysfs attribute of the DASD. -+ */ -+ -+/*? -+ * Text: "%s: The cylinder data for accessing the DASD is inconsistent\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * An error occurred in the storage system hardware. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: Accessing the DASD failed because of a hardware error\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * An error occurred in the storage system hardware. -+ * User action: -+ * For more information see the documentation of your storage system. -+ */ -+ -+/*? -+ * Text: "%s: ERP chain at BEGINNING of ERP-ACTION\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * This message provides debug information for the enhanced error recovery -+ * procedure (ERP). 
-+ * User action: -+ * If you do not need this information, you can suppress this message by -+ * switching off ERP logging, for example, by writing '1' to the 'erplog' -+ * sysfs attribute of the DASD. -+ */ -+ -+/*? -+ * Text: "%s: ERP %p has run out of retries and failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: ERP pointer -+ * Description: -+ * The error recovery procedure (ERP) tried to recover an error but the number -+ * of retries for the I/O was exceeded before the error could be resolved. -+ * User action: -+ * Check for related previous error messages. -+ */ -+ -+/*? -+ * Text: "%s: ERP failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The error recovery procedure (ERP) tried to recover an error but has -+ * failed. A retry is not recommended. The I/O will also fail. -+ * User action: -+ * Check for related previous error messages. -+ */ -+ -+/*? -+ * Text: "%s: SIM - SRC: %02x%02x%02x%02x\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: sense byte -+ * @3: sense byte -+ * @4: sense byte -+ * @5: sense byte -+ * Description: -+ * This error message is a System Information Message (SIM) generated by the -+ * storage system. The System Reference Code (SRC) defines the error in detail. -+ * User action: -+ * Look up the SRC in the storage server documentation. -+ */ -+ -+/*? -+ * Text: "%s: log SIM - SRC: %02x%02x%02x%02x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: sense byte -+ * @3: sense byte -+ * @4: sense byte -+ * @5: sense byte -+ * Description: -+ * This System Information Message (SIM) is generated by the storage system. -+ * The System Reference Code (SRC) defines the error in detail. -+ * User action: -+ * Look up the SRC in the storage server documentation. -+ */ -+ -+/*? 
-+ * Text: "%s: Reading device feature codes failed with rc=%d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: return code -+ * Description: -+ * The device feature codes state which advanced features are supported by a -+ * device. -+ * Examples for advanced features are PAV or high performance FICON. -+ * Some early devices do not provide feature codes and no advanced features are -+ * available on these devices. -+ * User action: -+ * None, if the DASD does not provide feature codes. If the DASD provides -+ * feature codes, make sure that it is working correctly, then set it offline -+ * and back online. -+ */ -+ -+/*? -+ * Text: "%s: A channel path group could not be established\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * Initialization of a DASD did not complete because a channel path group -+ * could not be established. -+ * User action: -+ * Make sure that the DASD is working correctly, then try again to set it -+ * online. If initialization still fails, reboot. -+ */ -+ -+/*? -+ * Text: "%s: The DASD is not operating in multipath mode\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD channel path group could not be configured to use multipath mode. -+ * This might negatively affect I/O performance on this DASD. -+ * User action: -+ * Make sure that the DASD is working correctly, then try again to set it -+ * online. If initialization still fails, reboot. -+ */ -+ -+/*? -+ * Text: "%s: Detecting the DASD disk layout failed because of an I/O error\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The disk layout of the DASD could not be detected because of an unexpected -+ * I/O error. The DASD device driver treats the device like an unformatted DASD, -+ * and partitions on the device are not accessible. 
-+ * User action: -+ * If the DASD is formatted, make sure that the DASD is working correctly, -+ * then set it offline and back online. If the DASD is unformatted, format the -+ * DASD, for example, with dasdfmt. -+ * ATTENTION: Formatting irreversibly destroys all data on the DASD. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/dasd-fba -@@ -0,0 +1,30 @@ -+ -+/*? -+ * Text: "%s: New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB and %d B/blk\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the DASD -+ * @2: device type -+ * @3: device model -+ * @4: control unit type -+ * @5: control unit model -+ * @6: size -+ * @7: bytes per block -+ * Description: -+ * A DASD with the shown characteristics has been set online. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: Allocating memory for private DASD data failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the DASD -+ * Description: -+ * The DASD device driver maintains data structures for each DASD it manages. -+ * There is not enough memory to allocate these data structures for one or -+ * more DASD. -+ * User action: -+ * Free some memory and try the operation again. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/dcssblk -@@ -0,0 +1,192 @@ -+/*? -+ * Text: "Adjacent DCSSs %s and %s are not contiguous\n" -+ * Severity: Error -+ * Parameter: -+ * @1: name 1 -+ * @2: name 2 -+ * Description: -+ * You can only map a set of two or more DCSSs to a single DCSS device if the -+ * DCSSs in the set form a contiguous memory space. The DCSS device cannot be -+ * created because there is a memory gap between two adjacent DCSSs. -+ * User action: -+ * Ensure that you have specified all DCSSs that belong to the set. Check the -+ * definitions of the DCSSs on the z/VM hypervisor to verify that they form -+ * a contiguous memory space. -+ */ -+ -+/*? 
-+ * Text: "DCSS %s and DCSS %s have incompatible types\n" -+ * Severity: Error -+ * Parameter: -+ * @1: name 1 -+ * @2: name 2 -+ * Description: -+ * You can only map a set of two or more DCSSs to a single DCSS device if -+ * either all DCSSs in the set have the same type or if the set contains DCSSs -+ * of the two types EW and EN but no other type. The DCSS device cannot be -+ * created because at least two of the specified DCSSs are not compatible. -+ * User action: -+ * Check the definitions of the DCSSs on the z/VM hypervisor to verify that -+ * their types are compatible. -+ */ -+ -+/*? -+ * Text: "DCSS %s is of type SC and cannot be loaded as exclusive-writable\n" -+ * Severity: Error -+ * Parameter: -+ * @1: device name -+ * Description: -+ * You cannot load a DCSS device in exclusive-writable access mode if the DCSS -+ * device maps to one or more DCSSs of type SC. -+ * User action: -+ * Load the DCSS in shared access mode. -+ */ -+ -+/*? -+ * Text: "DCSS device %s is removed after a failed access mode change\n" -+ * Severity: Error -+ * Parameter: -+ * @1: device name -+ * Description: -+ * To change the access mode of a DCSS device, all DCSSs that map to the device -+ * were unloaded. Reloading the DCSSs for the new access mode failed and the -+ * device is removed. -+ * User action: -+ * Look for related messages to find out why the DCSSs could not be reloaded. -+ * If necessary, add the device again. -+ */ -+ -+/*? -+ * Text: "All DCSSs that map to device %s are saved\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: device name -+ * Description: -+ * A save request has been submitted for the DCSS device. Changes to all DCSSs -+ * that map to the device are saved permanently. -+ * User action: -+ * None. -+ */ -+ -+/*?
-+ * Text: "Device %s is in use, its DCSSs will be saved when it becomes idle\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: device name -+ * Description: -+ * A save request for the device has been deferred until the device becomes -+ * idle. Then changes to all DCSSs that the device maps to will be saved -+ * permanently. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "A pending save request for device %s has been canceled\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: device name -+ * Description: -+ * A save request for the DCSSs that map to a DCSS device has been pending -+ * while the device was in use. This save request has been canceled. Changes to -+ * the DCSSs will not be saved permanently. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Loaded %s with total size %lu bytes and capacity %lu sectors\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: DCSS names -+ * @2: total size in bytes -+ * @3: total size in 512 byte sectors -+ * Description: -+ * The listed DCSSs have been verified as contiguous and successfully loaded. -+ * The displayed sizes are the sums of all DCSSs. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Device %s cannot be removed because it is not a known device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: device name -+ * Description: -+ * The DCSS device you are trying to remove is not known to the DCSS device -+ * driver. -+ * User action: -+ * List the entries under /sys/devices/dcssblk/ to see the names of the -+ * existing DCSS devices. -+ */ -+ -+/*? -+ * Text: "Device %s cannot be removed while it is in use\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: device name -+ * Description: -+ * You are trying to remove a device that is in use. -+ * User action: -+ * Make sure that all users of the device close the device before you try to -+ * remove it. -+ */ -+ -+/*? 
-+ * Text: "Device %s has become idle and is being saved now\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: device name -+ * Description: -+ * A save request for the DCSSs that map to a DCSS device has been pending -+ * while the device was in use. The device has become idle and all changes -+ * to the DCSSs are now saved permanently. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Writing to %s failed because it is a read-only device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: device name -+ * Description: -+ * The DCSS device is in shared access mode and cannot be written to. Depending -+ * on the type of the DCSSs that the device maps to, you might be able to -+ * change the access mode to exclusive-writable. -+ * User action: -+ * If the DCSSs of the device are of type SC, do not attempt to write to the -+ * device. If the DCSSs of the device are of type ER or SR, change the access -+ * mode to exclusive-writable before writing to the device. -+ */ -+ -+/*? -+ * Text: "The address range of DCSS %s changed while the system was suspended\n" -+ * Severity: Error -+ * Parameter: -+ * @1: device name -+ * Description: -+ * After resuming the system, the start address or end address of a DCSS does -+ * not match the address when the system was suspended. DCSSs must not be -+ * changed after the system was suspended. -+ * This error cannot be recovered. The system is stopped with a kernel panic. -+ * User action: -+ * Reboot Linux. -+ */ -+ -+/*? -+ * Text: "Suspending the system failed because DCSS device %s is writable\n" -+ * Severity: Error -+ * Parameter: -+ * @1: device name -+ * Description: -+ * A system cannot be suspended if one or more DCSSs are accessed in exclusive- -+ * writable mode. DCSS segment types EW, SW, and EN are always writable and -+ * must be removed before a system is suspended. -+ * User action: -+ * Remove all DCSSs of segment types EW, SW, and EN by writing the DCSS name to -+ * the sysfs 'remove' attribute. 
Set the access mode for all DCSSs of segment -+ * types SR and ER to read-only by writing 1 to the sysfs 'shared' attribute of -+ * the DCSS. Then try again to suspend the system. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/extmem -@@ -0,0 +1,290 @@ -+/*? -+ * Text: "Querying a DCSS type failed with rc=%ld\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: return code -+ * Description: -+ * The DCSS kernel interface used z/VM diagnose call X'64' to query the -+ * type of a DCSS. z/VM failed to determine the type and returned an error. -+ * User action: -+ * Look for related messages to find out which DCSS is affected. -+ * For details about the return codes see the section about DIAGNOSE Code -+ * X'64' in "z/VM CP Programming Services". -+ */ -+ -+/*? -+ * Text: "Loading DCSS %s failed with rc=%ld\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: DCSS name -+ * @2: return code -+ * Description: -+ * The DCSS kernel interface used diagnose call X'64' to load a DCSS. z/VM -+ * failed to load the DCSS and returned an error. -+ * User action: -+ * For details about the return codes see the section about DIAGNOSE Code -+ * X'64' in "z/VM CP Programming Services". -+ */ -+ -+/*? -+ * Text: "DCSS %s of range %p to %p and type %s loaded as exclusive-writable\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: DCSS name -+ * @2: starting page address -+ * @3: ending page address -+ * @4: DCSS type -+ * Description: -+ * The DCSS was loaded successfully in exclusive-writable access mode. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "DCSS %s of range %p to %p and type %s loaded in shared access mode\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: DCSS name -+ * @2: starting page address -+ * @3: ending page address -+ * @4: DCSS type -+ * Description: -+ * The DCSS was loaded successfully in shared access mode. -+ * User action: -+ * None. -+ */ -+ -+/*? 
-+ * Text: "DCSS %s is already in the requested access mode\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * A request to reload a DCSS with a new access mode has been rejected -+ * because the new access mode is the same as the current access mode. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "DCSS %s is in use and cannot be reloaded\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * Reloading a DCSS in a different access mode has failed because the DCSS is -+ * being used by one or more device drivers. The DCSS remains loaded with the -+ * current access mode. -+ * User action: -+ * Ensure that the DCSS is not used by any device driver then try again to -+ * load the DCSS with the new access mode. -+ */ -+ -+/*? -+ * Text: "DCSS %s overlaps with used memory resources and cannot be reloaded\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * The DCSS has been unloaded and cannot be reloaded because it overlaps with -+ * another loaded DCSS or with the memory of the z/VM guest virtual machine -+ * (guest storage). -+ * User action: -+ * Ensure that no DCSS is loaded that has overlapping memory resources -+ * with the DCSS you want to reload. If the DCSS overlaps with guest storage, -+ * use the DEF STORE CONFIG z/VM CP command to create a sufficient storage gap -+ * for the DCSS. For details, see the section about the DCSS device driver in -+ * "Device Drivers, Features, and Commands". -+ */ -+ -+/*? -+ * Text: "Reloading DCSS %s failed with rc=%ld\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: DCSS name -+ * @2: return code -+ * Description: -+ * The DCSS kernel interface used z/VM diagnose call X'64' to reload a DCSS -+ * in a different access mode. The DCSS was unloaded but z/VM failed to reload -+ * the DCSS. 
-+ * User action: -+ * For details about the return codes see the section about DIAGNOSE Code -+ * X'64' in "z/VM CP Programming Services". -+ */ -+ -+/*? -+ * Text: "Unloading unknown DCSS %s failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * The specified DCSS cannot be unloaded. The DCSS is known to the DCSS device -+ * driver but not to the DCSS kernel interface. This problem indicates a -+ * program error in extmem.c. -+ * User action: -+ * Report this problem to your support organization. -+ */ -+ -+/*? -+ * Text: "Saving unknown DCSS %s failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * The specified DCSS cannot be saved. The DCSS is known to the DCSS device -+ * driver but not to the DCSS kernel interface. This problem indicates a -+ * program error in extmem.c. -+ * User action: -+ * Report this problem to your support organization. -+ */ -+ -+/*? -+ * Text: "Saving a DCSS failed with DEFSEG response code %i\n" -+ * Severity: Error -+ * Parameter: -+ * @1: response-code -+ * Description: -+ * The DEFSEG z/VM CP command failed to permanently save changes to a DCSS. -+ * User action: -+ * Look for related messages to find the cause of this error. See also message -+ * HCPE in the DEFSEG section of the "z/VM CP Command and -+ * Utility Reference". -+ */ -+ -+/*? -+ * Text: "Saving a DCSS failed with SAVESEG response code %i\n" -+ * Severity: Error -+ * Parameter: -+ * @1: response-code -+ * Description: -+ * The SAVESEG z/VM CP command failed to permanently save changes to a DCSS. -+ * User action: -+ * Look for related messages to find the cause of this error. See also message -+ * HCPE in the SAVESEG section of the "z/VM CP Command and -+ * Utility Reference". -+ */ -+ -+/*? 
-+ * Text: "DCSS %s cannot be loaded or queried\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * You cannot load or query the specified DCSS because it either is not defined -+ * in the z/VM hypervisor, or it is a class S DCSS, or it is above 2047 MB -+ * and the Linux system is a 31-bit system. -+ * User action: -+ * Use the CP command "QUERY NSS" to find out if the DCSS is a valid -+ * DCSS that can be loaded. -+ */ -+ -+/*? -+ * Text: "DCSS %s cannot be loaded or queried without z/VM\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * A DCSS is a z/VM resource. Your Linux instance is not running as a z/VM -+ * guest operating system and, therefore, cannot load DCSSs. -+ * User action: -+ * Load DCSSs only on Linux instances that run as z/VM guest operating systems. -+ */ -+ -+/*? -+ * Text: "Loading or querying DCSS %s resulted in a hardware error\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * Either the z/VM DIAGNOSE X'64' query or load call issued for the DCSS -+ * returned with an error. -+ * User action: -+ * Look for previous extmem message to find the return code from the -+ * DIAGNOSE X'64' query or load call. For details about the return codes see -+ * the section about DIAGNOSE Code X'64' in "z/VM CP Programming Services". -+ */ -+ -+/*? -+ * Text: "DCSS %s has multiple page ranges and cannot be loaded or queried\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * You can only load or query a DCSS with multiple page ranges if: -+ * - The DCSS has 6 or fewer page ranges -+ * - The page ranges form a contiguous address space -+ * - The page ranges are of type EW or EN -+ * User action: -+ * Check the definition of the DCSS to make sure that the conditions for -+ * DCSSs with multiple page ranges are met. -+ */ -+ -+/*? 
-+ * Text: "%s needs used memory resources and cannot be loaded or queried\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * You cannot load or query the DCSS because it overlaps with an already -+ * loaded DCSS or with the memory of the z/VM guest virtual machine -+ * (guest storage). -+ * User action: -+ * Ensure that no DCSS is loaded that has overlapping memory resources -+ * with the DCSS you want to load or query. If the DCSS overlaps with guest -+ * storage, use the DEF STORE CONFIG z/VM CP command to create a sufficient -+ * storage gap for the DCSS. For details, see the section about the DCSS -+ * device driver in "Device Drivers, Features, and Commands". -+ */ -+ -+/*? -+ * Text: "DCSS %s is already loaded in a different access mode\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * The DCSS you are trying to load has already been loaded in a different -+ * access mode. You cannot simultaneously load the DCSS in different modes. -+ * User action: -+ * Reload the DCSS in a different mode or load it with the same mode in which -+ * it has already been loaded. -+ */ -+ -+/*? -+ * Text: "There is not enough memory to load or query DCSS %s\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * The available memory is not enough to load or query the DCSS. -+ * User action: -+ * Free some memory and repeat the failed operation. -+ */ -+ -+/*? -+ * Text: "DCSS %s overlaps with used storage and cannot be loaded\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * You cannot load the DCSS because it overlaps with an already loaded DCSS -+ * or with the memory of the z/VM guest virtual machine (guest storage). -+ * User action: -+ * Ensure that no DCSS is loaded that has overlapping memory resources -+ * with the DCSS you want to load. 
If the DCSS overlaps with guest storage, -+ * use the DEF STORE CONFIG z/VM CP command to create a sufficient storage gap -+ * for the DCSS. For details, see the section about the DCSS device driver in -+ * "Device Drivers, Features, and Commands". -+ */ -+ -+/*? -+ * Text: "DCSS %s exceeds the kernel mapping range (%lu) and cannot be loaded\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * @2: kernel mapping range in bytes -+ * Description: -+ * You cannot load the DCSS because it exceeds the kernel mapping range limit. -+ * User action: -+ * Ensure that the DCSS range is defined below the kernel mapping range. -+ */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/hvc_iucv -@@ -0,0 +1,122 @@ -+/*? -+ * Text: "The z/VM IUCV HVC device driver cannot be used without z/VM\n" -+ * Severity: Notice -+ * Description: -+ * The z/VM IUCV hypervisor console (HVC) device driver requires the -+ * z/VM inter-user communication vehicle (IUCV). -+ * User action: -+ * Set "hvc_iucv=" to zero in the kernel parameter line and reboot Linux. -+ */ -+ -+/*? -+ * Text: "%lu is not a valid value for the hvc_iucv= kernel parameter\n" -+ * Severity: Error -+ * Parameter: -+ * @1: hvc_iucv_devices -+ * Description: -+ * The "hvc_iucv=" kernel parameter specifies the number of z/VM IUCV -+ * hypervisor console (HVC) terminal devices. -+ * The parameter value ranges from 0 to 8. -+ * If zero is specified, the z/VM IUCV HVC device driver is disabled -+ * and no IUCV-based terminal access is available. -+ * User action: -+ * Correct the "hvc_iucv=" setting in the kernel parameter line and -+ * reboot Linux. -+ */ -+ -+/*? -+ * Text: "Creating a new HVC terminal device failed with error code=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: errno -+ * Description: -+ * The device driver initialization failed to allocate a new -+ * HVC terminal device. -+ * A possible cause of this problem is memory constraints. 
-+ * User action: -+ * If the error code is -12 (ENOMEM), consider assigning more memory -+ * to your z/VM guest virtual machine. -+ */ -+ -+/*? -+ * Text: "Registering HVC terminal device as Linux console failed\n" -+ * Severity: Error -+ * Description: -+ * The device driver initialization failed to set up the first HVC terminal -+ * device for use as Linux console. -+ * User action: -+ * If the error code is -12 (ENOMEM), consider assigning more memory -+ * to your z/VM guest virtual machine. -+ */ -+ -+/*? -+ * Text: "Registering IUCV handlers failed with error code=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: errno -+ * Description: -+ * The device driver initialization failed to register with z/VM IUCV to -+ * handle IUCV connections, as well as sending and receiving of IUCV messages. -+ * User action: -+ * Check for related IUCV error messages and see the errno manual page -+ * to find out what caused the problem. -+ */ -+ -+/*? -+ * Text: "Allocating memory failed with reason code=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: reason -+ * Description: -+ * The z/VM IUCV hypervisor console (HVC) device driver initialization failed, -+ * because of a general memory allocation failure. The reason code indicates -+ * the memory operation that has failed: -+ * kmem_cache (reason code=1), -+ * mempool (reason code=2), or -+ * hvc_iucv_allow= (reason code=3) -+ * User action: -+ * Consider assigning more memory to your z/VM guest virtual machine. -+ */ -+ -+/*? -+ * Text: "hvc_iucv_allow= does not specify a valid z/VM user ID list\n" -+ * Severity: Error -+ * Description: -+ * The "hvc_iucv_allow=" kernel parameter specifies a comma-separated list -+ * of z/VM user IDs that are permitted to connect to the z/VM IUCV hypervisor -+ * device driver. -+ * The z/VM user IDs in the list must not exceed eight characters and must -+ * not contain spaces. 
-+ * User action: -+ * Correct the "hvc_iucv_allow=" setting in the kernel parameter line and reboot -+ * Linux. -+ */ -+ -+/*? -+ * Text: "hvc_iucv_allow= specifies too many z/VM user IDs\n" -+ * Severity: Error -+ * Description: -+ * The "hvc_iucv_allow=" kernel parameter specifies a comma-separated list -+ * of z/VM user IDs that are permitted to connect to the z/VM IUCV hypervisor -+ * device driver. -+ * The number of z/VM user IDs that are specified with the "hvc_iucv_allow=" -+ * kernel parameter exceeds the maximum of 500. -+ * User action: -+ * Correct the "hvc_iucv_allow=" setting by reducing the z/VM user IDs in -+ * the list and reboot Linux. -+ */ -+ -+/*? -+ * Text: "A connection request from z/VM user ID %s was refused\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: ID -+ * Description: -+ * An IUCV connection request from another z/VM guest virtual machine has been -+ * refused. The request was from a z/VM guest virtual machine that is not -+ * listed by the "hvc_iucv_allow=" kernel parameter. -+ * User action: -+ * Check the "hvc_iucv_allow=" kernel parameter setting. -+ * Consider adding the z/VM user ID to the "hvc_iucv_allow=" list in the kernel -+ * parameter line and reboot Linux. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/hypfs -@@ -0,0 +1,56 @@ -+/*? -+ * Text: "The hardware system does not support hypfs\n" -+ * Severity: Error -+ * Description: -+ * hypfs requires DIAGNOSE Code X'204' but this diagnose code is not available -+ * on your hardware. You need more recent hardware to use hypfs. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "The hardware system does not provide all functions required by hypfs\n" -+ * Severity: Error -+ * Description: -+ * hypfs requires DIAGNOSE Code X'224' but this diagnose code is not available -+ * on your hardware. You need more recent hardware to use hypfs. -+ * User action: -+ * None. -+ */ -+ -+/*? 
-+ * Text: "Updating the hypfs tree failed\n" -+ * Severity: Error -+ * Description: -+ * There was not enough memory available to update the hypfs tree. -+ * User action: -+ * Free some memory and try again to update the hypfs tree. Consider assigning -+ * more memory to your LPAR or z/VM guest virtual machine. -+ */ -+ -+/*? -+ * Text: "%s is not a valid mount option\n" -+ * Severity: Error -+ * Parameter: -+ * @1: mount option -+ * Description: -+ * hypfs has detected mount options that are not valid. -+ * User action: -+ * See "Device Drivers Features and Commands" for information about valid -+ * mount options for hypfs. -+ */ -+ -+/*? -+ * Text: "Initialization of hypfs failed with rc=%i\n" -+ * Severity: Error -+ * Parameter: -+ * @1: error code -+ * Description: -+ * Initialization of hypfs failed because of resource or hardware constraints. -+ * Possible reasons for this problem are insufficient free memory or missing -+ * hardware interfaces. -+ * User action: -+ * See errno.h for information about the error codes. -+ */ -+ -+/*? Text: "Hypervisor filesystem mounted\n" */ ---- /dev/null -+++ b/Documentation/kmsg/s390/iucv -@@ -0,0 +1,33 @@ -+/*? -+ * Text: "Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: CPU number -+ * @2: hexadecimal error value -+ * @3: short error code explanation -+ * Description: -+ * Defining an interrupt buffer for external interrupts failed. Error -+ * value 0x03 indicates a problem with the z/VM directory entry of the -+ * z/VM guest virtual machine. This problem can also be caused by a -+ * program error. -+ * User action: -+ * If the error value is 0x03, examine the z/VM directory entry of your -+ * z/VM guest virtual machine. If the directory entry is correct or if the -+ * error value is not 0x03, report this problem to your support organization. -+ */ -+ -+/*? 
-+ * Text: "Suspending Linux did not completely close all IUCV connections\n" -+ * Severity: Warning -+ * Description: -+ * When resuming a suspended Linux instance, the IUCV base code found -+ * data structures from one or more IUCV connections that existed before the -+ * Linux instance was suspended. Modules that use IUCV connections must close -+ * these connections when a Linux instance is suspended. This problem -+ * indicates an error in a program that used an IUCV connection. -+ * User action: -+ * Report this problem to your support organization. -+ */ -+ -+/*? Text: "iucv_external_interrupt: out of memory\n" */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/lcs -@@ -0,0 +1,161 @@ -+/*? -+ * Text: "%s: Allocating a socket buffer to interface %s failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the LCS device -+ * @2: network interface -+ * Description: -+ * LAN channel station (LCS) devices require a socket buffer (SKB) structure -+ * for storing incoming data. The LCS device driver failed to allocate an SKB -+ * structure to the LCS device. A likely cause of this problem is memory -+ * constraints. -+ * User action: -+ * Free some memory and repeat the failed operation. -+ */ -+ -+/*? -+ * Text: "%s: Shutting down the LCS device failed\n " -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the LCS device -+ * Description: -+ * A request to shut down a LAN channel station (LCS) device resulted in an -+ * error. The error is logged in the LCS trace at trace level 4. -+ * User action: -+ * Try again to shut down the device. If the error persists, see the LCS trace -+ * to find out what causes the error. -+ */ -+ -+/*? 
-+ * Text: "%s: Detecting a network adapter for LCS devices failed with rc=%d (0x%x)\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the LCS device -+ * @2: lcs_detect return code in decimal notation -+ * @3: lcs_detect return code in hexadecimal notation -+ * Description: -+ * The LCS device driver could not initialize a network adapter. -+ * User action: -+ * Note the return codes from the error message and contact IBM support. -+ */ -+ -+/*? -+ * Text: "%s: A recovery process has been started for the LCS device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the LCS device -+ * Description: -+ * The LAN channel station (LCS) device is shut down and restarted. The recovery -+ * process might have been initiated by a user or started automatically as a -+ * response to a device problem. -+ * User action: -+ * Wait until a message indicates the completion of the recovery process. -+ */ -+ -+/*? -+ * Text: "%s: An I/O-error occurred on the LCS device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the LCS device -+ * Description: -+ * The LAN channel station (LCS) device reported a problem that can be recovered -+ * by the LCS device driver. Repeated occurrences of this problem indicate a -+ * malfunctioning device. -+ * User action: -+ * If this problem occurs frequently, initiate a recovery process for the -+ * device, for example, by writing '1' to the 'recover' sysfs attribute of the -+ * device. -+ */ -+ -+/*? -+ * Text: "%s: A command timed out on the LCS device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the LCS device -+ * Description: -+ * The LAN channel station (LCS) device reported a problem that can be recovered -+ * by the LCS device driver. Repeated occurrences of this problem indicate a -+ * malfunctioning device. -+ * User action: -+ * If this problem occurs frequently, initiate a recovery process for the -+ * device, for example, by writing '1' to the 'recover' sysfs attribute of the -+ * device. 
-+ */ -+ -+/*? -+ * Text: "%s: An error occurred on the LCS device, rc=%ld\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the LCS device -+ * @2: return code -+ * Description: -+ * The LAN channel station (LCS) device reported a problem that can be recovered -+ * by the LCS device driver. Repeated occurrences of this problem indicate a -+ * malfunctioning device. -+ * User action: -+ * If this problem occurs frequently, initiate a recovery process for the -+ * device, for example, by writing '1' to the 'recover' sysfs attribute of the -+ * device. -+ */ -+ -+/*? -+ * Text: "%s: The LCS device stopped because of an error, dstat=0x%X, cstat=0x%X \n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the LCS device -+ * @2: device status -+ * @3: subchannel status -+ * Description: -+ * The LAN channel station (LCS) device reported an error. The LCS device driver -+ * might start a device recovery process. -+ * User action: -+ * If the device driver does not start a recovery process, initiate a recovery -+ * process, for example, by writing '1' to the 'recover' sysfs attribute of the -+ * device. If the problem persists, note the status information provided with -+ * the message and contact IBM support. -+ */ -+ -+/*? -+ * Text: "%s: Starting an LCS device resulted in an error, rc=%d!\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the LCS device -+ * @2: ccw_device_start return code in decimal notation -+ * Description: -+ * The LAN channel station (LCS) device driver failed to initialize an LCS -+ * device. The device is not operational. -+ * User action: -+ * Initiate a recovery process, for example, by writing '1' to the 'recover' -+ * sysfs attribute of the device. If the problem persists, contact IBM support. -+ */ -+ -+/*? 
-+ * Text: "%s: Sending data from the LCS device to the LAN failed with rc=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the LCS device -+ * @2: ccw_device_resume return code in decimal notation -+ * Description: -+ * The LAN channel station (LCS) device driver could not send data to the LAN -+ * using the LCS device. This might be a temporary problem. Operations continue -+ * on the LCS device. -+ * User action: -+ * If this problem occurs frequently, initiate a recovery process, for example, -+ * by writing '1' to the 'recover' sysfs attribute of the device. If the -+ * problem persists, contact IBM support. -+ */ -+ -+/*? Text: "Query IPAssist failed. Assuming unsupported!\n" */ -+/*? Text: "Stoplan for %s initiated by LGW.\n" */ -+/*? Text: "Not enough memory to add new multicast entry!\n" */ -+/*? Text: "Not enough memory for debug facility.\n" */ -+/*? Text: "Adding multicast address failed. Table possibly full!\n" */ -+/*? Text: "Error in opening device!\n" */ -+/*? Text: "LCS device %s %s IPv6 support\n" */ -+/*? Text: "Device %s successfully recovered!\n" */ -+/*? Text: "LCS device %s %s Multicast support\n" */ -+/*? Text: " Initialization failed\n" */ -+/*? Text: "Loading %s\n" */ -+/*? Text: "Initialization failed\n" */ -+/*? Text: "Terminating lcs module.\n" */ -+/*? Text: "Device %s could not be recovered!\n" */ ---- /dev/null -+++ b/Documentation/kmsg/s390/monreader -@@ -0,0 +1,127 @@ -+/*? -+ * Text: "Reading monitor data failed with rc=%i\n" -+ * Severity: Error -+ * Parameter: -+ * @1: return code -+ * Description: -+ * The z/VM *MONITOR record device driver failed to read monitor data -+ * because the IUCV REPLY function failed. The read function against -+ * the monitor record device returns EIO. All monitor data that has been read -+ * since the last read with 0 size is incorrect. -+ * User action: -+ * Disregard all monitor data that has been read since the last read with -+ * 0 size. 
If the device driver has been compiled as a separate module, unload -+ * and reload the monreader module. If the device driver has been compiled -+ * into the kernel, reboot Linux. For more information about possible causes -+ * of the error see the IUCV section in "z/VM CP Programming Services" and -+ * the *MONITOR section in "z/VM Performance". -+ */ -+ -+/*? -+ * Text: "z/VM *MONITOR system service disconnected with rc=%i\n" -+ * Severity: Error -+ * Parameter: -+ * @1: IPUSER SEVER return code -+ * Description: -+ * The z/VM *MONITOR record device driver receives monitor records through -+ * an IUCV connection to the z/VM *MONITOR system service. This connection -+ * has been severed and the read function of the z/VM *MONITOR device driver -+ * returns EIO. All data received since the last read with 0 size is incorrect. -+ * User action: -+ * Disregard all monitor data read since the last read with 0 size. Close and -+ * reopen the monitor record device. For information about the IPUSER SEVER -+ * return codes see "z/VM Performance". -+ */ -+ -+/*? -+ * Text: "The read queue for monitor data is full\n" -+ * Severity: Warning -+ * Description: -+ * The read function of the z/VM *MONITOR device driver returns EOVERFLOW -+ * because not enough monitor data has been read since the monitor device -+ * has been opened. Monitor data already read are valid and subsequent reads -+ * return valid data but some intermediate data might be missing. -+ * User action: -+ * Be aware that monitor data might be missing. Assure that you regularly -+ * read monitor data after opening the monitor record device. -+ */ -+ -+/*? -+ * Text: "Connecting to the z/VM *MONITOR system service failed with rc=%i\n" -+ * Severity: Error -+ * Parameter: -+ * @1: IUCV CONNECT return code -+ * Description: -+ * The z/VM *MONITOR record device driver receives monitor records through -+ * an IUCV connection to the z/VM *MONITOR system service. 
This connection -+ * could not be established when the monitor record device was opened. If -+ * the return code is 15, your z/VM guest virtual machine is not authorized -+ * to connect to the *MONITOR system service. -+ * User action: -+ * If the return code is 15, ensure that the IUCV *MONITOR statement is -+ * included in the z/VM directory entry for your z/VM guest virtual machine. -+ * For other IUCV CONNECT return codes see the IUCV section in "CP Programming -+ * Services" and the *MONITOR section in "z/VM Performance". -+ */ -+ -+/*? -+ * Text: "Disconnecting the z/VM *MONITOR system service failed with rc=%i\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: IUCV SEVER return code -+ * Description: -+ * The z/VM *MONITOR record device driver receives monitor data through an -+ * IUCV connection to the z/VM *MONITOR system service. This connection -+ * could not be closed when the monitor record device was closed. You might -+ * not be able to resume monitoring. -+ * User action: -+ * No immediate action is necessary. If you cannot open the monitor record -+ * device in the future, reboot Linux. For information about the IUCV SEVER -+ * return codes see the IUCV section in "CP Programming Services" and the -+ * *MONITOR section in "z/VM Performance". -+ */ -+ -+/*? -+ * Text: "The z/VM *MONITOR record device driver cannot be loaded without z/VM\n" -+ * Severity: Error -+ * Description: -+ * The z/VM *MONITOR record device driver uses z/VM system services to provide -+ * monitor data about z/VM guest operating systems to applications on Linux. -+ * On Linux instances that run in environments other than the z/VM hypervisor, -+ * the z/VM *MONITOR record device driver does not provide any useful -+ * function and the corresponding monreader module cannot be loaded. -+ * User action: -+ * Load the z/VM *MONITOR record device driver only on Linux instances that run -+ * as guest operating systems of the z/VM hypervisor. 
If the z/VM *MONITOR -+ * record device driver has been compiled into the kernel, ignore this message. -+ */ -+ -+/*? -+ * Text: "The z/VM *MONITOR record device driver failed to register with IUCV\n" -+ * Severity: Error -+ * Description: -+ * The z/VM *MONITOR record device driver receives monitor data through an IUCV -+ * connection and needs to register with the IUCV device driver. This -+ * registration failed and the z/VM *MONITOR record device driver was not -+ * loaded. A possible cause of this problem is insufficient memory. -+ * User action: -+ * Free some memory and try again to load the module. If the z/VM *MONITOR -+ * record device driver has been compiled into the kernel, you might have to -+ * configure more memory and reboot Linux. If you do not want to read monitor -+ * data, ignore this message. -+ */ -+ -+/*? -+ * Text: "The specified *MONITOR DCSS %s does not have the required type SC\n" -+ * Severity: Error -+ * Parameter: -+ * @1: DCSS name -+ * Description: -+ * The DCSS that was specified with the monreader.mondcss kernel parameter or -+ * with the mondcss module parameter cannot be a *MONITOR DCSS because it is -+ * not of type SC. -+ * User action: -+ * Confirm that you are using the name of the DCSS that has been configured as -+ * the *MONITOR DCSS on the z/VM hypervisor. If the default name, MONDCSS, is -+ * used, omit the monreader.mondcss or mondcss parameter. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/monwriter -@@ -0,0 +1,16 @@ -+/*? -+ * Text: "Writing monitor data failed with rc=%i\n" -+ * Severity: Error -+ * Parameter: -+ * @1: return code -+ * Description: -+ * The monitor stream application device driver used the z/VM diagnose call -+ * DIAG X'DC' to start writing monitor data. z/VM returned an error and the -+ * monitor data cannot be written. If the return code is 5, your z/VM guest -+ * virtual machine is not authorized to write monitor data. 
-+ * User action: -+ * If the return code is 5, ensure that your z/VM guest virtual machine's -+ * entry in the z/VM directory includes the OPTION APPLMON statement. -+ * For other return codes see the section about DIAGNOSE Code X'DC' -+ * in "z/VM CP Programming Services". -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/netiucv -@@ -0,0 +1,139 @@ -+/*? -+ * Text: "%s: The peer interface of the IUCV device has closed the connection\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the IUCV device -+ * Description: -+ * The peer interface on the remote z/VM guest virtual machine has closed the -+ * connection. Do not expect further packets on this interface. Any packets -+ * you send to this interface will be dropped. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: The IUCV device failed to connect to z/VM guest %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the IUCV device -+ * @2: z/VM user ID -+ * Description: -+ * The connection cannot be established because the z/VM guest virtual -+ * machine with the peer interface is not running. -+ * User action: -+ * Ensure that the z/VM guest virtual machine with the peer interface is -+ * running; then try again to establish the connection. -+ */ -+ -+/*? -+ * Text: "%s: The IUCV device failed to connect to the peer on z/VM guest %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the IUCV device -+ * @2: z/VM user ID -+ * Description: -+ * The connection cannot be established because the z/VM guest virtual machine -+ * with the peer interface is not configured for IUCV connections. -+ * User action: -+ * Configure the z/VM guest virtual machine with the peer interface for IUCV -+ * connections; then try again to establish the connection. -+ */ -+ -+/*? 
-+ * Text: "%s: Connecting the IUCV device would exceed the maximum number of IUCV connections\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the IUCV device -+ * Description: -+ * The connection cannot be established because the maximum number of IUCV -+ * connections has been reached on the local z/VM guest virtual machine. -+ * User action: -+ * Close some of the established IUCV connections on the local z/VM guest -+ * virtual machine; then try again to establish the connection. -+ */ -+ -+/*? -+ * Text: "%s: z/VM guest %s has too many IUCV connections to connect with the IUCV device\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the IUCV device -+ * @2: remote z/VM user ID -+ * Description: -+ * Connecting to the remote z/VM guest virtual machine failed because the -+ * maximum number of IUCV connections for the remote z/VM guest virtual -+ * machine has been reached. -+ * User action: -+ * Close some of the established IUCV connections on the remote z/VM guest -+ * virtual machine; then try again to establish the connection. -+ */ -+ -+/*? -+ * Text: "%s: The IUCV device cannot connect to a z/VM guest with no IUCV authorization\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the IUCV device -+ * Description: -+ * Because the remote z/VM guest virtual machine is not authorized for IUCV -+ * connections, the connection cannot be established. -+ * User action: -+ * Add the statements 'IUCV ALLOW' and 'IUCV ANY' to the z/VM directory -+ * entry of the remote z/VM guest virtual machine; then try again to -+ * establish the connection. See "z/VM CP Planning and Administration" -+ * for details about the IUCV statements. -+ */ -+ -+/*? -+ * Text: "%s: Connecting the IUCV device failed with error %d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the IUCV device -+ * @2: error code -+ * Description: -+ * The connection cannot be established because of an IUCV CONNECT error. 
-+ * User action: -+ * Report this problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: The IUCV device has been connected successfully to %s\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the IUCV device -+ * @2: remote z/VM user ID -+ * Description: -+ * The connection has been established and the interface is ready to -+ * transmit communication packages. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: The IUCV interface to %s has been established successfully\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the IUCV device -+ * @2: remote z/VM user ID -+ * Description: -+ * The IUCV interface to the remote z/VM guest virtual machine has been -+ * established and can be activated with "ifconfig up" or an equivalent -+ * command. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: The IUCV device is connected to %s and cannot be removed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the IUCV device -+ * @2: remote z/VM user ID -+ * Description: -+ * Removing a connection failed because the interface is active with a peer -+ * interface on a remote z/VM guest virtual machine. -+ * User action: -+ * Deactivate the interface with "ifconfig down" or an equivalent command; -+ * then try again to remove the interface. -+ */ -+ -+/*? Text: "driver unloaded\n" */ -+/*? Text: "driver initialized\n" */ ---- /dev/null -+++ b/Documentation/kmsg/s390/qeth -@@ -0,0 +1,606 @@ -+/*? -+ * Text: "%s: The LAN is offline\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * A start LAN command was sent by the qeth device driver but the physical or -+ * virtual adapter has not started the LAN. The LAN might take a few seconds -+ * to become available. -+ * User action: -+ * Check the status of the qeth device, for example, with the lsqeth command. 
-+ * If the device does not become operational within a few seconds, initiate a -+ * recovery process, for example, by writing '1' to the 'recover' sysfs -+ * attribute of the device. -+ */ -+ -+/*? -+ * Text: "%s: The user canceled setting the qeth device offline\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * A user initiated setting the device offline but subsequently canceled the -+ * operation, for example, with CTRL+C. -+ * User action: -+ * Check the status of the qeth device, for example, with the lsqeth command. -+ * If necessary, repeat the operation to set the device offline. -+ */ -+ -+/*? -+ * Text: "%s: A recovery process has been started for the device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * A recovery process was started either by the qeth device driver or through -+ * a user command. -+ * User action: -+ * Wait until a message indicates the completion of the recovery process. -+ */ -+ -+/*? -+ * Text: "%s: The qeth device driver failed to recover an error on the device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * The qeth device driver performed an automatic recovery operation to recover -+ * an error on a qeth device. The recovery operation failed. -+ * User action: -+ * Try the following actions in the given order: i) Check the status of the -+ * qeth device, for example, with the lsqeth command. ii) Initiate a recovery -+ * process by writing '1' to the 'recover' sysfs attribute of the device. -+ * iii) Ungroup and regroup the subchannel triplet of the device. vi) Reboot -+ * Linux. v) If the problem persists, gather Linux debug data and report the -+ * problem to your support organization. -+ */ -+ -+/*? 
-+ * Text: "%s: The link for interface %s on CHPID 0x%X failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * @3: CHPID -+ * Description: -+ * A network link failed. A possible reason for this error is that a physical -+ * network cable has been disconnected. -+ * User action: -+ * Ensure that the network cable on the adapter hardware is connected properly. -+ * If the connection is to a guest LAN, ensure that the device is still coupled -+ * to the guest LAN. -+ */ -+ -+/*? -+ * Text: "%s: The link for %s on CHPID 0x%X has been restored\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * @3: CHPID -+ * Description: -+ * A failed network link has been re-established. A device recovery is in -+ * progress. -+ * User action: -+ * Wait until a message indicates the completion of the recovery process. -+ */ -+ -+/*? -+ * Text: "%s: A hardware operation timed out on the device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * A hardware operation timed out on the qeth device. -+ * User action: -+ * Check the status of the qeth device, for example, with the lsqeth command. -+ * If the device is not operational, initiate a recovery process, for example, -+ * by writing '1' to the 'recover' sysfs attribute of the device. -+ */ -+ -+/*? -+ * Text: "%s: The adapter hardware is of an unknown type\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * The qeth device driver does not recognize the adapter hardware. The cause -+ * of this problem could be a hardware error or a Linux level that does not -+ * support your adapter hardware. -+ * User action: -+ * i) Investigate if your adapter hardware is supported by your Linux level. 
-+ * Consider using hardware that is supported by your Linux level or upgrading -+ * to a Linux level that supports your hardware. ii) Install the latest -+ * firmware on your adapter hardware. iii) If the problem persists and is not -+ * caused by a version mismatch, contact IBM support. -+ */ -+ -+/*? -+ * Text: "%s: The adapter is used exclusively by another host\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * The qeth adapter is exclusively used by another host. -+ * User action: -+ * Use another qeth adapter or configure this one not exclusively to a -+ * particular host. -+ */ -+ -+/*? -+ * Text: "%s: QDIO reported an error, rc=%i\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: return code -+ * Description: -+ * The QDIO subsystem reported an error. -+ * User action: -+ * Check for related QDIO errors. Check the status of the qeth device, for -+ * example, with the lsqeth command. If the device is not operational, initiate -+ * a recovery process, for example, by writing '1' to the 'recover' sysfs -+ * attribute of the device. -+ */ -+ -+/*? -+ * Text: "%s: There is no kernel module to support discipline %d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: discipline -+ * Description: -+ * The qeth device driver or a user command requested a kernel module for a -+ * particular qeth discipline. Either the discipline is not supported by the -+ * qeth device driver or the requested module is not available to your Linux -+ * system. -+ * User action: -+ * Check if the requested discipline module has been compiled into the kernel -+ * or is present in /lib/modules//kernel/drivers/s390/net. -+ */ -+ -+/*? -+ * Text: "Initializing the qeth device driver failed\n" -+ * Severity: Error -+ * Parameter: -+ * Description: -+ * The base module of the qeth device driver could not be initialized. 
-+ * User action: -+ * See errno.h to determine the reason for the error. -+ * i) Reboot Linux. ii) If the problem persists, gather Linux debug data and -+ * report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Registering IP address %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: IP address -+ * Description: -+ * An IP address could not be registered with the network adapter. -+ * User action: -+ * Check if another operating system instance has already registered the -+ * IP address with the same network adapter or at the same logical IP subnet. -+ */ -+ -+/*? -+ * Text: "%s: Reading the adapter MAC address failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * The qeth device driver could not read the MAC address from the network -+ * adapter. -+ * User action: -+ * Ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Starting ARP processing support for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not start ARP support on the network adapter. -+ * User action: -+ * Ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Starting IP fragmentation support for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not start IP fragmentation support on the -+ * network adapter. 
-+ * User action: -+ * Ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Starting proxy ARP support for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not start proxy ARP support on the network -+ * adapter. -+ * User action: -+ * None if you do not require proxy ARP support. If you need proxy ARP, -+ * ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Starting VLAN support for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not start VLAN support on the network adapter. -+ * User action: -+ * None if you do not require VLAN support. If you need VLAN support, -+ * ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Starting multicast support for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not start multicast support on the network -+ * adapter. -+ * User action: -+ * Ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? 
-+ * Text: "%s: Activating IPv6 support for %s failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not activate IPv6 support on the network -+ * adapter. -+ * User action: -+ * None if you do not require IPv6 communication. If you need IPv6 support, -+ * ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Enabling the passthrough mode for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not enable the passthrough mode on the -+ * network adapter. The passthrough mode is required for all network traffic -+ * other than IPv4. In particular, the passthrough mode is required for IPv6 -+ * traffic. -+ * User action: -+ * None if all you want to support is IPv4 communication. If you want to support -+ * IPv6 or other network traffic apart from IPv4, ungroup and regroup the -+ * subchannel triplet of the device. If this does not resolve the problem, -+ * reboot Linux. If the problem persists, gather Linux debug data and report -+ * the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Enabling broadcast filtering for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not enable broadcast filtering on the network -+ * adapter. -+ * User action: -+ * Ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? 
-+ * Text: "%s: Setting up broadcast filtering for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not set up broadcast filtering on the network -+ * adapter. -+ * User action: -+ * Ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Setting up broadcast echo filtering for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not set up broadcast echo filtering on the -+ * network adapter. -+ * User action: -+ * Ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Starting HW checksumming for %s failed, using SW checksumming\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The network adapter supports hardware checksumming for incoming IP packages -+ * but the qeth device driver could not start hardware checksumming on the -+ * adapter. The qeth device driver continues to use software checksumming for -+ * incoming IP packages. -+ * User action: -+ * None if you do not require hardware checksumming for incoming network -+ * traffic. If you want to enable hardware checksumming, ungroup and regroup -+ * the subchannel triplet of the device. If this does not resolve the problem, -+ * reboot Linux. If the problem persists, gather Linux debug data and report -+ * the problem to your support organization. -+ */ -+ -+/*? 
-+ * Text: "%s: Enabling HW checksumming for %s failed, using SW checksumming\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The network adapter supports hardware checksumming for incoming IP packages -+ * but the qeth device driver could not enable hardware checksumming on the -+ * adapter. The qeth device driver continues to use software checksumming for -+ * incoming IP packages. -+ * User action: -+ * None if you do not require hardware checksumming for incoming network -+ * traffic. If you want to enable hardware checksumming, ungroup and regroup -+ * the subchannel triplet of the device. If this does not resolve the problem, -+ * reboot Linux. If the problem persists, gather Linux debug data and report -+ * the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Starting outbound TCP segmentation offload for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The network adapter supports TCP segmentation offload, but the qeth device -+ * driver could not start this support on the adapter. -+ * User action: -+ * None if you do not require TCP segmentation offload. If you want to -+ * enable TCP segmentation offload, ungroup and regroup the subchannel triplet -+ * of the device. If this does not resolve the problem, reboot Linux. If the -+ * problem persists, gather Linux debug data and report the problem to your -+ * support organization. -+ */ -+ -+/*? -+ * Text: "%s: The network adapter failed to generate a unique ID\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * In IBM mainframe environments, network interfaces are not identified by -+ * a specific MAC address. Therefore, the network adapters provide the network -+ * interfaces with unique IDs to be used in their IPv6 link local addresses. 
-+ * Without such a unique ID, duplicate addresses might be assigned in other -+ * LPARs. -+ * User action: -+ * Install the latest firmware on the adapter hardware. Manually, configure -+ * an IPv6 link local address for this device. -+ */ -+ -+/*? -+ * Text: "There is no IPv6 support for the layer 3 discipline\n" -+ * Severity: Warning -+ * Description: -+ * If you want to use IPv6 with the layer 3 discipline, you need a Linux kernel -+ * with IPv6 support. Because your Linux kernel has not been compiled with -+ * IPv6 support, you cannot use IPv6 with the layer 3 discipline, even if your -+ * adapter supports IPv6. -+ * User action: -+ * Use a Linux kernel that has been complied to include IPv6 support if you -+ * want to use IPv6 with layer 3 qeth devices. -+ */ -+ -+/*? -+ * Text: "%s: The qeth device is not configured for the OSI layer required by z/VM\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * A qeth device that connects to a virtual network on z/VM must be configured for the -+ * same Open Systems Interconnection (OSI) layer as the virtual network. An ETHERNET -+ * guest LAN or VSWITCH uses the data link layer (layer 2) while an IP guest LAN -+ * or VSWITCH uses the network layer (layer 3). -+ * User action: -+ * If you are connecting to an ETHERNET guest LAN or VSWITCH, set the layer2 sysfs -+ * attribute of the qeth device to 1. If you are connecting to an IP guest LAN or -+ * VSWITCH, set the layer2 sysfs attribute of the qeth device to 0. -+ */ -+ -+/*? -+ * Text: "%s: Starting source MAC-address support for %s failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: network interface name -+ * Description: -+ * The qeth device driver could not enable source MAC-address on the network -+ * adapter. -+ * User action: -+ * Ungroup and regroup the subchannel triplet of the device. If this does not -+ * resolve the problem, reboot Linux. 
If the problem persists, gather Linux -+ * debug data and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x already exists\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: first token of the MAC-address -+ * @3: second token of the MAC-address -+ * @4: third token of the MAC-address -+ * @5: fourth token of the MAC-address -+ * @6: fifth token of the MAC-address -+ * @7: sixth token of the MAC-address -+ * Description: -+ * Setting the MAC address for the qeth device fails, because this -+ * MAC address is already defined on the OSA CHPID. -+ * User action: -+ * Use a different MAC address for this qeth device. -+ */ -+ -+/*? -+ * Text: "%s: MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x is not authorized\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * @2: first token of the MAC-address -+ * @3: second token of the MAC-address -+ * @4: third token of the MAC-address -+ * @5: fourth token of the MAC-address -+ * @6: fifth token of the MAC-address -+ * @7: sixth token of the MAC-address -+ * Description: -+ * This qeth device is a virtual network interface card (NIC), to which z/VM -+ * has already assigned a MAC address. z/VM MAC address verification does -+ * not allow you to change this predefined address. -+ * User action: -+ * None; use the MAC address that has been assigned by z/VM. -+ */ -+ -+/*? -+ * Text: "%s: The HiperSockets network traffic analyzer is activated\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * The sysfs 'sniffer' attribute of the HiperSockets device has the value '1'. -+ * The corresponding HiperSockets interface has been switched into promiscuous mode. -+ * As a result, the HiperSockets network traffic analyzer is started on the device. -+ * User action: -+ * None. -+ */ -+ -+ /*? 
-+ * Text: "%s: The HiperSockets network traffic analyzer is deactivated\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * The sysfs 'sniffer' attribute of the HiperSockets device has the value '1'. -+ * Promiscuous mode has been switched off for the corresponding HiperSockets interface -+ * As a result, the HiperSockets network traffic analyzer is stopped on the device. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: The device is not authorized to run as a HiperSockets network traffic analyzer\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * The sysfs 'sniffer' attribute of the HiperSockets device has the value '1'. -+ * The corresponding HiperSockets interface is switched into promiscuous mode -+ * but the network traffic analyzer (NTA) rules configured at the Support Element (SE) -+ * do not allow tracing. Possible reasons are: -+ * - Tracing is not authorized for all HiperSockets channels in the mainframe system -+ * - Tracing is not authorized for this HiperSockets channel -+ * - LPAR is not authorized to enable an NTA -+ * User action: -+ * Configure appropriate HiperSockets NTA rules at the SE. -+ */ -+ -+/*? -+ * Text: "%s: A HiperSockets network traffic analyzer is already active in the HiperSockets LAN\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the qeth device -+ * Description: -+ * The sysfs 'sniffer' attribute of the HiperSockets device has the value '1'. -+ * The HiperSockets interface is switched into promiscuous mode but another -+ * HiperSockets device on the same HiperSockets channel is already running as -+ * a network traffic analyzer. -+ * A HiperSockets channel can only have one active network traffic analyzer. -+ * User action: -+ * Do not configure multiple HiperSockets devices in the same HiperSockets channel as -+ * tracing devices. -+ */ -+ -+ -+/*? Text: "core functions removed\n" */ -+/*? 
Text: "%s: Device is a%s card%s%s%s\nwith link type %s.\n" */ -+/*? Text: "%s: Device is a%s card%s%s%s\nwith link type %s (no portname needed by interface).\n" */ -+/*? Text: "%s: Device is a%s card%s%s%s\nwith link type %s (portname: %s)\n" */ -+/*? Text: "%s: issue_next_read failed: no iob available!\n" */ -+/*? Text: "%s: Priority Queueing not supported\n" */ -+/*? Text: "%s: sense data available. cstat 0x%X dstat 0x%X\n" */ -+/*? Text: "loading core functions\n" */ -+/*? Text: "%s: MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x successfully registered on device %s\n" */ -+/*? Text: "%s: Device successfully recovered!\n" */ -+/*? Text: "register layer 2 discipline\n" */ -+/*? Text: "unregister layer 2 discipline\n" */ -+/*? Text: "%s: Hardware IP fragmentation not supported on %s\n" */ -+/*? Text: "%s: IPv6 not supported on %s\n" */ -+/*? Text: "%s: VLAN not supported on %s\n" */ -+/*? Text: "%s: Inbound source MAC-address not supported on %s\n" */ -+/*? Text: "%s: IPV6 enabled\n" */ -+/*? Text: "%s: ARP processing not supported on %s!\n" */ -+/*? Text: "%s: Hardware IP fragmentation enabled \n" */ -+/*? Text: "%s: set adapter parameters not supported.\n" */ -+/*? Text: "%s: VLAN enabled\n" */ -+/*? Text: "register layer 3 discipline\n" */ -+/*? Text: "%s: Outbound TSO enabled\n" */ -+/*? Text: "%s: Broadcast not supported on %s\n" */ -+/*? Text: "%s: Outbound TSO not supported on %s\n" */ -+/*? Text: "%s: Inbound HW Checksumming not supported on %s,\ncontinuing using Inbound SW Checksumming\n" */ -+/*? Text: "%s: Using no checksumming on %s.\n" */ -+/*? Text: "%s: Broadcast enabled\n" */ -+/*? Text: "%s: Multicast not supported on %s\n" */ -+/*? Text: "%s: Using SW checksumming on %s.\n" */ -+/*? Text: "%s: HW Checksumming (inbound) enabled\n" */ -+/*? Text: "unregister layer 3 discipline\n" */ -+/*? Text: "%s: Multicast enabled\n" */ -+/*? Text: "%s: QDIO data connection isolation is deactivated\n" */ -+/*? 
Text: "%s: QDIO data connection isolation is activated\n" */ -+/*? Text: "%s: Adapter does not support QDIO data connection isolation\n" */ -+/*? Text: "%s: Adapter is dedicated. QDIO data connection isolation not supported\n" */ -+/*? Text: "%s: TSO does not permit QDIO data connection isolation\n" */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/s390dbf -@@ -0,0 +1,83 @@ -+/*? -+ * Text: "Root becomes the owner of all s390dbf files in sysfs\n" -+ * Severity: Warning -+ * Description: -+ * The S/390 debug feature you are using only supports uid/gid = 0. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Registering debug feature %s failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: feature name -+ * Description: -+ * The initialization of an S/390 debug feature failed. A likely cause of this -+ * problem is memory constraints. The system keeps running, but the debug -+ * data for this feature will not be available in sysfs. -+ * User action: -+ * Consider assigning more memory to your LPAR or z/VM guest virtual machine. -+ */ -+ -+/*? -+ * Text: "Registering view %s/%s would exceed the maximum number of views %i\n" -+ * Severity: Error -+ * Parameter: -+ * @1: feature name -+ * @2: view name -+ * @3: maximum -+ * Description: -+ * The maximum number of allowed debug feature views has been reached. The -+ * view has not been registered. The system keeps running but the new view -+ * will not be available in sysfs. This is a program error. -+ * User action: -+ * Report this problem to your support partner. -+ */ -+ -+/*? -+ * Text: "%s is not a valid level for a debug feature\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: level -+ * Description: -+ * Setting a new level for a debug feature by using the 'level' sysfs attribute -+ * failed. Valid levels are the minus sign (-) and the integers in the -+ * range 0 to 6. The minus sign switches off the feature. The numbers switch -+ * the feature on, where higher numbers produce more debug output. 
-+ * User action: -+ * Write a valid value to the 'level' sysfs attribute. -+ */ -+ -+/*? -+ * Text: "Flushing debug data failed because %c is not a valid area\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: debug area number -+ * Description: -+ * Flushing a debug area by using the 'flush' sysfs attribute failed. Valid -+ * values are the minus sign (-) for flushing all areas, or the number of the -+ * respective area for flushing a single area. -+ * User action: -+ * Write a valid area number or the minus sign (-) to the 'flush' sysfs -+ * attribute. -+ */ -+ -+/*? -+ * Text: "Allocating memory for %i pages failed\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: number of pages -+ * Description: -+ * Setting the debug feature size by using the 'page' sysfs attribute failed. -+ * Linux did not have enough memory for expanding the debug feature to the -+ * requested size. -+ * User action: -+ * Use a smaller number of pages for the debug feature or allocate more -+ * memory to your LPAR or z/VM guest virtual machine. -+ */ -+ -+/*? Text: "%s: set new size (%i pages)\n" */ -+/*? Text: "%s: switched off\n" */ -+/*? Text: "%s: level %i is out of range (%i - %i)\n" */ -+/*? Text: "Registering view %s/%s failed due to out of memory\n" */ ---- /dev/null -+++ b/Documentation/kmsg/s390/sclp_cmd -@@ -0,0 +1,16 @@ -+/*? Text: "sync request failed (cmd=0x%08x, status=0x%02x)\n" */ -+/*? Text: "readcpuinfo failed (response=0x%04x)\n" */ -+/*? Text: "configure cpu failed (cmd=0x%08x, response=0x%04x)\n" */ -+/*? Text: "configure channel-path failed (cmd=0x%08x, response=0x%04x)\n" */ -+/*? Text: "read channel-path info failed (response=0x%04x)\n" */ -+/*? Text: "assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n" */ -+ -+/*? -+ * Text: "Memory hotplug state changed, suspend refused.\n" -+ * Severity: Error -+ * Description: -+ * Suspend is refused after a memory hotplug operation was performed. 
-+ * User action: -+ * The system needs to be restarted and no memory hotplug operation must be -+ * performed in order to allow suspend. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/sclp_config -@@ -0,0 +1,3 @@ -+/*? Text: "cpu capability changed.\n" */ -+/*? Text: "no configuration management.\n" */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/sclp_cpi -@@ -0,0 +1,2 @@ -+/*? Text: "request failed (status=0x%02x)\n" */ -+/*? Text: "request failed with response code 0x%x\n" */ ---- /dev/null -+++ b/Documentation/kmsg/s390/sclp_sdias -@@ -0,0 +1,4 @@ -+/*? Text: "sclp_send failed for get_nr_blocks\n" */ -+/*? Text: "SCLP error: %x\n" */ -+/*? Text: "sclp_send failed: %x\n" */ -+/*? Text: "Error from SCLP while copying hsa. Event status = %x\n" */ ---- /dev/null -+++ b/Documentation/kmsg/s390/setup -@@ -0,0 +1,181 @@ -+/*? -+ * Text: "Execute protection active, mvcos available\n" -+ * Severity: Informational -+ * Description: -+ * The kernel parameter 'noexec' has been specified. The kernel will -+ * honor the execute bit of mappings and will use the mvcos instruction -+ * to copy between the user and kernel address space. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Execute protection active, mvcos not available\n" -+ * Severity: Informational -+ * Description: -+ * The kernel parameter 'noexec' has been specified. The kernel will -+ * honor the execute bit of mappings. The mvcos instruction is not -+ * available and the kernel will use the slower page table walk method -+ * to copy between the user and kernel address space. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Address spaces switched, mvcos available\n" -+ * Severity: Informational -+ * Description: -+ * The kernel parameter 'switch_amode' has been specified. The kernel -+ * will use the primary address space for user space processes and the -+ * home address space for the kernel. The mvcos instruction is used to -+ * copy between the user and kernel address space. 
-+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Address spaces switched, mvcos not available\n" -+ * Severity: Informational -+ * Description: -+ * The kernel parameter 'switch_amode' has been specified. The kernel -+ * will use the primary address space for user space processes and the -+ * home address space for the kernel. The mvcos instruction is not -+ * available and the kernel will use the slower page table walk method -+ * to copy between the user and kernel address space. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "initrd extends beyond end of memory (0x%08lx > 0x%08lx) disabling initrd\n" -+ * Severity: Error -+ * Parameter: -+ * @1: start address of the initial RAM disk -+ * @2: memory end address -+ * Description: -+ * The load address and the size of the initial RAM disk result in an end -+ * address of the initial RAM disk that is beyond the end of the system -+ * memory. -+ * User action: -+ * Lower the load address of the initial RAM disk, reduce the size of the -+ * initial RAM disk, or increase the size if the system memory to make the -+ * initial RAM disk fit into the memory. -+ */ -+ -+/*? -+ * Text: "Moving initrd (0x%08lx -> 0x%08lx, size: %ld)\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: old start address of the initial RAM disk -+ * @2: new start address of the initial RAM disk -+ * @3: size of the initial RAM disk -+ * Description: -+ * The location of the initial RAM disk conflicted with the boot memory bitmap. -+ * To resolve the conflict the initial RAM disk has been moved to a new -+ * location. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Linux is running as a z/VM guest operating system in 31-bit mode\n" -+ * Severity: Informational -+ * Description: -+ * The 31-bit Linux kernel detected that it is running as a guest operating -+ * system of the z/VM hypervisor. -+ * User action: -+ * None. -+ */ -+ -+/*? 
-+ * Text: "Linux is running natively in 31-bit mode\n" -+ * Severity: Informational -+ * Description: -+ * The 31-bit Linux kernel detected that it is running on an IBM mainframe, -+ * either as the sole operating system in an LPAR or as the sole operating -+ * system on the entire mainframe. The Linux kernel is not running as a -+ * guest operating system of the z/VM hypervisor. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "The hardware system has IEEE compatible floating point units\n" -+ * Severity: Informational -+ * Description: -+ * The Linux kernel detected that it is running on a hardware system with -+ * CPUs that have IEEE compatible floating point units. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "The hardware system has no IEEE compatible floating point units\n" -+ * Severity: Informational -+ * Description: -+ * The Linux kernel detected that it is running on a hardware system with -+ * CPUs that do not have IEEE compatible floating point units. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Linux is running as a z/VM guest operating system in 64-bit mode\n" -+ * Severity: Informational -+ * Description: -+ * The 64-bit Linux kernel detected that it is running as a guest operating -+ * system of the z/VM hypervisor. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Linux is running natively in 64-bit mode\n" -+ * Severity: Informational -+ * Description: -+ * The 64-bit Linux kernel detected that it is running on an IBM mainframe, -+ * either as the sole operating system in an LPAR or as the sole operating -+ * system on the entire mainframe. The Linux kernel is not running as a -+ * guest operating system of the z/VM hypervisor. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "Defining the Linux kernel NSS failed with rc=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: return code -+ * Description: -+ * The Linux kernel could not define the named saved system (NSS) with -+ * the z/VM CP DEFSYS command. 
The return code represents the numeric -+ * portion of the CP DEFSYS error message. -+ * User action: -+ * For return code 1, the z/VM guest virtual machine is not authorized -+ * to define named saved systems. -+ * Ensure that the z/VM guest virtual machine is authorized to issue -+ * the CP DEFSYS command (typically privilege class E). -+ * For other return codes, see the help and message documentation for -+ * the CP DEFSYS command. -+ */ -+ -+/*? -+ * Text: "Saving the Linux kernel NSS failed with rc=%d\n" -+ * Severity: Error -+ * Parameter: -+ * @1: return code -+ * Description: -+ * The Linux kernel could not save the named saved system (NSS) with -+ * the z/VM CP SAVESYS command. The return code represents the numeric -+ * portion of the CP SAVESYS error message. -+ * User action: -+ * For return code 1, the z/VM guest virtual machine is not authorized -+ * to save named saved systems. -+ * Ensure that the z/VM guest virtual machine is authorized to issue -+ * the CP SAVESYS command (typically privilege class E). -+ * For other return codes, see the help and message documentation for -+ * the CP SAVESYS command. -+ */ -+ -+/*? Text: "Linux is running under KVM in 64-bit mode\n" */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/tape -@@ -0,0 +1,104 @@ -+/*? -+ * Text: "%s: A tape unit was detached while in use\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A tape unit has been detached from the I/O configuration while a tape -+ * was being accessed. This typically results in I/O error messages and -+ * potentially in damaged data on the tape. -+ * User action: -+ * Check the output of the application that accesses the tape device. -+ * If this problem occurred during a write-type operation, consider repeating -+ * the operation after bringing the tape device back online. -+ */ -+ -+/*? 
-+ * Text: "%s: A tape cartridge has been mounted\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A tape cartridge has been inserted into the tape unit. The tape in the -+ * tape unit is ready to be accessed. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: The tape cartridge has been successfully unloaded\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape cartridge has been unloaded from the tape unit. Insert a tape -+ * cartridge before accessing the tape device. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: Determining the size of the recorded area...\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape block device driver is currently determining the size of the -+ * recorded area on the tape medium. This operation typically takes a -+ * few minutes. -+ * User action: -+ * Wait until the size is shown in a completion message. -+ */ -+ -+/*? -+ * Text: "%s: Opening the tape failed because of missing end-of-file marks\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape block device driver requires end-of-file marks at the end of -+ * the recorded area on a tape. If the tape device was to be opened in -+ * response to a mount command, the mount command will fail. -+ * User action: -+ * Insert a tape cartridge that has been prepared for use with the tape -+ * block device driver and try the operation again. -+ */ -+ -+/*? -+ * Text: "%s: The size of the recorded area is %i blocks\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the tape device -+ * @2: number of blocks -+ * Description: -+ * The tape block device driver has successfully determined the size of the -+ * recorded area on the tape medium. The tape device can now be used as -+ * a block device. 
See the mount(8) man page for details on how to access -+ * block devices. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "A cartridge is loaded in tape device %s, refusing to suspend\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A request to suspend a tape device currently loaded with a cartridge is -+ * rejected. -+ * User action: -+ * Unload the tape device. Then try to suspend the system again. -+ */ -+ -+/*? -+ * Text: "Tape device %s is busy, refusing to suspend\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A request to suspend a tape device being currently in use is rejected. -+ * User action: -+ * Terminate applications performing tape operations -+ * and then try to suspend the system again. -+ */ ---- /dev/null -+++ b/Documentation/kmsg/s390/tape_34xx -@@ -0,0 +1,418 @@ -+/*? -+ * Text: "%s: An unexpected condition %d occurred in tape error recovery\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the tape device -+ * @2: number -+ * Description: -+ * The control unit has reported an error condition that is not recognized by -+ * the error recovery process of the tape device driver. -+ * User action: -+ * Report this problem and the condition number from the message to your -+ * support organization. -+ */ -+ -+/*? -+ * Text: "%s: A data overrun occurred between the control unit and tape unit\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A data overrun error has occurred on the connection between the control -+ * unit and the tape unit. If this problem occurred during a write-type -+ * operation, the integrity of the data on the tape might be compromised. -+ * User action: -+ * Use a faster connection. If this problem occurred during a write-type -+ * operation, consider repositioning the tape and repeating the operation. -+ */ -+ -+/*? 
-+ * Text: "%s: The block ID sequence on the tape is incorrect\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The control unit has detected an incorrect block ID sequence on the tape. -+ * This problem typically indicates that the data on the tape is damaged. -+ * User action: -+ * If this problem occurred during a write-type operation reposition the tape -+ * and repeat the operation. -+ */ -+ -+/*? -+ * Text: "%s: A read error occurred that cannot be recovered\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A read error has occurred that cannot be recovered. The current tape might -+ * be damaged. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: A write error on the tape cannot be recovered\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A write error has occurred that could not be recovered by the automatic -+ * error recovery process. -+ * User action: -+ * Use a different tape cartridge. -+ */ -+ -+/*? -+ * Text: "%s: Writing the ID-mark failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The ID-mark at the beginning of tape could not be written. The tape medium -+ * might be write-protected. -+ * User action: -+ * Try a different tape cartridge. Ensure that the write-protection on the -+ * cartridge is switched off. -+ */ -+ -+/*? -+ * Text: "%s: Reading the tape beyond the end of the recorded area failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A read-type operation failed because it extended beyond the end of the -+ * recorded area on the tape medium. -+ * User action: -+ * None. -+ */ -+ -+/*? 
-+ * Text: "%s: The tape contains an incorrect block ID sequence\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The control unit has detected an incorrect block ID sequence on the tape. -+ * This problem typically indicates that the data on the tape is damaged. -+ * User action: -+ * If this problem occurred during a write-type operation reposition the tape -+ * and repeat the operation. -+ */ -+ -+/*? -+ * Text: "%s: A path equipment check occurred for the tape device\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A path equipment check has occurred. This check indicates problems with the -+ * connection between the mainframe system and the tape control unit. -+ * User action: -+ * Ensure that the cable connections between the mainframe system and the -+ * control unit are securely in place and not damaged. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit cannot process the tape format\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * Either the tape unit is not able to read the format ID mark, or the -+ * specified format is not supported by the tape unit. -+ * User action: -+ * If you do not need the data recorded on the current tape, use a different -+ * tape or write a new format ID mark at the beginning of the tape. Be aware -+ * that writing a new ID mark leads to a loss of all data that has been -+ * recorded on the tape. If you need the data on the current tape, use a tape -+ * unit that supports the tape format. -+ */ -+ -+/*? -+ * Text: "%s: The tape medium is write-protected\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A write-type operation failed because the tape medium is write-protected. -+ * User action: -+ * Eject the tape cartridge, switch off the write protection on the cartridge, -+ * insert the cartridge, and try the operation again. 
-+ */ -+ -+/*? -+ * Text: "%s: The tape does not have the required tape tension\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape does not have the required tape tension. -+ * User action: -+ * Rewind and reposition the tape, then repeat the operation. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit failed to load the cartridge\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * An error has occurred while loading the tape cartridge. -+ * User action: -+ * Unload the cartridge and load it again. -+ */ -+ -+/*? -+ * Text: "%s: Automatic unloading of the tape cartridge failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape unit failed to unload the cartridge. -+ * User action: -+ * Unload the cartridge manually by using the eject button on the tape unit. -+ */ -+ -+/*? -+ * Text: "%s: An equipment check has occurred on the tape unit\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * Possible reasons for the check condition are a unit adapter error, a buffer -+ * error on the lower interface, an unusable internal path, or an error that -+ * has occurred while loading the cartridge. -+ * User action: -+ * Examine the tape unit and the cartridge loader. Consult the tape unit -+ * documentation for details. -+ */ -+ -+/*? -+ * Text: "%s: The tape information states an incorrect length\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape is shorter than stated at the beginning of the tape data. A -+ * possible reason for this problem is that the tape might have been physically -+ * truncated. Data written to the tape might be incomplete or damaged. -+ * User action: -+ * If this problem occurred during a write-type operation, consider repeating -+ * the operation with a different tape cartridge. 
-+ */ -+ -+/*? -+ * Text: "%s: The tape unit is not ready\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape unit is online but not ready. -+ * User action: -+ * Turn the ready switch on the tape unit to the ready position and try the -+ * operation again. -+ */ -+ -+/*? -+ * Text: "%s: The tape medium has been rewound or unloaded manually\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape unit rewind button, unload button, or both have been used to -+ * rewind or unload the tape cartridge. A tape cartridge other than the -+ * intended cartridge might have been inserted or the tape medium might not -+ * be at the expected position. -+ * User action: -+ * Verify that the correct tape cartridge has been inserted and that the tape -+ * medium is at the required position before continuing to work with the tape. -+ */ -+ -+/*? -+ * Text: "%s: The tape subsystem is running in degraded mode\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape subsystem is not operating at its maximum performance. -+ * User action: -+ * Contact your service representative for the tape unit and report this -+ * problem. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit is already assigned\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape unit is already assigned to another channel path. -+ * User action: -+ * Free the tape unit from the operating system instance to which it is -+ * currently assigned then try again. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit is not online\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape unit is not online to the tape device driver. 
-+ * User action: -+ * Ensure that the tape unit is operational and that the cable connections -+ * between the control unit and the tape unit are securely in place and not -+ * damaged. -+ */ -+ -+/*? -+ * Text: "%s: The control unit has fenced access to the tape volume\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The control unit fences further access to the current tape volume. The data -+ * integrity on the tape volume might have been compromised. -+ * User action: -+ * Rewind and unload the tape cartridge. -+ */ -+ -+/*? -+ * Text: "%s: A parity error occurred on the tape bus\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * A data parity check error occurred on the bus. Data that was read or written -+ * while the error occurred is not valid. -+ * User action: -+ * Reposition the tape and repeat the read-type or write-type operation. -+ */ -+ -+/*? -+ * Text: "%s: I/O error recovery failed on the tape control unit\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * An I/O error occurred that cannot be recovered by the automatic error -+ * recovery process of the tape control unit. The application that operates -+ * the tape unit will receive a return value of -EIO which indicates an -+ * I/O error. The data on the tape might be damaged. -+ * User action: -+ * If this problem occurred during a write-type operation, consider -+ * repositioning the tape and repeating the operation. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit requires a firmware update\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape unit requires firmware patches from the tape control unit but the -+ * required patches are not available on the control unit. 
-+ * User action:
-+ * Make the required patches available on the control unit then reposition the
-+ * tape and retry the operation. For details about obtaining and installing
-+ * firmware updates see the control unit documentation.
-+ */
-+
-+/*?
-+ * Text: "%s: The maximum block size for buffered mode is exceeded\n"
-+ * Severity: Warning
-+ * Parameter:
-+ * @1: bus ID of the tape device
-+ * Description:
-+ * The block to be written is larger than allowed for the buffered mode.
-+ * User action:
-+ * Use a smaller block size.
-+ */
-+
-+/*?
-+ * Text: "%s: A channel interface error cannot be recovered\n"
-+ * Severity: Warning
-+ * Parameter:
-+ * @1: bus ID of the tape device
-+ * Description:
-+ * An error has occurred on the channel interface. This error cannot
-+ * be recovered by the control unit error recovery process.
-+ * User action:
-+ * See the documentation of the control unit.
-+ */
-+
-+/*?
-+ * Text: "%s: A channel protocol error occurred\n"
-+ * Severity: Warning
-+ * Parameter:
-+ * @1: bus ID of the tape device
-+ * Description:
-+ * An error was detected in the channel protocol.
-+ * User action:
-+ * Reposition the tape and try the operation again.
-+ */
-+
-+/*?
-+ * Text: "%s: The tape unit does not support the compaction algorithm\n"
-+ * Severity: Warning
-+ * Parameter:
-+ * @1: bus ID of the tape device
-+ * Description:
-+ * The tape unit cannot read the current tape. The data on the tape has been
-+ * compressed with an algorithm that is not supported by the tape unit.
-+ * User action:
-+ * Use a tape unit that supports the compaction algorithm used for the
-+ * current tape.
-+ */
-+
-+/*?
-+ * Text: "%s: The tape unit does not support tape format 3480-2 XF\n"
-+ * Severity: Warning
-+ * Parameter:
-+ * @1: bus ID of the tape device
-+ * Description:
-+ * The tape unit does not support tapes recorded in the 3480-2 XF format.
-+ * User action: -+ * If you do not need the data recorded on the current tape, rewind the tape -+ * and overwrite it with a supported format. If you need the data on the -+ * current tape, use a tape unit that supports the tape format. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit does not support format 3480 XF\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape unit does not support tapes recorded in the 3480 XF format. -+ * User action: -+ * If you do not need the data recorded on the current tape, rewind the tape -+ * and overwrite it with a supported format. If you need the data on the -+ * current tape, use a tape unit that supports the tape format. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit does not support the current tape length\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The length of the tape in the cartridge is incompatible with the tape unit. -+ * User action: -+ * Either use a different tape unit or use a tape with a supported length. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit does not support the tape length\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The length of the tape in the cartridge is incompatible with the tape -+ * unit. -+ * User action: -+ * Either use a different tape unit or use a tape with a supported length. -+ */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/tape_3590 -@@ -0,0 +1,184 @@ -+/*? -+ * Text: "%s: The tape medium must be loaded into a different tape unit\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape device has indicated an error condition that requires loading -+ * the tape cartridge into a different tape unit to recover. -+ * User action: -+ * Unload the cartridge and use a different tape unit to retry the operation. -+ */ -+ -+/*? 
-+ * Text: "%s: Tape media information: exception %s, service %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * @2: exception -+ * @3: service -+ * Description: -+ * This is an operating system independent tape medium information message -+ * that was issued by the tape unit. The information in the message is -+ * intended for the IBM customer engineer. -+ * User action: -+ * See the documentation for the tape unit for further information. -+ */ -+ -+/*? -+ * Text: "%s: Device subsystem information: exception %s, service %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * @2: exception -+ * @3: required service action -+ * Description: -+ * This is an operating system independent device subsystem information message -+ * that was issued by the tape unit. The information in the message is -+ * intended for the IBM customer engineer. -+ * User action: -+ * See the documentation for the tape unit for further information. -+ */ -+ -+/*? -+ * Text: "%s: I/O subsystem information: exception %s, service %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * @2: exception -+ * @3: required service action -+ * Description: -+ * This is an operating system independent I/O subsystem information message -+ * that was issued by the tape unit. The information in the message is -+ * intended for the IBM customer engineer. -+ * User action: -+ * See the documentation for the tape unit for further information. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit has issued sense message %s\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * @2: sense message code -+ * Description: -+ * The tape unit has issued an operating system independent sense message. -+ * User action: -+ * See the documentation for the tape unit for further information. -+ */ -+ -+/*? 
-+ * Text: "%s: The tape unit has issued an unknown sense message code 0x%x\n"
-+ * Severity: Warning
-+ * Parameter:
-+ * @1: bus ID of the tape device
-+ * @2: code
-+ * Description:
-+ * The tape device driver has received an unknown sense message from the
-+ * tape unit.
-+ *
-+ * User action:
-+ * See the documentation for the tape unit for further information.
-+ */
-+
-+/*?
-+ * Text: "%s: MIM SEV=%i, MC=%02x, ES=%x/%x, RC=%02x-%04x-%02x\n"
-+ * Severity: Warning
-+ * Parameter:
-+ * @1: bus ID of the tape device
-+ * @2: SEV
-+ * @3: message code
-+ * @4: exception
-+ * @5: required service action
-+ * @6: refcode
-+ * @7: mid
-+ * @8: fid
-+ * Description:
-+ * This is an operating system independent information message that was
-+ * issued by the tape unit. The information in the message is intended for
-+ * the IBM customer engineer.
-+ * User action:
-+ * See the documentation for the tape unit for further information.
-+ */
-+
-+/*?
-+ * Text: "%s: IOSIM SEV=%i, DEVTYPE=3590/%02x, MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n"
-+ * Severity: Warning
-+ * Parameter:
-+ * @1: bus ID of the tape device
-+ * @2: SEV
-+ * @3: model
-+ * @4: message code
-+ * @5: exception
-+ * @6: required service action
-+ * @7: refcode1
-+ * @8: refcode2
-+ * @9: refcode3
-+ * Description:
-+ * This is an operating system independent I/O subsystem information message
-+ * that was issued by the tape unit. The information in the message is
-+ * intended for the IBM customer engineer.
-+ * User action:
-+ * See the documentation for the tape unit for further information.
-+ */
-+
-+/*?
-+ * Text: "%s: DEVSIM SEV=%i, DEVTYPE=3590/%02x, MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * @2: SEV -+ * @3: model -+ * @4: message code -+ * @5: exception -+ * @6: required service action -+ * @7: refcode1 -+ * @8: refcode2 -+ * @9: refcode3 -+ * Description: -+ * This is an operating system independent device subsystem information message -+ * issued by the tape unit. The information in the message is intended for -+ * the IBM customer engineer. -+ * User action: -+ * See the documentation for the tape unit for further information. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit has issued an unknown sense message code %x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * @2: code -+ * Description: -+ * The tape device has issued a sense message, that is unknown to the device -+ * driver. -+ * User action: -+ * Use the message code printed as hexadecimal value and see the documentation -+ * for the tape unit for further information. -+ */ -+ -+/*? -+ * Text: "%s: The tape unit failed to obtain the encryption key from EKM\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * The tape unit was unable to retrieve the encryption key required to decode -+ * the data on the tape from the enterprise key manager (EKM). -+ * User action: -+ * See the EKM and tape unit documentation for information about how to enable -+ * the tape unit to retrieve the encryption key. -+ */ -+ -+/*? -+ * Text: "%s: A different host has privileged access to the tape unit\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the tape device -+ * Description: -+ * You cannot access the tape unit because a different operating system -+ * instance has privileged access to the unit. -+ * User action: -+ * Unload the current cartridge to solve this problem. 
-+ */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/time -@@ -0,0 +1,36 @@ -+/*? -+ * Text: "The ETR interface has adjusted the clock by %li microseconds\n" -+ * Severity: Notice -+ * Parameter: -+ * @1: number of microseconds -+ * Description: -+ * The external time reference (ETR) interface has synchronized the system -+ * clock with the external reference and set it to a new value. The time -+ * difference between the old and new clock value has been passed to the -+ * network time protocol (NTP) as a single shot adjustment. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "The real or virtual hardware system does not provide an ETR interface\n" -+ * Severity: Warning -+ * Description: -+ * The 'etr=' parameter has been passed on the kernel parameter line for -+ * a Linux instance that does not have access to the external time reference -+ * (ETR) facility. -+ * User action: -+ * To avoid this warning remove the 'etr=' kernel parameter. -+ */ -+ -+/*? -+ * Text: "The real or virtual hardware system does not provide an STP interface\n" -+ * Severity: Warning -+ * Description: -+ * The 'stp=' parameter has been passed on the kernel parameter line for -+ * a Linux instance that does not have access to the server time protocol -+ * (STP) facility. -+ * User action: -+ * To avoid this warning remove the 'stp=' kernel parameter. -+ */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/vmcp -@@ -0,0 +1,13 @@ -+/*? -+ * Text: "The z/VM CP interface device driver cannot be loaded without z/VM\n" -+ * Severity: Warning -+ * Description: -+ * With the z/VM CP interface you can issue z/VM CP commands from a Linux -+ * terminal session. On Linux instances that run in environments other than -+ * the z/VM hypervisor, the z/VM CP interface does not provide any useful -+ * function and the corresponding vmcp device driver cannot be loaded. 
-+ * User action:
-+ * Load the vmcp device driver only on Linux instances that run as guest
-+ * operating systems of the z/VM hypervisor. If the device driver has been
-+ * compiled into the kernel, ignore this message.
-+ */
---- /dev/null
-+++ b/Documentation/kmsg/s390/vmlogrdr
-@@ -0,0 +1,18 @@
-+/*? Text: "vmlogrdr: failed to start recording automatically\n" */
-+/*? Text: "vmlogrdr: connection severed with reason %i\n" */
-+/*? Text: "vmlogrdr: iucv connection to %s failed with rc %i \n" */
-+/*? Text: "vmlogrdr: failed to stop recording automatically\n" */
-+/*? Text: "not running under VM, driver not loaded.\n" */
-+
-+/*?
-+ * Text: "vmlogrdr: device %s is busy. Refuse to suspend.\n"
-+ * Severity: Error
-+ * Parameter:
-+ * @1: device name
-+ * Description:
-+ * Suspending vmlogrdr devices that are in use is not supported.
-+ * A request to suspend such a device is refused.
-+ * User action:
-+ * Close all applications that use any of the vmlogrdr devices
-+ * and then try to suspend the system again.
-+ */
---- /dev/null
-+++ b/Documentation/kmsg/s390/vmur
-@@ -0,0 +1,47 @@
-+/*?
-+ * Text: "The %s cannot be loaded without z/VM\n"
-+ * Severity: Error
-+ * Parameter:
-+ * @1: z/VM virtual unit record device driver
-+ * Description:
-+ * The z/VM virtual unit record device driver provides Linux with access to
-+ * z/VM virtual unit record devices like punch card readers, card punches, and
-+ * line printers. On Linux instances that run in environments other than the
-+ * z/VM hypervisor, the device driver does not provide any useful function and
-+ * the corresponding vmur module cannot be loaded.
-+ * User action:
-+ * Load the vmur module only on Linux instances that run as guest operating
-+ * systems of the z/VM hypervisor. If the z/VM virtual unit record device
-+ * has been compiled into the kernel, ignore this message.
-+ */
-+
-+/*?
-+ * Text: "Kernel function alloc_chrdev_region failed with error code %d\n"
-+ * Severity: Error
-+ * Parameter:
-+ * @1: error code according to errno definitions
-+ * Description:
-+ * The z/VM virtual unit record device driver (vmur) needs to register a range
-+ * of character device minor numbers from 0x0000 to 0xffff.
-+ * This registration failed, probably because of memory constraints.
-+ * User action:
-+ * Free some memory and reload the vmur module. If the z/VM virtual unit
-+ * record device driver has been compiled into the kernel reboot Linux.
-+ * Consider assigning more memory to your LPAR or z/VM guest virtual machine.
-+ */
-+
-+/*?
-+ * Text: "Unit record device %s is busy, %s refusing to suspend.\n"
-+ * Severity: Error
-+ * Parameter:
-+ * @1: bus ID of the unit record device
-+ * @2: z/VM virtual unit record device driver
-+ * Description:
-+ * Linux cannot be suspended while a unit record device is in use.
-+ * User action:
-+ * Stop all applications that work on z/VM spool file queues, for example, the
-+ * vmur tool. Then try again to suspend Linux.
-+ */
-+
-+/*? Text: "%s loaded.\n" */
-+/*? Text: "%s unloaded.\n" */
---- /dev/null
-+++ b/Documentation/kmsg/s390/vmwatchdog
-@@ -0,0 +1,26 @@
-+/*?
-+ * Text: "The system cannot be suspended while the watchdog is in use\n"
-+ * Severity: Error
-+ * Description:
-+ * A program is currently using the vmwatchdog device node. The watchdog
-+ * device driver prevents the system from being suspended while the watchdog
-+ * device is in use.
-+ * User action:
-+ * If you want to suspend the system, find out which program uses the watchdog
-+ * device. Stop the program or reconfigure it to not use the watchdog.
-+ */
-+
-+
-+/*?
-+ * Text: "The system cannot be suspended while the watchdog is running\n"
-+ * Severity: Error
-+ * Description:
-+ * The watchdog must not time out during hibernation.
The watchdog -+ * device driver prevents the system from being suspended while the watchdog -+ * timer is running. -+ * User action: -+ * If you want to suspend the system, stop the watchdog, for example, by entering -+ * the command: 'echo V > /dev/vmwatchdog'. Alternatively, stop the program that -+ * uses the watchdog or reconfigure the program to not use the watchdog. -+ */ -+ ---- /dev/null -+++ b/Documentation/kmsg/s390/xpram -@@ -0,0 +1,73 @@ -+/*? -+ * Text: "%d is not a valid number of XPRAM devices\n" -+ * Severity: Error -+ * Parameter: -+ * @1: number of partitions -+ * Description: -+ * The number of XPRAM partitions specified for the 'devs' module parameter -+ * or with the 'xpram.parts' kernel parameter must be an integer in the -+ * range 1 to 32. The XPRAM device driver created a maximum of 32 partitions -+ * that are probably not configured as intended. -+ * User action: -+ * If the XPRAM device driver has been compiled as a separate module, -+ * unload the module and load it again with a correct value for the 'devs' -+ * module parameter. If the XPRAM device driver has been compiled -+ * into the kernel, correct the 'xpram.parts' parameter in the kernel -+ * command line and restart Linux. -+ */ -+ -+/*? -+ * Text: "Not enough expanded memory available\n" -+ * Severity: Error -+ * Description: -+ * The amount of expanded memory required to set up your XPRAM partitions -+ * depends on the 'sizes' parameter specified for the xpram module or on -+ * the specifications for the 'xpram.parts' parameter if the XPRAM device -+ * driver has been compiled into the kernel. Your -+ * current specification exceed the amount of available expanded memory. -+ * Your XPRAM partitions are probably not configured as intended. -+ * User action: -+ * If the XPRAM device driver has been compiled as a separate module, -+ * unload the xpram module and load it again with an appropriate value -+ * for the 'sizes' module parameter. 
If the XPRAM device driver has been -+ * compiled into the kernel, adjust the 'xpram.parts' parameter in the -+ * kernel command line and restart Linux. If you need more than the -+ * available expanded memory, increase the expanded memory allocation for -+ * your virtual hardware or LPAR. -+ */ -+ -+/*? -+ * Text: "No expanded memory available\n" -+ * Severity: Error -+ * Description: -+ * The XPRAM device driver has been loaded in a Linux instance that runs -+ * in an LPAR or virtual hardware without expanded memory. -+ * No XPRAM partitions are created. -+ * User action: -+ * Allocate expanded memory for your LPAR or virtual hardware or do not -+ * load the xpram module. You can ignore this message, if you do not want -+ * to create XPRAM partitions. -+ */ -+ -+/*? -+ * Text: "Resuming the system failed: %s\n" -+ * Severity: Error -+ * Parameter: -+ * @1: cause of the failure -+ * Description: -+ * A system cannot be resumed if the expanded memory setup changes -+ * after hibernation. Possible reasons for the failure are: -+ * - Expanded memory was removed after hibernation. -+ * - Size of the expanded memory changed after hibernation. -+ * The system is stopped with a kernel panic. -+ * User action: -+ * Reboot Linux. -+ */ -+ -+/*? Text: " number of devices (partitions): %d \n" */ -+/*? Text: " size of partition %d: %u kB\n" */ -+/*? Text: " size of partition %d to be set automatically\n" */ -+/*? Text: " memory needed (for sized partitions): %lu kB\n" */ -+/*? Text: " partitions to be sized automatically: %d\n" */ -+/*? Text: " automatically determined partition size: %lu kB\n" */ -+/*? Text: " %u pages expanded memory found (%lu KB).\n" */ ---- /dev/null -+++ b/Documentation/kmsg/s390/zdump -@@ -0,0 +1,12 @@ -+/*? -+ * Text: "The 32-bit dump tool cannot be used for a 64-bit system\n" -+ * Severity: Alert -+ * Description: -+ * The dump process ends without creating a system dump. 
-+ * User action: -+ * Use a 64-bit dump tool to obtain a system dump for 64-bit Linux instance. -+ */ -+ -+/*? Text: "DETECTED 'S390 (32 bit) OS'\n" */ -+/*? Text: "0x%x is an unknown architecture.\n" */ -+/*? Text: "DETECTED 'S390X (64 bit) OS'\n" */ ---- /dev/null -+++ b/Documentation/kmsg/s390/zfcp -@@ -0,0 +1,865 @@ -+/*? -+ * Text: "%s is not a valid SCSI device\n" -+ * Severity: Error -+ * Parameter: -+ * @1: device specification -+ * Description: -+ * The specification for an initial SCSI device provided with the 'zfcp.device' -+ * kernel parameter or with the 'device' module parameter is syntactically -+ * incorrect. The specified SCSI device could not be attached to the Linux -+ * system. -+ * User action: -+ * Correct the value for the 'zfcp.device' or 'device' parameter and reboot -+ * Linux. See "Device Drivers, Features, and Commands" for information about -+ * the syntax. -+ */ -+ -+/*? -+ * Text: "Registering the misc device zfcp_cfdc failed\n" -+ * Severity: Error -+ * Description: -+ * The zfcp device driver failed to register the device that provides access to -+ * the adapter access control file (ACL tables). The device driver -+ * initialization failed. A possible cause for this problem is memory -+ * constraints. -+ * User action: -+ * Free some memory and try again to load the zfcp device driver. If the zfcp -+ * device driver has been compiled into the kernel, reboot Linux. Consider -+ * assigning more memory to your LPAR or z/VM guest virtual machine. If the -+ * problem persists, contact your support organization. -+ */ -+ -+/*? -+ * Text: "The zfcp device driver could not register with the common I/O layer\n" -+ * Severity: Error -+ * Description: -+ * The device driver initialization failed. A possible cause of this problem is -+ * memory constraints. -+ * User action: -+ * Free some memory and try again to load the zfcp device driver. If the zfcp -+ * device driver has been compiled into the kernel, reboot Linux. 
Consider -+ * assigning more memory to your LPAR or z/VM guest virtual machine. If the -+ * problem persists, contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Setting up data structures for the FCP adapter failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The zfcp device driver could not allocate data structures for an FCP adapter. -+ * A possible reason for this problem is memory constraints. -+ * User action: -+ * Set the FCP adapter offline or detach it from the Linux system, free some -+ * memory and set the FCP adapter online again or attach it again. If this -+ * problem persists, gather Linux debug data, collect the FCP adapter -+ * hardware logs, and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: The FCP device is operational again\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * An FCP device has been unavailable because it had been detached from the -+ * Linux system or because the corresponding CHPID was offline. The FCP device -+ * is now available again and the zfcp device driver resumes all operations to -+ * the FCP device. -+ * User action: -+ * None. -+ */ -+ -+/*? -+ * Text: "%s: The CHPID for the FCP device is offline\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The CHPID for an FCP device has been set offline, either logically in Linux -+ * or on the hardware. -+ * User action: -+ * Find out which CHPID corresponds to the FCP device, for example, with the -+ * lscss command. Check if the CHPID has been set logically offline in sysfs. -+ * Write 'on' to the CHPID's status attribute to set it online. If the CHPID is -+ * online in sysfs, find out if it has been varied offline through a hardware -+ * management interface, for example the service element (SE). -+ */ -+ -+/*? 
-+ * Text: "%s: The FCP device has been detached\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * An FCP device is no longer available to Linux. -+ * User action: -+ * Ensure that the FCP adapter is operational and attached to the LPAR or z/VM -+ * virtual machine. -+ */ -+ -+/*? -+ * Text: "%s: The FCP device did not respond within the specified time\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The common I/O layer waited for a response from the FCP adapter but -+ * no response was received within the specified time limit. This might -+ * indicate a hardware problem. -+ * User action: -+ * Consult your hardware administrator. If this problem persists, -+ * gather Linux debug data, collect the FCP adapter hardware logs, and -+ * report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Registering the FCP device with the SCSI stack failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The FCP adapter could not be registered with the Linux SCSI -+ * stack. A possible reason for this problem is memory constraints. -+ * User action: -+ * Set the FCP adapter offline or detach it from the Linux system, free some -+ * memory and set the FCP adapter online again or attach it again. If this -+ * problem persists, gather Linux debug data, collect the FCP adapter -+ * hardware logs, and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: ERP cannot recover an error on the FCP device\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * An error occurred on an FCP device. The error recovery procedure (ERP) -+ * could not resolve the error. The FCP device driver cannot use the FCP device. -+ * User action: -+ * Check for previous error messages for the same FCP device to find the -+ * cause of the problem. -+ */ -+ -+/*? 
-+ * Text: "%s: Creating an ERP thread for the FCP device failed.\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The zfcp device driver could not set up error recovery procedure (ERP) -+ * processing for the FCP device. The FCP device is not available for use -+ * in Linux. -+ * User action: -+ * Free some memory and try again to load the zfcp device driver. If the zfcp -+ * device driver has been compiled into the kernel, reboot Linux. Consider -+ * assigning more memory to your LPAR or z/VM guest virtual machine. If the -+ * problem persists, contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s: ERP failed for unit 0x%016Lx on port 0x%016Lx\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: LUN -+ * @3: WWPN -+ * Description: -+ * An error occurred on the SCSI device at the specified LUN. The error recovery -+ * procedure (ERP) could not resolve the error. The SCSI device is not -+ * available. -+ * User action: -+ * Verify that the LUN is correct. Check the fibre channel fabric for errors -+ * related to the specified WWPN and LUN, the storage server, and Linux. -+ */ -+ -+/*? -+ * Text: "%s: ERP failed for remote port 0x%016Lx\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: WWPN -+ * Description: -+ * An error occurred on a remote port. The error recovery procedure (ERP) -+ * could not resolve the error. The port is not available. -+ * User action: -+ * Verify that the WWPN is correct and check the fibre channel fabric for -+ * errors related to the WWPN. -+ */ -+ -+/*? -+ * Text: "%s: Attaching the name server port to the FCP device failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The zfcp device driver could not attach the name server port of the fibre -+ * channel fabric to an FCP device. A possible cause of this problem is -+ * memory constraints. 
-+ * User action: -+ * Set the FCP device offline, free some memory, then set the FCP device online -+ * again. If this does not resolve the problem, reboot Linux and try again to -+ * set the FCP device online. -+ */ -+ -+/*? -+ * Text: "%s: Registering unit 0x%016Lx on port 0x%016Lx failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: LUN -+ * @3: WWPN -+ * Description: -+ * The Linux kernel could not allocate enough memory to register the SCSI -+ * device at the indicated LUN with the SCSI stack. The SCSI device is not -+ * available. -+ * User action: -+ * Free some memory then detach the LUN and attach it again. -+ */ -+ -+/*? -+ * Text: "%s: Registering port 0x%016Lx failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: WWPN -+ * Description: -+ * The Linux kernel could not allocate enough memory to register the -+ * remote port with the indicated WWPN with the SCSI stack. The remote -+ * port is not available. -+ * User action: -+ * Free some memory and trigger the rescan for ports. -+ */ -+ -+/*? -+ * Text: "%s: A QDIO problem occurred\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * QDIO reported a problem to the zfcp device driver. The zfcp device driver -+ * tries to recover this problem. -+ * User action: -+ * Check for related error messages. If this problem occurs frequently, gather -+ * Linux debug data and contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s: A QDIO protocol error occurred, operations continue\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The zfcp device driver detected a missing flag in a QDIO queue. The device -+ * driver tries to keep the FCP device operational. -+ * User action: -+ * Check for related error messages. 
If this problem occurs frequently, gather -+ * Linux debug data, collect the FCP adapter hardware logs, and report the -+ * problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Setting up the QDIO connection to the FCP adapter failed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The zfcp device driver failed to establish a QDIO connection with the FCP -+ * adapter. -+ * User action: -+ * Set the FCP adapter offline or detach it from the Linux system, free some -+ * memory and set the FCP adapter online again or attach it again. If this -+ * problem persists, gather Linux debug data, collect the FCP adapter -+ * hardware logs, and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: The FCP adapter reported a problem that cannot be recovered\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The FCP adapter has a problem that cannot be recovered by the zfcp device -+ * driver. The zfcp device driver stopped using the FCP device. -+ * User action: -+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report -+ * this problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: There is a wrap plug instead of a fibre channel cable\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The FCP adapter is not physically connected to the fibre channel fabric. -+ * User action: -+ * Remove the wrap plug from the FCP adapter and connect the adapter with the -+ * fibre channel fabric. -+ */ -+ -+/*? -+ * Text: "%s: Access denied to unit 0x%016Lx on port 0x%016Lx\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: LUN -+ * @3: WWPN -+ * Description: -+ * The Linux system is not allowed to access the SCSI device at the indicated -+ * LUN. 
-+ * User action: -+ * Update the access control table of the FCP device to grant the Linux -+ * system access to the LUN or remove the LUN from the Linux system. -+ */ -+ -+/*? -+ * Text: "%s: FCP device not operational because of an unsupported FC class\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The FCP adapter hardware does not support the fibre channel service class -+ * requested by the zfcp device driver. This problem indicates a program error -+ * in the zfcp device driver. -+ * User action: -+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report -+ * this problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: 0x%Lx is an ambiguous request identifier\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: request ID -+ * Description: -+ * The FCP adapter reported that it received the same request ID twice. This is -+ * an error. The zfcp device driver stopped using the FCP device. -+ * User action: -+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report -+ * this problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: QTCB version 0x%x not supported by FCP adapter (0x%x to 0x%x)\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: requested version -+ * @3: lowest supported version -+ * @4: highest supported version -+ * Description: -+ * See message text. -+ * The queue transfer control block (QTCB) version requested by the zfcp device -+ * driver is not supported by the FCP adapter hardware. -+ * User action: -+ * If the requested version is higher than the highest version supported by the -+ * hardware, install more recent firmware on the FCP adapter. If the requested -+ * version is lower then the lowest version supported by the hardware, upgrade -+ * to a Linux level with a more recent zfcp device driver. -+ */ -+ -+/*? 
-+ * Text: "%s: The FCP adapter could not log in to the fibre channel fabric\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The fibre channel switch rejected the login request from the FCP adapter. -+ * User action: -+ * Check the fibre channel fabric or switch logs for possible errors. -+ */ -+ -+/*? -+ * Text: "%s: The FCP device is suspended because of a firmware update\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The FCP device is not available while a firmware update is in progress. This -+ * problem is temporary. The FCP device will resume operations when the -+ * firmware update is completed. -+ * User action: -+ * Wait 10 seconds and try the operation again. -+ */ -+ -+/*? -+ * Text: "%s: All NPIV ports on the FCP adapter have been assigned\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The number of N_Port ID Virtualization (NPIV) ports that can be assigned -+ * on an FCP adapter is limited. Once assigned, NPIV ports are not released -+ * automatically but have to be released explicitly through the support -+ * element (SE). -+ * User action: -+ * Identify NPIV ports that have been assigned but are no longer in use and -+ * release them from the SE. -+ */ -+ -+/*? -+ * Text: "%s: The link between the FCP adapter and the FC fabric is down\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The FCP adapter is not usable. Specific error information is not available. -+ * User action: -+ * Check the cabling and the fibre channel fabric configuration. If this -+ * problem persists, gather Linux debug data, collect the FCP adapter -+ * hardware logs, and report the problem to your support organization. -+ */ -+ -+/*? 
-+ * Text: "%s: Access denied to port 0x%016Lx\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: WWPN -+ * Description: -+ * The Linux system is not allowed to access the remote port with the specified -+ * WWPN. -+ * User action: -+ * Update the access control table of the FCP device to grant the Linux -+ * system access to the WWPN or remove the WWPN from the Linux system. -+ */ -+ -+/*? -+ * Text: "%s: The QTCB type is not supported by the FCP adapter\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The queue transfer control block (QTCB) type requested by the zfcp device -+ * driver is not supported by the FCP adapter hardware. -+ * User action: -+ * Install the latest firmware on your FCP adapter hardware. If this does not -+ * resolve the problem, upgrade to a Linux level with a more recent zfcp device -+ * driver. If the problem persists, contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s: The error threshold for checksum statistics has been exceeded\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The FCP adapter has reported a large number of bit errors. This might -+ * indicate a problem with the physical components of the fibre channel fabric. -+ * Details about the errors have been written to the HBA trace for the FCP -+ * adapter. -+ * User action: -+ * Check for problems in the fibre channel fabric and ensure that all cables -+ * are properly plugged. -+ */ -+ -+/*? -+ * Text: "%s: The local link has been restored\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * A problem with the connection between the FCP adapter and the adjacent node -+ * on the fibre channel fabric has been resolved. The FCP adapter is now -+ * available again. -+ * User action: -+ * None. -+ */ -+ -+/*? 
-+ * Text: "%s: Access denied according to ACT rule type %s, rule %d\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: access rule type -+ * @3: access rule -+ * Description: -+ * A rule in the access control table (ACT) for the FCP device denies access -+ * to a remote port or a LUN. -+ * User action: -+ * Examine the access control tables for the FCP device to see if the -+ * specified rule is correct. -+ */ -+ -+/*? -+ * Text: "%s: The mode table on the FCP adapter has been damaged\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * This is an FCP adapter hardware problem. -+ * User action: -+ * Report this problem with FCP hardware logs to IBM support. -+ */ -+ -+/*? -+ * Text: "%s: The adjacent fibre channel node does not support FCP\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The fibre channel switch or storage system that is connected to the FCP -+ * channel does not support the fibre channel protocol (FCP). The zfcp -+ * device driver stopped using the FCP device. -+ * User action: -+ * Check the adjacent fibre channel node. -+ */ -+ -+/*? -+ * Text: "%s: The FCP adapter does not recognize the command 0x%x\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: command -+ * Description: -+ * A command code that was sent from the zfcp device driver to the FCP adapter -+ * is not valid. The zfcp device driver stopped using the FCP device. -+ * User action: -+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report -+ * this problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: There is no light signal from the local fibre channel cable\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * There is no signal on the fibre channel cable that connects the FCP adapter -+ * to the fibre channel fabric. 
-+ * User action: -+ * Ensure that the cable is in place and connected properly to the FCP adapter -+ * and to the adjacent fibre channel switch or storage system. -+ */ -+ -+/*? -+ * Text: "%s: The WWPN assignment file on the FCP adapter has been damaged\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * This is an FCP adapter hardware problem. -+ * User action: -+ * Report this problem with FCP hardware logs to IBM support. -+ */ -+ -+/*? -+ * Text: "%s: The FCP device detected a WWPN that is duplicate or not valid\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * This condition indicates an error in the FCP adapter hardware or in the z/VM -+ * hypervisor. -+ * User action: -+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report -+ * this problem to IBM support. -+ */ -+ -+/*? -+ * Text: "%s: The fibre channel fabric does not support NPIV\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The FCP adapter requires N_Port ID Virtualization (NPIV) from the adjacent -+ * fibre channel node. Either the FCP adapter is connected to a fibre channel -+ * switch that does not support NPIV or the FCP adapter tries to use NPIV in a -+ * point-to-point setup. The connection is not operational. -+ * User action: -+ * Verify that NPIV is correctly used for this connection. Check the FCP adapter -+ * configuration and the fibre channel switch configuration. If necessary, -+ * update the fibre channel switch firmware. -+ */ -+ -+/*? -+ * Text: "%s: The FCP adapter cannot support more NPIV ports\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * N_Port ID Virtualization (NPIV) ports consume physical resources on the FCP -+ * adapter. The FCP adapter resources are exhausted. The connection is not -+ * operational. 
-+ * User action: -+ * Analyze the number of available NPIV ports and which operating system -+ * instances use them. If necessary, reconfigure your setup to move some -+ * NPIV ports to an FCP adapter with free resources. -+ */ -+ -+/*? -+ * Text: "%s: The adjacent switch cannot support more NPIV ports\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * N_Port ID Virtualization (NPIV) ports consume physical resources. The -+ * resources of the fibre channel switch that is connected to the FCP adapter -+ * are exhausted. The connection is not operational. -+ * User action: -+ * Analyze the number of available NPIV ports on the adjacent fibre channel -+ * switch and how they are used. If necessary, reconfigure your fibre channel -+ * fabric to accommodate the required NPIV ports. -+ */ -+ -+/*? -+ * Text: "%s: 0x%x is not a valid transfer protocol status\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: status information -+ * Description: -+ * The transfer protocol status information reported by the FCP adapter is not -+ * a valid status for the zfcp device driver. The zfcp device driver stopped -+ * using the FCP device. -+ * User action: -+ * Gather Linux debug data, collect the FCP adapter hardware logs, and report -+ * this problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Unknown or unsupported arbitrated loop fibre channel topology detected\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The FCP device is connected to a fibre channel arbitrated loop or the FCP adapter -+ * reported an unknown fibre channel topology. The zfcp device driver supports -+ * point-to-point connections and switched fibre channel fabrics but not arbitrated -+ * loop topologies. The FCP device cannot be used. 
-+ * User action: -+ * Check the fibre channel setup and ensure that only supported topologies are -+ * connected to the FCP adapter. -+ */ -+ -+/*? -+ * Text: "%s: FCP adapter maximum QTCB size (%d bytes) is too small\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: maximum supported size -+ * @3: requested QTCB size -+ * Description: -+ * The queue transfer control block (QTCB) size requested by the zfcp -+ * device driver is not supported by the FCP adapter hardware. -+ * User action: -+ * Update the firmware on your FCP adapter hardware to the latest -+ * available level and update the Linux kernel to the latest supported -+ * level. If the problem persists, contact your support organization. -+ */ -+ -+/*? -+ * Text: "%s: The FCP adapter only supports newer control block versions\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The protocol supported by the FCP adapter is not compatible with the zfcp -+ * device driver. -+ * User action: -+ * Upgrade your Linux kernel to a level that includes a zfcp device driver -+ * with support for the control block version required by your FCP adapter. -+ */ -+ -+/*? -+ * Text: "%s: The FCP adapter only supports older control block versions\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * Description: -+ * The protocol supported by the FCP adapter is not compatible with the zfcp -+ * device driver. -+ * User action: -+ * Install the latest firmware on your FCP adapter. -+ */ -+ -+/*? -+ * Text: "%s: Not enough FCP adapter resources to open remote port 0x%016Lx\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: WWPN -+ * Description: -+ * Each port that is opened consumes physical resources of the FCP adapter to -+ * which it is attached. These resources are exhausted and the specified port -+ * cannot be opened. 
-+ * User action: -+ * Reduce the total number of remote ports that are attached to the -+ * FCP adapter. -+ */ -+ -+/*? -+ * Text: "%s: LUN 0x%Lx on port 0x%Lx is already in use by CSS%d, MIF Image ID %x\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: LUN -+ * @3: remote port WWPN -+ * @4: channel subsystem ID -+ * @5: MIF Image ID of the LPAR -+ * Description: -+ * The SCSI device at the indicated LUN is already in use by another system. -+ * Only one system at a time can use the SCSI device. -+ * User action: -+ * Ensure that the other system stops using the device before trying to use it. -+ */ -+ -+/*? -+ * Text: "%s: No handle is available for LUN 0x%016Lx on port 0x%016Lx\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: LUN -+ * @3: WWPN -+ * Description: -+ * The FCP adapter can only open a limited number of SCSI devices. This limit -+ * has been reached and the SCSI device at the indicated LUN cannot be opened. -+ * User action: -+ * Check all SCSI devices opened through the FCP adapter and close some of them. -+ */ -+ -+/*? -+ * Text: "%s: SCSI device at LUN 0x%016Lx on port 0x%016Lx opened read-only\n" -+ * Severity: Informational -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: LUN -+ * @3: WWPN -+ * Description: -+ * The access control tables in the FCP adapter allow read-only access for the -+ * LUN. Write access is not permitted for your Linux instance. The SCSI -+ * device has been opened successfully in read-only access mode. -+ * User action: -+ * None if read-only access is sufficient. If you require write access, change -+ * the access control tables in the FCP adapter. -+ */ -+ -+/*? 
-+ * Text: "%s: Exclusive read-only access not supported (unit 0x%016Lx, port 0x%016Lx)\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: LUN -+ * @3: WWPN -+ * Description: -+ * The access configuration specified in the access control tables of the FCP -+ * adapter is not valid. The SCSI device at the indicated LUN cannot be -+ * accessed. -+ * User action: -+ * Change the access control tables in the FCP adapter. -+ */ -+ -+/*? -+ * Text: "%s: Shared read-write access not supported (unit 0x%016Lx, port 0x%016Lx)\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: LUN -+ * @3: WWPN -+ * Description: -+ * The access configuration specified in the access control tables of the FCP -+ * adapter is not valid. The SCSI device at the indicated LUN cannot be -+ * accessed. -+ * User action: -+ * Change the access control tables in the FCP adapter. -+ */ -+ -+/*? -+ * Text: "%s: Incorrect direction %d, unit 0x%016Lx on port 0x%016Lx closed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: value in direction field -+ * @3: LUN -+ * @4: WWPN -+ * Description: -+ * The direction field in a SCSI request contains an incorrect value. The zfcp -+ * device driver closed down the SCSI device at the indicated LUN. -+ * User action: -+ * Gather Linux debug data and report this problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Incorrect CDB length %d, unit 0x%016Lx on port 0x%016Lx closed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: value in length field -+ * @3: LUN -+ * @4: WWPN -+ * Description: -+ * The control-data-block (CDB) length field in a SCSI request is not valid or -+ * too large for the FCP adapter. The zfcp device driver closed down the SCSI -+ * device at the indicated LUN. -+ * User action: -+ * Gather Linux debug data and report this problem to your support organization. -+ */ -+ -+/*? 
-+ * Text: "%s: Oversize data package, unit 0x%016Lx on port 0x%016Lx closed\n" -+ * Severity: Error -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: LUN -+ * @3: WWPN -+ * Description: -+ * A SCSI request with too much data has been sent to the SCSI device at the -+ * indicated LUN. The FCP adapter cannot handle data packets of this size and -+ * the SCSI device driver closed down the SCSI device. -+ * User action: -+ * Gather Linux debug data and report this problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: Opening WKA port 0x%x failed\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: destination ID of the WKA port -+ * Description: -+ * The FCP adapter rejected a request to open the specified -+ * well-known address (WKA) port. No retry is possible. -+ * User action: -+ * Verify the setup and check if the maximum number of remote ports -+ * used through this adapter is below the maximum allowed. If the -+ * problem persists, gather Linux debug data, collect the FCP adapter -+ * hardware logs, and report the problem to your support organization. -+ */ -+ -+/*? -+ * Text: "%s: The name server reported %d words residual data\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: number of words in residual data -+ * Description: -+ * The fibre channel name server sent too much information about remote ports. -+ * The zfcp device driver did not receive sufficient information to attach all -+ * available remote ports in the SAN. -+ * User action: -+ * Verify that you are running the latest firmware level on the FCP -+ * adapter. Check your SAN setup and consider reducing the number of ports -+ * visible to the FCP adapter by using more restrictive zoning in the SAN. -+ */ -+ -+/*? 
-+ * Text: "%s: A port opened with WWPN 0x%016Lx returned data that identifies it as WWPN 0x%016Lx\n" -+ * Severity: Warning -+ * Parameter: -+ * @1: bus ID of the zfcp device -+ * @2: expected WWPN -+ * @3: reported WWPN -+ * Description: -+ * A remote port was opened successfully, but it reported an -+ * unexpected WWPN in the returned port login (PLOGI) data. This -+ * condition might have been caused by a change applied to the SAN -+ * configuration while the port was being opened. -+ * User action: -+ * If this condition is only temporary and access to the remote port -+ * is possible, no action is required. If the condition persists, -+ * identify the storage system with the specified WWPN and contact the -+ * support organization of the storage system. -+ */ ---- a/Makefile -+++ b/Makefile -@@ -65,6 +65,20 @@ ifndef KBUILD_CHECKSRC - KBUILD_CHECKSRC = 0 - endif - -+# Call message checker as part of the C compilation -+# -+# Use 'make D=1' to enable checking -+# Use 'make D=2' to create the message catalog -+ -+ifdef D -+ ifeq ("$(origin D)", "command line") -+ KBUILD_KMSG_CHECK = $(D) -+ endif -+endif -+ifndef KBUILD_KMSG_CHECK -+ KBUILD_KMSG_CHECK = 0 -+endif -+ - # Use make M=dir to specify directory of external module to build - # Old syntax make ... 
SUBDIRS=$PWD is still supported - # Setting the environment variable KBUILD_EXTMOD take precedence -@@ -331,6 +345,7 @@ CHECK = sparse - - CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ - -Wbitwise -Wno-return-void $(CF) -+KMSG_CHECK = $(srctree)/scripts/kmsg-doc - CFLAGS_MODULE = - AFLAGS_MODULE = - LDFLAGS_MODULE = -@@ -379,6 +394,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAG - export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE - export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE - export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL -+export KBUILD_KMSG_CHECK KMSG_CHECK - - # When compiling out-of-tree modules, put MODVERDIR in the module - # tree rather than in the kernel tree. The kernel tree might ---- a/arch/s390/Kconfig -+++ b/arch/s390/Kconfig -@@ -581,6 +581,14 @@ bool "s390 guest support for KVM (EXPERI - virtio transport. If KVM is detected, the virtio console will be - the default console. - -+config KMSG_IDS -+ bool "Kernel message numbers" -+ default y -+ help -+ Select this option if you want to include a message number to the -+ prefix for kernel messages issued by the s390 architecture and -+ driver code. See "Documentation/s390/kmsg.txt" for more details. -+ - config SECCOMP - def_bool y - prompt "Enable seccomp to safely compute untrusted bytecode" ---- a/include/linux/device.h -+++ b/include/linux/device.h -@@ -643,6 +643,38 @@ extern const char *dev_driver_string(con - - #ifdef CONFIG_PRINTK - -+#if defined(KMSG_COMPONENT) && (defined(CONFIG_KMSG_IDS) || defined(__KMSG_CHECKER)) -+/* dev_printk_hash for message documentation */ -+#if defined(__KMSG_CHECKER) && defined(KMSG_COMPONENT) -+ -+/* generate magic string for scripts/kmsg-doc to parse */ -+#define dev_printk_hash(level, dev, format, arg...) 
\ -+ __KMSG_DEV(level _FMT_ format _ARGS_ dev, ## arg _END_) -+ -+#elif defined(CONFIG_KMSG_IDS) && defined(KMSG_COMPONENT) -+ -+int printk_dev_hash(const char *, const char *, const char *, ...); -+#define dev_printk_hash(level, dev, format, arg...) \ -+ printk_dev_hash(level "%s.%06x: ", dev_driver_string(dev), \ -+ "%s: " format, dev_name(dev), ## arg) -+ -+#endif -+ -+#define dev_emerg(dev, format, arg...) \ -+ dev_printk_hash(KERN_EMERG , dev , format , ## arg) -+#define dev_alert(dev, format, arg...) \ -+ dev_printk_hash(KERN_ALERT , dev , format , ## arg) -+#define dev_crit(dev, format, arg...) \ -+ dev_printk_hash(KERN_CRIT , dev , format , ## arg) -+#define dev_err(dev, format, arg...) \ -+ dev_printk_hash(KERN_ERR , dev , format , ## arg) -+#define dev_warn(dev, format, arg...) \ -+ dev_printk_hash(KERN_WARNING , dev , format , ## arg) -+#define dev_notice(dev, format, arg...) \ -+ dev_printk_hash(KERN_NOTICE , dev , format , ## arg) -+#define _dev_info(dev, format, arg...) \ -+ dev_printk_hash(KERN_INFO , dev , format , ## arg) -+#else - extern int dev_printk(const char *level, const struct device *dev, - const char *fmt, ...) - __attribute__ ((format (printf, 3, 4))); -@@ -660,7 +692,7 @@ extern int dev_notice(const struct devic - __attribute__ ((format (printf, 2, 3))); - extern int _dev_info(const struct device *dev, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); -- -+#endif - #else - - static inline int dev_printk(const char *level, const struct device *dev, ---- a/include/linux/printk.h -+++ b/include/linux/printk.h -@@ -412,23 +412,42 @@ extern int hex_to_bin(char ch); - #define pr_fmt(fmt) fmt - #endif - -+#if defined(__KMSG_CHECKER) && defined(KMSG_COMPONENT) -+ -+/* generate magic string for scripts/kmsg-doc to parse */ -+#define pr_printk_hash(level, format, ...) 
\ -+ __KMSG_PRINT(level _FMT_ format _ARGS_ ##__VA_ARGS__ _END_) -+ -+#elif defined(CONFIG_KMSG_IDS) && defined(KMSG_COMPONENT) -+ -+int printk_hash(const char *, const char *, ...); -+#define pr_printk_hash(level, format, ...) \ -+ printk_hash(level KMSG_COMPONENT ".%06x" ": ", format, ##__VA_ARGS__) -+ -+#else /* !defined(CONFIG_KMSG_IDS) */ -+ -+#define pr_printk_hash(level, format, ...) \ -+ printk(level pr_fmt(format), ##__VA_ARGS__) -+ -+#endif -+ - #define pr_emerg(fmt, ...) \ -- printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_EMERG, fmt, ##__VA_ARGS__) - #define pr_alert(fmt, ...) \ -- printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_ALERT, fmt, ##__VA_ARGS__) - #define pr_crit(fmt, ...) \ -- printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_CRIT, fmt, ##__VA_ARGS__) - #define pr_err(fmt, ...) \ -- printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_ERR, fmt, ##__VA_ARGS__) - #define pr_warning(fmt, ...) \ -- printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_WARNING, fmt, ##__VA_ARGS__) - #define pr_warn pr_warning - #define pr_notice(fmt, ...) \ -- printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_NOTICE, fmt, ##__VA_ARGS__) - #define pr_info(fmt, ...) \ -- printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -+ pr_printk_hash(KERN_INFO, fmt, ##__VA_ARGS__) - #define pr_cont(fmt, ...) 
\ -- printk(KERN_CONT fmt, ##__VA_ARGS__) -+ pr_printk_hash(KERN_CONT, fmt, ##__VA_ARGS__) - - /* pr_devel() should produce zero code unless DEBUG is defined */ - #ifdef DEBUG ---- a/kernel/printk.c -+++ b/kernel/printk.c -@@ -39,6 +39,8 @@ - #include - #include - #include -+#include -+#include - - #include - -@@ -1573,3 +1575,46 @@ void kmsg_dump(enum kmsg_dump_reason rea - rcu_read_unlock(); - } - #endif -+ -+#if defined CONFIG_PRINTK && defined CONFIG_KMSG_IDS -+ -+/** -+ * printk_hash - print a kernel message include a hash over the message -+ * @prefix: message prefix including the ".%06x" for the hash -+ * @fmt: format string -+ */ -+asmlinkage int printk_hash(const char *prefix, const char *fmt, ...) -+{ -+ va_list args; -+ int r; -+ -+ r = printk(prefix, jhash(fmt, strlen(fmt), 0) & 0xffffff); -+ va_start(args, fmt); -+ r += vprintk(fmt, args); -+ va_end(args); -+ -+ return r; -+} -+EXPORT_SYMBOL(printk_hash); -+ -+/** -+ * printk_dev_hash - print a kernel message include a hash over the message -+ * @prefix: message prefix including the ".%06x" for the hash -+ * @dev: device this printk is all about -+ * @fmt: format string -+ */ -+asmlinkage int printk_dev_hash(const char *prefix, const char *driver_name, -+ const char *fmt, ...) 
-+{ -+ va_list args; -+ int r; -+ -+ r = printk(prefix, driver_name, jhash(fmt, strlen(fmt), 0) & 0xffffff); -+ va_start(args, fmt); -+ r += vprintk(fmt, args); -+ va_end(args); -+ -+ return r; -+} -+EXPORT_SYMBOL(printk_dev_hash); -+#endif ---- a/scripts/Makefile.build -+++ b/scripts/Makefile.build -@@ -232,12 +232,14 @@ endef - # Built-in and composite module parts - $(obj)/%.o: $(src)/%.c FORCE - $(call cmd,force_checksrc) -+ $(call cmd,force_check_kmsg) - $(call if_changed_rule,cc_o_c) - - # Single-part modules are special since we need to mark them in $(MODVERDIR) - - $(single-used-m): $(obj)/%.o: $(src)/%.c FORCE - $(call cmd,force_checksrc) -+ $(call cmd,force_check_kmsg) - $(call if_changed_rule,cc_o_c) - @{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod) - -@@ -361,6 +363,18 @@ $(multi-used-m) : %.o: $(multi-objs-m) F - - targets += $(multi-used-y) $(multi-used-m) - -+# kmsg check tool -+ifneq ($(KBUILD_KMSG_CHECK),0) -+ ifeq ($(KBUILD_KMSG_CHECK),2) -+ kmsg_cmd := print -+ quiet_cmd_force_check_kmsg = KMSG_PRINT $< -+ $(shell [ -d $(objtree)/man ] || mkdir -p $(objtree)/man) -+ else -+ kmsg_cmd := check -+ quiet_cmd_force_check_kmsg = KMSG_CHECK $< -+ endif -+ cmd_force_check_kmsg = $(KMSG_CHECK) $(kmsg_cmd) $(CC) $(c_flags) $< ; -+endif - - # Descending - # --------------------------------------------------------------------------- ---- /dev/null -+++ b/scripts/kmsg-doc -@@ -0,0 +1,479 @@ -+#!/usr/bin/perl -w -+# -+# kmsg kernel messages check and print tool. -+# -+# To check the source code for missing messages the script is called -+# with check, the name compiler and the compile parameters -+# kmsg-doc check $(CC) $(c_flags) $< -+# To create man pages for the messages the script is called with -+# kmsg-doc print $(CC) $(c_flags) $< -+# -+# Copyright IBM Corp. 
2008 -+# Author(s): Martin Schwidefsky -+# Michael Holzheu -+# -+ -+use Cwd; -+use Switch; -+use bigint; -+ -+my $errors = 0; -+my $warnings = 0; -+my $srctree = ""; -+my $objtree = ""; -+my $kmsg_count = 0; -+ -+sub remove_quotes($) -+{ -+ my ($string) = @_; -+ my $inside = 0; -+ my $slash = 0; -+ my $result = ""; -+ -+ foreach my $str (split(/([\\"])/, $string)) { -+ if ($inside && ($str ne "\"" || $slash)) { -+ $result .= $str; -+ } -+ # Check for backslash before quote -+ if ($str eq "\"") { -+ if (!$slash) { -+ $inside = !$inside; -+ } -+ $slash = 0; -+ } elsif ($str eq "\\") { -+ $slash = !$slash; -+ } elsif ($str ne "") { -+ $slash = 0; -+ } -+ } -+ return $result; -+} -+ -+sub string_to_bytes($) -+{ -+ my ($string) = @_; -+ my %is_escape = ('"', 0x22, '\'', 0x27, 'n', 0x0a, 'r', 0x0d, 'b', 0x08, -+ 't', 0x09, 'f', 0x0c, 'a', 0x07, 'v', 0x0b, '?', 0x3f); -+ my (@ar, $slash, $len); -+ -+ # scan string, interpret backslash escapes and write bytes to @ar -+ $len = 0; -+ foreach my $ch (split(//, $string)) { -+ if ($ch eq '\\') { -+ $slash = !$slash; -+ if (!$slash) { -+ $ar[$len] = ord('\\'); -+ $len++; -+ } -+ } elsif ($slash && defined $is_escape{$ch}) { -+ # C99 backslash escapes: \\ \" \' \n \r \b \t \f \a \v \? 
-+ $ar[$len] = $is_escape{$ch}; -+ $len++; -+ $slash = 0; -+ } elsif ($slash) { -+ # FIXME: C99 backslash escapes \nnn \xhh -+ die("Unknown backslash escape in message $string."); -+ } else { -+ # normal character -+ $ar[$len] = ord($ch); -+ $len++; -+ } -+ } -+ return @ar; -+} -+ -+sub calc_jhash($) -+{ -+ my ($string) = @_; -+ my @ar; -+ my ($a, $b, $c, $i, $length, $len); -+ -+ @ar = string_to_bytes($string); -+ $length = @ar; -+ # add dummy elements to @ar to avoid if then else hell -+ push @ar, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); -+ $a = 0x9e3779b9; -+ $b = 0x9e3779b9; -+ $c = 0; -+ $i = 0; -+ for ($len = $length + 12; $len >= 12; $len -= 12) { -+ if ($len < 24) { -+ # add length for last round -+ $c += $length; -+ } -+ $a += $ar[$i] + ($ar[$i+1]<<8) + ($ar[$i+2]<<16) + ($ar[$i+3]<<24); -+ $b += $ar[$i+4] + ($ar[$i+5]<<8) + ($ar[$i+6]<<16) + ($ar[$i+7]<<24); -+ if ($len >= 24) { -+ $c += $ar[$i+8] + ($ar[$i+9]<<8) + ($ar[$i+10]<<16) + ($ar[$i+11]<<24); -+ } else { -+ $c += ($ar[$i+8]<<8) + ($ar[$i+9]<<16) + ($ar[$i+10]<<24); -+ } -+ $a &= 0xffffffff; $b &= 0xffffffff; $c &= 0xffffffff; -+ $a -= $b; $a -= $c; $a ^= ($c >> 13); $a &= 0xffffffff; -+ $b -= $c; $b -= $a; $b ^= ($a << 8); $b &= 0xffffffff; -+ $c -= $a; $c -= $b; $c ^= ($b >> 13); $c &= 0xffffffff; -+ $a -= $b; $a -= $c; $a ^= ($c >> 12); $a &= 0xffffffff; -+ $b -= $c; $b -= $a; $b ^= ($a << 16); $b &= 0xffffffff; -+ $c -= $a; $c -= $b; $c ^= ($b >> 5); $c &= 0xffffffff; -+ $a -= $b; $a -= $c; $a ^= ($c >> 3); $a &= 0xffffffff; -+ $b -= $c; $b -= $a; $b ^= ($a << 10); $b &= 0xffffffff; -+ $c -= $a; $c -= $b; $c ^= ($b >> 15); $c &= 0xffffffff; -+ $i += 12; -+ } -+ return $c; -+} -+ -+sub add_kmsg_desc($$$$$$) -+{ -+ my ($component, $text, $sev, $argv, $desc, $user) = @_; -+ my ($hash, $tag); -+ -+ $text = remove_quotes($text); -+ $hash = substr(sprintf("%08x", calc_jhash($text)), 2, 6); -+ $tag = $component . "." . 
$hash; -+ -+ if ($kmsg_desc{$tag}) { -+ if ($text ne $kmsg_desc{$tag}->{'TEXT'}) { -+ warn "Duplicate message with tag $tag\n"; -+ warn " --- $kmsg_desc{$tag}->{'TEXT'}\n"; -+ warn " +++ $text\n"; -+ } else { -+ warn "Duplicate message description for \"$text\"\n"; -+ } -+ $errors++; -+ return; -+ } -+ $kmsg_desc{$tag}->{'TEXT'} = $text; -+ $kmsg_desc{$tag}->{'SEV'} = $sev; -+ $kmsg_desc{$tag}->{'ARGV'} = $argv; -+ $kmsg_desc{$tag}->{'DESC'} = $desc; -+ $kmsg_desc{$tag}->{'USER'} = $user; -+} -+ -+sub add_kmsg_print($$$$) -+{ -+ my ($component, $sev, $text, $argv) = @_; -+ my ($hash, $tag, $count, $parm); -+ -+ $text = remove_quotes($text); -+ $hash = substr(sprintf("%08x", calc_jhash($text)), 2, 6); -+ $tag = $component . "." . $hash; -+ -+ # Pretty print severity -+ $sev =~ s/"<0>"/Emerg/; -+ $sev =~ s/"<1>"/Alert/; -+ $sev =~ s/"<2>"/Critical/; -+ $sev =~ s/"<3>"/Error/; -+ $sev =~ s/"<4>"/Warning/; -+ $sev =~ s/"<5>"/Notice/; -+ $sev =~ s/"<6>"/Informational/; -+ $sev =~ s/"<7>"/Debug/; -+ $kmsg_print{$kmsg_count}->{'TAG'} = $tag; -+ $kmsg_print{$kmsg_count}->{'TEXT'} = $text; -+ $kmsg_print{$kmsg_count}->{'SEV'} = $sev; -+ $kmsg_print{$kmsg_count}->{'ARGV'} = $argv; -+ $kmsg_count += 1; -+} -+ -+sub process_source_file($$) -+{ -+ my ($component, $file) = @_; -+ my $state; -+ my ($text, $sev, $argv, $desc, $user); -+ -+ if (!open(FD, "$file")) { -+ return ""; -+ } -+ -+ $state = 0; -+ while () { -+ chomp; -+ # kmsg message component: #define KMSG_COMPONENT "" -+ if (/^#define\s+KMSG_COMPONENT\s+\"(.*)\"[^\"]*$/o) { -+ $component = $1; -+ } -+ if ($state == 0) { -+ # single line kmsg for undocumented messages, format: -+ # /*? Text: "" */ -+ if (/^\s*\/\*\?\s*Text:\s*(\".*\")\s*\*\/\s*$/o) { -+ add_kmsg_desc($component, $1, "", "", "", ""); -+ } -+ # kmsg message start: '/*?' 
-+ if (/^\s*\/\*\?\s*$/o) { -+ $state = 1; -+ ($text, $sev, $argv, $desc, $user) = ( "", "", "", "", "" ); -+ } -+ } elsif ($state == 1) { -+ # kmsg message end: ' */' -+ if (/^\s*\*\/\s*/o) { -+ add_kmsg_desc($component, $text, $sev, $argv, $desc, $user); -+ $state = 0; -+ } -+ # kmsg message text: ' * Text: ""' -+ elsif (/^\s*\*\s*Text:\s*(\".*\")\s*$/o) { -+ $text = $1; -+ } -+ # kmsg message severity: ' * Severity: ' -+ elsif (/^\s*\*\s*Severity:\s*(\S*)\s*$/o) { -+ $sev = $1; -+ } -+ # kmsg message parameter: ' * Parameter: ' -+ elsif (/^\s*\*\s*Parameter:\s*(\S*)\s*$/o) { -+ if (!defined($1)) { -+ $argv = ""; -+ } else { -+ $argv = $1; -+ } -+ $state = 2; -+ } -+ # kmsg message description start: ' * Description:' -+ elsif (/^\s*\*\s*Description:\s*(\S*)\s*$/o) { -+ if (!defined($1)) { -+ $desc = ""; -+ } else { -+ $desc = $1; -+ } -+ $state = 3; -+ } -+ # kmsg has unrecognizable lines -+ else { -+ warn "Warning(${file}:$.): Cannot understand $_"; -+ $warnings++; -+ $state = 0; -+ } -+ } elsif ($state == 2) { -+ # kmsg message end: ' */' -+ if (/^\s*\*\//o) { -+ warn "Warning(${file}:$.): Missing description, skipping message"; -+ $warnings++; -+ $state = 0; -+ } -+ # kmsg message description start: ' * Description:' -+ elsif (/^\s*\*\s*Description:\s*$/o) { -+ $desc = $1; -+ $state = 3; -+ } -+ # kmsg message parameter line: ' * ' -+ elsif (/^\s*\*(.*)$/o) { -+ $argv .= "\n" . $1; -+ } else { -+ warn "Warning(${file}:$.): Cannot understand $_"; -+ $warnings++; -+ $state = 0; -+ } -+ } elsif ($state == 3) { -+ # kmsg message end: ' */' -+ if (/^\s*\*\/\s*/o) { -+ add_kmsg_desc($component, $text, $sev, $argv, $desc, $user); -+ $state = 0; -+ } -+ # kmsg message description start: ' * User action:' -+ elsif (/^\s*\*\s*User action:\s*$/o) { -+ $user = $1; -+ $state = 4; -+ } -+ # kmsg message description line: ' * ' -+ elsif (/^\s*\*\s*(.*)$/o) { -+ $desc .= "\n" . 
$1; -+ } else { -+ warn "Warning(${file}:$.): Cannot understand $_"; -+ $warnings++; -+ $state = 0; -+ } -+ } elsif ($state == 4) { -+ # kmsg message end: ' */' -+ if (/^\s*\*\/\s*/o) { -+ add_kmsg_desc($component, $text, $sev, $argv, $desc, $user); -+ $state = 0; -+ } -+ # kmsg message user action line: ' * ' -+ elsif (/^\s*\*\s*(.*)$/o) { -+ $user .= "\n" . $1; -+ } else { -+ warn "Warning(${file}:$.): Cannot understand $_"; -+ $warnings++; -+ $state = 0; -+ } -+ } -+ } -+ return $component; -+} -+ -+sub process_cpp_file($$$$) -+{ -+ my ($cc, $options, $file, $component) = @_; -+ -+ open(FD, "$cc $gcc_options|") or die ("Preprocessing failed."); -+ -+ while () { -+ chomp; -+ if (/.*__KMSG_PRINT\(\s*(\S*)\s*_FMT_(.*)_ARGS_\s*(.*)?_END_\s*\)/o) { -+ if ($component ne "") { -+ add_kmsg_print($component, $1, $2, $3); -+ } else { -+ warn "Error(${file}:$.): kmsg without component\n"; -+ $errors++; -+ } -+ } elsif (/.*__KMSG_DEV\(\s*(\S*)\s*_FMT_(.*)_ARGS_\s*(.*)?_END_\s*\)/o) { -+ if ($component ne "") { -+ add_kmsg_print($component, $1, "\"%s: \"" . $2, $3); -+ } else { -+ warn "Error(${file}:$.): kmsg without component\n"; -+ $errors++; -+ } -+ } -+ } -+} -+ -+sub check_messages($) -+{ -+ my $component = "@_"; -+ my $failed = 0; -+ -+ for ($i = 0; $i < $kmsg_count; $i++) { -+ $tag = $kmsg_print{$i}->{'TAG'}; -+ if (!defined($kmsg_desc{$tag})) { -+ add_kmsg_desc($component, -+ "\"" . $kmsg_print{$i}->{'TEXT'} . "\"", -+ $kmsg_print{$i}->{'SEV'}, -+ $kmsg_print{$i}->{'ARGV'}, -+ "Please insert description here", -+ "What is the user supposed to do"); -+ $kmsg_desc{$tag}->{'CHECK'} = 1; -+ $failed = 1; -+ warn "$component: Missing description for: ". 
-+ $kmsg_print{$i}->{'TEXT'}."\n"; -+ $errors++; -+ next; -+ } -+ if ($kmsg_desc{$tag}->{'SEV'} ne "" && -+ $kmsg_desc{$tag}->{'SEV'} ne $kmsg_print{$i}->{'SEV'}) { -+ warn "Message severity mismatch for \"$kmsg_print{$i}->{'TEXT'}\"\n"; -+ warn " --- $kmsg_desc{$tag}->{'SEV'}\n"; -+ warn " +++ $kmsg_print{$i}->{'SEV'}\n"; -+ } -+ } -+ return $failed; -+} -+ -+sub print_templates() -+{ -+ print "Templates for missing messages:\n"; -+ foreach $tag ( sort { $kmsg_desc{$a} <=> $kmsg_desc{$b} } keys %kmsg_desc ) { -+ if (!defined($kmsg_desc{$tag}->{'CHECK'})) { -+ next; -+ } -+ print "/*?\n"; -+ print " * Text: \"$kmsg_desc{$tag}->{'TEXT'}\"\n"; -+ print " * Severity: $kmsg_desc{$tag}->{'SEV'}\n"; -+ $argv = $kmsg_desc{$tag}->{'ARGV'}; -+ if ($argv ne "") { -+ print " * Parameter:\n"; -+ @parms = split(/\s*,\s*/,$kmsg_desc{$tag}->{'ARGV'}); -+ $count = 0; -+ foreach $parm (@parms) { -+ $count += 1; -+ if (!($parm eq "")) { -+ print " * \@$count: $parm\n"; -+ } -+ } -+ } -+ print " * Description:\n"; -+ print " * $kmsg_desc{$tag}->{'DESC'}\n"; -+ print " * User action:\n"; -+ print " * $kmsg_desc{$tag}->{'USER'}\n"; -+ print " */\n\n"; -+ } -+} -+ -+sub write_man_pages() -+{ -+ my ($i, $file); -+ -+ for ($i = 0; $i < $kmsg_count; $i++) { -+ $tag = $kmsg_print{$i}->{'TAG'}; -+ if (!defined($kmsg_desc{$tag}) || -+ defined($kmsg_desc{$tag}->{'CHECK'}) || -+ $kmsg_desc{$tag}->{'DESC'} eq "") { -+ next; -+ } -+ $file = $objtree . "man/" . $tag . ".9"; -+ if (!open(WR, ">$file")) { -+ warn "Error: Cannot open file $file\n"; -+ $errors++; -+ return; -+ } -+ print WR ".TH \"$tag\" 9 \"Linux Messages\" LINUX\n"; -+ print WR ".SH Message\n"; -+ print WR $tag . ": " . $kmsg_desc{$tag}->{'TEXT'} . 
"\n"; -+ print WR ".SH Severity\n"; -+ print WR "$kmsg_desc{$tag}->{'SEV'}\n"; -+ $argv = $kmsg_desc{$tag}->{'ARGV'}; -+ if ($argv ne "") { -+ print WR ".SH Parameters\n"; -+ @parms = split(/\s*\n\s*/,$kmsg_desc{$tag}->{'ARGV'}); -+ foreach $parm (@parms) { -+ $parm =~ s/^\s*(.*)\s*$/$1/; -+ if (!($parm eq "")) { -+ print WR "$parm\n\n"; -+ } -+ } -+ } -+ print WR ".SH Description"; -+ print WR "$kmsg_desc{$tag}->{'DESC'}\n"; -+ $user = $kmsg_desc{$tag}->{'USER'}; -+ if ($user ne "") { -+ print WR ".SH User action"; -+ print WR "$user\n"; -+ } -+ } -+} -+ -+if (defined($ENV{'srctree'})) { -+ $srctree = "$ENV{'srctree'}" . "/"; -+} else { -+ $srctree = getcwd; -+} -+ -+if (defined($ENV{'objtree'})) { -+ $objtree = "$ENV{'objtree'}" . "/"; -+} else { -+ $objtree = getcwd; -+} -+ -+if (defined($ENV{'SRCARCH'})) { -+ $srcarch = "$ENV{'SRCARCH'}" . "/"; -+} else { -+ print "kmsg-doc called without a valid \$SRCARCH\n"; -+ exit 1; -+} -+ -+$option = shift; -+ -+$cc = shift; -+$gcc_options = "-E -D __KMSG_CHECKER "; -+foreach $tmp (@ARGV) { -+ $tmp =~ s/\(/\\\(/; -+ $tmp =~ s/\)/\\\)/; -+ $gcc_options .= " $tmp"; -+ $filename = $tmp; -+} -+ -+$component = process_source_file("", $filename); -+if ($component ne "") { -+ process_source_file($component, $srctree . "Documentation/kmsg/" . -+ $srcarch . $component); -+ process_source_file($component, $srctree . "Documentation/kmsg/" . 
-+ $component); -+} -+ -+process_cpp_file($cc, $gcc_options, $filename, $component); -+if ($option eq "check") { -+ if (check_messages($component)) { -+ print_templates(); -+ } -+} elsif ($option eq "print") { -+ write_man_pages(); -+} -+ -+exit($errors); diff --git a/patches.arch/x86-apic-force-bigsmp-apic-on-IBM-EXA3-4.patch b/patches.arch/x86-apic-force-bigsmp-apic-on-IBM-EXA3-4.patch deleted file mode 100644 index a3d3570..0000000 --- a/patches.arch/x86-apic-force-bigsmp-apic-on-IBM-EXA3-4.patch +++ /dev/null @@ -1,87 +0,0 @@ -From: IBM -Subject: Use apic=bigsmp on specific xseries machines -References: bnc#440497 -Patch-Mainline: not yet - -Signed-off-by: Thomas Renninger - - - arch/x86/kernel/apic/bigsmp_32.c | 30 +++++++++++++++++++++++++++--- - arch/x86/kernel/apic/probe_32.c | 4 ++-- - 2 files changed, 29 insertions(+), 5 deletions(-) - ---- a/arch/x86/kernel/apic/bigsmp_32.c -+++ b/arch/x86/kernel/apic/bigsmp_32.c -@@ -156,7 +156,7 @@ static void bigsmp_send_IPI_all(int vect - - static int dmi_bigsmp; /* can be set by dmi scanners */ - --static int hp_ht_bigsmp(const struct dmi_system_id *d) -+static int force_bigsmp_apic(const struct dmi_system_id *d) - { - printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); - dmi_bigsmp = 1; -@@ -166,17 +166,41 @@ static int hp_ht_bigsmp(const struct dmi - - - static const struct dmi_system_id bigsmp_dmi_table[] = { -- { hp_ht_bigsmp, "HP ProLiant DL760 G2", -+ { force_bigsmp_apic, "HP ProLiant DL760 G2", - { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_BIOS_VERSION, "P44-"), - } - }, - -- { hp_ht_bigsmp, "HP ProLiant DL740", -+ { force_bigsmp_apic, "HP ProLiant DL740", - { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_BIOS_VERSION, "P47-"), - } - }, -+ -+ { force_bigsmp_apic, "IBM x260 / x366 / x460", -+ { DMI_MATCH(DMI_BIOS_VENDOR, "IBM"), -+ DMI_MATCH(DMI_BIOS_VERSION, "-[ZT"), -+ } -+ }, -+ -+ { force_bigsmp_apic, "IBM x3800 / x3850 / x3950", -+ { DMI_MATCH(DMI_BIOS_VENDOR, "IBM"), -+ 
DMI_MATCH(DMI_BIOS_VERSION, "-[ZU"), -+ } -+ }, -+ -+ { force_bigsmp_apic, "IBM x3800 / x3850 / x3950", -+ { DMI_MATCH(DMI_BIOS_VENDOR, "IBM"), -+ DMI_MATCH(DMI_BIOS_VERSION, "-[ZS"), -+ } -+ }, -+ -+ { force_bigsmp_apic, "IBM x3850 M2 / x3950 M2", -+ { DMI_MATCH(DMI_BIOS_VENDOR, "IBM"), -+ DMI_MATCH(DMI_BIOS_VERSION, "-[A3"), -+ } -+ }, - { } /* NULL entry stops DMI scanning */ - }; - ---- a/arch/x86/kernel/apic/probe_32.c -+++ b/arch/x86/kernel/apic/probe_32.c -@@ -267,7 +267,7 @@ generic_mps_oem_check(struct mpc_table * - if (!apic_probe[i]->mps_oem_check(mpc, oem, productid)) - continue; - -- if (!cmdline_apic) { -+ if (!cmdline_apic && apic == &apic_default) { - apic = apic_probe[i]; - printk(KERN_INFO "Switched to APIC driver `%s'.\n", - apic->name); -@@ -287,7 +287,7 @@ int __init default_acpi_madt_oem_check(c - if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) - continue; - -- if (!cmdline_apic) { -+ if (!cmdline_apic && apic == &apic_default) { - apic = apic_probe[i]; - printk(KERN_INFO "Switched to APIC driver `%s'.\n", - apic->name); diff --git a/patches.arch/x86-hpet-pre-read b/patches.arch/x86-hpet-pre-read deleted file mode 100644 index 16b70bc..0000000 --- a/patches.arch/x86-hpet-pre-read +++ /dev/null @@ -1,26 +0,0 @@ -From: Takashi Iwai -Subject: x86: workaround for mccreary HPET read problem -Patch-mainline: not yet -References: bnc#433746 - -On mccreacy platform, the read of HPET CMP register seems not updated -immediately after the write and returns the previous value instead. -A workaround is to read the register twice. 
- -Signed-off-by: Takashi Iwai - ---- ---- - arch/x86/kernel/hpet.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/x86/kernel/hpet.c -+++ b/arch/x86/kernel/hpet.c -@@ -386,6 +386,7 @@ static int hpet_next_event(unsigned long - cnt += (u32) delta; - hpet_writel(cnt, HPET_Tn_CMP(timer)); - -+ hpet_readl(HPET_Tn_CMP(timer)); /* pre-read for bnc#433746 */ - /* - * We need to read back the CMP register on certain HPET - * implementations (ATI chipsets) which seem to delay the diff --git a/patches.arch/x86-mcp51-no-dac b/patches.arch/x86-mcp51-no-dac deleted file mode 100644 index 4a85e19..0000000 --- a/patches.arch/x86-mcp51-no-dac +++ /dev/null @@ -1,38 +0,0 @@ -From: Tejun Heo -Subject: x86: disallow DAC for MCP51 PCI bridge -References: bnc#463829 -Patch-mainline: not yet - -MCP51 corrupts DAC transfers. Disallow it. Reported by pgnet on -bnc#463829. - - https://bugzilla.novell.com/show_bug.cgi?id=463829 - -Signed-off-by: Tejun Heo -Reported-by: pgnet -Signed-off-by: Tejun Heo ---- - arch/x86/kernel/pci-dma.c | 14 ++++++++++++++ - 1 file changed, 14 insertions(+) - ---- a/arch/x86/kernel/pci-dma.c -+++ b/arch/x86/kernel/pci-dma.c -@@ -322,4 +322,18 @@ static __devinit void via_no_dac(struct - } - } - DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); -+ -+/* -+ * MCP51 PCI bridge corrupts data for DAC. Disable it. Reported in -+ * bnc#463829. -+ */ -+static __devinit void mcp51_no_dac(struct pci_dev *dev) -+{ -+ if (forbid_dac == 0) { -+ printk(KERN_INFO -+ "PCI: MCP51 PCI bridge detected. 
Disabling DAC.\n"); -+ forbid_dac = 1; -+ } -+} -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x026f, mcp51_no_dac); - #endif diff --git a/patches.arch/x86_64-hpet-64bit-timer.patch b/patches.arch/x86_64-hpet-64bit-timer.patch deleted file mode 100644 index d7b889c..0000000 --- a/patches.arch/x86_64-hpet-64bit-timer.patch +++ /dev/null @@ -1,223 +0,0 @@ -From: Jiri Bohac -Subject: allow 64-bit mode for HPET Timer0 -References: bnc#456700 - -The kernel uses the HPET timers in 32-bit mode for clock-events. -While 32 bits, with a wrap-around time of >4 minutes, is probably -good enough for the clock-event purposes, on some chipsets this -has a negative side-effect on the HPET main counter. - -Unlike the original HPET specification 1.0 from 2004, which does not -mention any side-effects of setting TN_32MODE_CNF on the -individual timers, the ICH9 documentation, for example, says: - - NOTE: When this bit is set to ‘1’, the hardware counter will - do a 32-bit operation on comparator match and rollovers, thus - the upper 32-bit of the Timer 0 Comparator Value register is - ignored. The upper 32-bit of the main counter is not involved - in any rollover from lower 32-bit of the main counter and - becomes all zeros. - -(see http://www.intel.com/assets/pdf/datasheet/316972.pdf, page -819, section 21.1.5, Bit 8). I've seen this behaviour also on -ICH8. I have no idea what other chipsets are affected. But I have -seen AMD chipsets that Do The Right Thing. - -This means, that when the kernel configures the Timer 0 to 32-bit -mode, on these chipsets it also cripples the 64-bit main counter -to 32 bits. - -The HPET may be mmapped in userspace and the main counter -accessed directly by applications, expecting a 64-bit main -counter. - -This patch allows the Timer0 to be configured in 64-bit mode -on x86_64 when a hpet64 command-line option is specified. 
- -Updated-by: Jeff Mahoney -Signed-off-by: Jiri Bohac - ---- - Documentation/kernel-parameters.txt | 2 - arch/x86/kernel/hpet.c | 88 ++++++++++++++++++++++++++++++++---- - 2 files changed, 81 insertions(+), 9 deletions(-) - ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -497,6 +497,8 @@ and is between 256 and 4096 characters. - Range: 0 - 8192 - Default: 64 - -+ hpet64 [X86-64,HPET] enable 64-bit mode of the HPET timer (bnc#456700) -+ - com20020= [HW,NET] ARCnet - COM20020 chipset - Format: - [,[,[,[,[,]]]]] ---- a/arch/x86/kernel/hpet.c -+++ b/arch/x86/kernel/hpet.c -@@ -37,6 +37,7 @@ unsigned long hpet_address; - static unsigned long hpet_num_timers; - #endif - static void __iomem *hpet_virt_address; -+static int hpet_legacy_use_64_bits; - - struct hpet_dev { - struct clock_event_device evt; -@@ -59,6 +60,33 @@ static inline void hpet_writel(unsigned - - #ifdef CONFIG_X86_64 - #include -+static inline unsigned long hpet_read_value(unsigned long a) -+{ -+ if (hpet_legacy_use_64_bits) -+ return readq(hpet_virt_address + a); -+ else -+ return readl(hpet_virt_address + a); -+} -+ -+static void hpet_write_value(unsigned long d, unsigned long a) -+{ -+ if (hpet_legacy_use_64_bits) -+ writeq(d, hpet_virt_address + a); -+ else -+ writel(d, hpet_virt_address + a); -+} -+ -+#else -+ -+static inline unsigned long hpet_read_value(unsigned long a) -+{ -+ return readl(hpet_virt_address + a); -+} -+ -+static void hpet_write_value(unsigned long d, unsigned long a) -+{ -+ writel(d, hpet_virt_address + a); -+} - #endif - - static inline void hpet_set_mapping(void) -@@ -103,6 +131,17 @@ static int __init disable_hpet(char *str - } - __setup("nohpet", disable_hpet); - -+#ifdef CONFIG_X86_64 -+static int hpet64 = 0; -+static int __init hpet64_setup(char *str) -+{ -+ hpet64 = 1; -+ return 1; -+} -+__setup("hpet64", hpet64_setup); -+#endif -+ -+ - static inline int is_hpet_capable(void) - { - return !boot_hpet_disable && hpet_address; -@@ 
-212,6 +251,7 @@ static void hpet_reserve_platform_timers - * Common hpet info - */ - static unsigned long hpet_period; -+static int hpet_legacy_use_64_bits; /* configure T0 in 64-bit mode? */ - - static void hpet_legacy_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt); -@@ -278,10 +318,38 @@ static void hpet_enable_legacy_int(void) - hpet_legacy_int_enabled = 1; - } - -+static int timer0_use_64_bits(void) -+{ -+#ifndef CONFIG_X86_64 -+ /* using the HPET in 64-bit mode without atomic 64-bit -+ * accesses is too inefficient -+ */ -+ return 0; -+#else -+ -+ if (unlikely(hpet64)) { -+ u32 id, t0_cfg; -+ id = hpet_readl(HPET_ID); -+ t0_cfg = hpet_readl(HPET_Tn_CFG(0)); -+ -+ if ((id & HPET_ID_64BIT) && (t0_cfg & HPET_TN_64BIT_CAP)) { -+ printk(KERN_DEBUG "hpet timer0 configured in 64-bit mode\n"); -+ return 1; -+ } -+ else { -+ printk(KERN_DEBUG "hpet timer0 does not support 64-bit mode\n"); -+ return 0; -+ } -+ } -+ else return 0; -+#endif -+} -+ - static void hpet_legacy_clockevent_register(void) - { - /* Start HPET legacy interrupts */ - hpet_enable_legacy_int(); -+ hpet_legacy_use_64_bits = timer0_use_64_bits(); - - /* - * The mult factor is defined as (include/linux/clockchips.h) -@@ -328,9 +396,10 @@ static void hpet_set_mode(enum clock_eve - /* Make sure we use edge triggered interrupts */ - cfg &= ~HPET_TN_LEVEL; - cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | -- HPET_TN_SETVAL | HPET_TN_32BIT; -+ HPET_TN_SETVAL | -+ (hpet_legacy_use_64_bits ? 
0 : HPET_TN_32BIT); - hpet_writel(cfg, HPET_Tn_CFG(timer)); -- hpet_writel(cmp, HPET_Tn_CMP(timer)); -+ hpet_write_value(cmp, HPET_Tn_CMP(timer)); - udelay(1); - /* - * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL -@@ -339,7 +408,7 @@ static void hpet_set_mode(enum clock_eve - * (See AMD-8111 HyperTransport I/O Hub Data Sheet, - * Publication # 24674) - */ -- hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); -+ hpet_write_value((unsigned long) delta, HPET_Tn_CMP(timer)); - hpet_start_counter(); - hpet_print_config(); - break; -@@ -347,7 +416,8 @@ static void hpet_set_mode(enum clock_eve - case CLOCK_EVT_MODE_ONESHOT: - cfg = hpet_readl(HPET_Tn_CFG(timer)); - cfg &= ~HPET_TN_PERIODIC; -- cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; -+ cfg |= HPET_TN_ENABLE | -+ (hpet_legacy_use_64_bits ? 0 : HPET_TN_32BIT); - hpet_writel(cfg, HPET_Tn_CFG(timer)); - break; - -@@ -376,11 +446,11 @@ static void hpet_set_mode(enum clock_eve - static int hpet_next_event(unsigned long delta, - struct clock_event_device *evt, int timer) - { -- u32 cnt; -+ unsigned long cnt; - -- cnt = hpet_readl(HPET_COUNTER); -+ cnt = hpet_read_value(HPET_COUNTER); - cnt += (u32) delta; -- hpet_writel(cnt, HPET_Tn_CMP(timer)); -+ hpet_write_value(cnt, HPET_Tn_CMP(timer)); - - hpet_readl(HPET_Tn_CMP(timer)); /* pre-read for bnc#433746 */ - /* -@@ -388,9 +458,9 @@ static int hpet_next_event(unsigned long - * what we wrote hit the chip before we compare it to the - * counter. - */ -- WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt); -+ WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != (u32)cnt); - -- return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; -+ return (s32)((u32)hpet_readl(HPET_COUNTER) - (u32)cnt) >= 0 ? 
-ETIME : 0; - } - - static void hpet_legacy_set_mode(enum clock_event_mode mode, diff --git a/patches.arch/x86_64-unwind-annotations b/patches.arch/x86_64-unwind-annotations deleted file mode 100644 index 3f4fa68..0000000 --- a/patches.arch/x86_64-unwind-annotations +++ /dev/null @@ -1,439 +0,0 @@ -From: jbeulich@novell.com -Subject: fix unwind annotations -Patch-mainline: tbd -References: bnc#472783, bnc#588458 - ---- - arch/x86/kernel/entry_64.S | 131 +++++++++++++++++++++++---------------------- - arch/x86/kernel/head_64.S | 13 ++++ - lib/rwsem_64.S | 56 ++++++++++++++--------- - 3 files changed, 114 insertions(+), 84 deletions(-) - ---- a/arch/x86/kernel/entry_64.S -+++ b/arch/x86/kernel/entry_64.S -@@ -234,21 +234,21 @@ ENDPROC(native_usergs_sysret64) - /* - * initial frame state for interrupts (and exceptions without error code) - */ -- .macro EMPTY_FRAME start=1 offset=0 -- .if \start -+ .macro EMPTY_FRAME offset=0 - CFI_STARTPROC simple - CFI_SIGNAL_FRAME -- CFI_DEF_CFA rsp,8+\offset -- .else -- CFI_DEF_CFA_OFFSET 8+\offset -- .endif -+ CFI_DEF_CFA rsp,\offset - .endm - - /* - * initial frame state for interrupts (and exceptions without error code) - */ - .macro INTR_FRAME start=1 offset=0 -- EMPTY_FRAME \start, SS+8+\offset-RIP -+ .if \start -+ EMPTY_FRAME SS+8+\offset-RIP -+ .else -+ CFI_DEF_CFA_OFFSET SS+8+\offset-RIP -+ .endif - /*CFI_REL_OFFSET ss, SS+\offset-RIP*/ - CFI_REL_OFFSET rsp, RSP+\offset-RIP - /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/ -@@ -262,14 +262,15 @@ ENDPROC(native_usergs_sysret64) - */ - .macro XCPT_FRAME start=1 offset=0 - INTR_FRAME \start, RIP+\offset-ORIG_RAX -- /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/ - .endm - - /* - * frame that enables calling into C. 
- */ - .macro PARTIAL_FRAME start=1 offset=0 -+ .if \start >= 0 - XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET -+ .endif - CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET - CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET - CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET -@@ -285,7 +286,9 @@ ENDPROC(native_usergs_sysret64) - * frame that enables passing a complete pt_regs to a C function. - */ - .macro DEFAULT_FRAME start=1 offset=0 -+ .if \start >= -1 - PARTIAL_FRAME \start, R11+\offset-R15 -+ .endif - CFI_REL_OFFSET rbx, RBX+\offset - CFI_REL_OFFSET rbp, RBP+\offset - CFI_REL_OFFSET r12, R12+\offset -@@ -297,25 +300,27 @@ ENDPROC(native_usergs_sysret64) - /* save partial stack frame */ - .pushsection .kprobes.text, "ax" - ENTRY(save_args) -- XCPT_FRAME -+ XCPT_FRAME offset=ORIG_RAX-RBP+8 - cld - /* - * start from rbp in pt_regs and jump over - * return address. - */ - movq_cfi rdi, RDI+8-RBP -- movq_cfi rsi, RSI+8-RBP -- movq_cfi rdx, RDX+8-RBP -- movq_cfi rcx, RCX+8-RBP -+ movq %rsi, RSI+8-RBP(%rsp) -+ movq %rdx, RDX+8-RBP(%rsp) -+ movq %rcx, RCX+8-RBP(%rsp) - movq_cfi rax, RAX+8-RBP -- movq_cfi r8, R8+8-RBP -- movq_cfi r9, R9+8-RBP -- movq_cfi r10, R10+8-RBP -- movq_cfi r11, R11+8-RBP -+ movq %r8, R8+8-RBP(%rsp) -+ movq %r9, R9+8-RBP(%rsp) -+ movq %r10, R10+8-RBP(%rsp) -+ movq %r11, R11+8-RBP(%rsp) - - leaq -RBP+8(%rsp),%rdi /* arg1 for handler */ - movq_cfi rbp, 8 /* push %rbp */ - leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ -+ CFI_DEF_CFA_REGISTER rbp -+ CFI_ADJUST_CFA_OFFSET -8 - testl $3, CS(%rdi) - je 1f - SWAPGS -@@ -327,11 +332,10 @@ ENTRY(save_args) - */ - 1: incl PER_CPU_VAR(irq_count) - jne 2f -- popq_cfi %rax /* move return address... */ -+ popq %rax /* move return address... */ - mov PER_CPU_VAR(irq_stack_ptr),%rsp -- EMPTY_FRAME 0 -- pushq_cfi %rbp /* backlink for unwinder */ -- pushq_cfi %rax /* ... to the new stack */ -+ pushq %rbp /* backlink for unwinder */ -+ pushq %rax /* ... 
to the new stack */ - /* - * We entered an interrupt context - irqs are off: - */ -@@ -342,14 +346,14 @@ END(save_args) - .popsection - - ENTRY(save_rest) -- PARTIAL_FRAME 1 REST_SKIP+8 -+ CFI_STARTPROC - movq 5*8+16(%rsp), %r11 /* save return address */ -- movq_cfi rbx, RBX+16 -- movq_cfi rbp, RBP+16 -- movq_cfi r12, R12+16 -- movq_cfi r13, R13+16 -- movq_cfi r14, R14+16 -- movq_cfi r15, R15+16 -+ movq %rbx, RBX+16(%rsp) -+ movq %rbp, RBP+16(%rsp) -+ movq %r12, R12+16(%rsp) -+ movq %r13, R13+16(%rsp) -+ movq %r14, R14+16(%rsp) -+ movq %r15, R15+16(%rsp) - movq %r11, 8(%rsp) /* return address */ - FIXUP_TOP_OF_STACK %r11, 16 - ret -@@ -359,23 +363,23 @@ END(save_rest) - /* save complete stack frame */ - .pushsection .kprobes.text, "ax" - ENTRY(save_paranoid) -- XCPT_FRAME 1 RDI+8 -+ XCPT_FRAME offset=ORIG_RAX-R15+8 - cld -- movq_cfi rdi, RDI+8 -- movq_cfi rsi, RSI+8 -+ movq %rdi, RDI+8(%rsp) -+ movq %rsi, RSI+8(%rsp) - movq_cfi rdx, RDX+8 - movq_cfi rcx, RCX+8 - movq_cfi rax, RAX+8 -- movq_cfi r8, R8+8 -- movq_cfi r9, R9+8 -- movq_cfi r10, R10+8 -- movq_cfi r11, R11+8 -+ movq %r8, R8+8(%rsp) -+ movq %r9, R9+8(%rsp) -+ movq %r10, R10+8(%rsp) -+ movq %r11, R11+8(%rsp) - movq_cfi rbx, RBX+8 -- movq_cfi rbp, RBP+8 -- movq_cfi r12, R12+8 -- movq_cfi r13, R13+8 -- movq_cfi r14, R14+8 -- movq_cfi r15, R15+8 -+ movq %rbp, RBP+8(%rsp) -+ movq %r12, R12+8(%rsp) -+ movq %r13, R13+8(%rsp) -+ movq %r14, R14+8(%rsp) -+ movq %r15, R15+8(%rsp) - movl $1,%ebx - movl $MSR_GS_BASE,%ecx - rdmsr -@@ -677,7 +681,7 @@ ENTRY(\label) - subq $REST_SKIP, %rsp - CFI_ADJUST_CFA_OFFSET REST_SKIP - call save_rest -- DEFAULT_FRAME 0 8 /* offset 8: return address */ -+ DEFAULT_FRAME -2 8 /* offset 8: return address */ - leaq 8(%rsp), \arg /* pt_regs pointer */ - call \func - jmp ptregscall_common -@@ -794,7 +798,9 @@ END(interrupt) - subq $ORIG_RAX-RBP, %rsp - CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP - call save_args -- PARTIAL_FRAME 0 -+ PARTIAL_FRAME -1 ARGOFFSET-RBP -+ CFI_REL_OFFSET rbp, 0 -+ 
CFI_DEF_CFA_REGISTER rbp - call \func - .endm - -@@ -813,7 +819,6 @@ ret_from_intr: - TRACE_IRQS_OFF - decl PER_CPU_VAR(irq_count) - leaveq -- - CFI_RESTORE rbp - CFI_DEF_CFA_REGISTER rsp - CFI_ADJUST_CFA_OFFSET -8 -@@ -1021,7 +1026,7 @@ ENTRY(\sym) - subq $ORIG_RAX-R15, %rsp - CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call error_entry -- DEFAULT_FRAME 0 -+ DEFAULT_FRAME -1 - movq %rsp,%rdi /* pt_regs pointer */ - xorl %esi,%esi /* no error code */ - call \do_sym -@@ -1038,6 +1043,7 @@ ENTRY(\sym) - subq $ORIG_RAX-R15, %rsp - CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call save_paranoid -+ DEFAULT_FRAME -1 - TRACE_IRQS_OFF - movq %rsp,%rdi /* pt_regs pointer */ - xorl %esi,%esi /* no error code */ -@@ -1056,6 +1062,7 @@ ENTRY(\sym) - subq $ORIG_RAX-R15, %rsp - CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call save_paranoid -+ DEFAULT_FRAME -1 - TRACE_IRQS_OFF - movq %rsp,%rdi /* pt_regs pointer */ - xorl %esi,%esi /* no error code */ -@@ -1074,7 +1081,7 @@ ENTRY(\sym) - subq $ORIG_RAX-R15, %rsp - CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call error_entry -- DEFAULT_FRAME 0 -+ DEFAULT_FRAME -1 - movq %rsp,%rdi /* pt_regs pointer */ - movq ORIG_RAX(%rsp),%rsi /* get error code */ - movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ -@@ -1092,7 +1099,7 @@ ENTRY(\sym) - subq $ORIG_RAX-R15, %rsp - CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call save_paranoid -- DEFAULT_FRAME 0 -+ DEFAULT_FRAME -1 - TRACE_IRQS_OFF - movq %rsp,%rdi /* pt_regs pointer */ - movq ORIG_RAX(%rsp),%rsi /* get error code */ -@@ -1435,25 +1442,24 @@ END(paranoid_exit) - * returns in "no swapgs flag" in %ebx. 
- */ - ENTRY(error_entry) -- XCPT_FRAME -- CFI_ADJUST_CFA_OFFSET 15*8 -+ XCPT_FRAME offset=ORIG_RAX-R15+8 - /* oldrax contains error code */ - cld -- movq_cfi rdi, RDI+8 -- movq_cfi rsi, RSI+8 -- movq_cfi rdx, RDX+8 -- movq_cfi rcx, RCX+8 -- movq_cfi rax, RAX+8 -- movq_cfi r8, R8+8 -- movq_cfi r9, R9+8 -- movq_cfi r10, R10+8 -- movq_cfi r11, R11+8 -+ movq %rdi, RDI+8(%rsp) -+ movq %rsi, RSI+8(%rsp) -+ movq %rdx, RDX+8(%rsp) -+ movq %rcx, RCX+8(%rsp) -+ movq %rax, RAX+8(%rsp) -+ movq %r8, R8+8(%rsp) -+ movq %r9, R9+8(%rsp) -+ movq %r10, R10+8(%rsp) -+ movq %r11, R11+8(%rsp) - movq_cfi rbx, RBX+8 -- movq_cfi rbp, RBP+8 -- movq_cfi r12, R12+8 -- movq_cfi r13, R13+8 -- movq_cfi r14, R14+8 -- movq_cfi r15, R15+8 -+ movq %rbp, RBP+8(%rsp) -+ movq %r12, R12+8(%rsp) -+ movq %r13, R13+8(%rsp) -+ movq %r14, R14+8(%rsp) -+ movq %r15, R15+8(%rsp) - xorl %ebx,%ebx - testl $3,CS+8(%rsp) - je error_kernelspace -@@ -1471,6 +1477,7 @@ error_sti: - * compat mode. Check for these here too. - */ - error_kernelspace: -+ CFI_REL_OFFSET rcx, RCX+8 - incl %ebx - leaq irq_return(%rip),%rcx - cmpq %rcx,RIP+8(%rsp) -@@ -1518,7 +1523,7 @@ ENTRY(nmi) - subq $ORIG_RAX-R15, %rsp - CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call save_paranoid -- DEFAULT_FRAME 0 -+ DEFAULT_FRAME -1 - /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ - movq %rsp,%rdi - movq $-1,%rsi ---- a/arch/x86/kernel/head_64.S -+++ b/arch/x86/kernel/head_64.S -@@ -284,6 +284,8 @@ early_idt_handlers: - - ENTRY(early_idt_handler) - #ifdef CONFIG_EARLY_PRINTK -+#include -+#include - cmpl $2,early_recursion_flag(%rip) - jz 1f - incl early_recursion_flag(%rip) -@@ -299,6 +301,16 @@ ENTRY(early_idt_handler) - testl $0x27d00,%eax - je 0f - popq %r8 # get error code -+ -+ CFI_STARTPROC simple -+ CFI_SIGNAL_FRAME -+ CFI_DEF_CFA rsp, SS+8-RIP -+# CFI_REL_OFFSET ss, SS-RIP -+ CFI_REL_OFFSET rsp, RSP-RIP -+# CFI_REL_OFFSET rflags, EFLAGS-RIP -+# CFI_REL_OFFSET cs, CS-RIP -+ CFI_REL_OFFSET rip, RIP-RIP -+ - 0: movq 0(%rsp),%rcx # get ip - 
movq 8(%rsp),%rdx # get cs - xorl %eax,%eax -@@ -312,6 +324,7 @@ ENTRY(early_idt_handler) - movq 0(%rsp),%rsi # get rip again - call __print_symbol - #endif -+ CFI_ENDPROC - #endif /* EARLY_PRINTK */ - 1: hlt - jmp 1b ---- a/arch/x86/lib/rwsem_64.S -+++ b/arch/x86/lib/rwsem_64.S -@@ -23,43 +23,50 @@ - #include - - #define save_common_regs \ -- pushq %rdi; \ -- pushq %rsi; \ -- pushq %rcx; \ -- pushq %r8; \ -- pushq %r9; \ -- pushq %r10; \ -- pushq %r11 -+ pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \ -+ pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \ -+ pushq_cfi %rcx; CFI_REL_OFFSET rcx, 0; \ -+ pushq_cfi %r8; CFI_REL_OFFSET r8, 0; \ -+ pushq_cfi %r9; CFI_REL_OFFSET r9, 0; \ -+ pushq_cfi %r10; CFI_REL_OFFSET r10, 0; \ -+ pushq_cfi %r11; CFI_REL_OFFSET r11, 0 - - #define restore_common_regs \ -- popq %r11; \ -- popq %r10; \ -- popq %r9; \ -- popq %r8; \ -- popq %rcx; \ -- popq %rsi; \ -- popq %rdi -+ popq_cfi %r11; CFI_RESTORE r11; \ -+ popq_cfi %r10; CFI_RESTORE r10; \ -+ popq_cfi %r9; CFI_RESTORE r9; \ -+ popq_cfi %r8; CFI_RESTORE r8; \ -+ popq_cfi %rcx; CFI_RESTORE rcx; \ -+ popq_cfi %rsi; CFI_RESTORE rsi; \ -+ popq_cfi %rdi; CFI_RESTORE rdi - - /* Fix up special calling conventions */ - ENTRY(call_rwsem_down_read_failed) -+ CFI_STARTPROC - save_common_regs -- pushq %rdx -+ pushq_cfi %rdx -+ CFI_REL_OFFSET rdx, 0 - movq %rax,%rdi - call rwsem_down_read_failed -- popq %rdx -+ popq_cfi %rdx -+ CFI_RESTORE rdx - restore_common_regs - ret -- ENDPROC(call_rwsem_down_read_failed) -+ CFI_ENDPROC -+ENDPROC(call_rwsem_down_read_failed) - - ENTRY(call_rwsem_down_write_failed) -+ CFI_STARTPROC - save_common_regs - movq %rax,%rdi - call rwsem_down_write_failed - restore_common_regs - ret -- ENDPROC(call_rwsem_down_write_failed) -+ CFI_ENDPROC -+ENDPROC(call_rwsem_down_write_failed) - - ENTRY(call_rwsem_wake) -+ CFI_STARTPROC - decl %edx /* do nothing if still outstanding active readers */ - jnz 1f - save_common_regs -@@ -67,15 +74,20 @@ ENTRY(call_rwsem_wake) - call rwsem_wake - 
restore_common_regs - 1: ret -- ENDPROC(call_rwsem_wake) -+ CFI_ENDPROC -+ENDPROC(call_rwsem_wake) - - /* Fix up special calling conventions */ - ENTRY(call_rwsem_downgrade_wake) -+ CFI_STARTPROC - save_common_regs -- pushq %rdx -+ pushq_cfi %rdx -+ CFI_REL_OFFSET rdx, 0 - movq %rax,%rdi - call rwsem_downgrade_wake -- popq %rdx -+ popq_cfi %rdx -+ CFI_RESTORE rdx - restore_common_regs - ret -- ENDPROC(call_rwsem_downgrade_wake) -+ CFI_ENDPROC -+ENDPROC(call_rwsem_downgrade_wake) diff --git a/patches.arch/x86_agpgart-g33-stoeln-fix-2.patch b/patches.arch/x86_agpgart-g33-stoeln-fix-2.patch deleted file mode 100644 index 51ba3ec..0000000 --- a/patches.arch/x86_agpgart-g33-stoeln-fix-2.patch +++ /dev/null @@ -1,74 +0,0 @@ -From: Brandon Philips -Subject: Avoid oops on G33 in 1MB stolen Mem case -References: bnc#391261 -Patch-Mainline: soon (see bug for ref) - -This is similar to f443675affe3f16dd428e46f0f7fd3f4d703eeab which was -reverted because it broke with older XOrg driver. This patch only fixes -the 1MB stolen case since it causes an oops due to a calculation -problem. - -This will not work with older X drivers without the accompanying patch -but I think avoiding an oops and making it possible to work with an -up-to-date xorg driver is reasonable. - -Explanation of the oops: - -> static void intel_i830_init_gtt_entries(void) -... -> } else if (IS_G33) { -> /* G33's GTT size defined in gmch_ctrl */ -> switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { -> case G33_PGETBL_SIZE_1M: -> size = 1024; -> break; -... -> size += 4; - -size = 1028 - -Then since we have the BIOS setting 1MB for the device in the GMCH -control we get to here: - -> } else { -> switch (gmch_ctrl & I855_GMCH_GMS_MASK) { -> case I855_GMCH_GMS_STOLEN_1M: -> gtt_entries = MB(1) - KB(size); -> break; - -MB(1) = 1 * 1024 * 1024 -KB(1028) = 1028 * 1024 - -MB(1) - KB(1028) = -4096 - -> gtt_entries /= KB(4); -> intel_private.gtt_entries = gtt_entries; - -We end up with -1 in gtt_entries. 
- -This leads to intel_i915_configure reading/writing to areas outside of -mapped memory and the oops. - -Signed-off-by: Brandon Philips -Acked-by: Thomas Renninger - ---- - drivers/char/agp/intel-gtt.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/drivers/char/agp/intel-gtt.c -+++ b/drivers/char/agp/intel-gtt.c -@@ -648,6 +648,13 @@ static void intel_i830_init_gtt_entries( - } else { - switch (gmch_ctrl & I855_GMCH_GMS_MASK) { - case I855_GMCH_GMS_STOLEN_1M: -+ if (IS_G33) { -+ size = 0; -+ printk(KERN_WARNING PFX -+ "Warning: G33 chipset with 1MB" -+ " allocated. Older X.org Intel drivers" -+ " will not work.\n"); -+ } - gtt_entries = MB(1) - KB(size); - break; - case I855_GMCH_GMS_STOLEN_4M: diff --git a/patches.drivers/0001-drm-i915-Sanitize-the-output-registers-after-resume.patch b/patches.drivers/0001-drm-i915-Sanitize-the-output-registers-after-resume.patch deleted file mode 100644 index 2a39ff8..0000000 --- a/patches.drivers/0001-drm-i915-Sanitize-the-output-registers-after-resume.patch +++ /dev/null @@ -1,126 +0,0 @@ -From 3cd8c73bebbc64ab13173931012ed4a58717b446 Mon Sep 17 00:00:00 2001 -From: Chris Wilson -Date: Tue, 12 Apr 2011 18:06:51 +0100 -Subject: [PATCH 1/2] drm/i915: Sanitize the output registers after resume - -commit f6e5b1603b8bb7131b6778d0d4e2e5dda120a379 upstream. - -Similar to booting, we need to inspect the state left by the BIOS and -remove any conflicting bits before we take over. The example reported by -Seth Forshee is very similar to the bug we encountered with the state left -by grub2, that the crtc pipe<->planning mapping was reversed from our -expectations and so we failed to turn off the outputs when booting or, -in this case, resuming. This may be in fact the same bug, but triggered -at resume time. - -This patch rearranges the code we already have to clear up the -conflicting state upon init and calls it from reset (which is called -after we have lost control of the hardware, i.e. 
along both the boot and -resume paths) instead. - -Reported-and-tested-by: Seth Forshee -Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=35796 -Signed-off-by: Chris Wilson -Reviewed-by: Keith Packard -Signed-off-by: Keith Packard -Signed-off-by: Greg Kroah-Hartman ---- - drivers/gpu/drm/i915/intel_display.c | 68 ++++++++++++++++++---------------- - 1 files changed, 36 insertions(+), 32 deletions(-) - -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index cef853b..c7403e7 100644 ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -5764,36 +5764,6 @@ cleanup_work: - return ret; - } - --static void intel_crtc_reset(struct drm_crtc *crtc) --{ -- struct intel_crtc *intel_crtc = to_intel_crtc(crtc); -- -- /* Reset flags back to the 'unknown' status so that they -- * will be correctly set on the initial modeset. -- */ -- intel_crtc->dpms_mode = -1; --} -- --static struct drm_crtc_helper_funcs intel_helper_funcs = { -- .dpms = intel_crtc_dpms, -- .mode_fixup = intel_crtc_mode_fixup, -- .mode_set = intel_crtc_mode_set, -- .mode_set_base = intel_pipe_set_base, -- .mode_set_base_atomic = intel_pipe_set_base_atomic, -- .load_lut = intel_crtc_load_lut, -- .disable = intel_crtc_disable, --}; -- --static const struct drm_crtc_funcs intel_crtc_funcs = { -- .reset = intel_crtc_reset, -- .cursor_set = intel_crtc_cursor_set, -- .cursor_move = intel_crtc_cursor_move, -- .gamma_set = intel_crtc_gamma_set, -- .set_config = drm_crtc_helper_set_config, -- .destroy = intel_crtc_destroy, -- .page_flip = intel_crtc_page_flip, --}; -- - static void intel_sanitize_modesetting(struct drm_device *dev, - int pipe, int plane) - { -@@ -5830,6 +5800,42 @@ static void intel_sanitize_modesetting(struct drm_device *dev, - intel_disable_pipe(dev_priv, pipe); - } - -+static void intel_crtc_reset(struct drm_crtc *crtc) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 
-+ -+ /* Reset flags back to the 'unknown' status so that they -+ * will be correctly set on the initial modeset. -+ */ -+ intel_crtc->dpms_mode = -1; -+ -+ /* We need to fix up any BIOS configuration that conflicts with -+ * our expectations. -+ */ -+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); -+} -+ -+static struct drm_crtc_helper_funcs intel_helper_funcs = { -+ .dpms = intel_crtc_dpms, -+ .mode_fixup = intel_crtc_mode_fixup, -+ .mode_set = intel_crtc_mode_set, -+ .mode_set_base = intel_pipe_set_base, -+ .mode_set_base_atomic = intel_pipe_set_base_atomic, -+ .load_lut = intel_crtc_load_lut, -+ .disable = intel_crtc_disable, -+}; -+ -+static const struct drm_crtc_funcs intel_crtc_funcs = { -+ .reset = intel_crtc_reset, -+ .cursor_set = intel_crtc_cursor_set, -+ .cursor_move = intel_crtc_cursor_move, -+ .gamma_set = intel_crtc_gamma_set, -+ .set_config = drm_crtc_helper_set_config, -+ .destroy = intel_crtc_destroy, -+ .page_flip = intel_crtc_page_flip, -+}; -+ - static void intel_crtc_init(struct drm_device *dev, int pipe) - { - drm_i915_private_t *dev_priv = dev->dev_private; -@@ -5879,8 +5885,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) - - setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, - (unsigned long)intel_crtc); -- -- intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); - } - - int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, --- -1.7.6.5 - diff --git a/patches.drivers/0001-drm-i915-Use-ACPI-OpRegion-to-determine-lid-status.patch b/patches.drivers/0001-drm-i915-Use-ACPI-OpRegion-to-determine-lid-status.patch deleted file mode 100644 index 2089369..0000000 --- a/patches.drivers/0001-drm-i915-Use-ACPI-OpRegion-to-determine-lid-status.patch +++ /dev/null @@ -1,64 +0,0 @@ -From 38c1a19fb78da8c2a617b1d8a3fcafb691c1409f Mon Sep 17 00:00:00 2001 -From: Chris Wilson -Date: Sun, 16 Jan 2011 19:37:30 +0000 -Subject: [PATCH 1/3] drm/i915: Use ACPI OpRegion to determine lid 
status - -Admittedly, trusting ACPI or the BIOS at all to be correct is littered -with numerous examples where it is wrong. Maybe, just maybe, we will -have better luck using the ACPI OpRegion lid status... - -Signed-off-by: Chris Wilson ---- - drivers/gpu/drm/i915/i915_drv.h | 1 + - drivers/gpu/drm/i915/intel_lvds.c | 7 +++++++ - drivers/gpu/drm/i915/intel_opregion.c | 2 ++ - 3 files changed, 10 insertions(+), 0 deletions(-) - -diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h -index 456f404..a299cc6 100644 ---- a/drivers/gpu/drm/i915/i915_drv.h -+++ b/drivers/gpu/drm/i915/i915_drv.h -@@ -111,6 +111,7 @@ struct intel_opregion { - struct opregion_swsci *swsci; - struct opregion_asle *asle; - void *vbt; -+ u32 __iomem *lid_state; - }; - #define OPREGION_SIZE (8*1024) - -diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c -index bcdba7b..aa29228 100644 ---- a/drivers/gpu/drm/i915/intel_lvds.c -+++ b/drivers/gpu/drm/i915/intel_lvds.c -@@ -472,8 +472,15 @@ static enum drm_connector_status - intel_lvds_detect(struct drm_connector *connector, bool force) - { - struct drm_device *dev = connector->dev; -+ struct drm_i915_private *dev_priv = dev->dev_private; - enum drm_connector_status status = connector_status_connected; - -+ /* Assume that the BIOS does not lie through the OpRegion... */ -+ if (dev_priv->opregion.lid_state) -+ return ioread32(dev_priv->opregion.lid_state) & 0x1 ? -+ connector_status_connected : -+ connector_status_disconnected; -+ - /* ACPI lid methods were generally unreliable in this generation, so - * don't even bother. 
- */ -diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c -index 64fd644..9efccb9 100644 ---- a/drivers/gpu/drm/i915/intel_opregion.c -+++ b/drivers/gpu/drm/i915/intel_opregion.c -@@ -489,6 +489,8 @@ int intel_opregion_setup(struct drm_device *dev) - opregion->header = base; - opregion->vbt = base + OPREGION_VBT_OFFSET; - -+ opregion->lid_state = base + 0x01ac; -+ - mboxes = opregion->header->mboxes; - if (mboxes & MBOX_ACPI) { - DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); --- -1.7.6.5 - diff --git a/patches.drivers/0002-drm-i915-don-t-enable-plane-pipe-and-PLL-prematurely.patch b/patches.drivers/0002-drm-i915-don-t-enable-plane-pipe-and-PLL-prematurely.patch deleted file mode 100644 index 1f09f95..0000000 --- a/patches.drivers/0002-drm-i915-don-t-enable-plane-pipe-and-PLL-prematurely.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 9e4eb0947431c5a6b55f442aee3eb505e5a334d5 Mon Sep 17 00:00:00 2001 -From: Jesse Barnes -Date: Tue, 4 Jan 2011 15:09:29 -0800 -Subject: [PATCH 2/3] drm/i915: don't enable plane, pipe and PLL prematurely - -On Ironlake+ we need to enable these in a specific order. - -Signed-off-by: Jesse Barnes -Signed-off-by: Chris Wilson ---- - drivers/gpu/drm/i915/intel_display.c | 8 +++++--- - 1 files changed, 5 insertions(+), 3 deletions(-) - -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index 49fb54f..711beca 100644 ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -4322,9 +4322,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - pipeconf &= ~PIPECONF_DOUBLE_WIDE; - } - -- dspcntr |= DISPLAY_PLANE_ENABLE; -- pipeconf |= PIPECONF_ENABLE; -- dpll |= DPLL_VCO_ENABLE; -+ if (!HAS_PCH_SPLIT(dev)) { -+ dspcntr |= DISPLAY_PLANE_ENABLE; -+ pipeconf |= PIPECONF_ENABLE; -+ dpll |= DPLL_VCO_ENABLE; -+ } - - DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 
'A' : 'B'); - drm_mode_debug_printmodeline(mode); --- -1.7.6.5 - diff --git a/patches.drivers/0003-drm-i915-add-pipe-plane-enable-disable-functions.patch b/patches.drivers/0003-drm-i915-add-pipe-plane-enable-disable-functions.patch deleted file mode 100644 index 9cc0428..0000000 --- a/patches.drivers/0003-drm-i915-add-pipe-plane-enable-disable-functions.patch +++ /dev/null @@ -1,429 +0,0 @@ -From 152d92c3e618d1c17c6a84c66aec00af227c3f0e Mon Sep 17 00:00:00 2001 -From: Jesse Barnes -Date: Tue, 4 Jan 2011 15:09:30 -0800 -Subject: [PATCH 3/3] drm/i915: add pipe/plane enable/disable functions - -Add plane enable/disable functions to prevent duplicated code and allow -us to easily check for plane enable/disable requirements (such as pipe -enable, plane status, pll status etc). - -Signed-off-by: Jesse Barnes -Signed-off-by: Chris Wilson ---- - drivers/gpu/drm/i915/i915_reg.h | 5 +- - drivers/gpu/drm/i915/intel_display.c | 308 +++++++++++++++++++++++----------- - 2 files changed, 216 insertions(+), 97 deletions(-) - -diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h -index 12c547a..b0f1290 100644 ---- a/drivers/gpu/drm/i915/i915_reg.h -+++ b/drivers/gpu/drm/i915/i915_reg.h -@@ -2537,9 +2537,10 @@ - #define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) - #define DISPPLANE_STEREO_ENABLE (1<<25) - #define DISPPLANE_STEREO_DISABLE 0 --#define DISPPLANE_SEL_PIPE_MASK (1<<24) -+#define DISPPLANE_SEL_PIPE_SHIFT 24 -+#define DISPPLANE_SEL_PIPE_MASK (3<> -+ DISPPLANE_SEL_PIPE_SHIFT; -+ WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, -+ "plane %d assertion failure, should be off on pipe %c but is still active\n", -+ i, pipe ? 'B' : 'A'); -+ } -+} -+ -+/** -+ * intel_enable_pipe - enable a pipe, assertiing requirements -+ * @dev_priv: i915 private structure -+ * @pipe: pipe to enable -+ * -+ * Enable @pipe, making sure that various hardware specific requirements -+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 
-+ * -+ * @pipe should be %PIPE_A or %PIPE_B. -+ * -+ * Will wait until the pipe is actually running (i.e. first vblank) before -+ * returning. -+ */ -+static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) -+{ -+ int reg; -+ u32 val; -+ -+ /* -+ * A pipe without a PLL won't actually be able to drive bits from -+ * a plane. On ILK+ the pipe PLLs are integrated, so we don't -+ * need the check. -+ */ -+ if (!HAS_PCH_SPLIT(dev_priv->dev)) -+ assert_pll_enabled(dev_priv, pipe); -+ -+ reg = PIPECONF(pipe); -+ val = I915_READ(reg); -+ val |= PIPECONF_ENABLE; -+ I915_WRITE(reg, val); -+ POSTING_READ(reg); -+ intel_wait_for_vblank(dev_priv->dev, pipe); -+} -+ -+/** -+ * intel_disable_pipe - disable a pipe, assertiing requirements -+ * @dev_priv: i915 private structure -+ * @pipe: pipe to disable -+ * -+ * Disable @pipe, making sure that various hardware specific requirements -+ * are met, if applicable, e.g. plane disabled, panel fitter off, etc. -+ * -+ * @pipe should be %PIPE_A or %PIPE_B. -+ * -+ * Will wait until the pipe has shut down before returning. -+ */ -+static void intel_disable_pipe(struct drm_i915_private *dev_priv, -+ enum pipe pipe) -+{ -+ int reg; -+ u32 val; -+ -+ /* -+ * Make sure planes won't keep trying to pump pixels to us, -+ * or we might hang the display. -+ */ -+ assert_planes_disabled(dev_priv, pipe); -+ -+ /* Don't disable pipe A or pipe A PLLs if needed */ -+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) -+ return; -+ -+ reg = PIPECONF(pipe); -+ val = I915_READ(reg); -+ val &= ~PIPECONF_ENABLE; -+ I915_WRITE(reg, val); -+ POSTING_READ(reg); -+ intel_wait_for_pipe_off(dev_priv->dev, pipe); -+} -+ -+/** -+ * intel_enable_plane - enable a display plane on a given pipe -+ * @dev_priv: i915 private structure -+ * @plane: plane to enable -+ * @pipe: pipe being fed -+ * -+ * Enable @plane on @pipe, making sure that @pipe is running first. 
-+ */ -+static void intel_enable_plane(struct drm_i915_private *dev_priv, -+ enum plane plane, enum pipe pipe) -+{ -+ int reg; -+ u32 val; -+ -+ /* If the pipe isn't enabled, we can't pump pixels and may hang */ -+ assert_pipe_enabled(dev_priv, pipe); -+ -+ reg = DSPCNTR(plane); -+ val = I915_READ(reg); -+ val |= DISPLAY_PLANE_ENABLE; -+ I915_WRITE(reg, val); -+ POSTING_READ(reg); -+ intel_wait_for_vblank(dev_priv->dev, pipe); -+} -+ -+/* -+ * Plane regs are double buffered, going from enabled->disabled needs a -+ * trigger in order to latch. The display address reg provides this. -+ */ -+static void intel_flush_display_plane(struct drm_i915_private *dev_priv, -+ enum plane plane) -+{ -+ u32 reg = DSPADDR(plane); -+ I915_WRITE(reg, I915_READ(reg)); -+} -+ -+/** -+ * intel_disable_plane - disable a display plane -+ * @dev_priv: i915 private structure -+ * @plane: plane to disable -+ * @pipe: pipe consuming the data -+ * -+ * Disable @plane; should be an independent operation. -+ */ -+static void intel_disable_plane(struct drm_i915_private *dev_priv, -+ enum plane plane, enum pipe pipe) -+{ -+ int reg; -+ u32 val; -+ -+ reg = DSPCNTR(plane); -+ val = I915_READ(reg); -+ val &= ~DISPLAY_PLANE_ENABLE; -+ I915_WRITE(reg, val); -+ POSTING_READ(reg); -+ intel_flush_display_plane(dev_priv, plane); -+ intel_wait_for_vblank(dev_priv->dev, pipe); -+} -+ - static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) - { - struct drm_device *dev = crtc->dev; -@@ -2003,14 +2200,6 @@ static void ironlake_fdi_enable(struct drm_crtc *crtc) - } - } - --static void intel_flush_display_plane(struct drm_device *dev, -- int plane) --{ -- struct drm_i915_private *dev_priv = dev->dev_private; -- u32 reg = DSPADDR(plane); -- I915_WRITE(reg, I915_READ(reg)); --} -- - /* - * When we disable a pipe, we need to clear any pending scanline wait events - * to avoid hanging the ring, which we assume we are waiting on. 
-@@ -2158,22 +2347,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) - dev_priv->pch_pf_size); - } - -- /* Enable CPU pipe */ -- reg = PIPECONF(pipe); -- temp = I915_READ(reg); -- if ((temp & PIPECONF_ENABLE) == 0) { -- I915_WRITE(reg, temp | PIPECONF_ENABLE); -- POSTING_READ(reg); -- intel_wait_for_vblank(dev, intel_crtc->pipe); -- } -- -- /* configure and enable CPU plane */ -- reg = DSPCNTR(plane); -- temp = I915_READ(reg); -- if ((temp & DISPLAY_PLANE_ENABLE) == 0) { -- I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); -- intel_flush_display_plane(dev, plane); -- } -+ intel_enable_pipe(dev_priv, pipe); -+ intel_enable_plane(dev_priv, plane, pipe); - - /* Skip the PCH stuff if possible */ - if (!is_pch_port) -@@ -2285,27 +2460,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) - drm_vblank_off(dev, pipe); - intel_crtc_update_cursor(crtc, false); - -- /* Disable display plane */ -- reg = DSPCNTR(plane); -- temp = I915_READ(reg); -- if (temp & DISPLAY_PLANE_ENABLE) { -- I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE); -- intel_flush_display_plane(dev, plane); -- } -+ intel_disable_plane(dev_priv, plane, pipe); - - if (dev_priv->cfb_plane == plane && - dev_priv->display.disable_fbc) - dev_priv->display.disable_fbc(dev); - -- /* disable cpu pipe, disable after all planes disabled */ -- reg = PIPECONF(pipe); -- temp = I915_READ(reg); -- if (temp & PIPECONF_ENABLE) { -- I915_WRITE(reg, temp & ~PIPECONF_ENABLE); -- POSTING_READ(reg); -- /* wait for cpu pipe off, pipe state */ -- intel_wait_for_pipe_off(dev, intel_crtc->pipe); -- } -+ intel_disable_pipe(dev_priv, pipe); - - /* Disable PF */ - I915_WRITE(pipe ? 
PFB_CTL_1 : PFA_CTL_1, 0); -@@ -2500,19 +2661,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) - udelay(150); - } - -- /* Enable the pipe */ -- reg = PIPECONF(pipe); -- temp = I915_READ(reg); -- if ((temp & PIPECONF_ENABLE) == 0) -- I915_WRITE(reg, temp | PIPECONF_ENABLE); -- -- /* Enable the plane */ -- reg = DSPCNTR(plane); -- temp = I915_READ(reg); -- if ((temp & DISPLAY_PLANE_ENABLE) == 0) { -- I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); -- intel_flush_display_plane(dev, plane); -- } -+ intel_enable_pipe(dev_priv, pipe); -+ intel_enable_plane(dev_priv, plane, pipe); - - intel_crtc_load_lut(crtc); - intel_update_fbc(dev); -@@ -2544,33 +2694,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) - dev_priv->display.disable_fbc) - dev_priv->display.disable_fbc(dev); - -- /* Disable display plane */ -- reg = DSPCNTR(plane); -- temp = I915_READ(reg); -- if (temp & DISPLAY_PLANE_ENABLE) { -- I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE); -- /* Flush the plane changes */ -- intel_flush_display_plane(dev, plane); -- -- /* Wait for vblank for the disable to take effect */ -- if (IS_GEN2(dev)) -- intel_wait_for_vblank(dev, pipe); -- } -+ intel_disable_plane(dev_priv, plane, pipe); - - /* Don't disable pipe A or pipe A PLLs if needed */ - if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) - goto done; - -- /* Next, disable display pipes */ -- reg = PIPECONF(pipe); -- temp = I915_READ(reg); -- if (temp & PIPECONF_ENABLE) { -- I915_WRITE(reg, temp & ~PIPECONF_ENABLE); -- -- /* Wait for the pipe to turn off */ -- POSTING_READ(reg); -- intel_wait_for_pipe_off(dev, pipe); -- } -+ intel_disable_pipe(dev_priv, pipe); - - reg = DPLL(pipe); - temp = I915_READ(reg); -@@ -4322,11 +4452,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - pipeconf &= ~PIPECONF_DOUBLE_WIDE; - } - -- if (!HAS_PCH_SPLIT(dev)) { -- dspcntr |= DISPLAY_PLANE_ENABLE; -- pipeconf |= PIPECONF_ENABLE; -+ if (!HAS_PCH_SPLIT(dev)) - dpll |= DPLL_VCO_ENABLE; -- } - - 
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); - drm_mode_debug_printmodeline(mode); -@@ -4535,6 +4662,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - - I915_WRITE(PIPECONF(pipe), pipeconf); - POSTING_READ(PIPECONF(pipe)); -+ if (!HAS_PCH_SPLIT(dev)) -+ intel_enable_pipe(dev_priv, pipe); - - intel_wait_for_vblank(dev, pipe); - -@@ -4545,6 +4674,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - } - - I915_WRITE(DSPCNTR(plane), dspcntr); -+ POSTING_READ(DSPCNTR(plane)); -+ if (!HAS_PCH_SPLIT(dev)) -+ intel_enable_plane(dev_priv, plane, pipe); - - ret = intel_pipe_set_base(crtc, x, y, old_fb); - -@@ -5694,22 +5826,8 @@ static void intel_sanitize_modesetting(struct drm_device *dev, - pipe = !pipe; - - /* Disable the plane and wait for it to stop reading from the pipe. */ -- I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); -- intel_flush_display_plane(dev, plane); -- -- if (IS_GEN2(dev)) -- intel_wait_for_vblank(dev, pipe); -- -- if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) -- return; -- -- /* Switch off the pipe. 
*/ -- reg = PIPECONF(pipe); -- val = I915_READ(reg); -- if (val & PIPECONF_ENABLE) { -- I915_WRITE(reg, val & ~PIPECONF_ENABLE); -- intel_wait_for_pipe_off(dev, pipe); -- } -+ intel_disable_plane(dev_priv, plane, pipe); -+ intel_disable_pipe(dev_priv, pipe); - } - - static void intel_crtc_init(struct drm_device *dev, int pipe) --- -1.7.6.5 - diff --git a/patches.drivers/alsa-asihpi-check-adapter-index-in-hpi_ioctl b/patches.drivers/alsa-asihpi-check-adapter-index-in-hpi_ioctl deleted file mode 100644 index 4a4c134..0000000 --- a/patches.drivers/alsa-asihpi-check-adapter-index-in-hpi_ioctl +++ /dev/null @@ -1,35 +0,0 @@ -From 4a122c10fbfe9020df469f0f669da129c5757671 Mon Sep 17 00:00:00 2001 -From: Dan Rosenberg -Date: Thu, 17 Mar 2011 18:32:24 -0400 -Subject: [PATCH] ALSA: sound/pci/asihpi: check adapter index in hpi_ioctl -Git-commit: 4a122c10fbfe9020df469f0f669da129c5757671 -Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git -Patch-mainline: 2.6.39-rc1 -References: bnc#680816 - -The user-supplied index into the adapters array needs to be checked, or -an out-of-bounds kernel pointer could be accessed and used, leading to -potentially exploitable memory corruption. 
- -Signed-off-by: Dan Rosenberg -Cc: -Signed-off-by: Takashi Iwai - ---- - sound/pci/asihpi/hpioctl.c | 5 +++++ - 1 file changed, 5 insertions(+) - ---- a/sound/pci/asihpi/hpioctl.c -+++ b/sound/pci/asihpi/hpioctl.c -@@ -155,6 +155,11 @@ - goto out; - } - -+ if (hm->h.adapter_index >= HPI_MAX_ADAPTERS) { -+ err = -EINVAL; -+ goto out; -+ } -+ - pa = &adapters[hm->h.adapter_index]; - hr->h.size = 0; - if (hm->h.object == HPI_OBJ_SUBSYSTEM) { diff --git a/patches.drivers/alsa-hda-0018-Fix-pin-config-of-Gigabyte-mobo b/patches.drivers/alsa-hda-0018-Fix-pin-config-of-Gigabyte-mobo deleted file mode 100644 index dbc74f9..0000000 --- a/patches.drivers/alsa-hda-0018-Fix-pin-config-of-Gigabyte-mobo +++ /dev/null @@ -1,96 +0,0 @@ -From c6b358748e19ce7e230b0926ac42696bc485a562 Mon Sep 17 00:00:00 2001 -From: Takashi Iwai -Date: Mon, 28 Mar 2011 12:05:31 +0200 -Subject: [PATCH] ALSA: hda - Fix pin-config of Gigabyte mobo -Git-commit: c6b358748e19ce7e230b0926ac42696bc485a562 -Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git -Patch-mainline: (to be) 2.6.39-rc2 -References: bnc#677256 - -Use pin-fix instead of the static quirk for Gigabyte mobos 1458:a002. 
- -Bugzilla: https://bugzilla.novell.com/show_bug.cgi?id=677256 -Signed-off-by: Takashi Iwai - ---- - sound/pci/hda/patch_realtek.c | 21 ++++++++++++++++++--- - 1 file changed, 18 insertions(+), 3 deletions(-) - ---- a/sound/pci/hda/patch_realtek.c -+++ b/sound/pci/hda/patch_realtek.c -@@ -9932,7 +9932,6 @@ static struct snd_pci_quirk alc882_cfg_t - SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD), - SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL), - SND_PCI_QUIRK(0x108e, 0x534d, NULL, ALC883_3ST_6ch), -- SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG), - - SND_PCI_QUIRK(0x1462, 0x0349, "MSI", ALC883_TARGA_2ch_DIG), - SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG), -@@ -10769,6 +10768,7 @@ enum { - PINFIX_LENOVO_Y530, - PINFIX_PB_M5210, - PINFIX_ACER_ASPIRE_7736, -+ PINFIX_GIGABYTE_880GM, - }; - - static const struct alc_fixup alc882_fixups[] = { -@@ -10800,6 +10800,13 @@ static const struct alc_fixup alc882_fix - .type = ALC_FIXUP_SKU, - .v.sku = ALC_FIXUP_SKU_IGNORE, - }, -+ [PINFIX_GIGABYTE_880GM] = { -+ .type = ALC_FIXUP_PINS, -+ .v.pins = (const struct alc_pincfg[]) { -+ { 0x14, 0x1114410 }, /* set as speaker */ -+ { } -+ } -+ }, - }; - - static struct snd_pci_quirk alc882_fixup_tbl[] = { -@@ -10807,6 +10814,7 @@ static struct snd_pci_quirk alc882_fixup - SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530), - SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), - SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", PINFIX_ACER_ASPIRE_7736), -+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", PINFIX_GIGABYTE_880GM), - {} - }; - -@@ -18851,8 +18859,6 @@ static struct snd_pci_quirk alc662_cfg_t - ALC662_3ST_6ch_DIG), - SND_PCI_QUIRK(0x1179, 0xff6e, "Toshiba NB20x", ALC662_AUTO), - SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10), -- SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L", -- ALC662_3ST_6ch_DIG), - SND_PCI_QUIRK(0x152d, 0x2304, "Quanta WH1", 
ALC663_ASUS_H13), - SND_PCI_QUIRK(0x1565, 0x820f, "Biostar TA780G M2+", ALC662_3ST_6ch_DIG), - SND_PCI_QUIRK(0x1631, 0xc10c, "PB RS65", ALC663_ASUS_M51VA), -@@ -19526,6 +19532,7 @@ enum { - ALC662_FIXUP_IDEAPAD, - ALC272_FIXUP_MARIO, - ALC662_FIXUP_CZC_P10T, -+ ALC662_FIXUP_GIGABYTE, - }; - - static const struct alc_fixup alc662_fixups[] = { -@@ -19554,12 +19561,20 @@ static const struct alc_fixup alc662_fix - {} - } - }, -+ [ALC662_FIXUP_GIGABYTE] = { -+ .type = ALC_FIXUP_PINS, -+ .v.pins = (const struct alc_pincfg[]) { -+ { 0x14, 0x1114410 }, /* set as speaker */ -+ { } -+ } -+ }, - }; - - static struct snd_pci_quirk alc662_fixup_tbl[] = { - SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), - SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), - SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), -+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", ALC662_FIXUP_GIGABYTE), - SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), - SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), - SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), diff --git a/patches.drivers/alsa-hda-0019-Increase-default-buffer-size b/patches.drivers/alsa-hda-0019-Increase-default-buffer-size deleted file mode 100644 index 4dd093c..0000000 --- a/patches.drivers/alsa-hda-0019-Increase-default-buffer-size +++ /dev/null @@ -1,22 +0,0 @@ -From: Takashi Iwai -Subject: ALSA: hda - Increase the default buffer size -Patch-mainline: Never -References: 682725 - -Signed-off-by: Takashi Iwai - ---- - sound/pci/hda/hda_intel.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/sound/pci/hda/hda_intel.c -+++ b/sound/pci/hda/hda_intel.c -@@ -2057,7 +2057,7 @@ - /* buffer pre-allocation */ - snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, - snd_dma_pci_data(chip->pci), -- 1024 * 64, 32 * 1024 * 1024); -+ 1024 * 1024, 32 * 1024 * 1024); - return 0; - } - diff --git 
a/patches.drivers/bnx2-entropy-source.patch b/patches.drivers/bnx2-entropy-source.patch deleted file mode 100644 index 02a86de..0000000 --- a/patches.drivers/bnx2-entropy-source.patch +++ /dev/null @@ -1,40 +0,0 @@ -From: Brandon Philips -Subject: [PATCH] bnx2: entropy source -Patch-mainline: never -References: FATE#307517 - -Current disk-less systems have no entropy source whatsoever. Therefore, the -network drivers tg3, bnx2, e1000, e1000e, igb and ixgbe should be enabled to -feed entropy to the kernel via the IRQF_SAMPLE_RANDOM flag when loaded. This -option shall not be enabled by default but implemented via a module option to -be activated by the administrator. - -Signed-off-by: Brandon Philips - ---- - drivers/net/bnx2.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/drivers/net/bnx2.c -+++ b/drivers/net/bnx2.c -@@ -84,6 +84,10 @@ MODULE_FIRMWARE(FW_MIPS_FILE_09); - MODULE_FIRMWARE(FW_RV2P_FILE_09); - MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax); - -+static int entropy = 0; -+module_param(entropy, int, 0); -+MODULE_PARM_DESC(entropy, "Allow bnx2 to populate the /dev/random entropy pool"); -+ - static int disable_msi = 0; - - module_param(disable_msi, int, 0); -@@ -6116,6 +6120,9 @@ bnx2_request_irq(struct bnx2 *bp) - else - flags = IRQF_SHARED; - -+ if (entropy) -+ flags |= IRQF_SAMPLE_RANDOM; -+ - for (i = 0; i < bp->irq_nvecs; i++) { - irq = &bp->irq_tbl[i]; - rc = request_irq(irq->vector, irq->handler, flags, irq->name, diff --git a/patches.drivers/disable-catas_reset-by-default-to-avoid-problems-with-eeh.patch b/patches.drivers/disable-catas_reset-by-default-to-avoid-problems-with-eeh.patch deleted file mode 100644 index dbf8ef8..0000000 --- a/patches.drivers/disable-catas_reset-by-default-to-avoid-problems-with-eeh.patch +++ /dev/null @@ -1,45 +0,0 @@ -From: Xiuling Ma -Subject: [PATCH] disable catas_reset by default to avoid problems with EEH -References: bnc#456389 -Patch-mainline: not yet - -PPC machines with EEH and Mellanox ib/net cards with 
catastrophic error -recovery that encounter a PCI bus error can crash and become -unresponsive. - -Disable the card reset to avoid this. - -NOTE: an upstream fix will come later once IBM can review a couple of -approaches I suggested since this fix is brute force. This driver didn't have -this reset on error feature in SLES10 so it isn't a feature removal. - -Signed-off-by: Xiuling Ma -Acked-by: Brandon Philips - ---- - drivers/infiniband/hw/mthca/mthca_catas.c | 2 +- - drivers/net/mlx4/catas.c | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/infiniband/hw/mthca/mthca_catas.c -+++ b/drivers/infiniband/hw/mthca/mthca_catas.c -@@ -51,7 +51,7 @@ static LIST_HEAD(catas_list); - static struct workqueue_struct *catas_wq; - static struct work_struct catas_work; - --static int catas_reset_disable; -+static int catas_reset_disable = 1; - module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); - MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); - ---- a/drivers/net/mlx4/catas.c -+++ b/drivers/net/mlx4/catas.c -@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(catas_lock); - static LIST_HEAD(catas_list); - static struct work_struct catas_work; - --static int internal_err_reset = 1; -+static int internal_err_reset = 0; - module_param(internal_err_reset, int, 0644); - MODULE_PARM_DESC(internal_err_reset, - "Reset device on internal errors if non-zero (default 1)"); diff --git a/patches.drivers/e1000-entropy-source.patch b/patches.drivers/e1000-entropy-source.patch deleted file mode 100644 index d1e6c9b..0000000 --- a/patches.drivers/e1000-entropy-source.patch +++ /dev/null @@ -1,47 +0,0 @@ -From: Jiri Benc -Subject: Enable e1000 as entropy source (disabled by default) -References: FATE#307517 -Patch-mainline: never - -Based on the patch by Oracle: - -> e1000: Add IRQF_SAMPLE_RANDOM flag to e1000 as a module option -> -> This patch allows for the bnx2 to add to the /dev/random entropy pool -> via a module 
parameter, entropy. -> -> 0 - default for EL5 - do not populate the entropy pool -> 1 - optional - Uses IRQF_SAMPLE_RANDOM flag on request_irq calls to populate -> the /dev/random pool -> -> Signed-off-by: John Sobecki - -Signed-off-by: Brandon Philips - ---- - drivers/net/e1000/e1000_main.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/drivers/net/e1000/e1000_main.c -+++ b/drivers/net/e1000/e1000_main.c -@@ -213,6 +213,10 @@ static int debug = NETIF_MSG_DRV | NETIF - module_param(debug, int, 0); - MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -+static int entropy = 0; -+module_param(entropy, int, 0); -+MODULE_PARM_DESC(entropy, "Allow e1000 to populate the /dev/random entropy pool"); -+ - /** - * e1000_get_hw_dev - return device - * used by hardware layer to print debugging information -@@ -272,6 +276,9 @@ static int e1000_request_irq(struct e100 - int irq_flags = IRQF_SHARED; - int err; - -+ if (entropy) -+ irq_flags |= IRQF_SAMPLE_RANDOM; -+ - err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, - netdev); - if (err) { diff --git a/patches.drivers/e1000e-entropy-source.patch b/patches.drivers/e1000e-entropy-source.patch deleted file mode 100644 index 07f7542..0000000 --- a/patches.drivers/e1000e-entropy-source.patch +++ /dev/null @@ -1,86 +0,0 @@ -From: Jiri Benc -Subject: Enable e1000e as entropy source (disabled by default) -References: FATE#307517 -Patch-mainline: never - -Current disk-less systems have no entropy source whatsoever. Therefore, the -network drivers tg3, bnx2, e1000, e1000e, igb and ixgbe should be enabled to -feed entropy to the kernel via the IRQF_SAMPLE_RANDOM flag when loaded. This -option shall not be enabled by default but implemented via a module option to -be activated by the administrator. 
- -Signed-off-by: Brandon Philips - ---- - drivers/net/e1000e/e1000.h | 1 + - drivers/net/e1000e/netdev.c | 14 +++++++++----- - drivers/net/e1000e/param.c | 4 ++++ - 3 files changed, 14 insertions(+), 5 deletions(-) - ---- a/drivers/net/e1000e/e1000.h -+++ b/drivers/net/e1000e/e1000.h -@@ -467,6 +467,7 @@ extern void e1000e_reset_interrupt_capab - extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); - - extern unsigned int copybreak; -+extern int entropy; - - extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); - ---- a/drivers/net/e1000e/netdev.c -+++ b/drivers/net/e1000e/netdev.c -@@ -1847,8 +1847,8 @@ static int e1000_request_msix(struct e10 - else - memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); - err = request_irq(adapter->msix_entries[vector].vector, -- e1000_intr_msix_rx, 0, adapter->rx_ring->name, -- netdev); -+ e1000_intr_msix_rx, entropy ? IRQF_SAMPLE_RANDOM : 0, -+ adapter->rx_ring->name, netdev); - if (err) - goto out; - adapter->rx_ring->itr_register = E1000_EITR_82574(vector); -@@ -1889,6 +1889,7 @@ static int e1000_request_irq(struct e100 - { - struct net_device *netdev = adapter->netdev; - int err; -+ int irq_flags = 0; - - if (adapter->msix_entries) { - err = e1000_request_msix(adapter); -@@ -1900,7 +1901,8 @@ static int e1000_request_irq(struct e100 - e1000e_set_interrupt_capability(adapter); - } - if (adapter->flags & FLAG_MSI_ENABLED) { -- err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, -+ err = request_irq(adapter->pdev->irq, e1000_intr_msi, -+ entropy ? 
IRQF_SAMPLE_RANDOM : 0, - netdev->name, netdev); - if (!err) - return err; -@@ -1910,8 +1912,10 @@ static int e1000_request_irq(struct e100 - adapter->int_mode = E1000E_INT_MODE_LEGACY; - } - -- err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, -- netdev->name, netdev); -+ if (entropy) -+ irq_flags |= IRQF_SAMPLE_RANDOM; -+ err = request_irq(adapter->pdev->irq, e1000_intr, -+ irq_flags | IRQF_SHARED, netdev->name, netdev); - if (err) - e_err("Unable to allocate interrupt, Error: %d\n", err); - ---- a/drivers/net/e1000e/param.c -+++ b/drivers/net/e1000e/param.c -@@ -31,6 +31,10 @@ - - #include "e1000.h" - -+int entropy = 0; -+module_param(entropy, int, 0); -+MODULE_PARM_DESC(entropy, "Allow e1000e to populate the /dev/random entropy pool"); -+ - /* - * This is the only thing that needs to be changed to adjust the - * maximum number of ports that the driver can manage. diff --git a/patches.drivers/ehea-modinfo.patch b/patches.drivers/ehea-modinfo.patch deleted file mode 100644 index 560f473..0000000 --- a/patches.drivers/ehea-modinfo.patch +++ /dev/null @@ -1,43 +0,0 @@ -Subject: add alias entry for portN properties -From: olh@suse.de -References: 435215 - LTC48564 -Patch-mainline: not yet - -Use separate table for alias entries in the ehea module, -otherwise the probe() function will operate on the separate ports -instead of the lhea-"root" entry of the device-tree - ---- - drivers/net/ehea/ehea_main.c | 14 +++++++++++++- - 1 file changed, 13 insertions(+), 1 deletion(-) - ---- a/drivers/net/ehea/ehea_main.c -+++ b/drivers/net/ehea/ehea_main.c -@@ -112,6 +112,19 @@ static int __devinit ehea_probe_adapter( - - static int __devexit ehea_remove(struct platform_device *dev); - -+static struct of_device_id ehea_module_device_table[] = { -+ { -+ .name = "lhea", -+ .compatible = "IBM,lhea", -+ }, -+ { -+ .type = "network", -+ .compatible = "IBM,lhea-ethernet", -+ }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, ehea_module_device_table); -+ - static struct 
of_device_id ehea_device_table[] = { - { - .name = "lhea", -@@ -119,7 +132,6 @@ static struct of_device_id ehea_device_t - }, - {}, - }; --MODULE_DEVICE_TABLE(of, ehea_device_table); - - static struct of_platform_driver ehea_driver = { - .driver = { diff --git a/patches.drivers/elousb.patch b/patches.drivers/elousb.patch deleted file mode 100644 index 6d25c8b..0000000 --- a/patches.drivers/elousb.patch +++ /dev/null @@ -1,380 +0,0 @@ -From: Jiri Kosina -Subject: Elo USB touchscreen driver -Patch-mainline: will be submitted for 2.6.28 -References: FATE#304972 - -This is a driver for Elo USB touchscreen devices. - -Signed-off-by: Vojtech Pavlik -Acked-by: Jiri Kosina - ---- - drivers/hid/hid-core.c | 2 - drivers/hid/hid-ids.h | 2 - drivers/input/touchscreen/Kconfig | 12 + - drivers/input/touchscreen/Makefile | 1 - drivers/input/touchscreen/elousb.c | 305 +++++++++++++++++++++++++++++++++++++ - 5 files changed, 322 insertions(+) - ---- a/drivers/hid/hid-core.c -+++ b/drivers/hid/hid-core.c -@@ -1643,6 +1643,8 @@ static const struct hid_device_id hid_ig - { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_4000U) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_4500U) }, - { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, - { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, - { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, ---- a/drivers/hid/hid-ids.h -+++ b/drivers/hid/hid-ids.h -@@ -216,7 +216,9 @@ - #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 - - #define USB_VENDOR_ID_ELO 0x04E7 -+#define USB_DEVICE_ID_ELO_4000U 0x0009 - #define USB_DEVICE_ID_ELO_TS2700 0x0020 -+#define USB_DEVICE_ID_ELO_4500U 0x0030 - - #define USB_VENDOR_ID_EMS 0x2006 - #define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 
0x0118 ---- a/drivers/input/touchscreen/Kconfig -+++ b/drivers/input/touchscreen/Kconfig -@@ -214,6 +214,18 @@ config TOUCHSCREEN_ELO - To compile this driver as a module, choose M here: the - module will be called elo. - -+config TOUCHSCREEN_ELOUSB -+ tristate "Elo USB touchscreens" -+ select USB -+ help -+ Say Y here if you have an Elo USB touchscreen connected to -+ your system. -+ -+ If unsure, say N. -+ -+ To compile this driver as a module, choose M here: the -+ module will be called elousb. -+ - config TOUCHSCREEN_WACOM_W8001 - tristate "Wacom W8001 penabled serial touchscreen" - select SERIO ---- a/drivers/input/touchscreen/Makefile -+++ b/drivers/input/touchscreen/Makefile -@@ -22,6 +22,7 @@ obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += h - obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o - obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o - obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o -+obj-$(CONFIG_TOUCHSCREEN_ELOUSB) += elousb.o - obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o - obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o - obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o ---- /dev/null -+++ b/drivers/input/touchscreen/elousb.c -@@ -0,0 +1,305 @@ -+/* -+ * Copyright (c) 1999-2001 Vojtech Pavlik -+ * -+ * Elo USB touchscreen support -+ */ -+ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ * Should you need to contact me, the author, you can do so either by -+ * e-mail - mail your message to , or by paper mail: -+ * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * Version Information -+ */ -+#define DRIVER_VERSION "v1.1" -+#define DRIVER_AUTHOR "Vojtech Pavlik " -+#define DRIVER_DESC "Elo USB touchscreen driver" -+#define DRIVER_LICENSE "GPL" -+ -+MODULE_AUTHOR(DRIVER_AUTHOR); -+MODULE_DESCRIPTION(DRIVER_DESC); -+MODULE_LICENSE(DRIVER_LICENSE); -+ -+struct elousb { -+ char name[128]; -+ char phys[64]; -+ struct usb_device *usbdev; -+ struct input_dev *dev; -+ struct urb *irq; -+ -+ unsigned char *data; -+ dma_addr_t data_dma; -+}; -+ -+static void elousb_irq(struct urb *urb) -+{ -+ struct elousb *elo = urb->context; -+ unsigned char *data = elo->data; -+ struct input_dev *dev = elo->dev; -+ int status; -+ -+ switch (urb->status) { -+ case 0: /* success */ -+ break; -+ case -ECONNRESET: /* unlink */ -+ case -ENOENT: -+ case -ESHUTDOWN: -+ return; -+ /* -EPIPE: should clear the halt */ -+ default: /* error */ -+ goto resubmit; -+ } -+ -+ if (data[0] != 'T') /* Mandatory ELO packet marker */ -+ return; -+ -+ -+ input_report_abs(dev, ABS_X, ((u32)data[3] << 8) | data[2]); -+ input_report_abs(dev, ABS_Y, ((u32)data[5] << 8) | data[4]); -+ -+ input_report_abs(dev, ABS_PRESSURE, -+ (data[1] & 0x80) ? 
(((u32)data[7] << 8) | data[6]): 0); -+ -+ if (data[1] & 0x03) { -+ input_report_key(dev, BTN_TOUCH, 1); -+ input_sync(dev); -+ } -+ -+ if (data[1] & 0x04) -+ input_report_key(dev, BTN_TOUCH, 0); -+ -+ input_sync(dev); -+ -+resubmit: -+ status = usb_submit_urb (urb, GFP_ATOMIC); -+ if (status) -+ err ("can't resubmit intr, %s-%s/input0, status %d", -+ elo->usbdev->bus->bus_name, -+ elo->usbdev->devpath, status); -+} -+ -+static int elousb_open(struct input_dev *dev) -+{ -+ struct elousb *elo = input_get_drvdata(dev); -+ -+ elo->irq->dev = elo->usbdev; -+ if (usb_submit_urb(elo->irq, GFP_KERNEL)) -+ return -EIO; -+ -+ return 0; -+} -+ -+static void elousb_close(struct input_dev *dev) -+{ -+ struct elousb *elo = input_get_drvdata(dev); -+ -+ usb_kill_urb(elo->irq); -+} -+ -+static int elousb_probe(struct usb_interface *intf, const struct usb_device_id *id) -+{ -+ struct usb_device *dev = interface_to_usbdev(intf); -+ struct usb_host_interface *interface; -+ struct usb_endpoint_descriptor *endpoint; -+ struct hid_descriptor *hdesc; -+ struct elousb *elo; -+ struct input_dev *input_dev; -+ int pipe, i; -+ unsigned int rsize = 0; -+ int error = -ENOMEM; -+ char *rdesc; -+ -+ interface = intf->cur_altsetting; -+ -+ if (interface->desc.bNumEndpoints != 1) -+ return -ENODEV; -+ -+ endpoint = &interface->endpoint[0].desc; -+ if (!(endpoint->bEndpointAddress & USB_DIR_IN)) -+ return -ENODEV; -+ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT) -+ return -ENODEV; -+ -+ if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) && -+ (!interface->desc.bNumEndpoints || -+ usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) { -+ err("HID class descriptor not present"); -+ return -ENODEV; -+ } -+ -+ for (i = 0; i < hdesc->bNumDescriptors; i++) -+ if (hdesc->desc[i].bDescriptorType == HID_DT_REPORT) -+ rsize = le16_to_cpu(hdesc->desc[i].wDescriptorLength); -+ -+ if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) { -+ err("weird 
size of report descriptor (%u)", rsize); -+ return -ENODEV; -+ } -+ -+ -+ pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); -+ -+ elo = kzalloc(sizeof(struct elousb), GFP_KERNEL); -+ input_dev = input_allocate_device(); -+ if (!elo || !input_dev) -+ goto fail1; -+ -+ elo->data = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &elo->data_dma); -+ if (!elo->data) -+ goto fail1; -+ -+ elo->irq = usb_alloc_urb(0, GFP_KERNEL); -+ if (!elo->irq) -+ goto fail2; -+ -+ if (!(rdesc = kmalloc(rsize, GFP_KERNEL))) -+ goto fail3; -+ -+ elo->usbdev = dev; -+ elo->dev = input_dev; -+ -+ if ((error = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), -+ HID_REQ_SET_IDLE, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, -+ interface->desc.bInterfaceNumber, -+ NULL, 0, USB_CTRL_SET_TIMEOUT)) < 0) { -+ err("setting HID idle timeout failed, error %d", error); -+ error = -ENODEV; -+ goto fail4; -+ } -+ -+ if ((error = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), -+ USB_REQ_GET_DESCRIPTOR, USB_RECIP_INTERFACE | USB_DIR_IN, -+ HID_DT_REPORT << 8, interface->desc.bInterfaceNumber, -+ rdesc, rsize, USB_CTRL_GET_TIMEOUT)) < rsize) { -+ err("reading HID report descriptor failed, error %d", error); -+ error = -ENODEV; -+ goto fail4; -+ } -+ -+ if (dev->manufacturer) -+ strlcpy(elo->name, dev->manufacturer, sizeof(elo->name)); -+ -+ if (dev->product) { -+ if (dev->manufacturer) -+ strlcat(elo->name, " ", sizeof(elo->name)); -+ strlcat(elo->name, dev->product, sizeof(elo->name)); -+ } -+ -+ if (!strlen(elo->name)) -+ snprintf(elo->name, sizeof(elo->name), -+ "Elo touchscreen %04x:%04x", -+ le16_to_cpu(dev->descriptor.idVendor), -+ le16_to_cpu(dev->descriptor.idProduct)); -+ -+ usb_make_path(dev, elo->phys, sizeof(elo->phys)); -+ strlcat(elo->phys, "/input0", sizeof(elo->phys)); -+ -+ input_dev->name = elo->name; -+ input_dev->phys = elo->phys; -+ usb_to_input_id(dev, &input_dev->id); -+ input_dev->dev.parent = &intf->dev; -+ -+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); -+ set_bit(BTN_TOUCH, 
input_dev->keybit); -+ input_dev->absbit[0] = BIT(ABS_X) | BIT(ABS_Y); -+ set_bit(ABS_PRESSURE, input_dev->absbit); -+ -+ input_set_abs_params(input_dev, ABS_X, 0, 4000, 0, 0); -+ input_set_abs_params(input_dev, ABS_Y, 0, 3840, 0, 0); -+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 256, 0, 0); -+ -+ input_set_drvdata(input_dev, elo); -+ -+ input_dev->open = elousb_open; -+ input_dev->close = elousb_close; -+ -+ usb_fill_int_urb(elo->irq, dev, pipe, elo->data, 8, -+ elousb_irq, elo, endpoint->bInterval); -+ elo->irq->transfer_dma = elo->data_dma; -+ elo->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; -+ -+ input_register_device(elo->dev); -+ -+ usb_set_intfdata(intf, elo); -+ return 0; -+ -+fail4: -+ kfree(rdesc); -+fail3: -+ usb_free_urb(elo->irq); -+fail2: -+ usb_buffer_free(dev, 8, elo->data, elo->data_dma); -+fail1: -+ input_free_device(input_dev); -+ kfree(elo); -+ return -ENOMEM; -+} -+ -+static void elousb_disconnect(struct usb_interface *intf) -+{ -+ struct elousb *elo = usb_get_intfdata (intf); -+ -+ usb_set_intfdata(intf, NULL); -+ if (elo) { -+ usb_kill_urb(elo->irq); -+ input_unregister_device(elo->dev); -+ usb_free_urb(elo->irq); -+ usb_buffer_free(interface_to_usbdev(intf), 8, elo->data, elo->data_dma); -+ kfree(elo); -+ } -+} -+ -+static struct usb_device_id elousb_id_table [] = { -+ { USB_DEVICE(0x04e7, 0x0009) }, /* CarrolTouch 4000U */ -+ { USB_DEVICE(0x04e7, 0x0030) }, /* CarrolTouch 4500U */ -+ { } /* Terminating entry */ -+}; -+ -+MODULE_DEVICE_TABLE (usb, elousb_id_table); -+ -+static struct usb_driver elousb_driver = { -+ .name = "elousb", -+ .probe = elousb_probe, -+ .disconnect = elousb_disconnect, -+ .id_table = elousb_id_table, -+}; -+ -+static int __init elousb_init(void) -+{ -+ int retval = usb_register(&elousb_driver); -+ if (retval == 0) -+ printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC); -+ return retval; -+} -+ -+static void __exit elousb_exit(void) -+{ -+ usb_deregister(&elousb_driver); -+} -+ 
-+module_init(elousb_init); -+module_exit(elousb_exit); diff --git a/patches.drivers/igb-entropy-source.patch b/patches.drivers/igb-entropy-source.patch deleted file mode 100644 index 4758516..0000000 --- a/patches.drivers/igb-entropy-source.patch +++ /dev/null @@ -1,70 +0,0 @@ -From: Jiri Benc -Subject: Enable igb as entropy source (disabled by default) -References: FATE#307517 -Patch-mainline: never - -Current disk-less systems have no entropy source whatsoever. Therefore, the -network drivers tg3, bnx2, e1000, e1000e, igb and ixgbe should be enabled to -feed entropy to the kernel via the IRQF_SAMPLE_RANDOM flag when loaded. This -option shall not be enabled by default but implemented via a module option to -be activated by the administrator. - -Signed-off-by: Brandon Philips - ---- - drivers/net/igb/igb_main.c | 16 +++++++++++++--- - 1 file changed, 13 insertions(+), 3 deletions(-) - ---- a/drivers/net/igb/igb_main.c -+++ b/drivers/net/igb/igb_main.c -@@ -61,6 +61,10 @@ static const struct e1000_info *igb_info - [board_82575] = &e1000_82575_info, - }; - -+static int entropy = 0; -+module_param(entropy, int, 0); -+MODULE_PARM_DESC(entropy, "Allow igb to populate the /dev/random entropy pool"); -+ - static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, -@@ -897,7 +901,8 @@ static int igb_request_msix(struct igb_a - int i, err = 0, vector = 0; - - err = request_irq(adapter->msix_entries[vector].vector, -- igb_msix_other, 0, netdev->name, adapter); -+ igb_msix_other, entropy ? 
IRQF_SAMPLE_RANDOM : 0, -+ netdev->name, adapter); - if (err) - goto out; - vector++; -@@ -1194,6 +1199,10 @@ static int igb_request_irq(struct igb_ad - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - int err = 0; -+ int irq_flags = 0; -+ -+ if (entropy) -+ irq_flags = IRQF_SAMPLE_RANDOM; - - if (adapter->msix_entries) { - err = igb_request_msix(adapter); -@@ -1228,7 +1237,7 @@ static int igb_request_irq(struct igb_ad - } - - if (adapter->flags & IGB_FLAG_HAS_MSI) { -- err = request_irq(adapter->pdev->irq, igb_intr_msi, 0, -+ err = request_irq(adapter->pdev->irq, igb_intr_msi, irq_flags, - netdev->name, adapter); - if (!err) - goto request_done; -@@ -1238,7 +1247,8 @@ static int igb_request_irq(struct igb_ad - adapter->flags &= ~IGB_FLAG_HAS_MSI; - } - -- err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED, -+ irq_flags |= IRQF_SHARED; -+ err = request_irq(adapter->pdev->irq, igb_intr, irq_flags, - netdev->name, adapter); - - if (err) diff --git a/patches.drivers/input-Add-LED-support-to-Synaptics-device b/patches.drivers/input-Add-LED-support-to-Synaptics-device deleted file mode 100644 index 3f624ec..0000000 --- a/patches.drivers/input-Add-LED-support-to-Synaptics-device +++ /dev/null @@ -1,218 +0,0 @@ -From: Takashi Iwai -Subject: [PATCH 2/2] input: Add LED support to Synaptics device -Patch-mainline: Submitted -References: bnc#547370,bnc#582529,bnc#589014 - -The new Synaptics devices have an LED on the top-left corner. -This patch adds a new LED class device to control it. It's created -dynamically upon synaptics device probing. - -The LED is controlled via the command 0x0a with parameters 0x88 or 0x10. -This seems only on/off control although other value might be accepted. - -The detection of the LED isn't clear yet. It should have been the new -capability bits that indicate the presence, but on real machines, it -doesn't fit. 
So, for the time being, the driver checks the product id -in the ext capability bits and assumes that LED exists on the known -devices. - -Signed-off-by: Takashi Iwai - ---- - drivers/input/mouse/Kconfig | 9 +++ - drivers/input/mouse/synaptics.c | 111 ++++++++++++++++++++++++++++++++++++++++ - drivers/input/mouse/synaptics.h | 3 + - 3 files changed, 123 insertions(+) - ---- a/drivers/input/mouse/Kconfig -+++ b/drivers/input/mouse/Kconfig -@@ -19,6 +19,7 @@ config MOUSE_PS2 - select SERIO_LIBPS2 - select SERIO_I8042 if X86 - select SERIO_GSCPS2 if GSC -+ select LEDS_CLASS if MOUSE_PS2_SYNAPICS_LED - help - Say Y here if you have a PS/2 mouse connected to your system. This - includes the standard 2 or 3-button PS/2 mouse, as well as PS/2 -@@ -67,6 +68,14 @@ config MOUSE_PS2_SYNAPTICS - - If unsure, say Y. - -+config MOUSE_PS2_SYNAPTICS_LED -+ bool "Support embedded LED on Synaptics devices" -+ depends on MOUSE_PS2_SYNAPTICS -+ select NEW_LEDS -+ help -+ Say Y here if you have a Synaptics device with an embedded LED. -+ This will enable LED class driver to control the LED device. -+ - config MOUSE_PS2_LIFEBOOK - bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT - default y ---- a/drivers/input/mouse/synaptics.c -+++ b/drivers/input/mouse/synaptics.c -@@ -28,6 +28,7 @@ - #include - #include - #include -+#include - #include - #include "psmouse.h" - #include "synaptics.h" -@@ -353,6 +354,110 @@ static void synaptics_pt_create(struct p - serio_register_port(serio); - } - -+#ifdef CONFIG_MOUSE_PS2_SYNAPTICS_LED -+/* -+ * LED handling: -+ * Some Synaptics devices have an embeded LED at the top-left corner. -+ */ -+ -+struct synaptics_led { -+ struct psmouse *psmouse; -+ struct work_struct work; -+ struct led_classdev cdev; -+}; -+ -+static void synaptics_set_led(struct psmouse *psmouse, int on) -+{ -+ int i; -+ unsigned char cmd = on ? 
0x88 : 0x10; -+ -+ ps2_begin_command(&psmouse->ps2dev); -+ if (__ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11)) -+ goto out; -+ for (i = 6; i >= 0; i -= 2) { -+ unsigned char d = (cmd >> i) & 3; -+ if (__ps2_command(&psmouse->ps2dev, &d, PSMOUSE_CMD_SETRES)) -+ goto out; -+ } -+ cmd = 0x0a; -+ __ps2_command(&psmouse->ps2dev, &cmd, PSMOUSE_CMD_SETRATE); -+ out: -+ ps2_end_command(&psmouse->ps2dev); -+} -+ -+static void synaptics_led_work(struct work_struct *work) -+{ -+ struct synaptics_led *led; -+ -+ led = container_of(work, struct synaptics_led, work); -+ synaptics_set_led(led->psmouse, led->cdev.brightness); -+} -+ -+static void synaptics_led_cdev_brightness_set(struct led_classdev *cdev, -+ enum led_brightness value) -+{ -+ struct synaptics_led *led; -+ -+ led = container_of(cdev, struct synaptics_led, cdev); -+ schedule_work(&led->work); -+} -+ -+static void synaptics_sync_led(struct psmouse *psmouse) -+{ -+ struct synaptics_data *priv = psmouse->private; -+ -+ if (priv->led) -+ synaptics_set_led(psmouse, priv->led->cdev.brightness); -+} -+ -+static int synaptics_init_led(struct psmouse *psmouse) -+{ -+ struct synaptics_data *priv = psmouse->private; -+ struct synaptics_led *led; -+ int err; -+ -+ /* FIXME: LED is supposedly detectable in cap0c[1] 0x20, but it seems -+ * not working on real machines. -+ * So we check the product id to be sure. 
-+ */ -+ if (!priv->ext_cap_0c || SYN_CAP_PRODUCT_ID(priv->ext_cap) != 0xe4) -+ return 0; -+ -+ printk(KERN_INFO "synaptics: support LED control\n"); -+ led = kzalloc(sizeof(struct synaptics_led), GFP_KERNEL); -+ if (!led) -+ return -ENOMEM; -+ led->psmouse = psmouse; -+ INIT_WORK(&led->work, synaptics_led_work); -+ led->cdev.name = "psmouse::synaptics"; -+ led->cdev.brightness_set = synaptics_led_cdev_brightness_set; -+ led->cdev.flags = LED_CORE_SUSPENDRESUME; -+ err = led_classdev_register(NULL, &led->cdev); -+ if (err < 0) { -+ kfree(led); -+ return err; -+ } -+ priv->led = led; -+ return 0; -+} -+ -+static void synaptics_free_led(struct psmouse *psmouse) -+{ -+ struct synaptics_data *priv = psmouse->private; -+ -+ if (!priv->led) -+ return; -+ cancel_work_sync(&priv->led->work); -+ synaptics_set_led(psmouse, 0); -+ led_classdev_unregister(&priv->led->cdev); -+ kfree(priv->led); -+} -+#else -+#define synaptics_init_led(ps) 0 -+#define synaptics_free_led(ps) do {} while (0) -+#define synaptics_sync_led(ps) do {} while (0) -+#endif -+ - /***************************************************************************** - * Functions to interpret the absolute mode packets - ****************************************************************************/ -@@ -647,6 +752,7 @@ static void set_input_params(struct inpu - - static void synaptics_disconnect(struct psmouse *psmouse) - { -+ synaptics_free_led(psmouse); - synaptics_reset(psmouse); - kfree(psmouse->private); - psmouse->private = NULL; -@@ -678,6 +784,8 @@ static int synaptics_reconnect(struct ps - return -1; - } - -+ synaptics_sync_led(psmouse); -+ - return 0; - } - -@@ -752,6 +860,9 @@ int synaptics_init(struct psmouse *psmou - SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity), - priv->model_id, priv->capabilities, priv->ext_cap, priv->ext_cap_0c); - -+ if (synaptics_init_led(psmouse) < 0) -+ goto init_fail; -+ - set_input_params(psmouse->dev, priv); - - /* ---- a/drivers/input/mouse/synaptics.h -+++ 
b/drivers/input/mouse/synaptics.h -@@ -97,6 +97,8 @@ struct synaptics_hw_state { - signed char scroll; - }; - -+struct synaptics_led; -+ - struct synaptics_data { - /* Data read from the touchpad */ - unsigned long int model_id; /* Model-ID */ -@@ -110,6 +112,7 @@ struct synaptics_data { - struct serio *pt_port; /* Pass-through serio port */ - - struct synaptics_hw_state mt; /* current gesture packet */ -+ struct synaptics_led *led; - }; - - void synaptics_module_init(void); diff --git a/patches.drivers/ixgbe-entropy-source.patch b/patches.drivers/ixgbe-entropy-source.patch deleted file mode 100644 index f674b35..0000000 --- a/patches.drivers/ixgbe-entropy-source.patch +++ /dev/null @@ -1,90 +0,0 @@ -From: Jiri Benc -Subject: Enable ixgbe as entropy source (disabled by default) -References: FATE#307517 -Patch-mainline: never - -Current disk-less systems have no entropy source whatsoever. Therefore, the -network drivers tg3, bnx2, e1000, e1000e, igb and ixgbe should be enabled to -feed entropy to the kernel via the IRQF_SAMPLE_RANDOM flag when loaded. This -option shall not be enabled by default but implemented via a module option to -be activated by the administrator. 
- -Signed-off-by: Brandon Philips - ---- - drivers/net/ixgbe/ixgbe_main.c | 22 +++++++++++++++++++--- - 1 file changed, 19 insertions(+), 3 deletions(-) - ---- a/drivers/net/ixgbe/ixgbe_main.c -+++ b/drivers/net/ixgbe/ixgbe_main.c -@@ -56,6 +56,11 @@ static const char ixgbe_driver_string[] - const char ixgbe_driver_version[] = DRV_VERSION; - static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; - -+static int entropy = 0; -+module_param(entropy, int, 0); -+MODULE_PARM_DESC(entropy, "Allow ixgbe to populate the /dev/random entropy pool"); -+ -+ - static const struct ixgbe_info *ixgbe_info_tbl[] = { - [board_82598] = &ixgbe_82598_info, - [board_82599] = &ixgbe_82599_info, -@@ -2317,6 +2322,7 @@ static int ixgbe_request_msix_irqs(struc - irqreturn_t (*handler)(int, void *); - int i, vector, q_vectors, err; - int ri = 0, ti = 0; -+ int irq_flags; - - /* Decrement for Other and TCP Timer vectors */ - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; -@@ -2334,22 +2340,27 @@ static int ixgbe_request_msix_irqs(struc - struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; - handler = SET_HANDLER(q_vector); - -+ irq_flags = 0; - if (handler == &ixgbe_msix_clean_rx) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", netdev->name, "rx", ri++); -+ if (entropy) -+ irq_flags = IRQF_SAMPLE_RANDOM; - } else if (handler == &ixgbe_msix_clean_tx) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", netdev->name, "tx", ti++); - } else if (handler == &ixgbe_msix_clean_many) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", netdev->name, "TxRx", ri++); -+ if (entropy) -+ irq_flags = IRQF_SAMPLE_RANDOM; - ti++; - } else { - /* skip this unused q_vector */ - continue; - } - err = request_irq(adapter->msix_entries[vector].vector, -- handler, 0, q_vector->name, -+ handler, irq_flags, q_vector->name, - q_vector); - if (err) { - e_err(probe, "request_irq failed for MSIX interrupt " -@@ -2563,14 
+2574,19 @@ static int ixgbe_request_irq(struct ixgb - { - struct net_device *netdev = adapter->netdev; - int err; -+ int irq_flags = 0; -+ -+ if (entropy) -+ irq_flags = IRQF_SAMPLE_RANDOM; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - err = ixgbe_request_msix_irqs(adapter); - } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { -- err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, -+ err = request_irq(adapter->pdev->irq, ixgbe_intr, irq_flags, - netdev->name, netdev); - } else { -- err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, -+ irq_flags |= IRQF_SHARED; -+ err = request_irq(adapter->pdev->irq, ixgbe_intr, irq_flags, - netdev->name, netdev); - } - diff --git a/patches.drivers/libata-unlock-hpa-by-default b/patches.drivers/libata-unlock-hpa-by-default deleted file mode 100644 index 8fc78e7..0000000 --- a/patches.drivers/libata-unlock-hpa-by-default +++ /dev/null @@ -1,24 +0,0 @@ -From: Tejun Heo -Subject: [PATCH] libata: unlock HPA by default -References: 299267 -Patch-mainline: not yet - -Unlock HPA by default. This is to stay compatible with the old IDE -drivers. 
- -Signed-off-by: Tejun Heo ---- - drivers/ata/libata-core.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/ata/libata-core.c -+++ b/drivers/ata/libata-core.c -@@ -138,7 +138,7 @@ int libata_fua = 0; - module_param_named(fua, libata_fua, int, 0444); - MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)"); - --static int ata_ignore_hpa; -+static int ata_ignore_hpa = 1; - module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); - MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); - diff --git a/patches.drivers/megaraid-mbox-fix-SG_IO b/patches.drivers/megaraid-mbox-fix-SG_IO deleted file mode 100644 index 16e188e..0000000 --- a/patches.drivers/megaraid-mbox-fix-SG_IO +++ /dev/null @@ -1,70 +0,0 @@ -From: Martin Wilck -Subject: megaraid_mbox: Oops on SG_IO -References: bnc#475619 -Patch-mainline: not yet - -This patch fixes an Oops in megaraid_mbox that happens when a -MODE_SENSE command for a logical drive is started viaioctl(SG_IO). - -The problem only occurs if the buffer specified by the user to receive -the mode data resides in highmem and if the buffer is aligned for -direct dma (no bounce buffer necessary). megaraid_mbox emulates -the MODE_SENSE command and writes the data using memset() directly -into user buffer. If the buffer is at a currently unmapped highmem -page, this leads to an Oops. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/megaraid/megaraid_mbox.c | 28 +++++++++++++++++++++++----- - 1 file changed, 23 insertions(+), 5 deletions(-) - ---- a/drivers/scsi/megaraid/megaraid_mbox.c -+++ b/drivers/scsi/megaraid/megaraid_mbox.c -@@ -1586,13 +1586,20 @@ megaraid_mbox_build_cmd(adapter_t *adapt - case MODE_SENSE: - { - struct scatterlist *sgl; -- caddr_t vaddr; -+ struct page *pg; -+ unsigned char *vaddr; -+ unsigned long flags; - - sgl = scsi_sglist(scp); -- if (sg_page(sgl)) { -- vaddr = (caddr_t) sg_virt(&sgl[0]); -+ pg = sg_page(sgl); -+ if (pg) { -+ local_irq_save(flags); -+ vaddr = kmap_atomic(pg, KM_BIO_SRC_IRQ) + sgl->offset; - - memset(vaddr, 0, scp->cmnd[4]); -+ -+ kunmap_atomic(vaddr, KM_BIO_SRC_IRQ); -+ local_irq_restore(flags); - } - else { - con_log(CL_ANN, (KERN_WARNING -@@ -2330,9 +2337,20 @@ megaraid_mbox_dpc(unsigned long devp) - if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0 - && IS_RAID_CH(raid_dev, scb->dev_channel)) { - -+ struct page *pg; -+ unsigned char *vaddr; -+ unsigned long flags; -+ - sgl = scsi_sglist(scp); -- if (sg_page(sgl)) { -- c = *(unsigned char *) sg_virt(&sgl[0]); -+ pg = sg_page(sgl); -+ if (pg) { -+ local_irq_save(flags); -+ vaddr = kmap_atomic(pg, KM_BIO_SRC_IRQ) + sgl->offset; -+ -+ c = *vaddr; -+ -+ kunmap_atomic(vaddr, KM_BIO_SRC_IRQ); -+ local_irq_restore(flags); - } else { - con_log(CL_ANN, (KERN_WARNING - "megaraid mailbox: invalid sg:%d\n", diff --git a/patches.drivers/mpt-fusion-4.22.00.00-update b/patches.drivers/mpt-fusion-4.22.00.00-update deleted file mode 100644 index dcd68b1..0000000 --- a/patches.drivers/mpt-fusion-4.22.00.00-update +++ /dev/null @@ -1,18610 +0,0 @@ -From: Hannes Reinecke -Date: Tue, 24 Nov 2009 14:40:54 +0100 -Subject: Update MPT Fusion driver to 4.22.00.00-suse -References: bnc#556587 -Patch-Mainline: No - -This patch updates the MPT Fusion driver to version 4.22.00.00-suse. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/message/fusion/Kconfig | 16 - drivers/message/fusion/Makefile | 13 - drivers/message/fusion/csmi/csmisas.c | 5805 +++++++++++++++++++ - drivers/message/fusion/csmi/csmisas.h | 1854 ++++++ - drivers/message/fusion/lsi/mpi.h | 5 - drivers/message/fusion/lsi/mpi_cnfg.h | 21 - drivers/message/fusion/lsi/mpi_history.txt | 55 - drivers/message/fusion/lsi/mpi_log_sas.h | 14 - drivers/message/fusion/lsi/mpi_type.h | 15 - drivers/message/fusion/mptbase.c | 802 +- - drivers/message/fusion/mptbase.h | 250 - drivers/message/fusion/mptctl.c | 648 +- - drivers/message/fusion/mptctl.h | 5 - drivers/message/fusion/mptdebug.h | 11 - drivers/message/fusion/mptfc.c | 173 - drivers/message/fusion/mptlan.c | 222 - drivers/message/fusion/mptlan.h | 2 - drivers/message/fusion/mptsas.c | 1359 +++- - drivers/message/fusion/mptsas.h | 60 - drivers/message/fusion/mptscsih.c | 1043 ++- - drivers/message/fusion/mptscsih.h | 13 - drivers/message/fusion/mptspi.c | 262 - drivers/message/fusion/rejected_ioctls/diag_buffer.c | 671 ++ - drivers/message/fusion/rejected_ioctls/diag_buffer.h | 101 - 24 files changed, 12081 insertions(+), 1339 deletions(-) - ---- a/drivers/message/fusion/Kconfig -+++ b/drivers/message/fusion/Kconfig -@@ -61,13 +61,25 @@ config FUSION_SAS - LSISAS1078 - - config FUSION_MAX_SGE -- int "Maximum number of scatter gather entries (16 - 128)" -+ int "Maximum number of scatter gather entries for SAS and SPI (16 - 128)" - default "128" - range 16 128 - help - This option allows you to specify the maximum number of scatter- - gather entries per I/O. The driver default is 128, which matches -- SCSI_MAX_PHYS_SEGMENTS. However, it may decreased down to 16. -+ SAFE_PHYS_SEGMENTS. However, it may decreased down to 16. -+ Decreasing this parameter will reduce memory requirements -+ on a per controller instance. 
-+ -+config FUSION_MAX_FC_SGE -+ int "Maximum number of scatter gather entries for FC (16 - 256)" -+ depends on FUSION_FC -+ default "256" -+ range 16 256 -+ help -+ This option allows you to specify the maximum number of scatter- -+ gather entries per I/O. The driver default is 256, which matches -+ MAX_PHYS_SEGMENTS. However, it may decreased down to 16. - Decreasing this parameter will reduce memory requirements - on a per controller instance. - ---- a/drivers/message/fusion/Makefile -+++ b/drivers/message/fusion/Makefile -@@ -1,12 +1,17 @@ --# Fusion MPT drivers; recognized debug defines... -+# -+# LSI mpt fusion -+# -+ -+# csmi ioctls enable -+EXTRA_CFLAGS += -DCPQ_CIM -+EXTRA_CFLAGS += -DDIAG_BUFFER_SUPPORT -+ -+EXTRA_CFLAGS += -DCONFIG_FUSION_LOGGING - - # enable verbose logging - # CONFIG_FUSION_LOGGING needs to be enabled in Kconfig - #EXTRA_CFLAGS += -DMPT_DEBUG_VERBOSE - -- --#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-} LSI_LOGIC -- - obj-$(CONFIG_FUSION_SPI) += mptbase.o mptscsih.o mptspi.o - obj-$(CONFIG_FUSION_FC) += mptbase.o mptscsih.o mptfc.o - obj-$(CONFIG_FUSION_SAS) += mptbase.o mptscsih.o mptsas.o ---- /dev/null -+++ b/drivers/message/fusion/csmi/csmisas.c -@@ -0,0 +1,5805 @@ -+/* -+ * linux/drivers/message/fusion/csmi/csmisas.c -+ * For use with LSI PCI chip/adapter(s) -+ * running LSI Fusion MPT (Message Passing Technology) firmware. -+ * -+ * Copyright (c) 1999-2008 LSI Corporation -+ * (mailto:DL-MPTFusionLinux@lsi.com) -+ */ -+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+/* -+ This program is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by -+ the Free Software Foundation; version 2 of the License. -+ -+ This program is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -+ GNU General Public License for more details. -+ -+ NO WARRANTY -+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR -+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT -+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, -+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is -+ solely responsible for determining the appropriateness of using and -+ distributing the Program and assumes all risks associated with its -+ exercise of rights under this Agreement, including but not limited to -+ the risks and costs of program errors, damage to or loss of data, -+ programs or equipment, and unavailability or interruption of operations. -+ -+ DISCLAIMER OF LIABILITY -+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY -+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND -+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED -+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES -+ -+ You should have received a copy of the GNU General Public License -+ along with this program; if not, write to the Free Software -+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+*/ -+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+ -+#define MPT_CSMI_DESCRIPTION "LSI Corporation: Fusion MPT Driver "MPT_LINUX_VERSION_COMMON -+#define csmisas_is_this_sas_cntr(ioc) (ioc->bus_type == SAS) ? 
1 : 0 -+ -+static int csmisas_do_raid(MPT_ADAPTER *ioc, u8 action, u8 PhysDiskNum, u8 VolumeBus, -+ u8 VolumeId, pMpiRaidActionReply_t reply); -+static u8 map_sas_status_to_csmi(u8 mpi_sas_status); -+ -+/** -+ * reverse_byte_order64 -+ * -+ * @data64 -+ * -+ **/ -+static u64 -+reverse_byte_order64(u64 data64) -+{ -+ int i; -+ u64 rc; -+ u8 *inWord = (u8*)&data64, *outWord = (u8*)&rc; -+ -+ for (i = 0 ; i < 8 ; i++) -+ outWord[i] = inWord[7-i]; -+ -+ return rc; -+} -+ -+/** -+ * csmisas_is_sata -+ * -+ * @phys_disk -+ * -+ **/ -+static int -+csmisas_is_sata(RaidPhysDiskPage0_t *phys_disk) -+{ -+ if ((phys_disk->ExtDiskIdentifier[0] == 'A') && -+ (phys_disk->ExtDiskIdentifier[1] == 'T') && -+ (phys_disk->ExtDiskIdentifier[2] == 'A')) -+ return 1; -+ else -+ return 0; -+} -+ -+/** -+ * csmisas_is_end_device -+ * -+ * @attached -+ * -+ **/ -+static inline int -+csmisas_is_end_device(struct mptsas_devinfo * attached) -+{ -+ if ((attached->sas_address) && -+ (attached->device_info & -+ MPI_SAS_DEVICE_INFO_END_DEVICE) && -+ ((attached->device_info & -+ MPI_SAS_DEVICE_INFO_SSP_TARGET) | -+ (attached->device_info & -+ MPI_SAS_DEVICE_INFO_STP_TARGET) | -+ (attached->device_info & -+ MPI_SAS_DEVICE_INFO_SATA_DEVICE))) -+ return 1; -+ else -+ return 0; -+} -+ -+/** -+ * csmisas_is_phys_disk -+ * -+ * returns (1) success (0) fail - not a phys disk -+ **/ -+static int -+csmisas_is_phys_disk(MPT_ADAPTER *ioc, int channel, int id) -+{ -+ struct inactive_raid_component_info *component_info; -+ int i; -+ int rc = 0; -+ -+ if (!ioc->raid_data.pIocPg3) -+ goto out; -+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) { -+ if ((id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID) && -+ (channel == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskBus)) { -+ rc = 1; -+ goto out; -+ } -+ } -+ -+ /* -+ * Check inactive list for matching phys disks -+ */ -+ if (list_empty(&ioc->raid_data.inactive_list)) -+ goto out; -+ -+ down(&ioc->raid_data.inactive_list_mutex); -+ 
list_for_each_entry(component_info, &ioc->raid_data.inactive_list, -+ list) { -+ if ((component_info->d.PhysDiskID == id) && -+ (component_info->d.PhysDiskBus == channel)) -+ rc = 1; -+ } -+ up(&ioc->raid_data.inactive_list_mutex); -+ -+ out: -+ return rc; -+} -+ -+/** -+ * csmisas_raid_id_to_num -+ * -+ * Obtains the phys disk num for given H:C:T nexus -+ * -+ * input (channel/id) -+ * output (phys disk number - used by SCSI_IO_PASSTHRU to access hidden component) -+ * -+ * returns - signed return means failure -+ **/ -+static s8 -+csmisas_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id) -+{ -+ struct inactive_raid_component_info *component_info; -+ int i; -+ s8 rc = -ENXIO; -+ -+ if (!ioc->raid_data.pIocPg3) -+ goto out; -+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) { -+ if ((id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID) && -+ (channel == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskBus)) { -+ rc = ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum; -+ goto out; -+ } -+ } -+ -+ /* -+ * Check inactive list for matching phys disks -+ */ -+ if (list_empty(&ioc->raid_data.inactive_list)) -+ goto out; -+ -+ down(&ioc->raid_data.inactive_list_mutex); -+ list_for_each_entry(component_info, &ioc->raid_data.inactive_list, -+ list) { -+ if ((component_info->d.PhysDiskID == id) && -+ (component_info->d.PhysDiskBus == channel)) -+ rc = component_info->d.PhysDiskNum; -+ } -+ up(&ioc->raid_data.inactive_list_mutex); -+ -+ out: -+ return rc; -+} -+ -+/** -+ * csmisas_get_device_component_by_os -+ * -+ * Obtain device component object by operating system mapping -+ * -+ * @ioc -+ * @channel -+ * @id -+ * -+ **/ -+static struct sas_device_info * -+csmisas_get_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id) -+{ -+ struct sas_device_info *sas_info, *p; -+ -+ sas_info = NULL; -+ -+ down(&ioc->sas_device_info_mutex); -+ list_for_each_entry(p, &ioc->sas_device_info_list, list) { -+ if (p->os.channel == channel && p->os.id == id) { -+ sas_info = p; -+ 
goto out; -+ } -+ } -+ -+ out: -+ up(&ioc->sas_device_info_mutex); -+ return sas_info; -+} -+ -+/** -+ * csmisas_get_device_component -+ * -+ * Obtain device component object by firmware system mapping -+ * -+ * @ioc -+ * @channel -+ * @id -+ * -+ **/ -+static struct sas_device_info * -+csmisas_get_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id) -+{ -+ struct sas_device_info *sas_info, *p; -+ -+ sas_info = NULL; -+ -+ down(&ioc->sas_device_info_mutex); -+ list_for_each_entry(p, &ioc->sas_device_info_list, list) { -+ if (p->fw.channel == channel && p->fw.id == id) { -+ sas_info = p; -+ goto out; -+ } -+ } -+ -+ out: -+ up(&ioc->sas_device_info_mutex); -+ return sas_info; -+} -+ -+ -+/** -+ * csmisas_get_device_component_by_sas_addr -+ * -+ * Obtain device component object by sas address -+ * -+ * @ioc -+ * @channel -+ * @id -+ * -+ **/ -+static struct sas_device_info * -+csmisas_get_device_component_by_sas_addr(MPT_ADAPTER *ioc, u64 sas_address) -+{ -+ struct sas_device_info *sas_info, *p; -+ -+ sas_info = NULL; -+ -+ down(&ioc->sas_device_info_mutex); -+ list_for_each_entry(p, &ioc->sas_device_info_list, list) { -+ if (p->sas_address == sas_address) { -+ sas_info = p; -+ goto out; -+ } -+ } -+ -+ out: -+ up(&ioc->sas_device_info_mutex); -+ return sas_info; -+} -+ -+/** -+ * csmisas_send_command_wait -+ * -+ * Send mf to firmware -+ * -+ * @ioc -+ * @mf -+ * @timeout - timeout -+ * -+ * Return: 0 for success -+ * non-zero, failure -+ **/ -+static int -+csmisas_send_command_wait(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, unsigned long timeout) -+{ -+ int rc; -+ unsigned long timeleft; -+ -+ timeout = max_t(unsigned long, MPT_IOCTL_DEFAULT_TIMEOUT, timeout); -+ rc = 0; -+ timeleft = 0; -+ -+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, -+ mf->u.hdr.MsgContext); -+ INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) -+ mpt_put_msg_frame(mptctl_id, ioc, mf); -+ timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, timeout*HZ); -+ if 
(!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -+ rc = -1; -+ printk("%s: failed\n", __FUNCTION__); -+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { -+ mpt_free_msg_frame(ioc, mf); -+ CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) -+ return rc; -+ } -+ if (!timeleft) -+ mptctl_timeout_expired(ioc, mf); -+ } -+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); -+ -+ return rc; -+} -+ -+/** -+ * csmisas_send_handshake_wait -+ * -+ * Handshake a mf to firmware -+ * -+ * @ioc -+ * @mf -+ * @mf_size -+ * @timeout - timeout -+ * -+ * Return: 0 for success -+ * non-zero, failure -+ **/ -+static int -+csmisas_send_handshake_wait(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, unsigned long timeout) -+{ -+ int rc; -+ unsigned long timeleft; -+ -+ timeout = max_t(unsigned long, MPT_IOCTL_DEFAULT_TIMEOUT, timeout); -+ rc = 0; -+ timeleft = 0; -+ -+ INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status) -+ mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf); -+ timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ); -+ if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -+ rc = -1; -+ printk("%s: failed\n", __FUNCTION__); -+ mpt_clear_taskmgmt_in_progress_flag(ioc); -+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { -+ mpt_free_msg_frame(ioc, mf); -+ CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) -+ return rc; -+ } -+ if (!timeleft) -+ mptctl_timeout_expired(ioc, mf); -+ } -+ return rc; -+} -+ -+/** -+ * csmisas_get_number_hotspares - returns num hot spares in this ioc -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * -+ * Return: number of hotspares -+ * -+ **/ -+static int -+csmisas_get_number_hotspares(MPT_ADAPTER *ioc) -+{ -+ ConfigPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ IOCPage5_t *buffer = NULL; -+ dma_addr_t dma_handle; -+ int data_sz; -+ int rc; -+ -+ memset(&hdr, 0, sizeof(ConfigPageHeader_t)); -+ memset(&cfg, 0, sizeof(CONFIGPARMS)); -+ -+ rc = 0; -+ data_sz = 0; -+ hdr.PageNumber = 5; -+ hdr.PageType = 
MPI_CONFIG_PAGETYPE_IOC; -+ cfg.cfghdr.hdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if (mpt_config(ioc, &cfg) != 0) -+ goto get_ioc_pg5; -+ -+ if (hdr.PageLength == 0) -+ goto get_ioc_pg5; -+ -+ data_sz = hdr.PageLength * 4; -+ buffer = (IOCPage5_t *) pci_alloc_consistent(ioc->pcidev, -+ data_sz, &dma_handle); -+ if (!buffer) -+ goto get_ioc_pg5; -+ -+ memset((u8 *)buffer, 0, data_sz); -+ cfg.physAddr = dma_handle; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if (mpt_config(ioc, &cfg) != 0) -+ goto get_ioc_pg5; -+ -+ rc = buffer->NumHotSpares; -+ -+ get_ioc_pg5: -+ -+ if (buffer) -+ pci_free_consistent(ioc->pcidev, data_sz, -+ (u8 *) buffer, dma_handle); -+ -+ return rc; -+} -+ -+ -+/** -+ * csmisas_get_ioc_pg5 - ioc Page 5 hot spares -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @pIocPage5: ioc page 5 -+ * @data_size: expected data size(units=bytes) -+ * -+ * Return: 0 for success -+ * -ENOMEM if no memory available -+ * -EPERM if not allowed due to ISR context -+ * -EAGAIN if no msg frames currently available -+ * -EFAULT for non-successful reply or no reply (timeout) -+ **/ -+static int -+csmisas_get_ioc_pg5(MPT_ADAPTER *ioc, IOCPage5_t *iocPage5, int data_size) -+{ -+ ConfigPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ IOCPage5_t *buffer = NULL; -+ dma_addr_t dma_handle; -+ int data_sz; -+ int rc; -+ -+ memset(&hdr, 0, sizeof(ConfigPageHeader_t)); -+ memset(&cfg, 0, sizeof(CONFIGPARMS)); -+ -+ rc = 0; -+ data_sz = 0; -+ hdr.PageNumber = 5; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_IOC; -+ cfg.cfghdr.hdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) -+ goto get_ioc_pg5; -+ -+ if (hdr.PageLength == 0) { -+ rc = -EFAULT; -+ goto get_ioc_pg5; -+ } -+ -+ data_sz = hdr.PageLength * 4; -+ buffer = (IOCPage5_t *) pci_alloc_consistent(ioc->pcidev, -+ data_sz, 
&dma_handle); -+ if (!buffer) { -+ rc = -ENOMEM; -+ goto get_ioc_pg5; -+ } -+ -+ memset((u8 *)buffer, 0, data_sz); -+ cfg.physAddr = dma_handle; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) -+ goto get_ioc_pg5; -+ -+ memcpy(iocPage5, buffer, data_size); -+ -+ get_ioc_pg5: -+ -+ if (buffer) -+ pci_free_consistent(ioc->pcidev, data_sz, -+ (u8 *) buffer, dma_handle); -+ -+ return rc; -+} -+ -+/** -+ * csmisas_sas_device_pg0 - sas device page 0 -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @mptsas_devinfo: structure found in mptsas.h -+ * @form, @form_specific - defines the Page Address field in the config page -+ * (pls refer to chapter 5.1 in the mpi spec) -+ * -+ * Return: 0 for success -+ * -ENOMEM if no memory available -+ * -EPERM if not allowed due to ISR context -+ * -EAGAIN if no msg frames currently available -+ * -EFAULT for non-successful reply or no reply (timeout) -+ **/ -+static int -+csmisas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info, -+ u32 form, u32 form_specific) -+{ -+ ConfigExtendedPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ SasDevicePage0_t *buffer; -+ dma_addr_t dma_handle; -+ u64 sas_address; -+ int rc; -+ -+ rc = 0; -+ hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; -+ hdr.ExtPageLength = 0; -+ hdr.PageNumber = 0; -+ hdr.Reserved1 = 0; -+ hdr.Reserved2 = 0; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; -+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE; -+ -+ cfg.cfghdr.ehdr = &hdr; -+ cfg.pageAddr = form + form_specific; -+ cfg.physAddr = -1; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; /* read */ -+ cfg.timeout = 10; -+ -+ memset(device_info, 0, sizeof(struct mptsas_devinfo)); -+ if ((rc = mpt_config(ioc, &cfg)) != 0) -+ goto out; -+ -+ if (!hdr.ExtPageLength) { -+ rc = -ENXIO; -+ goto out; -+ } -+ -+ buffer = pci_alloc_consistent(ioc->pcidev, -+ hdr.ExtPageLength * 4, &dma_handle); -+ if (!buffer) { -+ rc = -ENOMEM; -+ goto out; -+ } -+ -+ 
cfg.physAddr = dma_handle; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) -+ goto out_free_consistent; -+ -+ device_info->handle = le16_to_cpu(buffer->DevHandle); -+ device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle); -+ device_info->handle_enclosure = -+ le16_to_cpu(buffer->EnclosureHandle); -+ device_info->slot = le16_to_cpu(buffer->Slot); -+ device_info->phy_id = buffer->PhyNum; -+ device_info->port_id = buffer->PhysicalPort; -+ device_info->id = buffer->TargetID; -+ device_info->channel = buffer->Bus; -+ memcpy(&sas_address, &buffer->SASAddress, sizeof(u64)); -+ device_info->sas_address = le64_to_cpu(sas_address); -+ device_info->device_info = -+ le32_to_cpu(buffer->DeviceInfo); -+ -+ out_free_consistent: -+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, -+ buffer, dma_handle); -+ out: -+ return rc; -+} -+ -+/** -+ * Routine for the CSMI Sas Get Driver Info command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_driver_info(unsigned long arg) -+{ -+ -+ CSMI_SAS_DRIVER_INFO_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_DRIVER_INFO_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ -+ if (copy_from_user(&karg, uarg, sizeof(CSMI_SAS_DRIVER_INFO_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to read in csmi_sas_get_driver_info_buffer struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, 
printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ /* Fill in the data and return the structure to the calling -+ * program -+ */ -+ memcpy( karg.Information.szName, MPT_MISCDEV_BASENAME, -+ sizeof(MPT_MISCDEV_BASENAME)); -+ memcpy( karg.Information.szDescription, MPT_CSMI_DESCRIPTION, -+ sizeof(MPT_CSMI_DESCRIPTION)); -+ -+ karg.Information.usMajorRevision = MPT_LINUX_MAJOR_VERSION; -+ karg.Information.usMinorRevision = MPT_LINUX_MINOR_VERSION; -+ karg.Information.usBuildRevision = MPT_LINUX_BUILD_VERSION; -+ karg.Information.usReleaseRevision = MPT_LINUX_RELEASE_VERSION; -+ -+ karg.Information.usCSMIMajorRevision = CSMI_MAJOR_REVISION; -+ karg.Information.usCSMIMinorRevision = CSMI_MINOR_REVISION; -+ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_DRIVER_INFO_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to write out csmi_sas_get_driver_info_buffer @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI_SAS_GET_CNTLR_CONFIG command. -+ * -+ * Outputs: None. 
-+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_cntlr_config(unsigned long arg) -+{ -+ -+ CSMI_SAS_CNTLR_CONFIG_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_CNTLR_CONFIG_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ u64 mem_phys; -+ -+ if (copy_from_user(&karg, uarg, sizeof(CSMI_SAS_CNTLR_CONFIG_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to read in csmi_sas_get_cntlr_config_buffer struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_INVALID_PARAMETER; -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ /* Clear the struct before filling in data. */ -+ memset( &karg.Configuration, 0, sizeof(CSMI_SAS_CNTLR_CONFIG)); -+ -+ /* Fill in the data and return the structure to the calling -+ * program -+ */ -+ -+ karg.Configuration.uBaseIoAddress = ioc->pio_mem_phys; -+ karg.Configuration.BaseMemoryAddress.uLowPart = ioc->mem_phys; -+ if (sizeof(ioc->mem_phys) == sizeof(u64)) { -+ mem_phys = ioc->mem_phys; -+ karg.Configuration.BaseMemoryAddress.uHighPart = -+ (u32)(mem_phys >> 32); -+ } -+ -+ karg.Configuration.uBoardID = (ioc->pcidev->subsystem_device << 16) | -+ (ioc->pcidev->subsystem_vendor); -+ -+ karg.Configuration.usSlotNumber = -+ (ioc->pci_slot_number = 0xff) ? 
-+ SLOT_NUMBER_UNKNOWN : ioc->pci_slot_number; -+ karg.Configuration.bControllerClass = CSMI_SAS_CNTLR_CLASS_HBA; -+ karg.Configuration.bIoBusType = CSMI_SAS_BUS_TYPE_PCI; -+ karg.Configuration.BusAddress.PciAddress.bBusNumber = -+ ioc->pcidev->bus->number; -+ karg.Configuration.BusAddress.PciAddress.bDeviceNumber = -+ PCI_SLOT(ioc->pcidev->devfn); -+ karg.Configuration.BusAddress.PciAddress.bFunctionNumber = -+ PCI_FUNC(ioc->pcidev->devfn); -+ karg.Configuration.BusAddress.PciAddress.bReserved = 0; -+ memcpy( &karg.Configuration.szSerialNumber, ioc->board_tracer, 16 ); -+ karg.Configuration.usMajorRevision = ioc->facts.FWVersion.Struct.Major; -+ karg.Configuration.usMinorRevision = ioc->facts.FWVersion.Struct.Minor; -+ karg.Configuration.usBuildRevision = ioc->facts.FWVersion.Struct.Unit; -+ karg.Configuration.usReleaseRevision = ioc->facts.FWVersion.Struct.Dev; -+ karg.Configuration.usBIOSMajorRevision = -+ (ioc->biosVersion & 0xFF000000) >> 24; -+ karg.Configuration.usBIOSMinorRevision = -+ (ioc->biosVersion & 0x00FF0000) >> 16; -+ karg.Configuration.usBIOSBuildRevision = -+ (ioc->biosVersion & 0x0000FF00) >> 8; -+ karg.Configuration.usBIOSReleaseRevision = -+ (ioc->biosVersion & 0x000000FF); -+ karg.Configuration.uControllerFlags = CSMI_SAS_CNTLR_SAS_HBA | -+ CSMI_SAS_CNTLR_FWD_SUPPORT | CSMI_SAS_CNTLR_FWD_ONLINE | -+ CSMI_SAS_CNTLR_FWD_SRESET ; -+ -+ /* -+ * Enabling CSMI_SAS_CNTLR_SAS_RAID bit when IR fw detected -+ */ -+ if (ioc->ir_firmware) -+ karg.Configuration.uControllerFlags |= CSMI_SAS_CNTLR_SAS_RAID; -+ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ /* All Rrom entries will be zero. Skip them. */ -+ /* bReserved will also be zeros. 
*/ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_CNTLR_CONFIG_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to write out csmi_sas_get_cntlr_config @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI Sas Get Controller Status command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_cntlr_status(unsigned long arg) -+{ -+ -+ CSMI_SAS_CNTLR_STATUS_BUFFER __user *uarg = (void __user *) arg; -+ MPT_ADAPTER *ioc = NULL; -+ CSMI_SAS_CNTLR_STATUS_BUFFER karg; -+ int iocnum; -+ int rc; -+ -+ if (copy_from_user(&karg, uarg, sizeof(CSMI_SAS_CNTLR_STATUS_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to read in csmi_sas_get_cntlr_status_buffer struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ /* Fill in the data and return the structure to the calling -+ * program -+ */ -+ -+ rc = mpt_GetIocState(ioc, 1); -+ switch (rc) { -+ case MPI_IOC_STATE_OPERATIONAL: -+ karg.Status.uStatus = CSMI_SAS_CNTLR_STATUS_GOOD; -+ karg.Status.uOfflineReason = 0; -+ break; -+ -+ case MPI_IOC_STATE_FAULT: -+ karg.Status.uStatus = CSMI_SAS_CNTLR_STATUS_FAILED; -+ karg.Status.uOfflineReason = 0; -+ break; -+ -+ case
MPI_IOC_STATE_RESET: -+ case MPI_IOC_STATE_READY: -+ default: -+ karg.Status.uStatus = CSMI_SAS_CNTLR_STATUS_OFFLINE; -+ karg.Status.uOfflineReason = -+ CSMI_SAS_OFFLINE_REASON_INITIALIZING; -+ break; -+ } -+ -+ memset(&karg.Status.bReserved, 0, 28); -+ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_CNTLR_STATUS_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to write out csmi_sas_get_cntlr_status @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI Sas Get Phy Info command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_phy_info(unsigned long arg) -+{ -+ CSMI_SAS_PHY_INFO_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_PHY_INFO_BUFFER *karg; -+ MPT_ADAPTER *ioc = NULL; -+ ConfigExtendedPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ SasIOUnitPage0_t *sasIoUnitPg0; -+ dma_addr_t sasIoUnitPg0_dma; -+ int sasIoUnitPg0_data_sz; -+ SasPhyPage0_t *sasPhyPg0; -+ dma_addr_t sasPhyPg0_dma; -+ int sasPhyPg0_data_sz; -+ u16 protocol; -+ int iocnum; -+ int rc; -+ int ii; -+ u64 sas_address; -+ struct mptsas_devinfo device_info; -+ int memory_pages; -+ -+ sasIoUnitPg0=NULL; -+ sasPhyPg0=NULL; -+ sasIoUnitPg0_data_sz=0; -+ sasPhyPg0_data_sz=0; -+ -+ memory_pages = get_order(sizeof(CSMI_SAS_PHY_INFO_BUFFER)); -+ karg = (CSMI_SAS_PHY_INFO_BUFFER *)__get_free_pages( -+ GFP_KERNEL, memory_pages); -+ if (!karg){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc CSMI_SAS_PHY_INFO_BUFFER " -+ "malloc_data_sz=%d memory_pages=%d\n", -+ __FILE__, __LINE__, __FUNCTION__, -+ (int)sizeof(CSMI_SAS_PHY_INFO_BUFFER), memory_pages); -+ return -ENOMEM; -+ } -+ -+ memset(karg, 0, 
sizeof(*karg)); -+ -+ if (copy_from_user(karg, uarg, sizeof(CSMI_SAS_PHY_INFO_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to read in csmisas_get_phy_info_buffer struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg->IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)karg, memory_pages); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)karg, memory_pages); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ /* Fill in the data and return the structure to the calling -+ * program -+ */ -+ -+ /* Issue a config request to get the number of phys -+ */ -+ hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION; -+ hdr.ExtPageLength = 0; -+ hdr.PageNumber = 0; -+ hdr.Reserved1 = 0; -+ hdr.Reserved2 = 0; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; -+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; -+ -+ cfg.cfghdr.ehdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.pageAddr = 0; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; /* read */ -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ /* Don't check if this failed. Already in a -+ * failure case. -+ */ -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: MPI_SASIOUNITPAGE0_PAGEVERSION: HEADER\n")); -+ dcsmisasprintk(ioc, printk(": rc=%x\n",rc)); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto sas_get_phy_info_exit; -+ } -+ -+ if (hdr.ExtPageLength == 0) { -+ /* Don't check if this failed. Already in a -+ * failure case. 
-+ */ -+ dcsmisasprintk(ioc, printk(KERN_ERR ": hdr.ExtPageLength == 0\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto sas_get_phy_info_exit; -+ } -+ -+ sasIoUnitPg0_data_sz = hdr.ExtPageLength * 4; -+ rc = -ENOMEM; -+ -+ sasIoUnitPg0 = (SasIOUnitPage0_t *) pci_alloc_consistent(ioc->pcidev, -+ sasIoUnitPg0_data_sz, &sasIoUnitPg0_dma); -+ -+ if (!sasIoUnitPg0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto sas_get_phy_info_exit; -+ } -+ -+ memset((u8 *)sasIoUnitPg0, 0, sasIoUnitPg0_data_sz); -+ cfg.physAddr = sasIoUnitPg0_dma; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ -+ /* Don't check if this failed. Already in a -+ * failure case. -+ */ -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: MPI_SASIOUNITPAGE0_PAGEVERSION: PAGE\n")); -+ dcsmisasprintk(ioc, printk(KERN_ERR ": rc=%x\n",rc)); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto sas_get_phy_info_exit; -+ } -+ -+ /* Number of Phys. */ -+ karg->Information.bNumberOfPhys = sasIoUnitPg0->NumPhys; -+ -+ /* Fill in information for each phy. 
*/ -+ for (ii = 0; ii < karg->Information.bNumberOfPhys; ii++) { -+ -+/* EDM : dump IO Unit Page 0 data*/ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "---- IO UNIT PAGE 0 ------------\n")); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Handle=0x%X\n", -+ le16_to_cpu(sasIoUnitPg0->PhyData[ii].AttachedDeviceHandle))); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Controller Handle=0x%X\n", -+ le16_to_cpu(sasIoUnitPg0->PhyData[ii].ControllerDevHandle))); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Port=0x%X\n", -+ sasIoUnitPg0->PhyData[ii].Port)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Port Flags=0x%X\n", -+ sasIoUnitPg0->PhyData[ii].PortFlags)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "PHY Flags=0x%X\n", -+ sasIoUnitPg0->PhyData[ii].PhyFlags)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Negotiated Link Rate=0x%X\n", -+ sasIoUnitPg0->PhyData[ii].NegotiatedLinkRate)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Controller PHY Device Info=0x%X\n", -+ le32_to_cpu(sasIoUnitPg0->PhyData[ii].ControllerPhyDeviceInfo))); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "DiscoveryStatus=0x%X\n", -+ le32_to_cpu(sasIoUnitPg0->PhyData[ii].DiscoveryStatus))); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\n")); -+/* EDM : debug data */ -+ -+ /* PHY stuff. */ -+ karg->Information.Phy[ii].bPortIdentifier = -+ sasIoUnitPg0->PhyData[ii].Port; -+ -+ /* Get the negotiated link rate for the phy. 
*/ -+ switch (sasIoUnitPg0->PhyData[ii].NegotiatedLinkRate) { -+ -+ case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED: -+ karg->Information.Phy[ii].bNegotiatedLinkRate = -+ CSMI_SAS_PHY_DISABLED; -+ break; -+ -+ case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION: -+ karg->Information.Phy[ii].bNegotiatedLinkRate = -+ CSMI_SAS_LINK_RATE_FAILED; -+ break; -+ -+ case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE: -+ break; -+ -+ case MPI_SAS_IOUNIT0_RATE_1_5: -+ karg->Information.Phy[ii].bNegotiatedLinkRate = -+ CSMI_SAS_LINK_RATE_1_5_GBPS; -+ break; -+ -+ case MPI_SAS_IOUNIT0_RATE_3_0: -+ karg->Information.Phy[ii].bNegotiatedLinkRate = -+ CSMI_SAS_LINK_RATE_3_0_GBPS; -+ break; -+ -+ case MPI_SAS_IOUNIT0_RATE_UNKNOWN: -+ default: -+ karg->Information.Phy[ii].bNegotiatedLinkRate = -+ CSMI_SAS_LINK_RATE_UNKNOWN; -+ break; -+ } -+ -+ if (sasIoUnitPg0->PhyData[ii].PortFlags & -+ MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS) { -+ karg->Information.Phy[ii].bAutoDiscover = -+ CSMI_SAS_DISCOVER_IN_PROGRESS; -+ } else { -+ karg->Information.Phy[ii].bAutoDiscover = -+ CSMI_SAS_DISCOVER_COMPLETE; -+ } -+ -+ /* Issue a config request to get -+ * phy information. 
-+ */ -+ hdr.PageVersion = MPI_SASPHY0_PAGEVERSION; -+ hdr.ExtPageLength = 0; -+ hdr.PageNumber = 0; -+ hdr.Reserved1 = 0; -+ hdr.Reserved2 = 0; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; -+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY; -+ -+ cfg.cfghdr.ehdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.pageAddr = ii; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; /* read */ -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: MPI_SASPHY0_PAGEVERSION: HEADER\n")); -+ dcsmisasprintk(ioc, printk(": rc=%x\n",rc)); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto sas_get_phy_info_exit; -+ } -+ -+ if (hdr.ExtPageLength == 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto sas_get_phy_info_exit; -+ } -+ -+ sasPhyPg0_data_sz = hdr.ExtPageLength * 4; -+ rc = -ENOMEM; -+ -+ sasPhyPg0 = (SasPhyPage0_t *) pci_alloc_consistent( -+ ioc->pcidev, sasPhyPg0_data_sz, &sasPhyPg0_dma); -+ -+ if (! 
sasPhyPg0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto sas_get_phy_info_exit; -+ } -+ -+ memset((u8 *)sasPhyPg0, 0, sasPhyPg0_data_sz); -+ cfg.physAddr = sasPhyPg0_dma; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: MPI_SASPHY0_PAGEVERSION: PAGE\n")); -+ dcsmisasprintk(ioc, printk(KERN_ERR ": rc=%x\n",rc)); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ pci_free_consistent(ioc->pcidev, sasPhyPg0_data_sz, -+ (u8 *) sasPhyPg0, sasPhyPg0_dma); -+ goto sas_get_phy_info_exit; -+ } -+ -+/* EDM : dump PHY Page 0 data*/ -+ memcpy(&sas_address, &sasPhyPg0->SASAddress, sizeof(u64)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "---- SAS PHY PAGE 0 ------------\n")); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Handle=0x%X\n", -+ le16_to_cpu(sasPhyPg0->AttachedDevHandle))); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "SAS Address=0x%llX\n", -+ (unsigned long long)sas_address)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Attached PHY Identifier=0x%X\n", -+ sasPhyPg0->AttachedPhyIdentifier)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Attached Device Info=0x%X\n", -+ le32_to_cpu(sasPhyPg0->AttachedDeviceInfo))); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Programmed Link Rate=0x%X\n", -+ sasPhyPg0->ProgrammedLinkRate)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Hardware Link Rate=0x%X\n", -+ sasPhyPg0->HwLinkRate)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Change Count=0x%X\n", -+ sasPhyPg0->ChangeCount)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "PHY Info=0x%X\n", -+ le32_to_cpu(sasPhyPg0->PhyInfo))); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\n")); -+/* EDM : debug data */ -+ -+ /* save the data */ -+ -+ /* Set Max hardware link rate. -+ * This value is hard coded -+ * because the HW link rate -+ * is currently being -+ * overwritten in FW. 
-+ */ -+ -+ /* Set Max hardware link rate. */ -+ switch (sasPhyPg0->HwLinkRate & -+ MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) { -+ -+ case MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5: -+ karg->Information.Phy[ii].bMaximumLinkRate = -+ CSMI_SAS_LINK_RATE_1_5_GBPS; -+ break; -+ -+ case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0: -+ karg->Information.Phy[ii].bMaximumLinkRate = -+ CSMI_SAS_LINK_RATE_3_0_GBPS; -+ break; -+ default: -+ break; -+ } -+ -+ /* Set Max programmed link rate. */ -+ switch (sasPhyPg0->ProgrammedLinkRate & -+ MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) { -+ -+ case MPI_SAS_PHY0_PRATE_MAX_RATE_1_5: -+ karg->Information.Phy[ii].bMaximumLinkRate |= -+ (CSMI_SAS_PROGRAMMED_LINK_RATE_1_5_GBPS << 4); -+ break; -+ -+ case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0: -+ karg->Information.Phy[ii].bMaximumLinkRate |= -+ (CSMI_SAS_PROGRAMMED_LINK_RATE_3_0_GBPS << 4); -+ break; -+ default: -+ break; -+ } -+ -+ /* Set Min hardware link rate. */ -+ switch (sasPhyPg0->HwLinkRate & -+ MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK) { -+ -+ case MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5: -+ karg->Information.Phy[ii].bMinimumLinkRate = -+ CSMI_SAS_LINK_RATE_1_5_GBPS; -+ break; -+ -+ case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0: -+ karg->Information.Phy[ii].bMinimumLinkRate = -+ CSMI_SAS_LINK_RATE_3_0_GBPS; -+ break; -+ default: -+ break; -+ } -+ -+ /* Set Min programmed link rate. 
*/ -+ switch (sasPhyPg0->ProgrammedLinkRate & -+ MPI_SAS_PHY0_PRATE_MIN_RATE_MASK) { -+ -+ case MPI_SAS_PHY0_PRATE_MIN_RATE_1_5: -+ karg->Information.Phy[ii].bMinimumLinkRate |= -+ (CSMI_SAS_PROGRAMMED_LINK_RATE_1_5_GBPS << 4); -+ break; -+ -+ case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0: -+ karg->Information.Phy[ii].bMinimumLinkRate |= -+ (CSMI_SAS_PROGRAMMED_LINK_RATE_3_0_GBPS << 4); -+ break; -+ default: -+ break; -+ } -+ -+ karg->Information.Phy[ii].bPhyChangeCount = sasPhyPg0->ChangeCount; -+ if( sasPhyPg0->PhyInfo & MPI_SAS_PHY0_PHYINFO_VIRTUAL_PHY ) -+ karg->Information.Phy[ii].bPhyFeatures = CSMI_SAS_PHY_VIRTUAL_SMP; -+ -+ /* Fill in Attached Device -+ * Initiator Port Protocol. -+ * Bits 6:3 -+ * More than one bit can be set. -+ */ -+ protocol = le32_to_cpu(sasPhyPg0->AttachedDeviceInfo) & 0x78; -+ karg->Information.Phy[ii].Attached.bInitiatorPortProtocol = 0; -+ if (protocol & MPI_SAS_DEVICE_INFO_SSP_INITIATOR) -+ karg->Information.Phy[ii].Attached.bInitiatorPortProtocol = -+ CSMI_SAS_PROTOCOL_SSP; -+ if (protocol & MPI_SAS_DEVICE_INFO_STP_INITIATOR) -+ karg->Information.Phy[ii].Attached.bInitiatorPortProtocol |= -+ CSMI_SAS_PROTOCOL_STP; -+ if (protocol & MPI_SAS_DEVICE_INFO_SMP_INITIATOR) -+ karg->Information.Phy[ii].Attached.bInitiatorPortProtocol |= -+ CSMI_SAS_PROTOCOL_SMP; -+ if (protocol & MPI_SAS_DEVICE_INFO_SATA_HOST) -+ karg->Information.Phy[ii].Attached.bInitiatorPortProtocol |= -+ CSMI_SAS_PROTOCOL_SATA; -+ -+ /* Fill in Phy Target Port -+ * Protocol. Bits 10:7 -+ * More than one bit can be set. 
-+ */ -+ protocol = le32_to_cpu(sasPhyPg0->AttachedDeviceInfo) & 0x780; -+ karg->Information.Phy[ii].Attached.bTargetPortProtocol = 0; -+ if (protocol & MPI_SAS_DEVICE_INFO_SSP_TARGET) -+ karg->Information.Phy[ii].Attached.bTargetPortProtocol |= -+ CSMI_SAS_PROTOCOL_SSP; -+ if (protocol & MPI_SAS_DEVICE_INFO_STP_TARGET) -+ karg->Information.Phy[ii].Attached.bTargetPortProtocol |= -+ CSMI_SAS_PROTOCOL_STP; -+ if (protocol & MPI_SAS_DEVICE_INFO_SMP_TARGET) -+ karg->Information.Phy[ii].Attached.bTargetPortProtocol |= -+ CSMI_SAS_PROTOCOL_SMP; -+ if (protocol & MPI_SAS_DEVICE_INFO_SATA_DEVICE) -+ karg->Information.Phy[ii].Attached.bTargetPortProtocol |= -+ CSMI_SAS_PROTOCOL_SATA; -+ -+ -+ /* Fill in Attached device type */ -+ switch (le32_to_cpu(sasPhyPg0->AttachedDeviceInfo) & -+ MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { -+ -+ case MPI_SAS_DEVICE_INFO_NO_DEVICE: -+ karg->Information.Phy[ii].Attached.bDeviceType = -+ CSMI_SAS_NO_DEVICE_ATTACHED; -+ break; -+ -+ case MPI_SAS_DEVICE_INFO_END_DEVICE: -+ karg->Information.Phy[ii].Attached.bDeviceType = -+ CSMI_SAS_END_DEVICE; -+ break; -+ -+ case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER: -+ karg->Information.Phy[ii].Attached.bDeviceType = -+ CSMI_SAS_EDGE_EXPANDER_DEVICE; -+ break; -+ -+ case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER: -+ karg->Information.Phy[ii].Attached.bDeviceType = -+ CSMI_SAS_FANOUT_EXPANDER_DEVICE; -+ break; -+ } -+ -+ /* Identify Info. 
*/ -+ switch (le32_to_cpu(sasIoUnitPg0->PhyData[ii].ControllerPhyDeviceInfo) & -+ MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { -+ -+ case MPI_SAS_DEVICE_INFO_NO_DEVICE: -+ karg->Information.Phy[ii].Identify.bDeviceType = -+ CSMI_SAS_NO_DEVICE_ATTACHED; -+ break; -+ -+ case MPI_SAS_DEVICE_INFO_END_DEVICE: -+ karg->Information.Phy[ii].Identify.bDeviceType = -+ CSMI_SAS_END_DEVICE; -+ break; -+ -+ case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER: -+ karg->Information.Phy[ii].Identify.bDeviceType = -+ CSMI_SAS_EDGE_EXPANDER_DEVICE; -+ break; -+ -+ case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER: -+ karg->Information.Phy[ii].Identify.bDeviceType = -+ CSMI_SAS_FANOUT_EXPANDER_DEVICE; -+ break; -+ } -+ -+ /* Fill in Phy Initiator Port Protocol. Bits 6:3 -+ * More than one bit can be set, fall through cases. -+ */ -+ protocol = le32_to_cpu( -+ sasIoUnitPg0->PhyData[ii].ControllerPhyDeviceInfo) & 0x78; -+ karg->Information.Phy[ii].Identify.bInitiatorPortProtocol = 0; -+ if( protocol & MPI_SAS_DEVICE_INFO_SSP_INITIATOR ) -+ karg->Information.Phy[ii].Identify.bInitiatorPortProtocol |= -+ CSMI_SAS_PROTOCOL_SSP; -+ if( protocol & MPI_SAS_DEVICE_INFO_STP_INITIATOR ) -+ karg->Information.Phy[ii].Identify.bInitiatorPortProtocol |= -+ CSMI_SAS_PROTOCOL_STP; -+ if( protocol & MPI_SAS_DEVICE_INFO_SMP_INITIATOR ) -+ karg->Information.Phy[ii].Identify.bInitiatorPortProtocol |= -+ CSMI_SAS_PROTOCOL_SMP; -+ if( protocol & MPI_SAS_DEVICE_INFO_SATA_HOST ) -+ karg->Information.Phy[ii].Identify.bInitiatorPortProtocol |= -+ CSMI_SAS_PROTOCOL_SATA; -+ -+ /* Fill in Phy Target Port Protocol. Bits 10:7 -+ * More than one bit can be set, fall through cases. 
-+ */ -+ protocol = le32_to_cpu( -+ sasIoUnitPg0->PhyData[ii].ControllerPhyDeviceInfo) & 0x780; -+ karg->Information.Phy[ii].Identify.bTargetPortProtocol = 0; -+ if( protocol & MPI_SAS_DEVICE_INFO_SSP_TARGET ) -+ karg->Information.Phy[ii].Identify.bTargetPortProtocol |= -+ CSMI_SAS_PROTOCOL_SSP; -+ if( protocol & MPI_SAS_DEVICE_INFO_STP_TARGET ) -+ karg->Information.Phy[ii].Identify.bTargetPortProtocol |= -+ CSMI_SAS_PROTOCOL_STP; -+ if( protocol & MPI_SAS_DEVICE_INFO_SMP_TARGET ) -+ karg->Information.Phy[ii].Identify.bTargetPortProtocol |= -+ CSMI_SAS_PROTOCOL_SMP; -+ if( protocol & MPI_SAS_DEVICE_INFO_SATA_DEVICE ) -+ karg->Information.Phy[ii].Identify.bTargetPortProtocol |= -+ CSMI_SAS_PROTOCOL_SATA; -+ -+ /* Setup SAS Address for the attached device */ -+ if (sasPhyPg0->AttachedDevHandle) { -+ sas_address = reverse_byte_order64(sas_address); -+ memcpy(karg->Information.Phy[ii].Attached.bSASAddress, -+ &sas_address, sizeof(u64)); -+ karg->Information.Phy[ii].Attached.bPhyIdentifier = -+ sasPhyPg0->AttachedPhyIdentifier; -+ } -+ -+ /* Setup SAS Address for the parent device */ -+ csmisas_sas_device_pg0(ioc, &device_info, -+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << -+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT), -+ sasIoUnitPg0->PhyData[ii].ControllerDevHandle); -+ sas_address = reverse_byte_order64(device_info.sas_address); -+ memcpy(karg->Information.Phy[ii].Identify.bSASAddress, -+ &sas_address, sizeof(u64)); -+ karg->Information.Phy[ii].Identify.bPhyIdentifier = ii; -+ -+ pci_free_consistent(ioc->pcidev, sasPhyPg0_data_sz, -+ (u8 *) sasPhyPg0, sasPhyPg0_dma); -+ } -+ -+sas_get_phy_info_exit: -+ -+ if (sasIoUnitPg0) -+ pci_free_consistent(ioc->pcidev, sasIoUnitPg0_data_sz, -+ (u8 *) sasIoUnitPg0, sasIoUnitPg0_dma); -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, karg, -+ sizeof(CSMI_SAS_PHY_INFO_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to write out csmisas_get_phy_info_buffer @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, 
uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ free_pages((unsigned long)karg, memory_pages); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS Set PHY Info command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_set_phy_info(unsigned long arg) -+{ -+ CSMI_SAS_SET_PHY_INFO_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_SET_PHY_INFO_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ -+ if (copy_from_user(&karg, uarg, sizeof(CSMI_SAS_SET_PHY_INFO_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_set_phy_info struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+/* TODO - implement IOCTL here */ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_BAD_CNTL_CODE; -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": not implemented\n")); -+ -+// cim_set_phy_info_exit: -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_SET_PHY_INFO_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_set_phy_info @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+ -+} -+ -+/** -+ * Prototype Routine for the 
CSMI Sas Get SCSI Address command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_scsi_address(unsigned long arg) -+{ -+ CSMI_SAS_GET_SCSI_ADDRESS_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_GET_SCSI_ADDRESS_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ u64 sas_address; -+ struct sas_device_info *sas_info; -+ -+ if (copy_from_user(&karg, uarg, -+ sizeof(CSMI_SAS_GET_SCSI_ADDRESS_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_get_scsi_address struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ /* reverse byte order the sas address */ -+ memcpy(&sas_address, karg.bSASAddress, sizeof(u64)); -+ sas_address = reverse_byte_order64(sas_address); -+ -+ /* Search the list for the matching SAS address. 
*/ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_NO_SCSI_ADDRESS; -+ karg.bPathId = 0; -+ karg.bTargetId = 0; -+ karg.bLun = 0; -+ -+ sas_info = csmisas_get_device_component_by_sas_addr(ioc, sas_address); -+ if (!sas_info || sas_info->is_cached || sas_info->is_logical_volume) -+ goto csmisas_get_scsi_address_exit; -+ -+ karg.bPathId = sas_info->os.channel; -+ karg.bTargetId = sas_info->os.id; -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ csmisas_get_scsi_address_exit: -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_GET_SCSI_ADDRESS_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_get_scsi_address @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI Sas Get SCSI Address command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_sata_signature(unsigned long arg) -+{ -+ CSMI_SAS_SATA_SIGNATURE_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_SATA_SIGNATURE_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ int rc, jj; -+ ConfigExtendedPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ SasPhyPage0_t *sasPhyPg0; -+ dma_addr_t sasPhyPg0_dma; -+ int sasPhyPg0_data_sz; -+ SasDevicePage1_t *sasDevicePg1; -+ dma_addr_t sasDevicePg1_dma; -+ int sasDevicePg1_data_sz; -+ u8 phyId; -+ u64 sas_address; -+ -+ sasPhyPg0=NULL; -+ sasPhyPg0_data_sz=0; -+ sasDevicePg1=NULL; -+ sasDevicePg1_data_sz=0; -+ -+ if (copy_from_user(&karg, uarg, -+ sizeof(CSMI_SAS_SATA_SIGNATURE_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_sata_signature struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = 
mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ phyId = karg.Signature.bPhyIdentifier; -+ if (phyId >= ioc->num_ports) { -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_PHY_DOES_NOT_EXIST; -+ dcsmisasprintk(ioc, printk(KERN_WARNING ": phyId >= ioc->num_ports\n")); -+ goto cim_sata_signature_exit; -+ } -+ -+ /* Default to success.*/ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ /* Issue a config request to get the devHandle of the attached device -+ */ -+ -+ /* Issue a config request to get phy information. */ -+ hdr.PageVersion = MPI_SASPHY0_PAGEVERSION; -+ hdr.ExtPageLength = 0; -+ hdr.PageNumber = 0; -+ hdr.Reserved1 = 0; -+ hdr.Reserved2 = 0; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; -+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY; -+ -+ cfg.cfghdr.ehdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.pageAddr = phyId; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; /* read */ -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ /* Don't check if this failed. Already in a -+ * failure case. -+ */ -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: MPI_SASPHY0_PAGEVERSION: HEADER\n")); -+ dcsmisasprintk(ioc, printk(KERN_ERR ": rc=%x\n",rc)); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sata_signature_exit; -+ } -+ -+ if (hdr.ExtPageLength == 0) { -+ /* Don't check if this failed. Already in a -+ * failure case. 
-+ */ -+ dcsmisasprintk(ioc, printk(KERN_ERR ": hdr.ExtPageLength == 0\n")); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sata_signature_exit; -+ } -+ -+ -+ sasPhyPg0_data_sz = hdr.ExtPageLength * 4; -+ rc = -ENOMEM; -+ -+ sasPhyPg0 = (SasPhyPage0_t *) pci_alloc_consistent(ioc->pcidev, -+ sasPhyPg0_data_sz, &sasPhyPg0_dma); -+ -+ if (! sasPhyPg0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sata_signature_exit; -+ } -+ -+ memset((u8 *)sasPhyPg0, 0, sasPhyPg0_data_sz); -+ cfg.physAddr = sasPhyPg0_dma; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ /* Don't check if this failed. Already in a -+ * failure case. -+ */ -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: MPI_SASPHY0_PAGEVERSION: PAGE\n")); -+ dcsmisasprintk(ioc, printk(KERN_ERR ": rc=%x\n",rc)); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sata_signature_exit; -+ } -+ -+ /* Make sure a SATA device is attached. */ -+ if ((le32_to_cpu(sasPhyPg0->AttachedDeviceInfo) & -+ MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0) { -+ dcsmisasprintk(ioc, printk(KERN_WARNING ": NOT A SATA DEVICE\n")); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_NO_SATA_DEVICE; -+ goto cim_sata_signature_exit; -+ } -+ -+ /* Get device page 1 for FIS signature. 
*/ -+ hdr.PageVersion = MPI_SASDEVICE1_PAGEVERSION; -+ hdr.ExtPageLength = 0; -+ hdr.PageNumber = 1 /* page number 1 */; -+ hdr.Reserved1 = 0; -+ hdr.Reserved2 = 0; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; -+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE; -+ -+ cfg.cfghdr.ehdr = &hdr; -+ cfg.physAddr = -1; -+ -+ cfg.pageAddr = ((MPI_SAS_DEVICE_PGAD_FORM_HANDLE << -+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT) | -+ le16_to_cpu(sasPhyPg0->AttachedDevHandle)); -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; /* read */ -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: MPI_SASDEVICE1_PAGEVERSION: HEADER\n")); -+ dcsmisasprintk(ioc, printk(KERN_ERR ": rc=%x\n",rc)); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sata_signature_exit; -+ } -+ -+ if (hdr.ExtPageLength == 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": hdr.ExtPageLength == 0\n")); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sata_signature_exit; -+ } -+ -+ sasDevicePg1_data_sz = hdr.ExtPageLength * 4; -+ rc = -ENOMEM; -+ -+ sasDevicePg1 = (SasDevicePage1_t *) pci_alloc_consistent -+ (ioc->pcidev, sasDevicePg1_data_sz, &sasDevicePg1_dma); -+ -+ if (! 
sasDevicePg1) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sata_signature_exit; -+ } -+ -+ memset((u8 *)sasDevicePg1, 0, sasDevicePg1_data_sz); -+ cfg.physAddr = sasDevicePg1_dma; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: MPI_SASDEVICE1_PAGEVERSION: PAGE\n")); -+ dcsmisasprintk(ioc, printk(KERN_ERR ": rc=%x\n",rc)); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sata_signature_exit; -+ } -+ -+/* EDM : dump Device Page 1 data*/ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "---- SAS DEVICE PAGE 1 ---------\n")); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Handle=0x%x\n",sasDevicePg1->DevHandle)); -+ memcpy(&sas_address, &sasDevicePg1->SASAddress, sizeof(u64)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "SAS Address=0x%llX\n", -+ (unsigned long long)sas_address)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\n")); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Target ID=0x%x\n",sasDevicePg1->TargetID)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Bus=0x%x\n",sasDevicePg1->Bus)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Initial Reg Device FIS=")); -+ for(jj=0;jj<20;jj++) -+ dcsmisasprintk(ioc, printk("%02x ", -+ ((u8 *)&sasDevicePg1->InitialRegDeviceFIS)[jj])); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\n\n")); -+/* EDM : debug data */ -+ -+ memcpy(karg.Signature.bSignatureFIS, -+ sasDevicePg1->InitialRegDeviceFIS,20); -+ -+ cim_sata_signature_exit: -+ -+ if (sasPhyPg0) -+ pci_free_consistent(ioc->pcidev, sasPhyPg0_data_sz, -+ (u8 *) sasPhyPg0, sasPhyPg0_dma); -+ -+ if (sasDevicePg1) -+ pci_free_consistent(ioc->pcidev, sasDevicePg1_data_sz, -+ (u8 *) sasDevicePg1, sasDevicePg1_dma); -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_SATA_SIGNATURE_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() 
- " -+ "Unable to write out csmi_sas_sata_signature @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI Sas Get SCSI Address command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_device_address(unsigned long arg) -+{ -+ CSMI_SAS_GET_DEVICE_ADDRESS_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_GET_DEVICE_ADDRESS_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ struct sas_device_info *sas_info; -+ u64 sas_address; -+ -+ if (copy_from_user(&karg, uarg, -+ sizeof(CSMI_SAS_GET_DEVICE_ADDRESS_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_get_device_address_buffer struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_NO_DEVICE_ADDRESS; -+ memset(karg.bSASAddress, 0, sizeof(u64)); -+ memset(karg.bSASLun, 0, sizeof(karg.bSASLun)); -+ -+ /* Search the list for the matching SAS address. 
*/ -+ sas_info = csmisas_get_device_component_by_os(ioc, karg.bPathId, -+ karg.bTargetId); -+ if (!sas_info || sas_info->is_cached || sas_info->is_logical_volume) -+ goto csmisas_get_device_address_exit; -+ -+ sas_address = reverse_byte_order64(sas_info->sas_address); -+ memcpy(karg.bSASAddress, &sas_address, sizeof(u64)); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ csmisas_get_device_address_exit: -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_GET_DEVICE_ADDRESS_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_get_device_address_buffer @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI Sas Get Link Errors command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_link_errors(unsigned long arg) -+{ -+ CSMI_SAS_LINK_ERRORS_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_LINK_ERRORS_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ MPT_FRAME_HDR *mf = NULL; -+ MPIHeader_t *mpi_hdr; -+ int iocnum; -+ int rc; -+ ConfigExtendedPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ SasPhyPage1_t *sasPhyPage1; -+ dma_addr_t sasPhyPage1_dma; -+ int sasPhyPage1_data_sz; -+ SasIoUnitControlRequest_t *sasIoUnitCntrReq; -+ SasIoUnitControlReply_t *sasIoUnitCntrReply; -+ u8 phyId; -+ u16 ioc_status; -+ u32 MsgContext; -+ -+ sasPhyPage1=NULL; -+ sasPhyPage1_data_sz=0; -+ -+ if (copy_from_user(&karg, uarg, -+ sizeof(CSMI_SAS_LINK_ERRORS_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmisas_get_link_errors struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc 
== NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ phyId = karg.Information.bPhyIdentifier; -+ if (phyId >= ioc->num_ports) { -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_PHY_DOES_NOT_EXIST; -+ dcsmisasprintk(ioc, printk(KERN_WARNING ": phyId >= ioc->num_ports\n")); -+ goto cim_get_link_errors_exit; -+ } -+ -+ /* Default to success.*/ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ /* Issue a config request to get the devHandle of the attached device -+ */ -+ -+ /* Issue a config request to get phy information. */ -+ hdr.PageVersion = MPI_SASPHY1_PAGEVERSION; -+ hdr.ExtPageLength = 0; -+ hdr.PageNumber = 1 /* page number 1*/; -+ hdr.Reserved1 = 0; -+ hdr.Reserved2 = 0; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; -+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY; -+ -+ cfg.cfghdr.ehdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.pageAddr = phyId; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; /* read */ -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ /* Don't check if this failed. Already in a -+ * failure case. -+ */ -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: MPI_SASPHY1_PAGEVERSION: HEADER\n")); -+ dcsmisasprintk(ioc, printk(KERN_ERR ": rc=%x\n",rc)); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_link_errors_exit; -+ } -+ -+ if (hdr.ExtPageLength == 0) { -+ /* Don't check if this failed. Already in a -+ * failure case. 
-+ */ -+ dcsmisasprintk(ioc, printk(KERN_ERR ": hdr.ExtPageLength == 0\n")); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_link_errors_exit; -+ } -+ -+ -+ sasPhyPage1_data_sz = hdr.ExtPageLength * 4; -+ rc = -ENOMEM; -+ -+ sasPhyPage1 = (SasPhyPage1_t *) pci_alloc_consistent(ioc->pcidev, -+ sasPhyPage1_data_sz, &sasPhyPage1_dma); -+ -+ if (! sasPhyPage1) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_link_errors_exit; -+ } -+ -+ memset((u8 *)sasPhyPage1, 0, sasPhyPage1_data_sz); -+ cfg.physAddr = sasPhyPage1_dma; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ /* Don't check if this failed. Already in a -+ * failure case. -+ */ -+ dcsmisasprintk(ioc, printk(KERN_ERR ": FAILED: MPI_SASPHY1_PAGEVERSION: PAGE\n")); -+ dcsmisasprintk(ioc, printk(KERN_ERR ": rc=%x\n",rc)); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_link_errors_exit; -+ } -+ -+/* EDM : dump PHY Page 1 data*/ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "---- SAS PHY PAGE 1 ------------\n")); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Invalid Dword Count=0x%x\n", -+ sasPhyPage1->InvalidDwordCount)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Running Disparity Error Count=0x%x\n", -+ sasPhyPage1->RunningDisparityErrorCount)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "Loss Dword Synch Count=0x%x\n", -+ sasPhyPage1->LossDwordSynchCount)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "PHY Reset Problem Count=0x%x\n", -+ sasPhyPage1->PhyResetProblemCount)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\n\n")); -+/* EDM : debug data */ -+ -+ karg.Information.uInvalidDwordCount = -+ le32_to_cpu(sasPhyPage1->InvalidDwordCount); -+ karg.Information.uRunningDisparityErrorCount = -+ le32_to_cpu(sasPhyPage1->RunningDisparityErrorCount); -+ karg.Information.uLossOfDwordSyncCount = -+ 
le32_to_cpu(sasPhyPage1->LossDwordSynchCount); -+ karg.Information.uPhyResetProblemCount = -+ le32_to_cpu(sasPhyPage1->PhyResetProblemCount); -+ -+ if (karg.Information.bResetCounts == -+ CSMI_SAS_LINK_ERROR_DONT_RESET_COUNTS ) { -+ goto cim_get_link_errors_exit; -+ } -+ -+ /* Clear Error log -+ * -+ * Issue IOUNIT Control Reqeust Message -+ */ -+ -+ /* Get a MF for this command. -+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": no msg frames!\n")); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_link_errors_exit; -+ } -+ -+ mpi_hdr = (MPIHeader_t *) mf; -+ MsgContext = mpi_hdr->MsgContext; -+ sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf; -+ memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t)); -+ sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL; -+ sasIoUnitCntrReq->MsgContext = MsgContext; -+ sasIoUnitCntrReq->PhyNum = phyId; -+ sasIoUnitCntrReq->Operation = MPI_SAS_OP_PHY_CLEAR_ERROR_LOG; -+ -+ if (csmisas_send_command_wait(ioc, mf, karg.IoctlHeader.Timeout) != 0) { -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_link_errors_exit; -+ } -+ -+ /* process the completed Reply Message Frame */ -+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) { -+ -+ sasIoUnitCntrReply = -+ (SasIoUnitControlReply_t *)ioc->ioctl_cmds.reply; -+ ioc_status = le16_to_cpu(sasIoUnitCntrReply->IOCStatus) -+ & MPI_IOCSTATUS_MASK; -+ -+ if (ioc_status != MPI_IOCSTATUS_SUCCESS) { -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": SAS IO Unit Control: ")); -+ dcsmisasprintk(ioc, printk("IOCStatus=0x%X IOCLogInfo=0x%X\n", -+ sasIoUnitCntrReply->IOCStatus, -+ sasIoUnitCntrReply->IOCLogInfo)); -+ } -+ } -+ -+ cim_get_link_errors_exit: -+ -+ if (sasPhyPage1) -+ pci_free_consistent(ioc->pcidev, sasPhyPage1_data_sz, -+ (u8 *) sasPhyPage1, sasPhyPage1_dma); -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ 
sizeof(CSMI_SAS_LINK_ERRORS_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmisas_get_link_errors @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+ -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS SMP Passthru command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_smp_passthru(unsigned long arg) -+{ -+ CSMI_SAS_SMP_PASSTHRU_BUFFER __user *uarg = (void __user *) arg; -+ MPT_ADAPTER *ioc; -+ CSMI_SAS_SMP_PASSTHRU_BUFFER *karg; -+ pSmpPassthroughRequest_t smpReq; -+ pSmpPassthroughReply_t smpReply; -+ MPT_FRAME_HDR *mf = NULL; -+ MPIHeader_t *mpi_hdr; -+ char *psge; -+ int iocnum, flagsLength; -+ void * request_data; -+ dma_addr_t request_data_dma; -+ u32 request_data_sz; -+ void * response_data; -+ dma_addr_t response_data_dma; -+ u32 response_data_sz; -+ u16 ioc_status; -+ u64 sas_address; -+ u32 MsgContext; -+ int malloc_data_sz; -+ int memory_pages; -+ -+ malloc_data_sz = sizeof(CSMI_SAS_SMP_PASSTHRU_BUFFER); -+ memory_pages = get_order(malloc_data_sz); -+ karg = (CSMI_SAS_SMP_PASSTHRU_BUFFER *)__get_free_pages( -+ GFP_KERNEL, memory_pages); -+ if (!karg){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc CSMI_SAS_SMP_PASSTHRU_BUFFER " -+ "malloc_data_sz=%d memory_pages=%d\n", -+ __FILE__, __LINE__, __FUNCTION__, -+ malloc_data_sz, memory_pages); -+ return -ENOMEM; -+ } -+ -+ if (copy_from_user(karg, uarg, sizeof(CSMI_SAS_SMP_PASSTHRU_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_smp_passthru struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ request_data = NULL; -+ response_data = NULL; -+ response_data_sz = sizeof(CSMI_SAS_SMP_RESPONSE); -+ request_data_sz = 
karg->Parameters.uRequestLength; -+ -+ if (((iocnum = mpt_verify_adapter(karg->IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)karg, memory_pages); -+ return -ENODEV; -+ } -+ -+ if (ioc->ioc_reset_in_progress) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Busy with IOC Reset \n", -+ __FILE__, __LINE__,__FUNCTION__); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EBUSY; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)karg, memory_pages); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ /* Default to success.*/ -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ /* Do some error checking on the request. */ -+ if (karg->Parameters.bPortIdentifier == CSMI_SAS_IGNORE_PORT) { -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_SELECT_PHY_OR_PORT; -+ goto cim_smp_passthru_exit; -+ } -+ -+ if ((request_data_sz > 0xFFFF) || (!request_data_sz)) { -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_smp_passthru_exit; -+ } -+ -+ /* Get a free request frame and save the message context. -+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": no msg frames!\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_smp_passthru_exit; -+ } -+ -+ mpi_hdr = (MPIHeader_t *) mf; -+ MsgContext = mpi_hdr->MsgContext; -+ smpReq = (pSmpPassthroughRequest_t ) mf; -+ -+ memset(smpReq,0,ioc->req_sz); -+ -+ memcpy(&sas_address, karg->Parameters.bDestinationSASAddress, -+ sizeof(u64)); -+ sas_address = cpu_to_le64(reverse_byte_order64(sas_address)); -+ memcpy(&smpReq->SASAddress, &sas_address, sizeof(u64)); -+ -+ /* Fill in smp request. 
*/ -+ smpReq->PhysicalPort = karg->Parameters.bPortIdentifier; -+ smpReq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; -+ smpReq->RequestDataLength = cpu_to_le16(request_data_sz); -+ smpReq->ConnectionRate = karg->Parameters.bConnectionRate; -+ smpReq->MsgContext = MsgContext; -+ smpReq->Reserved2 = 0; -+ smpReq->Reserved3 = 0; -+ -+ /* -+ * Prepare the necessary pointers to run -+ * through the SGL generation -+ */ -+ -+ psge = (char *)&smpReq->SGL; -+ -+ /* setup the *Request* payload SGE */ -+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT | -+ MPI_SGE_FLAGS_SYSTEM_ADDRESS | -+ MPI_SGE_FLAGS_HOST_TO_IOC | -+ MPI_SGE_FLAGS_END_OF_BUFFER; -+ -+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; -+ flagsLength |= request_data_sz; -+ -+ request_data = pci_alloc_consistent( -+ ioc->pcidev, request_data_sz, &request_data_dma); -+ -+ if (!request_data) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ mpt_free_msg_frame(ioc, mf); -+ goto cim_smp_passthru_exit; -+ } -+ -+ ioc->add_sge(psge, flagsLength, request_data_dma); -+ psge += ioc->SGE_size; -+ -+ memcpy(request_data, &karg->Parameters.Request, request_data_sz); -+ -+ /* setup the *Response* payload SGE */ -+ response_data = pci_alloc_consistent( -+ ioc->pcidev, response_data_sz, &response_data_dma); -+ -+ if (!response_data) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ mpt_free_msg_frame(ioc, mf); -+ goto cim_smp_passthru_exit; -+ } -+ -+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT | -+ MPI_SGE_FLAGS_SYSTEM_ADDRESS | -+ MPI_SGE_FLAGS_IOC_TO_HOST | -+ MPI_SGE_FLAGS_END_OF_BUFFER; -+ -+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; -+ flagsLength |= response_data_sz; -+ -+ ioc->add_sge(psge, flagsLength, response_data_dma); -+ -+ if (csmisas_send_command_wait(ioc, mf, karg->IoctlHeader.Timeout) != 0) { -+ karg->IoctlHeader.ReturnCode = 
CSMI_SAS_STATUS_FAILED; -+ goto cim_smp_passthru_exit; -+ } -+ -+ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) == 0) { -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": SMP Passthru: oh no, there is no reply!!")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_smp_passthru_exit; -+ } -+ -+ /* process the completed Reply Message Frame */ -+ smpReply = (pSmpPassthroughReply_t )ioc->ioctl_cmds.reply; -+ ioc_status = le16_to_cpu(smpReply->IOCStatus) & MPI_IOCSTATUS_MASK; -+ -+ if ((ioc_status != MPI_IOCSTATUS_SUCCESS) && -+ (ioc_status != MPI_IOCSTATUS_SCSI_DATA_UNDERRUN)) { -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": SMP Passthru: ")); -+ dcsmisasprintk(ioc, printk("IOCStatus=0x%X IOCLogInfo=0x%X SASStatus=0x%X\n", -+ le16_to_cpu(smpReply->IOCStatus), -+ le32_to_cpu(smpReply->IOCLogInfo), -+ smpReply->SASStatus)); -+ goto cim_smp_passthru_exit; -+ } -+ -+ karg->Parameters.bConnectionStatus = -+ map_sas_status_to_csmi(smpReply->SASStatus); -+ -+ -+ if (le16_to_cpu(smpReply->ResponseDataLength)) { -+ karg->Parameters.uResponseBytes = le16_to_cpu(smpReply->ResponseDataLength); -+ memcpy(&karg->Parameters.Response, -+ response_data, le16_to_cpu(smpReply->ResponseDataLength)); -+ } -+ -+ cim_smp_passthru_exit: -+ -+ if (request_data) -+ pci_free_consistent(ioc->pcidev, request_data_sz, -+ (u8 *)request_data, request_data_dma); -+ -+ if (response_data) -+ pci_free_consistent(ioc->pcidev, response_data_sz, -+ (u8 *)response_data, response_data_dma); -+ -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, karg, -+ sizeof(CSMI_SAS_SMP_PASSTHRU_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_smp_passthru @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ free_pages((unsigned long)karg, memory_pages); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": 
%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS SSP Passthru command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int csmisas_ssp_passthru(unsigned long arg) -+{ -+ CSMI_SAS_SSP_PASSTHRU_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_SSP_PASSTHRU_BUFFER karg_hdr, * karg; -+ MPT_ADAPTER *ioc = NULL; -+ pSCSIIORequest_t pScsiRequest; -+ pSCSIIOReply_t pScsiReply; -+ MPT_FRAME_HDR *mf = NULL; -+ MPIHeader_t *mpi_hdr; -+ int iocnum,ii; -+ u64 sas_address; -+ u16 req_idx; -+ char *psge; -+ int flagsLength; -+ void * request_data; -+ dma_addr_t request_data_dma; -+ u32 request_data_sz; -+ int malloc_data_sz; -+ int memory_pages; -+ u16 ioc_status; -+ u8 volume_id; -+ u8 volume_bus; -+ u8 is_hidden_raid_component; -+ u8 channel; -+ u8 id; -+ struct sas_device_info *sas_info; -+ u8 skey, asc, ascq; -+ u32 MsgContext; -+ -+ if (copy_from_user(&karg_hdr, uarg, sizeof(CSMI_SAS_SSP_PASSTHRU_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_ssp_passthru struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ request_data = NULL; -+ request_data_sz = karg_hdr.Parameters.uDataLength; -+ channel = 0; -+ id = 0; -+ volume_id = 0; -+ volume_bus = 0; -+ is_hidden_raid_component = 0; -+ -+ malloc_data_sz = (request_data_sz + -+ offsetof(CSMI_SAS_SSP_PASSTHRU_BUFFER, bDataBuffer)); -+ memory_pages = get_order(malloc_data_sz); -+ karg = (CSMI_SAS_SSP_PASSTHRU_BUFFER *)__get_free_pages( -+ GFP_KERNEL, memory_pages); -+ if (!karg){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc SAS_SSP_PASSTHRU_BUFFER " -+ "malloc_data_sz=%d memory_pages=%d\n", -+ __FILE__, __LINE__, __FUNCTION__, -+ malloc_data_sz, memory_pages); -+ return -ENOMEM; -+ } -+ -+ memset(karg, 0, sizeof(*karg)); -+ -+ if (copy_from_user(karg, uarg, request_data_sz + -+ 
offsetof(CSMI_SAS_SSP_PASSTHRU_BUFFER,bDataBuffer))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_ssp_passthru struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ /* -+ * some checks of the incoming frame -+ */ -+ if ( offsetof(CSMI_SAS_SSP_PASSTHRU_BUFFER,bDataBuffer) + -+ request_data_sz - sizeof(IOCTL_HEADER) > -+ karg->IoctlHeader.Length ) { -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_INVALID_PARAMETER; -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ "%s::%s()" -+ " @%d - expected datalen incorrect!\n", -+ __FILE__, __FUNCTION__, __LINE__)); -+ goto cim_ssp_passthru_exit; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg->IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_INVALID_PARAMETER; -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ goto cim_ssp_passthru_exit; -+ } -+ -+ if (ioc->ioc_reset_in_progress) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Busy with IOC Reset \n", -+ __FILE__, __LINE__,__FUNCTION__); -+ return -EBUSY; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_INVALID_PARAMETER; -+ printk(KERN_ERR "%s::%s()@%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ goto cim_ssp_passthru_exit; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ /* Default to success. -+ */ -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ /* Neither a phy nor a port has been selected. 
-+ */ -+ if ((karg->Parameters.bPhyIdentifier == CSMI_SAS_USE_PORT_IDENTIFIER) && -+ (karg->Parameters.bPortIdentifier == CSMI_SAS_IGNORE_PORT)) { -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_SELECT_PHY_OR_PORT; -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ "%s::%s()" -+ " @%d - incorrect bPhyIdentifier and bPortIdentifier!\n", -+ __FILE__, __FUNCTION__, __LINE__)); -+ goto cim_ssp_passthru_exit; -+ } -+ -+ /* A phy has been selected. Verify that it's valid. -+ */ -+ if (karg->Parameters.bPortIdentifier == CSMI_SAS_IGNORE_PORT) { -+ -+ /* Is the phy in range? */ -+ if (karg->Parameters.bPhyIdentifier >= ioc->num_ports) { -+ dcsmisasprintk(ioc, printk(KERN_WARNING ": phyId >= ioc->num_ports (%d %d)\n", -+ karg->Parameters.bPhyIdentifier, -+ ioc->num_ports)); -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_PHY_DOES_NOT_EXIST; -+ goto cim_ssp_passthru_exit; -+ } -+ } -+ -+ if(karg->Parameters.bAdditionalCDBLength) { -+ /* TODO - SCSI IO (32) Request Message support -+ */ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": greater than 16-byte cdb " -+ "is not supported!\n")); -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ goto cim_ssp_passthru_exit; -+ } -+ -+ /* we will use SAS address to resolve the scsi adddressing -+ */ -+ memcpy(&sas_address, karg->Parameters.bDestinationSASAddress, -+ sizeof(u64)); -+ sas_address = reverse_byte_order64(sas_address); -+ -+ /* Search the list for the matching SAS address. 
-+ */ -+ sas_info = csmisas_get_device_component_by_sas_addr(ioc, sas_address); -+ if (!sas_info || sas_info->is_cached) { -+ /* -+ *Invalid SAS address -+ */ -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ "%s::%s() @%d - couldn't find associated " -+ "SASAddress=%llX!\n", __FILE__, __FUNCTION__, __LINE__, -+ (unsigned long long)sas_address)); -+ goto cim_ssp_passthru_exit; -+ } -+ -+ id = sas_info->fw.id; -+ channel = sas_info->fw.channel; -+ -+ if (csmisas_is_phys_disk(ioc, channel, id)) { -+ id = csmisas_raid_id_to_num(ioc, channel, id); -+ channel = 0; -+ is_hidden_raid_component = 1; -+ } -+ -+ /* Get a free request frame and save the message context. -+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": no msg frames!\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_ssp_passthru_exit; -+ } -+ -+ mpi_hdr = (MPIHeader_t *) mf; -+ MsgContext = mpi_hdr->MsgContext; -+ pScsiRequest = (pSCSIIORequest_t) mf; -+ req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); -+ -+ memset(pScsiRequest,0,sizeof(SCSIIORequest_t)); -+ -+ /* Fill in SCSI IO (16) request. -+ */ -+ -+ pScsiRequest->Function = (is_hidden_raid_component == 1) ? 
-+ MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH : MPI_FUNCTION_SCSI_IO_REQUEST; -+ pScsiRequest->TargetID = id; -+ pScsiRequest->Bus = channel; -+ memcpy(pScsiRequest->LUN, &karg->Parameters.bLun, 8); -+ pScsiRequest->CDBLength = karg->Parameters.bCDBLength; -+ pScsiRequest->DataLength = cpu_to_le32(request_data_sz); -+ pScsiRequest->MsgContext = MsgContext; -+ memcpy(pScsiRequest->CDB, karg->Parameters.bCDB, -+ pScsiRequest->CDBLength); -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\tchannel = %d id = %d ", -+ sas_info->fw.channel, sas_info->fw.id)); -+ dcsmisasprintk(ioc, if(is_hidden_raid_component) -+ printk(KERN_DEBUG "num_id = %d ", id)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\n")); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\tcdb_len = %d request_len = %d\n", -+ pScsiRequest->CDBLength, request_data_sz)); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\t")); -+ dcsmisasprintk(ioc, for (ii = 0; ii < pScsiRequest->CDBLength; ++ii) -+ printk(" %02x", pScsiRequest->CDB[ii])); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\n")); -+ -+ /* direction -+ */ -+ if (karg->Parameters.uFlags & CSMI_SAS_SSP_READ) { -+ pScsiRequest->Control = cpu_to_le32(MPI_SCSIIO_CONTROL_READ); -+ } else if (karg->Parameters.uFlags & CSMI_SAS_SSP_WRITE) { -+ pScsiRequest->Control = cpu_to_le32(MPI_SCSIIO_CONTROL_WRITE); -+ } else if ((karg->Parameters.uFlags & CSMI_SAS_SSP_UNSPECIFIED) && -+ (!karg->Parameters.uDataLength)) { -+ /* no data transfer -+ */ -+ pScsiRequest->Control = cpu_to_le32(MPI_SCSIIO_CONTROL_NODATATRANSFER); -+ } else { -+ /* no direction specified -+ */ -+ pScsiRequest->Control = cpu_to_le32(MPI_SCSIIO_CONTROL_READ); -+ pScsiRequest->MsgFlags = -+ MPI_SCSIIO_MSGFLGS_CMD_DETERMINES_DATA_DIR; -+ } -+ -+ pScsiRequest->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; -+ if (ioc->sg_addr_size == sizeof(u64)) -+ pScsiRequest->MsgFlags |= MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64; -+ -+ /* task attributes -+ */ -+ if((karg->Parameters.uFlags && 0xFF) == 0) { -+ pScsiRequest->Control |= 
cpu_to_le32(MPI_SCSIIO_CONTROL_SIMPLEQ); -+ } else if (karg->Parameters.uFlags & -+ CSMI_SAS_SSP_TASK_ATTRIBUTE_HEAD_OF_QUEUE) { -+ pScsiRequest->Control |= cpu_to_le32(MPI_SCSIIO_CONTROL_HEADOFQ); -+ } else if (karg->Parameters.uFlags & -+ CSMI_SAS_SSP_TASK_ATTRIBUTE_ORDERED) { -+ pScsiRequest->Control |= cpu_to_le32(MPI_SCSIIO_CONTROL_ORDEREDQ); -+ } else if (karg->Parameters.uFlags & -+ CSMI_SAS_SSP_TASK_ATTRIBUTE_ACA) { -+ pScsiRequest->Control |= cpu_to_le32(MPI_SCSIIO_CONTROL_ACAQ); -+ } else { -+ pScsiRequest->Control |= cpu_to_le32(MPI_SCSIIO_CONTROL_UNTAGGED); -+ } -+ -+ /* setup sense -+ */ -+ pScsiRequest->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; -+ pScsiRequest->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma + -+ (req_idx * MPT_SENSE_BUFFER_ALLOC)); -+ -+ /* setup databuffer sg, assuming we fit everything one contiguous buffer -+ */ -+ psge = (char *)&pScsiRequest->SGL; -+ -+ if (karg->Parameters.uFlags & CSMI_SAS_SSP_WRITE) { -+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; -+ } else if (karg->Parameters.uFlags & CSMI_SAS_SSP_READ) { -+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; -+ }else { -+ flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | -+ MPI_SGE_FLAGS_DIRECTION ) -+ << MPI_SGE_FLAGS_SHIFT; -+ } -+ flagsLength |= request_data_sz; -+ -+ if ( request_data_sz > 0) { -+ request_data = pci_alloc_consistent( -+ ioc->pcidev, request_data_sz, &request_data_dma); -+ -+ if (request_data == NULL) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED " -+ "request_data_sz=%d\n", request_data_sz)); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ mpt_free_msg_frame(ioc, mf); -+ goto cim_ssp_passthru_exit; -+ } -+ -+ ioc->add_sge(psge, flagsLength, request_data_dma); -+ if (karg->Parameters.uFlags & CSMI_SAS_SSP_WRITE) -+ memcpy(request_data, karg->bDataBuffer, request_data_sz); -+ } else { -+ ioc->add_sge(psge, flagsLength, (dma_addr_t) -1); -+ } -+ -+ if (csmisas_send_command_wait(ioc, mf, karg->IoctlHeader.Timeout) != 0) { 
-+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_ssp_passthru_exit; -+ } -+ -+ memset(&karg->Status,0,sizeof(CSMI_SAS_SSP_PASSTHRU_STATUS)); -+ karg->Status.bConnectionStatus = CSMI_SAS_OPEN_ACCEPT; -+ karg->Status.bDataPresent = CSMI_SAS_SSP_NO_DATA_PRESENT; -+ karg->Status.bStatus = GOOD; -+ karg->Status.bResponseLength[0] = 0; -+ karg->Status.bResponseLength[1] = 0; -+ karg->Status.uDataBytes = request_data_sz; -+ -+ /* process the completed Reply Message Frame */ -+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) { -+ -+ pScsiReply = (pSCSIIOReply_t ) ioc->ioctl_cmds.reply; -+ karg->Status.bStatus = pScsiReply->SCSIStatus; -+ karg->Status.uDataBytes = min(le32_to_cpu(pScsiReply->TransferCount), -+ request_data_sz); -+ ioc_status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK; -+ -+ if (pScsiReply->SCSIState == -+ MPI_SCSI_STATE_AUTOSENSE_VALID) { -+ karg->Status.bConnectionStatus = -+ CSMI_SAS_SSP_SENSE_DATA_PRESENT; -+ karg->Status.bResponseLength[0] = -+ (u8)le32_to_cpu(pScsiReply->SenseCount) & 0xFF; -+ memcpy(karg->Status.bResponse, -+ ioc->ioctl_cmds.sense, le32_to_cpu(pScsiReply->SenseCount)); -+ -+ skey = ioc->ioctl_cmds.sense[2] & 0x0F; -+ asc = ioc->ioctl_cmds.sense[12]; -+ ascq = ioc->ioctl_cmds.sense[13]; -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "\t [sense_key,asc,ascq]: " -+ "[0x%02x,0x%02x,0x%02x]\n", -+ skey, asc, ascq)); -+ -+ } else if(pScsiReply->SCSIState == -+ MPI_SCSI_STATE_RESPONSE_INFO_VALID) { -+ karg->Status.bDataPresent = -+ CSMI_SAS_SSP_RESPONSE_DATA_PRESENT; -+ karg->Status.bResponseLength[0] = -+ sizeof(pScsiReply->ResponseInfo); -+ for (ii=0;iiResponseInfo);ii++) { -+ karg->Status.bResponse[ii] = -+ ((u8*)&pScsiReply->ResponseInfo)[ -+ (sizeof(pScsiReply->ResponseInfo)-1)-ii]; -+ } -+ } else if ((ioc_status != MPI_IOCSTATUS_SUCCESS) && -+ (ioc_status != MPI_IOCSTATUS_SCSI_RECOVERED_ERROR) && -+ (ioc_status != MPI_IOCSTATUS_SCSI_DATA_UNDERRUN)) { -+ karg->IoctlHeader.ReturnCode = 
CSMI_SAS_STATUS_FAILED; -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": SCSI IO : ")); -+ dcsmisasprintk(ioc, printk("IOCStatus=0x%X IOCLogInfo=0x%X\n", -+ pScsiReply->IOCStatus, -+ pScsiReply->IOCLogInfo)); -+ } -+ } -+ -+ if ((karg->Status.uDataBytes) && (request_data) && -+ (karg->Parameters.uFlags & CSMI_SAS_SSP_READ)) { -+ if (copy_to_user((void __user *)uarg->bDataBuffer, -+ request_data, karg->Status.uDataBytes)) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to write data to user %p\n", -+ __FILE__, __LINE__,__FUNCTION__, -+ (void*)karg->bDataBuffer); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ } -+ } -+ -+ cim_ssp_passthru_exit: -+ -+ -+ if (request_data) -+ pci_free_consistent(ioc->pcidev, request_data_sz, -+ (u8 *)request_data, request_data_dma); -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, karg, -+ offsetof(CSMI_SAS_SSP_PASSTHRU_BUFFER, bDataBuffer))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_ssp_passthru @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ free_pages((unsigned long)karg, memory_pages); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS STP Passthru command. -+ * -+ * Outputs: None. 
-+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_stp_passthru(unsigned long arg) -+{ -+ CSMI_SAS_STP_PASSTHRU_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_STP_PASSTHRU_BUFFER karg_hdr, *karg; -+ MPT_ADAPTER *ioc = NULL; -+ pSataPassthroughRequest_t pSataRequest; -+ pSataPassthroughReply_t pSataReply; -+ MPT_FRAME_HDR *mf = NULL; -+ MPIHeader_t *mpi_hdr; -+ int iocnum; -+ u32 data_sz; -+ u64 sas_address; -+ u16 req_idx; -+ char *psge; -+ int flagsLength; -+ void * request_data; -+ dma_addr_t request_data_dma; -+ u32 request_data_sz; -+ int malloc_data_sz; -+ int memory_pages; -+ u8 channel; -+ u8 id; -+ u8 volume_id; -+ u8 volume_bus; -+ struct sas_device_info *sas_info; -+ u16 ioc_status; -+ u32 MsgContext; -+ -+ if (copy_from_user(&karg_hdr, uarg, sizeof(CSMI_SAS_STP_PASSTHRU_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ request_data=NULL; -+ request_data_sz = karg_hdr.Parameters.uDataLength; -+ volume_id = 0; -+ volume_bus = 0; -+ channel = 0; -+ id = 0; -+ -+ malloc_data_sz = (request_data_sz + -+ offsetof(CSMI_SAS_STP_PASSTHRU_BUFFER, bDataBuffer)); -+ memory_pages = get_order(malloc_data_sz); -+ karg = (CSMI_SAS_STP_PASSTHRU_BUFFER *)__get_free_pages( -+ GFP_KERNEL, memory_pages); -+ if (!karg){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc CSMI_SAS_STP_PASSTHRU_BUFFER " -+ "malloc_data_sz=%d memory_pages=%d\n", -+ __FILE__, __LINE__, __FUNCTION__, -+ malloc_data_sz, memory_pages); -+ return -ENOMEM; -+ } -+ -+ memset(karg, 0, sizeof(*karg)); -+ -+ if (copy_from_user(karg, uarg, malloc_data_sz)) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_ssp_passthru struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = 
mpt_verify_adapter(karg->IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)karg, memory_pages); -+ return -ENODEV; -+ } -+ -+ if (ioc->ioc_reset_in_progress) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Busy with IOC Reset \n", -+ __FILE__, __LINE__,__FUNCTION__); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EBUSY; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)karg, memory_pages); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ /* Default to success. -+ */ -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ /* Neither a phy nor a port has been selected. -+ */ -+ if ((karg->Parameters.bPhyIdentifier == CSMI_SAS_USE_PORT_IDENTIFIER) && -+ (karg->Parameters.bPortIdentifier == CSMI_SAS_IGNORE_PORT)) { -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_SELECT_PHY_OR_PORT; -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ "%s::%s() @%d - incorrect bPhyIdentifier and bPortIdentifier!\n", -+ __FILE__,__FUNCTION__, __LINE__)); -+ goto cim_stp_passthru_exit; -+ } -+ -+ /* A phy has been selected. Verify that it's valid. -+ */ -+ if (karg->Parameters.bPortIdentifier == CSMI_SAS_IGNORE_PORT) { -+ -+ /* Is the phy in range? 
*/ -+ if (karg->Parameters.bPhyIdentifier >= ioc->num_ports) { -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_PHY_DOES_NOT_EXIST; -+ goto cim_stp_passthru_exit; -+ } -+ } -+ -+ data_sz = sizeof(CSMI_SAS_STP_PASSTHRU_BUFFER) - -+ sizeof(IOCTL_HEADER) - sizeof(u8*) + -+ request_data_sz; -+ -+ if ( data_sz > karg->IoctlHeader.Length ) { -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ "%s::%s() @%d - expected datalen incorrect!\n", -+ __FILE__, __FUNCTION__,__LINE__)); -+ goto cim_stp_passthru_exit; -+ } -+ -+ -+ /* we will use SAS address to resolve the scsi adddressing -+ */ -+ memcpy(&sas_address, karg->Parameters.bDestinationSASAddress, -+ sizeof(u64)); -+ sas_address = reverse_byte_order64(sas_address); -+ -+ /* Search the list for the matching SAS address. -+ */ -+ sas_info = csmisas_get_device_component_by_sas_addr(ioc, sas_address); -+ if (!sas_info || sas_info->is_cached || sas_info->is_logical_volume) { -+ /* -+ *Invalid SAS address -+ */ -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ "%s::%s() @%d - couldn't find associated " -+ "SASAddress=%llX!\n", __FILE__, __FUNCTION__, __LINE__, -+ (unsigned long long)sas_address)); -+ goto cim_stp_passthru_exit; -+ } -+ -+ id = sas_info->fw.id; -+ channel = sas_info->fw.channel; -+ -+ /* check that this is an STP or SATA target device -+ */ -+ if ( !(sas_info->device_info & MPI_SAS_DEVICE_INFO_STP_TARGET ) && -+ !(sas_info->device_info & MPI_SAS_DEVICE_INFO_SATA_DEVICE )) { -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ goto cim_stp_passthru_exit; -+ } -+ -+ /* Get a free request frame and save the message context. 
-+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": no msg frames!\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_stp_passthru_exit; -+ } -+ -+ mpi_hdr = (MPIHeader_t *) mf; -+ MsgContext = mpi_hdr->MsgContext; -+ pSataRequest = (pSataPassthroughRequest_t) mf; -+ req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); -+ -+ memset(pSataRequest,0,sizeof(pSataPassthroughRequest_t)); -+ -+ pSataRequest->TargetID = id; -+ pSataRequest->Bus = channel; -+ pSataRequest->Function = MPI_FUNCTION_SATA_PASSTHROUGH; -+ pSataRequest->PassthroughFlags = cpu_to_le16(karg->Parameters.uFlags); -+ pSataRequest->ConnectionRate = karg->Parameters.bConnectionRate; -+ pSataRequest->MsgContext = MsgContext; -+ pSataRequest->DataLength = cpu_to_le32(request_data_sz); -+ pSataRequest->MsgFlags = 0; -+ memcpy( pSataRequest->CommandFIS,karg->Parameters.bCommandFIS, 20); -+ -+ psge = (char *)&pSataRequest->SGL; -+ if (karg->Parameters.uFlags & CSMI_SAS_STP_WRITE) { -+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; -+ } else if (karg->Parameters.uFlags & CSMI_SAS_STP_READ) { -+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; -+ }else { -+ flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | -+ MPI_SGE_FLAGS_DIRECTION ) -+ << MPI_SGE_FLAGS_SHIFT; -+ } -+ -+ flagsLength |= request_data_sz; -+ if (request_data_sz > 0) { -+ request_data = pci_alloc_consistent( -+ ioc->pcidev, request_data_sz, &request_data_dma); -+ -+ if (request_data == NULL) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ mpt_free_msg_frame(ioc, mf); -+ goto cim_stp_passthru_exit; -+ } -+ -+ ioc->add_sge(psge, flagsLength, request_data_dma); -+ if (karg->Parameters.uFlags & CSMI_SAS_SSP_WRITE) -+ memcpy(request_data, karg->bDataBuffer, request_data_sz); -+ } else { -+ ioc->add_sge(psge, flagsLength, (dma_addr_t) -1); -+ } -+ -+ if (csmisas_send_command_wait(ioc, mf, 
karg->IoctlHeader.Timeout) != 0) { -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_stp_passthru_exit; -+ } -+ -+ memset(&karg->Status,0,sizeof(CSMI_SAS_STP_PASSTHRU_STATUS)); -+ -+ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) == 0) { -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": STP Passthru: oh no, there is no reply!!")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_stp_passthru_exit; -+ } -+ -+ /* process the completed Reply Message Frame */ -+ pSataReply = (pSataPassthroughReply_t ) ioc->ioctl_cmds.reply; -+ ioc_status = le16_to_cpu(pSataReply->IOCStatus) & MPI_IOCSTATUS_MASK; -+ -+ if (ioc_status != MPI_IOCSTATUS_SUCCESS && -+ ioc_status != MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) { -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": STP Passthru: ")); -+ dcsmisasprintk(ioc, printk("IOCStatus=0x%X IOCLogInfo=0x%X SASStatus=0x%X\n", -+ le16_to_cpu(pSataReply->IOCStatus), -+ le32_to_cpu(pSataReply->IOCLogInfo), -+ pSataReply->SASStatus)); -+ } -+ -+ karg->Status.bConnectionStatus = -+ map_sas_status_to_csmi(pSataReply->SASStatus); -+ -+ memcpy(karg->Status.bStatusFIS,pSataReply->StatusFIS, 20); -+ -+ /* -+ * for now, just zero out uSCR array, -+ * then copy the one dword returned -+ * in the reply frame into uSCR[0] -+ */ -+ memset( karg->Status.uSCR, 0, 64); -+ karg->Status.uSCR[0] = le32_to_cpu(pSataReply->StatusControlRegisters); -+ -+ if((le32_to_cpu(pSataReply->TransferCount)) && (request_data) && -+ (karg->Parameters.uFlags & CSMI_SAS_STP_READ)) { -+ karg->Status.uDataBytes = -+ min(le32_to_cpu(pSataReply->TransferCount),request_data_sz); -+ if (copy_to_user((void __user *)uarg->bDataBuffer, -+ request_data, karg->Status.uDataBytes)) { -+ printk(KERN_ERR "%s::%s() @%d - " -+ "Unable to write data to user %p\n", -+ __FILE__, __FUNCTION__, __LINE__, -+ (void*)karg->bDataBuffer); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ } -+ } -+ -+ 
cim_stp_passthru_exit: -+ -+ if (request_data) -+ pci_free_consistent(ioc->pcidev, request_data_sz, -+ (u8 *)request_data, request_data_dma); -+ -+ /* Copy th data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, karg, -+ offsetof(CSMI_SAS_STP_PASSTHRU_BUFFER, bDataBuffer))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_ssp_passthru @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ free_pages((unsigned long)karg, memory_pages); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": %s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS Firmware Download command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_firmware_download(unsigned long arg) -+{ -+ CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ pMpiFwHeader_t pFwHeader=NULL; -+ -+ if (copy_from_user(&karg, uarg, -+ sizeof(CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_firmware_download struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ /* Default to success.*/ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ karg.Information.usStatus = 
CSMI_SAS_FWD_SUCCESS; -+ karg.Information.usSeverity = CSMI_SAS_FWD_INFORMATION; -+ -+ /* some checks of the incoming frame */ -+ if ((karg.Information.uBufferLength + -+ sizeof(CSMI_SAS_FIRMWARE_DOWNLOAD)) > -+ karg.IoctlHeader.Length) { -+ karg.IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ karg.Information.usStatus = CSMI_SAS_FWD_FAILED; -+ goto cim_firmware_download_exit; -+ } -+ -+ if ( karg.Information.uDownloadFlags & -+ (CSMI_SAS_FWD_SOFT_RESET | CSMI_SAS_FWD_VALIDATE)) { -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ karg.Information.usStatus = CSMI_SAS_FWD_REJECT; -+ karg.Information.usSeverity = CSMI_SAS_FWD_ERROR; -+ goto cim_firmware_download_exit; -+ } -+ -+ /* now we need to alloc memory so we can pull in the -+ * fw image attached to end of incoming packet. -+ */ -+ pFwHeader = kmalloc(karg.Information.uBufferLength, GFP_KERNEL); -+ if (!pFwHeader){ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ karg.Information.usStatus = CSMI_SAS_FWD_REJECT; -+ karg.Information.usSeverity = CSMI_SAS_FWD_ERROR; -+ goto cim_firmware_download_exit; -+ } -+ memset(pFwHeader, 0, sizeof(*pFwHeader)); -+ -+ if (copy_from_user(pFwHeader, uarg->bDataBuffer, -+ karg.Information.uBufferLength)) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in pFwHeader @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if ( !((pFwHeader->Signature0 == MPI_FW_HEADER_SIGNATURE_0) && -+ (pFwHeader->Signature1 == MPI_FW_HEADER_SIGNATURE_1) && -+ (pFwHeader->Signature2 == MPI_FW_HEADER_SIGNATURE_2))) { -+ // the signature check failed -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ karg.Information.usStatus = CSMI_SAS_FWD_REJECT; -+ karg.Information.usSeverity = CSMI_SAS_FWD_ERROR; -+ goto cim_firmware_download_exit; -+ } -+ -+ if ( mptctl_do_fw_download(karg.IoctlHeader.IOControllerNumber, -+ uarg->bDataBuffer, karg.Information.uBufferLength) -+ != 0) { -+ karg.IoctlHeader.ReturnCode = 
CSMI_SAS_STATUS_FAILED; -+ karg.Information.usStatus = CSMI_SAS_FWD_FAILED; -+ karg.Information.usSeverity = CSMI_SAS_FWD_FATAL; -+ goto cim_firmware_download_exit; -+ } -+ -+ if((karg.Information.uDownloadFlags & CSMI_SAS_FWD_SOFT_RESET) || -+ (karg.Information.uDownloadFlags & CSMI_SAS_FWD_HARD_RESET)) { -+ if (mpt_HardResetHandler(ioc, CAN_SLEEP) != 0) { -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ karg.Information.usStatus = CSMI_SAS_FWD_FAILED; -+ karg.Information.usSeverity = CSMI_SAS_FWD_FATAL; -+ } -+ } -+ -+ cim_firmware_download_exit: -+ -+ if(pFwHeader) -+ kfree(pFwHeader); -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_firmware_download @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS Get RAID Info command. -+ * -+ * Outputs: None. 
-+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_raid_info(unsigned long arg) -+{ -+ CSMI_SAS_RAID_INFO_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_RAID_INFO_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ u32 raidFlags; -+ u8 maxRaidTypes; -+ u8 maxDrivesPerSet; -+ -+ if (copy_from_user(&karg, uarg, sizeof(CSMI_SAS_RAID_INFO_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_get_raid_info struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ if (!ioc->raid_data.pIocPg2) -+ goto csmisas_get_raid_info_out; -+ karg.Information.uNumRaidSets = -+ ioc->raid_data.pIocPg2->NumActiveVolumes; -+ karg.Information.uMaxRaidSets = ioc->raid_data.pIocPg2->MaxVolumes; -+ if( ioc->raid_data.pIocPg6 ) { -+ // get absolute maximum for all RAID sets -+ maxDrivesPerSet = ioc->raid_data.pIocPg6->MaxDrivesIS; -+ maxDrivesPerSet = max(ioc->raid_data.pIocPg6->MaxDrivesIM, -+ maxDrivesPerSet); -+ maxDrivesPerSet = max(ioc->raid_data.pIocPg6->MaxDrivesIME, -+ maxDrivesPerSet); -+ karg.Information.uMaxDrivesPerSet = maxDrivesPerSet; -+ } -+ else -+ karg.Information.uMaxDrivesPerSet = 8; -+ // For bMaxRaidSets, count bits set in bits 0-6 of CapabilitiesFlags -+ raidFlags = ioc->raid_data.pIocPg2->CapabilitiesFlags & 0x0000007F; -+ for( maxRaidTypes=0; raidFlags; maxRaidTypes++ ) -+ 
raidFlags &= raidFlags - 1; -+ karg.Information.bMaxRaidTypes = maxRaidTypes; -+ // ulMinRaidSetBlocks hard coded to 1MB until available from config page -+ karg.Information.ulMinRaidSetBlocks.uLowPart = 2048; -+ karg.Information.ulMinRaidSetBlocks.uHighPart = 0; -+ karg.Information.ulMaxRaidSetBlocks.uLowPart = 0xffffffff; -+ if( ioc->raid_data.pIocPg2->CapabilitiesFlags & -+ MPI_IOCPAGE2_CAP_FLAGS_RAID_64_BIT_ADDRESSING ) -+ karg.Information.ulMaxRaidSetBlocks.uHighPart = 0xffffffff; -+ else -+ karg.Information.ulMaxRaidSetBlocks.uHighPart = 0; -+ karg.Information.uMaxPhysicalDrives = -+ ioc->raid_data.pIocPg2->MaxPhysDisks; -+ karg.Information.uMaxExtents = 1; -+ karg.Information.uMaxModules = 0; -+ karg.Information.uMaxTransformationMemory = 0; -+ karg.Information.uChangeCount = ioc->csmi_change_count; -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+csmisas_get_raid_info_out: -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_RAID_INFO_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_get_raid_info @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * csmisas_do_raid - Format and Issue a RAID volume request message. -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @action: What do be done. -+ * @PhysDiskNum: Logical target id. -+ * @VolumeBus: Target locations bus. -+ * @VolumeId: Volume id -+ * -+ * Returns: < 0 on a fatal error -+ * 0 on success -+ * -+ * Remark: Wait to return until reply processed by the ISR. 
-+ **/ -+static int -+csmisas_do_raid(MPT_ADAPTER *ioc, u8 action, u8 PhysDiskNum, u8 VolumeBus, u8 VolumeId, pMpiRaidActionReply_t reply) -+{ -+ MpiRaidActionRequest_t *pReq; -+ MpiRaidActionReply_t *pReply; -+ MPT_FRAME_HDR *mf; -+ -+ /* Get and Populate a free Frame -+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": no msg frames!\n")); -+ return -EAGAIN; -+ } -+ pReq = (MpiRaidActionRequest_t *)mf; -+ pReq->Action = action; -+ pReq->Reserved1 = 0; -+ pReq->ChainOffset = 0; -+ pReq->Function = MPI_FUNCTION_RAID_ACTION; -+ pReq->VolumeID = VolumeId; -+ pReq->VolumeBus = VolumeBus; -+ pReq->PhysDiskNum = PhysDiskNum; -+ pReq->MsgFlags = 0; -+ pReq->Reserved2 = 0; -+ pReq->ActionDataWord = 0; /* Reserved for this action */ -+ //pReq->ActionDataSGE = 0; -+ -+ ioc->add_sge((char *)&pReq->ActionDataSGE, -+ MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); -+ -+ if (csmisas_send_command_wait(ioc, mf, MPT_IOCTL_DEFAULT_TIMEOUT) != 0) -+ return -ENODATA; -+ -+ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) && -+ (reply != NULL)){ -+ pReply = (MpiRaidActionReply_t *)&(ioc->ioctl_cmds.reply); -+ memcpy(reply, pReply, -+ min(ioc->reply_sz, -+ 4*pReply->MsgLength)); -+ } -+ -+ return 0; -+} -+ -+/** -+ * csmisas_raid_inq -+ * @ioc = per host instance -+ * @opcode = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH or -+ * MPI_FUNCTION_SCSI_IO_REQUEST -+ * @id = target id -+ * @bus = target bus -+ * @inq_vpd = inquiry data, returned -+ * @inq_vpd_sz = maximum size of inquiry data -+ * -+ * Return = 0(sucess), non-zero(failure) -+ **/ -+static int -+csmisas_raid_inq(MPT_ADAPTER *ioc, u8 opcode, u8 bus, u8 id, u8 inq_vpd_page, -+ u8 * inq_vpd, u32 inq_vpd_sz) -+{ -+ MPT_FRAME_HDR *mf = NULL; -+ MPIHeader_t *mpi_hdr; -+ pSCSIIORequest_t pScsiRequest; -+ u16 req_idx; -+ char *psge; -+ u8 inq_vpd_cdb[6]; -+ u8 *request_data=NULL; -+ dma_addr_t request_data_dma; -+ u32 request_data_sz; -+ int rc = 0; -+ u32 MsgContext; -+ -+ 
request_data_sz = inq_vpd_sz; -+ -+ /* fill-in cdb */ -+ memset(inq_vpd_cdb, 0, sizeof(inq_vpd_cdb)); -+ inq_vpd_cdb[0] = 0x12; -+ if (inq_vpd_page) { -+ inq_vpd_cdb[1] = 0x01; /* evpd bit */ -+ inq_vpd_cdb[2] = inq_vpd_page; -+ } -+ inq_vpd_cdb[3] = (u8)(request_data_sz >> 8); -+ inq_vpd_cdb[4] = (u8)request_data_sz; -+ -+ /* Get a free request frame and save the message context. -+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": no msg frames!\n")); -+ goto csmisas_raid_inq_exit; -+ } -+ -+ mpi_hdr = (MPIHeader_t *) mf; -+ MsgContext = mpi_hdr->MsgContext; -+ pScsiRequest = (pSCSIIORequest_t) mf; -+ req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); -+ -+ memset(pScsiRequest,0,sizeof(SCSIIORequest_t)); -+ pScsiRequest->Function = opcode; -+ pScsiRequest->TargetID = id; -+ pScsiRequest->Bus = bus; -+ pScsiRequest->CDBLength = 6; -+ pScsiRequest->DataLength = cpu_to_le32(request_data_sz); -+ pScsiRequest->MsgContext = MsgContext; -+ memcpy(pScsiRequest->CDB,inq_vpd_cdb,pScsiRequest->CDBLength); -+ pScsiRequest->Control = cpu_to_le32(MPI_SCSIIO_CONTROL_READ); -+ pScsiRequest->Control |= cpu_to_le32(MPI_SCSIIO_CONTROL_SIMPLEQ); -+ pScsiRequest->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; -+ if (ioc->sg_addr_size == sizeof(u64)) -+ pScsiRequest->MsgFlags |= MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64; -+ -+ /* setup sense -+ */ -+ pScsiRequest->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; -+ pScsiRequest->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma + -+ (req_idx * MPT_SENSE_BUFFER_ALLOC)); -+ -+ request_data = pci_alloc_consistent( -+ ioc->pcidev, request_data_sz, &request_data_dma); -+ -+ if (request_data == NULL) { -+ mpt_free_msg_frame(ioc, mf); -+ rc=-1; -+ goto csmisas_raid_inq_exit; -+ } -+ -+ memset(request_data,0,request_data_sz); -+ psge = (char *)&pScsiRequest->SGL; -+ ioc->add_sge(psge, (MPT_SGE_FLAGS_SSIMPLE_READ | 0xFC) , -+ request_data_dma); -+ -+ if (csmisas_send_command_wait(ioc, mf, 
MPT_IOCTL_DEFAULT_TIMEOUT) != 0) { -+ rc=-1; -+ goto csmisas_raid_inq_exit; -+ } -+ -+ /* copy the request_data */ -+ memcpy(inq_vpd, request_data, request_data_sz); -+ -+ csmisas_raid_inq_exit: -+ -+ if (request_data) -+ pci_free_consistent(ioc->pcidev, request_data_sz, -+ request_data, request_data_dma); -+ -+ return rc; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS Get RAID Config command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_raid_config(unsigned long arg) -+{ -+ CSMI_SAS_RAID_CONFIG_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_RAID_CONFIG_BUFFER karg,*pKarg=NULL; -+ CONFIGPARMS cfg; -+ ConfigPageHeader_t header; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ u8 volumeID, VolumeBus; -+ u8 physDiskNum, physDiskNumMax; -+ int volumepage0sz = 0; -+ int physdiskpage0sz = 0, ioc_page5_sz = 0; -+ dma_addr_t volume0_dma, physdisk0_dma; -+ dma_addr_t ioc_page5_dma = 0; -+ pRaidVolumePage0_t pVolume0 = NULL; -+ pRaidPhysDiskPage0_t pPhysDisk0 = NULL; -+ pMpiRaidActionReply_t pRaidActionReply = NULL; -+ u32 device_info = 0; -+ pIOCPage5_t pIocPage5 = NULL; -+ int i, idx, csmi_sas_raid_config_buffer_sz; -+ int memory_pages; -+ int copy_buffer_sz = 0; -+ u64 totalMaxLBA, tmpTotalMaxLBA; -+ u64 sas_address; -+ struct sas_device_info *sas_info; -+ -+ if (copy_from_user(&karg, uarg, sizeof(IOCTL_HEADER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmisas_get_raid_config struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ csmi_sas_raid_config_buffer_sz = karg.IoctlHeader.Length; -+ memory_pages = get_order(csmi_sas_raid_config_buffer_sz); -+ pKarg = (CSMI_SAS_RAID_CONFIG_BUFFER *)__get_free_pages( -+ GFP_KERNEL, memory_pages); -+ if (!pKarg){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc RAID_CONFIG_BUFFER " -+ "csmi_sas_raid_config_buffer_sz=%d memory_pages=%d\n", -+ 
__FILE__, __LINE__, __FUNCTION__, -+ csmi_sas_raid_config_buffer_sz, memory_pages); -+ return -ENOMEM; -+ } -+ memset(pKarg, 0, sizeof(*pKarg)); -+ -+ if (copy_from_user(pKarg, uarg, csmi_sas_raid_config_buffer_sz)) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmisas_get_raid_config struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(pKarg->IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ if (pKarg->Configuration.uChangeCount != 0 && -+ pKarg->Configuration.uChangeCount != ioc->csmi_change_count ) { -+ pKarg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ pKarg->Configuration.uFailureCode = -+ CSMI_SAS_FAIL_CODE_CHANGE_COUNT_INVALID; -+ goto cim_get_raid_config_exit; -+ } -+ -+ if (!ioc->raid_data.pIocPg2) { -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_raid_config_exit; -+ } -+ -+ /* -+ * Check to see if the input uRaidSetIndex is -+ * greater than the number of RAID sets -+ */ -+ if (pKarg->Configuration.uRaidSetIndex >= -+ ioc->raid_data.pIocPg2->NumActiveVolumes) { -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_RAID_SET_OUT_OF_RANGE; -+ goto cim_get_raid_config_exit; -+ } -+ -+ /* -+ * get RAID Volume Page 0 -+ */ -+ volumeID = ioc->raid_data.pIocPg2->RaidVolume[pKarg->Configuration.uRaidSetIndex].VolumeID; -+ VolumeBus = 
ioc->raid_data.pIocPg2->RaidVolume[pKarg->Configuration.uRaidSetIndex].VolumeBus; -+ -+ header.PageVersion = 0; -+ header.PageLength = 0; -+ header.PageNumber = 0; -+ header.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME; -+ cfg.cfghdr.hdr = &header; -+ cfg.physAddr = -1; -+ cfg.pageAddr = (VolumeBus << 8) + volumeID; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ if (mpt_config(ioc, &cfg) != 0) { -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_raid_config_exit; -+ } -+ -+ if (header.PageLength == 0) { -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_raid_config_exit; -+ } -+ -+ volumepage0sz = header.PageLength * 4; -+ pVolume0 = pci_alloc_consistent(ioc->pcidev, volumepage0sz, -+ &volume0_dma); -+ if (!pVolume0) { -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_raid_config_exit; -+ } -+ -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ cfg.physAddr = volume0_dma; -+ if (mpt_config(ioc, &cfg) != 0) { -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_raid_config_exit; -+ } -+ -+ totalMaxLBA = (u64)le32_to_cpu(pVolume0->MaxLBA) | -+ ((u64)le32_to_cpu(pVolume0->MaxLBAHigh)) << 32; -+ tmpTotalMaxLBA = totalMaxLBA + 1; -+ do_div(tmpTotalMaxLBA, 2048); -+ pKarg->Configuration.bDriveCount = 0; -+ pKarg->Configuration.uCapacity = tmpTotalMaxLBA; -+ pKarg->Configuration.uStripeSize = -+ le32_to_cpu(pVolume0->StripeSize)/2; -+ -+ switch(pVolume0->VolumeType) { -+ case MPI_RAID_VOL_TYPE_IS: -+ pKarg->Configuration.bRaidType = CSMI_SAS_RAID_TYPE_0; -+ break; -+ case MPI_RAID_VOL_TYPE_IME: -+ pKarg->Configuration.bRaidType = CSMI_SAS_RAID_TYPE_10; -+ break; -+ case MPI_RAID_VOL_TYPE_IM: -+ pKarg->Configuration.bRaidType = CSMI_SAS_RAID_TYPE_1; -+ break; -+ default: -+ pKarg->Configuration.bRaidType = CSMI_SAS_RAID_TYPE_OTHER; -+ break; -+ } -+ -+ switch (pVolume0->VolumeStatus.State) { -+ case 
MPI_RAIDVOL0_STATUS_STATE_OPTIMAL: -+ pKarg->Configuration.bStatus = CSMI_SAS_RAID_SET_STATUS_OK; -+ break; -+ case MPI_RAIDVOL0_STATUS_STATE_DEGRADED: -+ /* Volume is degraded, check if Resyncing or Inactive */ -+ pKarg->Configuration.bStatus = CSMI_SAS_RAID_SET_STATUS_DEGRADED; -+ break; -+ case MPI_RAIDVOL0_STATUS_STATE_FAILED: -+ pKarg->Configuration.bStatus = CSMI_SAS_RAID_SET_STATUS_FAILED; -+ break; -+ } -+ -+ /* check flags */ -+ if (pVolume0->VolumeStatus.Flags & -+ MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE) -+ pKarg->Configuration.bStatus = CSMI_SAS_RAID_SET_STATUS_OFFLINE; -+ else if (pVolume0->VolumeStatus.Flags & -+ MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) -+ pKarg->Configuration.bStatus = CSMI_SAS_RAID_SET_STATUS_REBUILDING; -+ -+ pKarg->Configuration.bInformation = 0; /* default */ -+ if(pVolume0->VolumeStatus.Flags & -+ MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS ) { -+ -+ uint64_t * ptrUint64; -+ uint64_t totalBlocks64, blocksRemaining64; -+ uint32_t totalBlocks32, blocksRemaining32; -+ -+ /* get percentage complete */ -+ pRaidActionReply = kmalloc( sizeof(MPI_RAID_VOL_INDICATOR) + -+ offsetof(MSG_RAID_ACTION_REPLY,ActionData), -+ GFP_KERNEL); -+ -+ if (!pRaidActionReply){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__,pKarg); -+ goto cim_get_raid_config_exit; -+ } -+ memset(pRaidActionReply, 0, sizeof(*pRaidActionReply)); -+ -+ csmisas_do_raid(ioc, -+ MPI_RAID_ACTION_INDICATOR_STRUCT, -+ 0, VolumeBus, volumeID, pRaidActionReply); -+ -+ ptrUint64 = (uint64_t *)&pRaidActionReply->ActionData; -+ totalBlocks64 = *ptrUint64; -+ ptrUint64++; -+ blocksRemaining64 = *ptrUint64; -+ while(totalBlocks64 > 0xFFFFFFFFUL){ -+ totalBlocks64 = totalBlocks64 >> 1; -+ blocksRemaining64 = blocksRemaining64 >> 1; -+ } -+ totalBlocks32 = (uint32_t)totalBlocks64; -+ blocksRemaining32 = (uint32_t)blocksRemaining64; -+ -+ if(totalBlocks32) -+ pKarg->Configuration.bInformation = -+ (totalBlocks32 - 
blocksRemaining32) / -+ (totalBlocks32 / 100); -+ -+ kfree(pRaidActionReply); -+ } -+ -+ /* fill-in more information depending on data type */ -+ if (pKarg->Configuration.bDataType == -+ CSMI_SAS_RAID_DATA_ADDITIONAL_DATA) { -+ pKarg->Configuration.Data->bLabel[0] = '\0'; -+ pKarg->Configuration.Data->bRaidSetLun[1] = 0; -+ pKarg->Configuration.Data->bWriteProtection = -+ CSMI_SAS_RAID_SET_WRITE_PROTECT_UNKNOWN; -+ pKarg->Configuration.Data->bCacheSetting = -+ CSMI_SAS_RAID_SET_CACHE_UNKNOWN; -+ pKarg->Configuration.Data->bCacheRatio = 0; -+ pKarg->Configuration.Data->usBlockSize = 512; -+ pKarg->Configuration.Data->ulRaidSetExtentOffset.uLowPart = 0; -+ pKarg->Configuration.Data->ulRaidSetExtentOffset.uHighPart = 0; -+ pKarg->Configuration.Data->ulRaidSetBlocks.uLowPart = -+ le32_to_cpu(pVolume0->MaxLBA); -+ pKarg->Configuration.Data->ulRaidSetBlocks.uHighPart = -+ le32_to_cpu(pVolume0->MaxLBAHigh); -+ if (pVolume0->VolumeType == MPI_RAID_VOL_TYPE_IS || -+ pVolume0->VolumeType == MPI_RAID_VOL_TYPE_IME ) { -+ pKarg->Configuration.Data->uStripeSizeInBlocks = -+ le32_to_cpu(pVolume0->StripeSize); -+ } else { -+ pKarg->Configuration.Data->uStripeSizeInBlocks = 0; -+ } -+ pKarg->Configuration.Data->uSectorsPerTrack = 128; -+ for (i=0; i<16; i++) { -+ // unsupported -+ pKarg->Configuration.Data->bApplicationScratchPad[i] = -+ 0xFF; -+ } -+ pKarg->Configuration.Data->uNumberOfHeads = 16; -+ -+ tmpTotalMaxLBA = totalMaxLBA; -+ do_div(tmpTotalMaxLBA, -+ (pKarg->Configuration.Data->uNumberOfHeads * -+ pKarg->Configuration.Data->uSectorsPerTrack)); -+ pKarg->Configuration.Data->uNumberOfTracks = tmpTotalMaxLBA; -+ } else if ( pKarg->Configuration.bDataType == -+ CSMI_SAS_RAID_DATA_DEVICE_ID ) { -+ /* Send inquiry to get VPD Page 0x83 */ -+ u32 vpd_page_sz; -+ vpd_page_sz = csmi_sas_raid_config_buffer_sz - -+ offsetof(CSMI_SAS_RAID_CONFIG,DeviceId); -+ if (csmisas_raid_inq(ioc, MPI_FUNCTION_SCSI_IO_REQUEST, -+ VolumeBus, volumeID, 0x83, -+ 
(u8*)&pKarg->Configuration.DeviceId->bDeviceIdentificationVPDPage, -+ vpd_page_sz) != 0) { -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_raid_config_exit; -+ } -+ } else { -+ /* suppress drive information */ -+ if (pKarg->Configuration.bDriveCount == -+ CSMI_SAS_RAID_DRIVE_COUNT_SUPRESSED) { -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ goto cim_get_raid_config_exit; -+ } -+ } -+ -+ /* get hotspare info, used later in this function */ -+ if (pVolume0->VolumeSettings.HotSparePool) { -+ /* Read and save IOC Page 5 -+ */ -+ header.PageVersion = 0; -+ header.PageLength = 0; -+ header.PageNumber = 5; -+ header.PageType = MPI_CONFIG_PAGETYPE_IOC; -+ cfg.cfghdr.hdr = &header; -+ cfg.physAddr = -1; -+ cfg.pageAddr = 0; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ if ((mpt_config(ioc, &cfg) == 0) && (header.PageLength)) { -+ ioc_page5_sz = header.PageLength * 4; -+ pIocPage5 = pci_alloc_consistent(ioc->pcidev, -+ ioc_page5_sz, -+ &ioc_page5_dma); -+ memset(pIocPage5,0,ioc_page5_sz); -+ if (ioc_page5_dma) { -+ cfg.physAddr = ioc_page5_dma; -+ cfg.action = -+ MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ mpt_config(ioc, &cfg); -+ } -+ } -+ } -+ -+ /* -+ * get RAID Physical Disk Page 0 -+ */ -+ header.PageVersion = 0; -+ header.PageLength = 0; -+ header.PageNumber = 0; -+ header.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; -+ cfg.cfghdr.hdr = &header; -+ cfg.physAddr = -1; -+ cfg.pageAddr = 0; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ if (mpt_config(ioc, &cfg) != 0) { -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_raid_config_exit; -+ } -+ -+ if (header.PageLength == 0) { -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_raid_config_exit; -+ } -+ -+ physdiskpage0sz = header.PageLength * 4; -+ pPhysDisk0 = pci_alloc_consistent(ioc->pcidev, 
physdiskpage0sz, -+ &physdisk0_dma); -+ if (!pPhysDisk0) { -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_raid_config_exit; -+ } -+ cfg.physAddr = physdisk0_dma; -+ -+ physDiskNumMax = (csmi_sas_raid_config_buffer_sz - -+ offsetof(CSMI_SAS_RAID_CONFIG,Drives)) -+ / sizeof(CSMI_SAS_RAID_DRIVES); -+ -+ tmpTotalMaxLBA = totalMaxLBA; -+ if (pVolume0->VolumeType == MPI_RAID_VOL_TYPE_IS) { -+ do_div(tmpTotalMaxLBA, pVolume0->NumPhysDisks); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "IS Volume tmpTotalMaxLBA=%llX\n", -+ (unsigned long long)tmpTotalMaxLBA)); -+ } -+ else if (pVolume0->VolumeType == MPI_RAID_VOL_TYPE_IME) { -+ do_div(tmpTotalMaxLBA, pVolume0->NumPhysDisks * 2); -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "IME Volume tmpTotalMaxLBA=%llX\n", -+ (unsigned long long)tmpTotalMaxLBA)); -+ } else { -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "IM Volume tmpTotalMaxLBA=%llX\n", -+ (unsigned long long)tmpTotalMaxLBA)); -+ } -+ -+ for (i=0; i< min(pVolume0->NumPhysDisks, physDiskNumMax); i++) { -+ -+ physDiskNum = pVolume0->PhysDisk[i].PhysDiskNum; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ cfg.pageAddr = physDiskNum; -+ if (mpt_config(ioc, &cfg) != 0){ -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_raid_config_exit; -+ } -+ -+ pKarg->Configuration.bDriveCount++; -+ if (pKarg->Configuration.bDataType != CSMI_SAS_RAID_DATA_DRIVES) -+ continue; -+ -+ /* Search the list for the matching SAS address. 
*/ -+ sas_info = csmisas_get_device_component_by_fw(ioc, pPhysDisk0->PhysDiskBus, -+ pPhysDisk0->PhysDiskID); -+ if (sas_info) { -+ sas_address = reverse_byte_order64(sas_info->sas_address); -+ memcpy(pKarg->Configuration.Drives[i].bSASAddress, -+ &sas_address,sizeof(u64)); -+ if (!device_info) -+ device_info = sas_info->device_info; -+ } -+ -+ memcpy(pKarg->Configuration.Drives[i].bModel, -+ pPhysDisk0->InquiryData.VendorID, -+ offsetof(RAID_PHYS_DISK0_INQUIRY_DATA,ProductRevLevel)); -+ memcpy(pKarg->Configuration.Drives[i].bFirmware, -+ pPhysDisk0->InquiryData.ProductRevLevel, -+ sizeof(pPhysDisk0->InquiryData.ProductRevLevel)); -+ if (csmisas_is_sata(pPhysDisk0)) { -+ memcpy(&pKarg->Configuration.Drives[i].bSerialNumber, -+ &pPhysDisk0->ExtDiskIdentifier[4], -+ 4); -+ memcpy(&pKarg->Configuration.Drives[i].bSerialNumber[4], -+ &pPhysDisk0->DiskIdentifier, -+ sizeof(pPhysDisk0->DiskIdentifier)); -+ } else { -+ memcpy(pKarg->Configuration.Drives[i].bSerialNumber, -+ pPhysDisk0->DiskIdentifier, -+ sizeof(pPhysDisk0->DiskIdentifier)); -+ } -+ -+ pKarg->Configuration.Drives[i].bDriveUsage = -+ (pPhysDisk0->PhysDiskStatus.Flags & -+ MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) ? 
-+ CSMI_SAS_DRIVE_CONFIG_NOT_USED : -+ CSMI_SAS_DRIVE_CONFIG_MEMBER; -+ -+ pKarg->Configuration.Drives[i].bDriveStatus = -+ CSMI_SAS_DRIVE_STATUS_OK; -+ if (pPhysDisk0->PhysDiskStatus.State == -+ MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED) { -+ pKarg->Configuration.Drives[i].bDriveStatus = -+ CSMI_SAS_DRIVE_STATUS_OFFLINE; -+ } else if(pPhysDisk0->PhysDiskStatus.State) { -+ pKarg->Configuration.Drives[i].bDriveStatus = -+ CSMI_SAS_DRIVE_STATUS_FAILED; -+ if(pKarg->Configuration.bStatus == -+ CSMI_SAS_RAID_SET_STATUS_DEGRADED) -+ pKarg->Configuration.bInformation = i; -+ } else if((pVolume0->VolumeStatus.Flags & -+ MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) && -+ (pPhysDisk0->PhysDiskStatus.Flags & -+ MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC)) -+ pKarg->Configuration.Drives[i].bDriveStatus = -+ CSMI_SAS_DRIVE_STATUS_REBUILDING; -+ else if(pPhysDisk0->ErrorData.SmartCount || -+ (pPhysDisk0->PhysDiskStatus.Flags & -+ MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC)) -+ pKarg->Configuration.Drives[i].bDriveStatus = -+ CSMI_SAS_DRIVE_STATUS_DEGRADED; -+ -+ memset(pKarg->Configuration.Drives[i].bSASLun, -+ 0, sizeof(pKarg->Configuration.Drives[i].bSASLun)); -+ if (csmisas_is_sata(pPhysDisk0)) { -+ pKarg->Configuration.Drives[i].bDriveType = -+ CSMI_SAS_DRIVE_TYPE_SATA; -+ } else { /* drive in a volume can only be SAS/SATA */ -+ pKarg->Configuration.Drives[i].bDriveType = -+ CSMI_SAS_DRIVE_TYPE_SINGLE_PORT_SAS; -+ if (mpt_raid_phys_disk_get_num_paths(ioc, -+ pVolume0->PhysDisk[i].PhysDiskNum) > 1) -+ pKarg->Configuration.Drives[i].bDriveType = -+ CSMI_SAS_DRIVE_TYPE_DUAL_PORT_SAS; -+ } -+ -+ pKarg->Configuration.Drives[i].usBlockSize = 512; -+ pKarg->Configuration.Drives[i].uDriveIndex = -+ pPhysDisk0->PhysDiskNum; -+ pKarg->Configuration.Drives[i].ulTotalUserBlocks.uLowPart = -+ (u32)tmpTotalMaxLBA; -+ pKarg->Configuration.Drives[i].ulTotalUserBlocks.uHighPart = -+ (u32)(tmpTotalMaxLBA >> 32); -+ } -+ -+ /* adding hot spare info at the end */ -+ if 
((pVolume0->VolumeSettings.HotSparePool) && (pIocPage5) && -+ (pVolume0->VolumeType != MPI_RAID_VOL_TYPE_IS)) { -+ for (idx = 0, i = pVolume0->NumPhysDisks ; -+ idx < pIocPage5->NumHotSpares ; idx++) { -+ if (i >= physDiskNumMax) -+ break; -+ if ((pVolume0->VolumeSettings.HotSparePool & -+ pIocPage5->HotSpare[idx].HotSparePool) == 0) -+ continue; -+ if(pIocPage5->HotSpare[idx].Flags != -+ MPI_IOC_PAGE_5_HOT_SPARE_ACTIVE) -+ continue; -+ physDiskNum = pIocPage5->HotSpare[idx].PhysDiskNum; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ cfg.pageAddr = physDiskNum; -+ if (mpt_config(ioc, &cfg) != 0) -+ continue; -+ -+ /* don't mix SSP hot spare -+ * in SATA volume -+ */ -+ if (!csmisas_is_sata(pPhysDisk0) && -+ (device_info & -+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)) -+ continue; -+ -+ /* don't mix SATA hot spare -+ * in SSP volume -+ */ -+ if (csmisas_is_sata(pPhysDisk0) && -+ (device_info & -+ MPI_SAS_DEVICE_INFO_SSP_TARGET)) -+ continue; -+ -+ /* capacity check for IM volumes*/ -+ if ((pVolume0->VolumeType == -+ MPI_RAID_VOL_TYPE_IM) && -+ (totalMaxLBA + -+ (64*2*1024) /* metadata = 64MB*/ > -+ le32_to_cpu(pPhysDisk0->MaxLBA))) -+ continue; -+ -+ tmpTotalMaxLBA = totalMaxLBA; -+ do_div(tmpTotalMaxLBA, pVolume0->NumPhysDisks); -+ /* capacity check for IME volumes*/ -+ if ((pVolume0->VolumeType == -+ MPI_RAID_VOL_TYPE_IME) && -+ (((totalMaxLBA + -+ pVolume0->NumPhysDisks) * 2) + -+ (64*2*1024 ) /*metadata = 64MB*/ > -+ le32_to_cpu(pPhysDisk0->MaxLBA))) -+ continue; -+ -+ pKarg->Configuration.bDriveCount++; -+ if (pKarg->Configuration.bDataType != -+ CSMI_SAS_RAID_DATA_DRIVES) { -+ i++; -+ continue; -+ } -+ -+ /* Search the list for the matching SAS address. 
*/ -+ sas_info = csmisas_get_device_component_by_fw(ioc, -+ pPhysDisk0->PhysDiskBus, pPhysDisk0->PhysDiskID); -+ if (sas_info) { -+ sas_address = reverse_byte_order64(sas_info->sas_address); -+ memcpy(pKarg->Configuration.Drives[i].bSASAddress, -+ &sas_address,sizeof(u64)); -+ } -+ -+ memcpy(pKarg->Configuration.Drives[i].bModel, -+ pPhysDisk0->InquiryData.VendorID, -+ offsetof(RAID_PHYS_DISK0_INQUIRY_DATA,ProductRevLevel)); -+ memcpy(pKarg->Configuration.Drives[i].bFirmware, -+ pPhysDisk0->InquiryData.ProductRevLevel, -+ sizeof(pPhysDisk0->InquiryData.ProductRevLevel)); -+ if (csmisas_is_sata(pPhysDisk0)) { -+ memcpy(&pKarg->Configuration.Drives[i].bSerialNumber, -+ &pPhysDisk0->ExtDiskIdentifier[4], -+ 4); -+ memcpy(&pKarg->Configuration.Drives[i].bSerialNumber[4], -+ &pPhysDisk0->DiskIdentifier, -+ sizeof(pPhysDisk0->DiskIdentifier)); -+ } else { -+ memcpy(pKarg->Configuration.Drives[i].bSerialNumber, -+ pPhysDisk0->DiskIdentifier, -+ sizeof(pPhysDisk0->DiskIdentifier)); -+ } -+ pKarg->Configuration.Drives[i].bDriveStatus = -+ CSMI_SAS_DRIVE_STATUS_OK; -+ if(pPhysDisk0->PhysDiskStatus.State) -+ pKarg->Configuration.Drives[i].bDriveStatus = -+ CSMI_SAS_DRIVE_STATUS_FAILED; -+ else if(pPhysDisk0->ErrorData.SmartCount) -+ pKarg->Configuration.Drives[i].bDriveStatus = -+ CSMI_SAS_DRIVE_STATUS_DEGRADED; -+ pKarg->Configuration.Drives[i].bDriveUsage = -+ CSMI_SAS_DRIVE_CONFIG_SPARE; -+ pKarg->Configuration.Drives[i].usBlockSize = 512; -+ pKarg->Configuration.Drives[i].uDriveIndex = -+ pPhysDisk0->PhysDiskNum; -+ if (csmisas_is_sata(pPhysDisk0)) { -+ pKarg->Configuration.Drives[i].bDriveType = -+ CSMI_SAS_DRIVE_TYPE_SATA; -+ } else { /* drive in a volume can only be SAS/SATA */ -+ pKarg->Configuration.Drives[i].bDriveType = -+ CSMI_SAS_DRIVE_TYPE_SINGLE_PORT_SAS; -+ if (mpt_raid_phys_disk_get_num_paths(ioc, -+ pVolume0->PhysDisk[i].PhysDiskNum) > 1) -+ pKarg->Configuration.Drives[i].bDriveType = -+ CSMI_SAS_DRIVE_TYPE_DUAL_PORT_SAS; -+ } -+ i++; -+ } -+ } -+ -+ // Only 
return data on the first 240 drives -+ if( pKarg->Configuration.bDriveCount > 0xF0 ) -+ pKarg->Configuration.bDriveCount = -+ CSMI_SAS_RAID_DRIVE_COUNT_TOO_BIG; -+ -+ pKarg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ cim_get_raid_config_exit: -+ -+ if (pVolume0 != NULL) -+ pci_free_consistent(ioc->pcidev, volumepage0sz, pVolume0, -+ volume0_dma); -+ -+ if(pPhysDisk0 != NULL) -+ pci_free_consistent(ioc->pcidev, physdiskpage0sz, pPhysDisk0, -+ physdisk0_dma); -+ -+ if(pIocPage5 != NULL) -+ pci_free_consistent(ioc->pcidev, ioc_page5_sz, pIocPage5, -+ ioc_page5_dma); -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ -+ /* find the buffer size to copy depending on how much is filled-in */ -+ switch (pKarg->Configuration.bDataType) { -+ case CSMI_SAS_RAID_DATA_ADDITIONAL_DATA: -+ copy_buffer_sz = sizeof(IOCTL_HEADER) + -+ offsetof(CSMI_SAS_RAID_CONFIG,Data) + -+ sizeof(CSMI_SAS_RAID_SET_ADDITIONAL_DATA); -+ break; -+ case CSMI_SAS_RAID_DATA_DRIVES: -+ if (pKarg->Configuration.bDriveCount == -+ CSMI_SAS_RAID_DRIVE_COUNT_SUPRESSED) -+ copy_buffer_sz = sizeof(IOCTL_HEADER) + -+ offsetof(CSMI_SAS_RAID_CONFIG,Drives); -+ else -+ copy_buffer_sz = sizeof(IOCTL_HEADER) + -+ offsetof(CSMI_SAS_RAID_CONFIG,Drives) + -+ (pKarg->Configuration.bDriveCount * -+ sizeof(CSMI_SAS_RAID_DRIVES)); -+ break; -+ case CSMI_SAS_RAID_DATA_DEVICE_ID: -+ copy_buffer_sz = csmi_sas_raid_config_buffer_sz; -+ break; -+ } -+ -+ if (copy_to_user(uarg, pKarg, copy_buffer_sz)) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_get_raid_config @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS Get RAID Features command. -+ * -+ * Outputs: None. 
-+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_raid_features(unsigned long arg) -+{ -+ CSMI_SAS_RAID_FEATURES_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_RAID_FEATURES_BUFFER karg, *pKarg=NULL; -+ int csmi_sas_raid_features_buffer_sz, iocnum; -+ int memory_pages; -+ MPT_ADAPTER *ioc = NULL; -+ -+ if (copy_from_user(&karg, uarg, sizeof(IOCTL_HEADER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_get_raid_features struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ csmi_sas_raid_features_buffer_sz = karg.IoctlHeader.Length; -+ memory_pages = get_order(csmi_sas_raid_features_buffer_sz); -+ pKarg = (CSMI_SAS_RAID_FEATURES_BUFFER *)__get_free_pages( -+ GFP_KERNEL, memory_pages); -+ if (!pKarg){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc RAID_FEATURES_BUFFER " -+ "csmi_sas_raid_features_buffer_sz=%d memory_pages=%d\n", -+ __FILE__, __LINE__, __FUNCTION__, -+ csmi_sas_raid_features_buffer_sz, memory_pages); -+ return -ENOMEM; -+ } -+ memset(pKarg, 0, sizeof(*pKarg)); -+ -+ if (copy_from_user(pKarg, uarg, csmi_sas_raid_features_buffer_sz)) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_get_raid_features struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(pKarg->IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, 
printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ if (pKarg->Information.uChangeCount != 0 && -+ pKarg->Information.uChangeCount != ioc->csmi_change_count ) { -+ pKarg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ pKarg->Information.uFailureCode = -+ CSMI_SAS_FAIL_CODE_CHANGE_COUNT_INVALID; -+ goto cim_get_raid_features_exit; -+ } -+ -+ pKarg->Information.uFeatures = CSMI_SAS_RAID_FEATURE_REBUILD | -+ CSMI_SAS_RAID_FEATURE_SURFACE_SCAN | -+ CSMI_SAS_RAID_FEATURE_SPARES_SHARED; -+ pKarg->Information.bDefaultTransformPriority = -+ CSMI_SAS_PRIORITY_UNKNOWN; -+ pKarg->Information.bTransformPriority = CSMI_SAS_PRIORITY_UNKNOWN; -+ pKarg->Information.bDefaultRebuildPriority = CSMI_SAS_PRIORITY_UNKNOWN; -+ pKarg->Information.bRebuildPriority = -+ pKarg->Information.bDefaultRebuildPriority; -+ pKarg->Information.bDefaultSurfaceScanPriority = -+ CSMI_SAS_PRIORITY_UNKNOWN; -+ pKarg->Information.bSurfaceScanPriority = CSMI_SAS_PRIORITY_UNKNOWN; -+ pKarg->Information.uRaidSetTransformationRules = 0; -+ -+ /* IS */ -+ pKarg->Information.RaidType[0].bRaidType = CSMI_SAS_RAID_TYPE_0; -+ pKarg->Information.RaidType[0].uSupportedStripeSizeMap = 0x80; -+ -+ /* IM */ -+ pKarg->Information.RaidType[1].bRaidType = CSMI_SAS_RAID_TYPE_1; -+ pKarg->Information.RaidType[1].uSupportedStripeSizeMap = 0; -+ -+ /* IME */ -+ pKarg->Information.RaidType[2].bRaidType = CSMI_SAS_RAID_TYPE_1E; -+ pKarg->Information.RaidType[2].uSupportedStripeSizeMap = 0x80; -+ -+ pKarg->Information.RaidType[3].bRaidType = CSMI_SAS_RAID_TYPE_END; -+ pKarg->Information.bCacheRatiosSupported[0] = -+ CSMI_SAS_RAID_CACHE_RATIO_END; -+ -+ cim_get_raid_features_exit: -+ -+ /* -+ * Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, pKarg, -+ sizeof(CSMI_SAS_RAID_FEATURES_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_get_raid_features @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)pKarg, 
memory_pages); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS Set RAID Control command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_set_raid_control(unsigned long arg) -+{ -+ CSMI_SAS_RAID_CONTROL_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_RAID_CONTROL_BUFFER karg, *pKarg=NULL; -+ int csmi_sas_raid_control_buffer_sz, iocnum; -+ int memory_pages; -+ MPT_ADAPTER *ioc = NULL; -+ -+ if (copy_from_user(&karg, uarg, sizeof(IOCTL_HEADER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_set_raid_control struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ csmi_sas_raid_control_buffer_sz = karg.IoctlHeader.Length; -+ memory_pages = get_order(csmi_sas_raid_control_buffer_sz); -+ pKarg = (CSMI_SAS_RAID_CONTROL_BUFFER *)__get_free_pages( -+ GFP_KERNEL, memory_pages); -+ if (!pKarg){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc RAID_CONTROL_BUFFER " -+ "csmi_sas_raid_control_buffer_sz=%d memory_pages=%d\n", -+ __FILE__, __LINE__, __FUNCTION__, -+ csmi_sas_raid_control_buffer_sz, memory_pages); -+ return -ENOMEM; -+ } -+ memset(pKarg, 0, sizeof(*pKarg)); -+ -+ if (copy_from_user(pKarg, uarg, csmi_sas_raid_control_buffer_sz)) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_set_raid_control struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(pKarg->IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -ENODEV; 
-+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ if (pKarg->Information.uChangeCount != 0 && -+ pKarg->Information.uChangeCount != ioc->csmi_change_count ) { -+ pKarg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ pKarg->Information.uFailureCode = -+ CSMI_SAS_FAIL_CODE_CHANGE_COUNT_INVALID; -+ goto cim_set_raid_control_exit; -+ } -+ -+ if (pKarg->Information.bTransformPriority != -+ CSMI_SAS_PRIORITY_UNCHANGED) { -+ pKarg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ pKarg->Information.uFailureCode = -+ CSMI_SAS_FAIL_CODE_TRANSFORM_PRIORITY_INVALID; -+ goto cim_set_raid_control_exit; -+ } -+ -+ if (pKarg->Information.bRebuildPriority != -+ CSMI_SAS_PRIORITY_AUTO && -+ pKarg->Information.bRebuildPriority != -+ CSMI_SAS_PRIORITY_UNCHANGED) { -+ pKarg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ pKarg->Information.uFailureCode = -+ CSMI_SAS_FAIL_CODE_REBUILD_PRIORITY_INVALID; -+ goto cim_set_raid_control_exit; -+ } -+ -+ if (pKarg->Information.bCacheRatioFlag == -+ CSMI_SAS_RAID_CACHE_RATIO_DISABLE) { -+ pKarg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ pKarg->Information.uFailureCode = -+ CSMI_SAS_FAIL_CODE_CACHE_RATIO_INVALID; -+ goto cim_set_raid_control_exit; -+ } -+ -+ if( !strcmp(pKarg->Information.bClearConfiguration, -+ CSMI_SAS_RAID_CLEAR_CONFIGURATION_SIGNATURE) ) { -+ pKarg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ pKarg->Information.uFailureCode = -+ CSMI_SAS_FAIL_CODE_CLEAR_CONFIGURATION_INVALID; -+ goto cim_set_raid_control_exit; -+ } -+ -+ pKarg->Information.bFailureDescription[0] = '\0'; -+ -+ cim_set_raid_control_exit: -+ -+ /* -+ * Copy the data from kernel memory to user memory 
-+ */ -+ if (copy_to_user(uarg, pKarg, -+ sizeof(CSMI_SAS_RAID_CONTROL_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_set_raid_control @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ free_pages((unsigned long)pKarg, memory_pages); -+ return 0; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS Get Raid Element. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_raid_element(unsigned long arg) -+{ -+ CSMI_SAS_RAID_ELEMENT_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_RAID_ELEMENT_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ -+ if (copy_from_user(&karg, uarg, sizeof(CSMI_SAS_RAID_ELEMENT_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmisas_get_raid_element struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+/* TODO - implement IOCTL here */ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_BAD_CNTL_CODE; -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": not implemented\n")); -+ -+// csmisas_get_raid_element_exit: -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_RAID_ELEMENT_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out 
csmisas_get_raid_element @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+ -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS Set Raid Operation -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_set_raid_operation(unsigned long arg) -+{ -+ CSMI_SAS_RAID_SET_OPERATION_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_RAID_SET_OPERATION_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ -+ if (copy_from_user(&karg, uarg, sizeof(CSMI_SAS_RAID_SET_OPERATION_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_set_raid_operation struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+/* TODO - implement IOCTL here */ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_BAD_CNTL_CODE; -+ dcsmisasprintk(ioc, printk(KERN_DEBUG ": not implemented\n")); -+ -+// cim_set_raid_operation: -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_RAID_SET_OPERATION_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_set_raid_operation @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+ -+} -+ -+ -+/** -+ * Prototype Routine 
for the CSMI SAS Task Managment Config command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_task_managment(unsigned long arg) -+{ -+ CSMI_SAS_SSP_TASK_IU_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_SSP_TASK_IU_BUFFER karg; -+ pSCSITaskMgmt_t pScsiTm; -+ pSCSITaskMgmtReply_t pScsiTmReply; -+ MPT_ADAPTER *ioc = NULL; -+ MPT_SCSI_HOST *hd; -+ MPT_FRAME_HDR *mf = NULL; -+ MPIHeader_t *mpi_hdr; -+ int iocnum; -+ u8 taskType; -+ u8 channel; -+ u8 id; -+ u8 queueTag; -+ u32 TaskMsgContext = 0; -+ int i; -+ u8 found_qtag; -+ struct sas_device_info *sas_info; -+ u16 ioc_status; -+ u32 MsgContext; -+ -+ if (copy_from_user(&karg, uarg, sizeof(CSMI_SAS_SSP_TASK_IU_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_task_managment struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_INVALID_PARAMETER; -+ -+ sas_info = csmisas_get_device_component_by_os(ioc, -+ karg.Parameters.bPathId, karg.Parameters.bTargetId); -+ if (!sas_info || sas_info->is_cached || sas_info->is_logical_volume) -+ goto cim_get_task_managment_exit; -+ -+ channel = sas_info->fw.channel; -+ id = sas_info->fw.id; -+ queueTag = (u8)karg.Parameters.uQueueTag & 0xFF; -+ hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; -+ -+ /* try to catch an error -+ */ -+ if ((karg.Parameters.uFlags & CSMI_SAS_TASK_IU) && -+ 
(karg.Parameters.uFlags & CSMI_SAS_HARD_RESET_SEQUENCE)) -+ goto cim_get_task_managment_exit; -+ -+ if (karg.Parameters.uFlags & CSMI_SAS_TASK_IU) { -+ switch (karg.Parameters.bTaskManagementFunction) { -+ -+ case CSMI_SAS_SSP_ABORT_TASK: -+ taskType = MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK; -+ break; -+ case CSMI_SAS_SSP_ABORT_TASK_SET: -+ taskType = MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET; -+ break; -+ case CSMI_SAS_SSP_CLEAR_TASK_SET: -+ taskType = MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET; -+ break; -+ case CSMI_SAS_SSP_LOGICAL_UNIT_RESET: -+ taskType = MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET; -+ break; -+ case CSMI_SAS_SSP_CLEAR_ACA: -+ case CSMI_SAS_SSP_QUERY_TASK: -+ default: -+ goto cim_get_task_managment_exit; -+ } -+ } else if (karg.Parameters.uFlags & CSMI_SAS_HARD_RESET_SEQUENCE) -+ taskType = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET; -+ else -+ goto cim_get_task_managment_exit; -+ -+ switch (karg.Parameters.uInformation) { -+ case CSMI_SAS_SSP_TEST: -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "TM request for test purposes\n")); -+ break; -+ case CSMI_SAS_SSP_EXCEEDED: -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "TM request due to timeout\n")); -+ break; -+ case CSMI_SAS_SSP_DEMAND: -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "TM request demanded by app\n")); -+ break; -+ case CSMI_SAS_SSP_TRIGGER: -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "TM request sent to trigger event\n")); -+ break; -+ } -+ -+ switch (taskType) { -+ -+ case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK: -+ /* -+ * look up qtag in the ScsiLookup[] table -+ */ -+ for (i = 0, found_qtag = 0; i < hd->ioc->req_depth; i++) { -+ if ((ioc->ScsiLookup[i]) && -+ (ioc->ScsiLookup[i]->tag == queueTag)) { -+ mf = MPT_INDEX_2_MFPTR(hd->ioc, i); -+ TaskMsgContext = -+ mf->u.frame.hwhdr.msgctxu.MsgContext; -+ found_qtag=1; -+ break; -+ } -+ } -+ -+ if(!found_qtag) -+ goto cim_get_task_managment_exit; -+ -+ case MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: -+ case MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: -+ case 
MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET: -+ /* for now, this should work -+ */ -+ case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET: -+ -+ /* Single threading .... -+ */ -+ mutex_lock(&ioc->taskmgmt_cmds.mutex); -+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { -+ mutex_unlock(&ioc->taskmgmt_cmds.mutex); -+ karg.IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_FAILED; -+ goto cim_get_task_managment_exit; -+ } -+ /* Send request -+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc)) == NULL) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": no msg frames!\n")); -+ mutex_unlock(&ioc->taskmgmt_cmds.mutex); -+ mpt_clear_taskmgmt_in_progress_flag(ioc); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_task_managment_exit; -+ } -+ -+ mpi_hdr = (MPIHeader_t *) mf; -+ MsgContext = mpi_hdr->MsgContext; -+ pScsiTm = (pSCSITaskMgmt_t ) mf; -+ -+ memset(pScsiTm,0,sizeof(SCSITaskMgmt_t)); -+ pScsiTm->TaskType = taskType; -+ pScsiTm->Bus = channel; -+ pScsiTm->TargetID = id; -+ int_to_scsilun(karg.Parameters.bLun, -+ (struct scsi_lun *)pScsiTm->LUN); -+ pScsiTm->MsgContext = MsgContext; -+ pScsiTm->TaskMsgContext = TaskMsgContext; -+ pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; -+ -+ if (csmisas_send_handshake_wait(ioc, mf, -+ karg.IoctlHeader.Timeout) != 0) { -+ mutex_unlock(&ioc->taskmgmt_cmds.mutex); -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_get_task_managment_exit; -+ } -+ -+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) { -+ -+ pScsiTmReply = -+ (pSCSITaskMgmtReply_t ) ioc->ioctl_cmds.reply; -+ -+ ioc_status = le16_to_cpu(pScsiTmReply->IOCStatus) -+ & MPI_IOCSTATUS_MASK; -+ -+ memset(&karg.Status,0, -+ sizeof(CSMI_SAS_SSP_PASSTHRU_STATUS)); -+ -+ if(ioc_status == MPI_IOCSTATUS_SUCCESS) { -+ karg.IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_SUCCESS; -+ karg.Status.bSSPStatus = -+ CSMI_SAS_SSP_STATUS_COMPLETED; -+ }else if(ioc_status == MPI_IOCSTATUS_INSUFFICIENT_RESOURCES) { -+ karg.IoctlHeader.ReturnCode = -+ 
CSMI_SAS_STATUS_SUCCESS; -+ karg.Status.bSSPStatus = -+ CSMI_SAS_SSP_STATUS_RETRY; -+ }else { -+ karg.IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_FAILED; -+ karg.Status.bSSPStatus = -+ CSMI_SAS_SSP_STATUS_FATAL_ERROR; -+ } -+ } else -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ -+ break; -+ -+ default: -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_INVALID_PARAMETER; -+ break; -+ } -+ -+ mutex_unlock(&ioc->taskmgmt_cmds.mutex); -+ -+ cim_get_task_managment_exit: -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_SSP_TASK_IU_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_task_managment @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * map_sas_status_to_csmi - Conversion for Connection Status -+ * @mpi_sas_status: Sas status returned by the firmware -+ * -+ * Returns converted connection status -+ * -+ **/ -+static u8 -+map_sas_status_to_csmi(u8 mpi_sas_status) -+{ -+ u8 csmi_connect_status; -+ -+ switch (mpi_sas_status) { -+ -+ case MPI_SASSTATUS_SUCCESS: -+ csmi_connect_status = CSMI_SAS_OPEN_ACCEPT; -+ break; -+ -+ case MPI_SASSTATUS_UTC_BAD_DEST: -+ csmi_connect_status = CSMI_SAS_OPEN_REJECT_BAD_DESTINATION; -+ break; -+ -+ case MPI_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED: -+ csmi_connect_status = CSMI_SAS_OPEN_REJECT_RATE_NOT_SUPPORTED; -+ break; -+ -+ case MPI_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED: -+ csmi_connect_status = -+ CSMI_SAS_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED; -+ break; -+ -+ case MPI_SASSTATUS_UTC_STP_RESOURCES_BUSY: -+ csmi_connect_status = CSMI_SAS_OPEN_REJECT_STP_RESOURCES_BUSY; -+ break; -+ -+ case MPI_SASSTATUS_UTC_WRONG_DESTINATION: -+ csmi_connect_status = CSMI_SAS_OPEN_REJECT_WRONG_DESTINATION; -+ break; -+ -+ case MPI_SASSTATUS_SDSF_NAK_RECEIVED: -+ csmi_connect_status = CSMI_SAS_OPEN_REJECT_RETRY; -+ 
break; -+ -+ case MPI_SASSTATUS_SDSF_CONNECTION_FAILED: -+ csmi_connect_status = CSMI_SAS_OPEN_REJECT_PATHWAY_BLOCKED; -+ break; -+ -+ case MPI_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT: -+ csmi_connect_status = CSMI_SAS_OPEN_REJECT_NO_DESTINATION; -+ break; -+ -+ case MPI_SASSTATUS_UNKNOWN_ERROR: -+ case MPI_SASSTATUS_INVALID_FRAME: -+ case MPI_SASSTATUS_UTC_BREAK_RECEIVED: -+ case MPI_SASSTATUS_UTC_PORT_LAYER_REQUEST: -+ case MPI_SASSTATUS_SHORT_INFORMATION_UNIT: -+ case MPI_SASSTATUS_LONG_INFORMATION_UNIT: -+ case MPI_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA: -+ case MPI_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR: -+ case MPI_SASSTATUS_XFER_RDY_NOT_EXPECTED: -+ case MPI_SASSTATUS_DATA_INCORRECT_DATA_LENGTH: -+ case MPI_SASSTATUS_DATA_TOO_MUCH_READ_DATA: -+ case MPI_SASSTATUS_DATA_OFFSET_ERROR: -+ csmi_connect_status = CSMI_SAS_OPEN_REJECT_RESERVE_STOP; -+ break; -+ -+ default: -+ csmi_connect_status = CSMI_SAS_OPEN_REJECT_RESERVE_STOP; -+ break; -+ } -+ -+ return csmi_connect_status; -+} -+ -+/** -+ * csmisas_phy_reset -+ * Issues a phy link reset or phy hard reset -+ * -+ * @ioc - Pointer to MPT_ADAPTER structure -+ * @PhyNum - phy number -+ * @opcode - {MPI_SAS_OP_PHY_LINK_RESET,MPI_SAS_OP_PHY_HARD_RESET} -+ * -+ * Returns: 0 for success, non-zero error -+ **/ -+static int -+csmisas_phy_reset(MPT_ADAPTER *ioc, u8 PhyNum, u8 opcode) -+{ -+ SasIoUnitControlRequest_t *sasIoUnitCntrReq; -+ SasIoUnitControlReply_t *sasIoUnitCntrReply; -+ MPT_FRAME_HDR *mf = NULL; -+ MPIHeader_t *mpi_hdr; -+ u16 ioc_status; -+ u32 MsgContext; -+ -+ if ((opcode != MPI_SAS_OP_PHY_LINK_RESET) && -+ (opcode != MPI_SAS_OP_PHY_HARD_RESET)) -+ return -1; -+ -+ /* Get a MF for this command. 
-+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": no msg frames!\n")); -+ return -1; -+ } -+ -+ mpi_hdr = (MPIHeader_t *) mf; -+ MsgContext = mpi_hdr->MsgContext; -+ sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf; -+ memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t)); -+ sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL; -+ sasIoUnitCntrReq->MsgContext = MsgContext; -+ sasIoUnitCntrReq->Operation = opcode; -+ sasIoUnitCntrReq->PhyNum = PhyNum; -+ -+ if (csmisas_send_command_wait(ioc, mf, MPT_IOCTL_DEFAULT_TIMEOUT) != 0) -+ return -1; -+ -+ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) == 0) -+ return -1; -+ -+ /* process the completed Reply Message Frame */ -+ sasIoUnitCntrReply = (SasIoUnitControlReply_t *)ioc->ioctl_cmds.reply; -+ ioc_status = le16_to_cpu(sasIoUnitCntrReply->IOCStatus) -+ & MPI_IOCSTATUS_MASK; -+ if (ioc_status != MPI_IOCSTATUS_SUCCESS) { -+ printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", -+ __FUNCTION__, -+ sasIoUnitCntrReply->IOCStatus, -+ sasIoUnitCntrReply->IOCLogInfo); -+ return -1; -+ } -+ return 0; -+} -+ -+/** Prototype Routine for the CSMI SAS Phy Control command. -+ * -+ * Outputs: None. 
-+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_phy_control(unsigned long arg) -+{ -+ CSMI_SAS_PHY_CONTROL_BUFFER __user *uarg = (void __user *) arg; -+ IOCTL_HEADER ioctl_header; -+ PCSMI_SAS_PHY_CONTROL_BUFFER karg; -+ SasIOUnitPage0_t *sasIoUnitPg0=NULL; -+ dma_addr_t sasIoUnitPg0_dma; -+ int sasIoUnitPg0_data_sz=0; -+ SasIOUnitPage1_t *sasIoUnitPg1=NULL; -+ dma_addr_t sasIoUnitPg1_dma; -+ int sasIoUnitPg1_data_sz=0; -+ ConfigExtendedPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum; -+ int csmi_sas_phy_control_buffer_sz; -+ int memory_pages; -+ -+ if (copy_from_user(&ioctl_header, uarg, sizeof(IOCTL_HEADER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in IOCTL_HEADER" -+ "struct @ %p\n", __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ csmi_sas_phy_control_buffer_sz = ioctl_header.Length; -+ memory_pages = get_order(csmi_sas_phy_control_buffer_sz); -+ karg = (PCSMI_SAS_PHY_CONTROL_BUFFER)__get_free_pages( -+ GFP_KERNEL, memory_pages); -+ if (!karg){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc SAS_PHY_CONTROL_BUFFER " -+ "csmi_sas_phy_control_buffer_sz=%d memory_pages=%d\n", -+ __FILE__, __LINE__, __FUNCTION__, -+ csmi_sas_phy_control_buffer_sz, memory_pages); -+ return -ENOMEM; -+ } -+ memset(karg, 0, sizeof(*karg)); -+ -+ if (copy_from_user(karg, uarg, csmi_sas_phy_control_buffer_sz)) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_phy_control_buffer " -+ "struct @ %p\n", __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(ioctl_header.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)karg, memory_pages); -+ return -ENODEV; -+ } -+ -+ if 
(!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)karg, memory_pages); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ if (karg->bPhyIdentifier >= ioc->num_ports) { -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ goto cim_sas_phy_control_exit; -+ } -+ -+ /* -+ * Retreive SAS IOUNIT PAGE 0 -+ */ -+ -+ hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION; -+ hdr.ExtPageLength = 0; -+ hdr.PageNumber = 0; -+ hdr.Reserved1 = 0; -+ hdr.Reserved2 = 0; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; -+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; -+ -+ cfg.cfghdr.ehdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.pageAddr = 0; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; /* read */ -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if (mpt_config(ioc, &cfg) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: READ MPI_SASIOUNITPAGE0: HEADER\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ -+ if (hdr.ExtPageLength == 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": hdr.ExtPageLength == 0\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ -+ sasIoUnitPg0_data_sz = hdr.ExtPageLength * 4; -+ sasIoUnitPg0 = (SasIOUnitPage0_t *) pci_alloc_consistent(ioc->pcidev, -+ sasIoUnitPg0_data_sz, &sasIoUnitPg0_dma); -+ -+ if (!sasIoUnitPg0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ -+ memset((u8 *)sasIoUnitPg0, 0, sasIoUnitPg0_data_sz); -+ cfg.physAddr = sasIoUnitPg0_dma; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if (mpt_config(ioc, &cfg) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: READ 
MPI_SASIOUNITPAGE0: CURRENT\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ -+ /* -+ * Retreive SAS IOUNIT PAGE 1 -+ */ -+ -+ hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION; -+ hdr.ExtPageLength = 0; -+ hdr.PageNumber = 1; -+ hdr.Reserved1 = 0; -+ hdr.Reserved2 = 0; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; -+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; -+ -+ cfg.cfghdr.ehdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.pageAddr = 0; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; /* read */ -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if (mpt_config(ioc, &cfg) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: READ MPI_SASIOUNITPAGE1: HEADER\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ -+ if (hdr.ExtPageLength == 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": hdr.ExtPageLength == 0\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ -+ sasIoUnitPg1_data_sz = hdr.ExtPageLength * 4; -+ sasIoUnitPg1 = (SasIOUnitPage1_t *) pci_alloc_consistent(ioc->pcidev, -+ sasIoUnitPg1_data_sz, &sasIoUnitPg1_dma); -+ -+ if (!sasIoUnitPg1) { -+ dcsmisasprintk(ioc, printk(KERN_ERR ": pci_alloc_consistent: FAILED\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ -+ memset((u8 *)sasIoUnitPg1, 0, sasIoUnitPg1_data_sz); -+ cfg.physAddr = sasIoUnitPg1_dma; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if (mpt_config(ioc, &cfg) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: READ MPI_SASIOUNITPAGE1: CURRENT\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ -+ switch (karg->uFunction) { -+ -+ case CSMI_SAS_PC_LINK_RESET: -+ case CSMI_SAS_PC_HARD_RESET: -+ { -+ u8 opcode = (karg->uFunction==CSMI_SAS_PC_LINK_RESET) ? 
-+ MPI_SAS_OP_PHY_LINK_RESET : MPI_SAS_OP_PHY_HARD_RESET; -+ -+ if((karg->uLinkFlags & CSMI_SAS_PHY_ACTIVATE_CONTROL) && -+ (karg->usLengthOfControl >= sizeof(CSMI_SAS_PHY_CONTROL)) && -+ (karg->bNumberOfControls > 0)){ -+ if(karg->Control[0].bRate == -+ CSMI_SAS_LINK_RATE_1_5_GBPS) { -+ sasIoUnitPg1->PhyData[karg->bPhyIdentifier].MaxMinLinkRate = -+ MPI_SAS_IOUNIT1_MAX_RATE_1_5 | -+ MPI_SAS_IOUNIT1_MIN_RATE_1_5; -+ } -+ else if(karg->Control[0].bRate == -+ CSMI_SAS_LINK_RATE_3_0_GBPS) { -+ sasIoUnitPg1->PhyData[karg->bPhyIdentifier].MaxMinLinkRate = -+ MPI_SAS_IOUNIT1_MAX_RATE_3_0 | -+ MPI_SAS_IOUNIT1_MIN_RATE_3_0; -+ } -+ sasIoUnitPg1->PhyData[karg->bPhyIdentifier].PhyFlags &= -+ ~MPI_SAS_IOUNIT1_PHY_FLAGS_PHY_DISABLE; -+ cfg.dir = 1; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM; -+ if (mpt_config(ioc, &cfg) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: WRITE MPI_SASIOUNITPAGE1 NVRAM\n")); -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; -+ if (mpt_config(ioc, &cfg) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: WRITE MPI_SASIOUNITPAGE1 CURRENT\n")); -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ } -+ if (csmisas_phy_reset(ioc, -+ karg->bPhyIdentifier, opcode) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: csmisas_phy_reset\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ break; -+ -+ } -+ case CSMI_SAS_PC_PHY_DISABLE: -+ if(karg->usLengthOfControl || karg->bNumberOfControls) { -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ break; -+ } -+ sasIoUnitPg1->PhyData[karg->bPhyIdentifier].PhyFlags |= -+ MPI_SAS_IOUNIT1_PHY_FLAGS_PHY_DISABLE; -+ cfg.dir = 1; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM; -+ if (mpt_config(ioc, &cfg) != 0) { -+ dcsmisasprintk(ioc, 
printk(KERN_ERR -+ ": FAILED: WRITE MPI_SASIOUNITPAGE1 NVRAM\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; -+ if (mpt_config(ioc, &cfg) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: WRITE MPI_SASIOUNITPAGE1 CURRENT\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ if (csmisas_phy_reset(ioc, -+ karg->bPhyIdentifier, MPI_SAS_OP_PHY_HARD_RESET) != 0) { -+ dcsmisasprintk(ioc, printk(KERN_ERR -+ ": FAILED: csmisas_phy_reset\n")); -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_phy_control_exit; -+ } -+ break; -+ -+ case CSMI_SAS_PC_GET_PHY_SETTINGS: -+ if(karg->usLengthOfControl || karg->bNumberOfControls) { -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ break; -+ } -+ if(csmi_sas_phy_control_buffer_sz < -+ offsetof(CSMI_SAS_PHY_CONTROL_BUFFER,Control) + -+ (4* sizeof(CSMI_SAS_PHY_CONTROL))) { -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_INVALID_PARAMETER; -+ break; -+ } -+ karg->usLengthOfControl = sizeof(CSMI_SAS_PHY_CONTROL); -+ karg->bNumberOfControls = 4; -+ karg->Control[0].bType = CSMI_SAS_SAS; -+ karg->Control[0].bRate = CSMI_SAS_LINK_RATE_1_5_GBPS; -+ karg->Control[1].bType = CSMI_SAS_SAS; -+ karg->Control[1].bRate = CSMI_SAS_LINK_RATE_3_0_GBPS; -+ karg->Control[2].bType = CSMI_SAS_SATA; -+ karg->Control[2].bRate = CSMI_SAS_LINK_RATE_1_5_GBPS; -+ karg->Control[3].bType = CSMI_SAS_SATA; -+ karg->Control[3].bRate = CSMI_SAS_LINK_RATE_3_0_GBPS; -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ break; -+ default: -+ break; -+ } -+ -+ cim_sas_phy_control_exit: -+ -+ if (sasIoUnitPg0) -+ pci_free_consistent(ioc->pcidev, sasIoUnitPg0_data_sz, -+ (u8 *) sasIoUnitPg0, sasIoUnitPg0_dma); -+ -+ if (sasIoUnitPg1) -+ pci_free_consistent(ioc->pcidev, sasIoUnitPg1_data_sz, -+ (u8 *) sasIoUnitPg1, sasIoUnitPg1_dma); -+ -+ /* 
Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, karg,csmi_sas_phy_control_buffer_sz)) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_phy_control_buffer @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ free_pages((unsigned long)karg, memory_pages); -+ return 0; -+} -+ -+/** -+ * csmisas_get_manuf_pg_7 - Fetch Manufacturing config Page7. -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @mfgpage7_buffer: pointer to ManufacturingPage7_t that returns config -+ * page data -+ * @mfg_size - max size of buffer -+ * -+ * Return: 0 for success -+ * -ENOMEM if no memory available -+ * -EPERM if not allowed due to ISR context -+ * -EAGAIN if no msg frames currently available -+ * -EFAULT for non-successful reply or no reply (timeout) -+ **/ -+static int -+csmisas_get_manuf_pg_7(MPT_ADAPTER *ioc, ManufacturingPage7_t *mfgpage7_buffer, int mfg_size) -+{ -+ ConfigPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ ManufacturingPage7_t *mfgPage7 = NULL; -+ dma_addr_t mfgPage7_dma; -+ int data_sz = 0; -+ int rc; -+ -+ /* Get Manufacturing Page 7 header */ -+ hdr.PageVersion = MPI_MANUFACTURING0_PAGEVERSION; -+ hdr.PageLength = 0; -+ hdr.PageNumber = 7; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING; -+ cfg.cfghdr.hdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; -+ cfg.pageAddr = 0; -+ cfg.timeout = 0; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) -+ goto csmisas_get_manuf_pg_7_exit; -+ -+ if (hdr.PageLength == 0) { -+ rc = -EFAULT; -+ goto csmisas_get_manuf_pg_7_exit; -+ } -+ -+ data_sz = hdr.PageLength * 4; -+ mfgPage7 = pci_alloc_consistent(ioc->pcidev, data_sz, &mfgPage7_dma); -+ if (!mfgPage7) { -+ rc = -ENOMEM; -+ goto csmisas_get_manuf_pg_7_exit; -+ } -+ -+ memset((u8 *)mfgPage7, 0, data_sz); -+ cfg.physAddr = mfgPage7_dma; -+ 
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) -+ goto csmisas_get_manuf_pg_7_exit; -+ -+ /* copy buffer back to user */ -+ memcpy(mfgpage7_buffer, mfgPage7, min(data_sz, mfg_size)); -+ -+ csmisas_get_manuf_pg_7_exit: -+ -+ if (mfgPage7) -+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *)mfgPage7, -+ mfgPage7_dma); -+ -+ return rc; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS Get Connector info command. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ **/ -+static int -+csmisas_get_connector_info(unsigned long arg) -+{ -+ CSMI_SAS_CONNECTOR_INFO_BUFFER __user *uarg = (void __user *) arg; -+ CSMI_SAS_CONNECTOR_INFO_BUFFER karg; -+ MPT_ADAPTER *ioc = NULL; -+ ManufacturingPage7_t *mfgPg7 = NULL; -+ int mfgPg7_sz; -+ int iocnum; -+ int i; -+ -+ if (copy_from_user(&karg, uarg, -+ sizeof(CSMI_SAS_CONNECTOR_INFO_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_connector_info_buffer" -+ " struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ karg.IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ -+ /* `32` is the sizeof MPI_MANPAGE7_CONNECTOR_INFO */ -+ for (i = 0; i < 32; i++) { -+ karg.Reference[i].uPinout = CSMI_SAS_CON_UNKNOWN; -+ strcpy(karg.Reference[i].bConnector,""); -+ karg.Reference[i].bLocation = CSMI_SAS_CON_UNKNOWN; -+ } -+ -+ mfgPg7_sz = 
offsetof(CONFIG_PAGE_MANUFACTURING_7,ConnectorInfo) + -+ (ioc->num_ports * sizeof(MPI_MANPAGE7_CONNECTOR_INFO)); -+ mfgPg7 = kmalloc(mfgPg7_sz, GFP_KERNEL); -+ if (!mfgPg7){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, mfgPg7); -+ return -EFAULT; -+ } -+ memset(mfgPg7, 0, mfgPg7_sz); -+ -+ if (!csmisas_get_manuf_pg_7(ioc, mfgPg7, mfgPg7_sz)) { -+ for (i = 0; i < ioc->num_ports; i++) { -+ karg.Reference[i].uPinout = -+ le32_to_cpu(mfgPg7->ConnectorInfo[i].Pinout); -+ /*endian conversion , this is u8 * 16 ?? */ -+ strncpy(karg.Reference[i].bConnector, -+ mfgPg7->ConnectorInfo[i].Connector, 16); -+ karg.Reference[i].bLocation = -+ mfgPg7->ConnectorInfo[i].Location; -+ } -+ } -+ -+ kfree(mfgPg7); -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, &karg, -+ sizeof(CSMI_SAS_CONNECTOR_INFO_BUFFER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_connector_info_buffer @" -+ "%p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return 0; -+} -+ -+/** -+ * csmisas_fill_location_data -+ * -+ * Outputs: None. 
-+ * Return: 0 if successful -+ **/ -+static int -+csmisas_fill_location_data(MPT_ADAPTER *ioc, u8 bus, u8 id, u8 opcode, -+ CSMI_SAS_LOCATION_IDENTIFIER * location_ident) -+{ -+ -+ ConfigExtendedPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ int rc; -+ SasDevicePage0_t *sasDevicePg0=NULL; -+ SasEnclosurePage0_t *sasEnclosurePg0=NULL; -+ dma_addr_t sasDevicePg0_dma,sasEnclosurePg0_dma; -+ int sasDevicePg0_data_sz=0; -+ int sasEnclosurePg0_data_sz=0; -+ u64 sas_address; -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ memset (location_ident, 0, sizeof(*location_ident)); -+ -+ /* SAS Device Page 0 */ -+ hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; -+ hdr.ExtPageLength = 0; -+ hdr.PageNumber = 0; -+ hdr.Reserved1 = 0; -+ hdr.Reserved2 = 0; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; -+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE; -+ -+ cfg.cfghdr.ehdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; /* read */ -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ rc=-1; -+ goto fill_location_data_exit; -+ } -+ -+ if (hdr.ExtPageLength == 0) { -+ rc=-1; -+ goto fill_location_data_exit; -+ } -+ -+ sasDevicePg0_data_sz = hdr.ExtPageLength * 4; -+ sasDevicePg0 = (SasDevicePage0_t *) pci_alloc_consistent( -+ ioc->pcidev, sasDevicePg0_data_sz, &sasDevicePg0_dma); -+ if (!sasDevicePg0) { -+ rc=-1; -+ goto fill_location_data_exit; -+ } -+ -+ memset((u8 *)sasDevicePg0, 0, sasDevicePg0_data_sz); -+ cfg.physAddr = sasDevicePg0_dma; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ cfg.pageAddr = (bus << 8) + id -+ + (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << -+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT); -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ rc=-1; -+ goto fill_location_data_exit; -+ } -+ -+ location_ident->bLocationFlags |= CSMI_SAS_LOCATE_SAS_ADDRESS_VALID; -+ memcpy(&sas_address, &sasDevicePg0->SASAddress, sizeof(u64)); -+ sas_address = 
reverse_byte_order64(sas_address); -+ memcpy(location_ident->bSASAddress, &sas_address, sizeof(u64)); -+ -+ location_ident->bLocationFlags |= CSMI_SAS_LOCATE_SAS_LUN_VALID; -+ memset(location_ident->bSASLun, 0, sizeof(location_ident->bSASLun)); -+ -+ /* SAS Enclosure Page 0 */ -+ hdr.PageVersion = MPI_SASENCLOSURE0_PAGEVERSION; -+ hdr.ExtPageLength = 0; -+ hdr.PageNumber = 0; -+ hdr.Reserved1 = 0; -+ hdr.Reserved2 = 0; -+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; -+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_ENCLOSURE; -+ -+ cfg.cfghdr.ehdr = &hdr; -+ cfg.physAddr = -1; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; /* read */ -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ rc=0; -+ goto fill_location_data_exit; -+ } -+ -+ if (hdr.ExtPageLength == 0) { -+ rc=0; -+ goto fill_location_data_exit; -+ } -+ -+ sasEnclosurePg0_data_sz = hdr.ExtPageLength * 4; -+ sasEnclosurePg0 = (SasEnclosurePage0_t *) pci_alloc_consistent( -+ ioc->pcidev, sasEnclosurePg0_data_sz, &sasEnclosurePg0_dma); -+ if (!sasEnclosurePg0) { -+ rc=0; -+ goto fill_location_data_exit; -+ } -+ cfg.physAddr = sasEnclosurePg0_dma; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ cfg.pageAddr = sasDevicePg0->EnclosureHandle -+ + (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE << MPI_SAS_ENCLOS_PGAD_FORM_SHIFT); -+ -+ if ((rc = mpt_config(ioc, &cfg)) != 0) { -+ rc=0; -+ goto fill_location_data_exit; -+ } -+ -+ location_ident->bLocationFlags |= CSMI_SAS_LOCATE_ENCLOSURE_IDENTIFIER_VALID; -+ memcpy(&sas_address, &sasEnclosurePg0->EnclosureLogicalID, sizeof(u64)); -+ sas_address = reverse_byte_order64(sas_address); -+ if (sas_address) -+ memcpy(location_ident->bEnclosureIdentifier, &sas_address, sizeof(u64)); -+ else -+ strcpy(location_ident->bEnclosureIdentifier,"Internal"); -+ -+// bBayPrefix - not supported -+ -+// TODO - We need to look at sasEnclosurePg0-.Flags , to determine -+// whether SEP BUS/TargetID is valid. 
Ifs its a SES device, then -+// issue internal inquiry to (bus/id) to gather the Enclosure name. -+// If the device is SMP, then issue SMP_MANUFACTURING to get enclosure name -+// If its direct attached, there is no enclosure name -+ location_ident->bLocationFlags |= CSMI_SAS_LOCATE_ENCLOSURE_NAME_VALID; -+ strcpy(location_ident->bEnclosureName,"Not Supported"); -+ -+ location_ident->bLocationFlags |= CSMI_SAS_LOCATE_LOCATION_STATE_VALID; -+ location_ident->bLocationState = CSMI_SAS_LOCATE_UNKNOWN; -+ -+ location_ident->bLocationFlags |= CSMI_SAS_LOCATE_BAY_IDENTIFIER_VALID; -+ location_ident->bBayIdentifier = le16_to_cpu(sasDevicePg0->Slot); -+ -+ -+// TODO - illuminating LEDs, -+// karg->bIdentify = CSMI_SAS_LOCATE_FORCE_OFF, CSMI_SAS_LOCATE_FORCE_ON -+// We can enable/disable LEDs by SCSI Enclosure Processor MPI request message -+// printk("Flags=0x%x\n",sasEnclosurePg0->Flags); -+ -+/* check sasEnclosurePg0->Flags - -+ * to validate whether we need to send the SEPRequest -+ * bit:5 should be set -+ * bit:3-0 any bit should be set. 
If zero, then SEPRequest will fail -+*/ -+ -+/* MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR -+ * Look in mpi_init.h -+ * SEPRequest_t = structure -+ * -+ * SEPRequest_t->Action should be set to MPI_SEP_REQ_ACTION_WRITE_STATUS -+ * -+ * SEPRequest_t->Flags should be set to -+ * MPI_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS, to pass along enclosure/slot ids -+ * -+ * SEPRequest_t->SlotStatus |= MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST - this -+ * will illuminate the LEDs -+ */ -+ -+fill_location_data_exit: -+ -+ if (sasDevicePg0 != NULL) -+ pci_free_consistent(ioc->pcidev, sasDevicePg0_data_sz, -+ sasDevicePg0, sasDevicePg0_dma); -+ -+ if (sasEnclosurePg0 != NULL) -+ pci_free_consistent(ioc->pcidev, sasEnclosurePg0_data_sz, -+ sasEnclosurePg0, sasEnclosurePg0_dma); -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ return rc; -+} -+ -+static int -+csmisas_fill_location_data_raid(MPT_ADAPTER *ioc, PCSMI_SAS_GET_LOCATION_BUFFER karg, u8 VolumeBus, -+ u8 volumeID) -+{ -+ pRaidVolumePage0_t pVolume0 = NULL; -+ pRaidPhysDiskPage0_t pPhysDisk0 = NULL; -+ CONFIGPARMS cfg; -+ ConfigPageHeader_t header; -+ u8 physDiskNumMax; -+ int volumepage0sz = 0, physdiskpage0sz = 0; -+ dma_addr_t volume0_dma, physdisk0_dma; -+ int csmi_sas_get_location_sz; -+ int rc = 0, i, idx; -+ int num_hotpares; -+ u64 totalMaxLBA, tmpTotalMaxLBA; -+ IOCPage5_t *iocPage5 = NULL; -+ u32 device_info = 0; -+ struct sas_device_info *sas_info; -+ int sz; -+ -+ csmi_sas_get_location_sz = karg->IoctlHeader.Length; -+ physDiskNumMax = (csmi_sas_get_location_sz - -+ offsetof(CSMI_SAS_GET_LOCATION_BUFFER,Location)) -+ / sizeof(CSMI_SAS_LOCATION_IDENTIFIER); -+ karg->bNumberOfLocationIdentifiers=0; -+ -+ /* -+ * get RAID Volume Page 0 -+ */ -+ -+ header.PageVersion = 0; -+ header.PageLength = 0; -+ header.PageNumber = 0; -+ header.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME; -+ cfg.cfghdr.hdr = &header; -+ cfg.physAddr = -1; -+ cfg.pageAddr = (VolumeBus << 8) + volumeID; -+ cfg.action = 
MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ if (mpt_config(ioc, &cfg) != 0) { -+ rc = -1; -+ goto sas_fill_location_data_raid_exit; -+ } -+ -+ if (header.PageLength == 0) { -+ rc = -1; -+ goto sas_fill_location_data_raid_exit; -+ } -+ -+ volumepage0sz = header.PageLength * 4; -+ pVolume0 = pci_alloc_consistent(ioc->pcidev, volumepage0sz, -+ &volume0_dma); -+ if (!pVolume0) { -+ rc = -1; -+ goto sas_fill_location_data_raid_exit; -+ } -+ -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ cfg.physAddr = volume0_dma; -+ if (mpt_config(ioc, &cfg) != 0){ -+ rc = -1; -+ goto sas_fill_location_data_raid_exit; -+ } -+ -+ totalMaxLBA = (u64)le32_to_cpu(pVolume0->MaxLBA) | -+ ((u64)le32_to_cpu(pVolume0->MaxLBAHigh)) << 32; -+ -+ /* -+ * get RAID Physical Disk Page 0 -+ */ -+ header.PageVersion = 0; -+ header.PageLength = 0; -+ header.PageNumber = 0; -+ header.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; -+ cfg.cfghdr.hdr = &header; -+ cfg.physAddr = -1; -+ cfg.pageAddr = 0; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; -+ cfg.timeout = MPT_IOCTL_DEFAULT_TIMEOUT; -+ if (mpt_config(ioc, &cfg) != 0) { -+ rc = -1; -+ goto sas_fill_location_data_raid_exit; -+ } -+ -+ if (header.PageLength == 0) { -+ rc = -1; -+ goto sas_fill_location_data_raid_exit; -+ } -+ -+ physdiskpage0sz = header.PageLength * 4; -+ pPhysDisk0 = pci_alloc_consistent(ioc->pcidev, physdiskpage0sz, -+ &physdisk0_dma); -+ if (!pPhysDisk0) { -+ rc = -1; -+ goto sas_fill_location_data_raid_exit; -+ } -+ cfg.physAddr = physdisk0_dma; -+ -+ for (i=0; i < min(pVolume0->NumPhysDisks, physDiskNumMax); i++) { -+ -+ /* obtain a refresh of pPhysDisk0 */ -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ cfg.pageAddr = pVolume0->PhysDisk[i].PhysDiskNum; -+ if (mpt_config(ioc, &cfg) != 0){ -+ rc = -1; -+ goto sas_fill_location_data_raid_exit; -+ } -+ -+ if((csmisas_fill_location_data(ioc, pPhysDisk0->PhysDiskBus, -+ pPhysDisk0->PhysDiskID, 
karg->bIdentify, -+ &karg->Location[karg->bNumberOfLocationIdentifiers])) == 0) -+ karg->bNumberOfLocationIdentifiers++; -+ -+ if (device_info) -+ continue; -+ sas_info = csmisas_get_device_component_by_fw(ioc, -+ pPhysDisk0->PhysDiskBus, pPhysDisk0->PhysDiskID); -+ if (!sas_info || sas_info->is_cached) -+ continue; -+ device_info = sas_info->device_info; -+ } -+ -+ if (pVolume0->VolumeType == MPI_RAID_VOL_TYPE_IS) -+ goto sas_fill_location_data_raid_exit; -+ -+ /* -+ * hot spare support -+ * -+ */ -+ -+ num_hotpares = csmisas_get_number_hotspares(ioc); -+ -+ if (num_hotpares) { -+ -+ sz = offsetof(IOCPage5_t, HotSpare) + -+ num_hotpares * sizeof(IOC_5_HOT_SPARE); -+ iocPage5 = kmalloc(sz, GFP_KERNEL); -+ -+ if (!iocPage5) -+ goto sas_fill_location_data_raid_exit; -+ memset(iocPage5, 0, sizeof(*iocPage5)); -+ -+ if (csmisas_get_ioc_pg5(ioc, iocPage5, sz) != 0) -+ goto sas_fill_location_data_raid_exit; -+ -+ for(i = 0, idx = pVolume0->NumPhysDisks ; i < num_hotpares; -+ i++, idx++) { -+ -+ if (idx >= physDiskNumMax) -+ break; -+ -+ /* obtain a refresh of pPhysDisk0 */ -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ cfg.pageAddr = iocPage5->HotSpare[i].PhysDiskNum; -+ if (mpt_config(ioc, &cfg) != 0) -+ goto sas_fill_location_data_raid_exit; -+ -+ /* Search the list for the matching SAS address. 
*/ -+ sas_info = csmisas_get_device_component_by_fw(ioc, -+ pPhysDisk0->PhysDiskBus, pPhysDisk0->PhysDiskID); -+ -+ if (!sas_info || sas_info->is_cached) -+ continue; -+ -+ /* don't mix SSP hot spare -+ * in SATA volume -+ */ -+ if (!csmisas_is_sata(pPhysDisk0) && -+ (device_info & -+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)) -+ continue; -+ -+ /* don't mix SATA hot spare -+ * in SSP volume -+ */ -+ if (csmisas_is_sata(pPhysDisk0) && -+ (device_info & -+ MPI_SAS_DEVICE_INFO_SSP_TARGET)) -+ continue; -+ -+ /* capacity check for IM volumes*/ -+ if ((pVolume0->VolumeType == -+ MPI_RAID_VOL_TYPE_IM) && -+ (totalMaxLBA + -+ (64*2*1024) /* metadata = 64MB*/ > -+ le32_to_cpu(pPhysDisk0->MaxLBA))) -+ continue; -+ -+ tmpTotalMaxLBA = totalMaxLBA; -+ do_div(tmpTotalMaxLBA, pVolume0->NumPhysDisks); -+ /* capacity check for IME volumes*/ -+ if ((pVolume0->VolumeType == -+ MPI_RAID_VOL_TYPE_IME) && -+ ((tmpTotalMaxLBA * 2) + -+ (64*2*1024 ) /*metadata = 64MB*/ > -+ le32_to_cpu(pPhysDisk0->MaxLBA))) -+ continue; -+ -+ if((csmisas_fill_location_data(ioc, -+ pPhysDisk0->PhysDiskBus, pPhysDisk0->PhysDiskID, -+ karg->bIdentify, -+ &karg->Location[karg->bNumberOfLocationIdentifiers])) == 0) -+ karg->bNumberOfLocationIdentifiers++; -+ } -+ } -+ -+ -+ sas_fill_location_data_raid_exit: -+ -+ kfree(iocPage5); -+ -+ if (pVolume0) -+ pci_free_consistent(ioc->pcidev, volumepage0sz, pVolume0, -+ volume0_dma); -+ -+ if(pPhysDisk0) -+ pci_free_consistent(ioc->pcidev, physdiskpage0sz, pPhysDisk0, -+ physdisk0_dma); -+ -+ return rc; -+} -+ -+/** -+ * Prototype Routine for the CSMI SAS Get location command. -+ * -+ * Outputs: None. 
-+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -ENODEV if no such device/adapter -+ */ -+static int -+csmisas_get_location(unsigned long arg) -+{ -+ CSMI_SAS_GET_LOCATION_BUFFER __user *uarg = (void __user *) arg; -+ PCSMI_SAS_GET_LOCATION_BUFFER karg; -+ IOCTL_HEADER ioctl_header; -+ MPT_ADAPTER *ioc = NULL; -+ int iocnum,i; -+ int csmi_sas_get_location_sz; -+ int memory_pages; -+ struct sas_device_info *sas_info; -+ -+ if (copy_from_user(&ioctl_header, uarg, sizeof(IOCTL_HEADER))) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in IOCTL_HEADER" -+ "struct @ %p\n", __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ csmi_sas_get_location_sz = ioctl_header.Length; -+ memory_pages = get_order(csmi_sas_get_location_sz); -+ karg = (PCSMI_SAS_GET_LOCATION_BUFFER)__get_free_pages( -+ GFP_KERNEL, memory_pages); -+ if (!karg){ -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to malloc GET_LOCATION_BUFFER " -+ "csmi_sas_get_location_sz=%d memory_pages=%d\n", -+ __FILE__, __LINE__, __FUNCTION__, -+ csmi_sas_get_location_sz, memory_pages); -+ return -ENOMEM; -+ } -+ memset(karg, 0, sizeof(*karg)); -+ -+ if (copy_from_user(karg, uarg, csmi_sas_get_location_sz)) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to read in csmi_sas_phy_control_buffer " -+ "struct @ %p\n", __FILE__, __LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg->IoctlHeader.IOControllerNumber, -+ &ioc)) < 0) || (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)karg, memory_pages); -+ return -ENODEV; -+ } -+ -+ if (!csmisas_is_this_sas_cntr(ioc)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not SAS controller!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ free_pages((unsigned long)karg, memory_pages); -+ return -ENODEV; -+ } -+ -+ dcsmisasprintk(ioc, 
printk(KERN_DEBUG "%s enter.\n",__FUNCTION__)); -+ -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_INVALID_PARAMETER; -+ if(karg->bLengthOfLocationIdentifier != -+ sizeof(CSMI_SAS_LOCATION_IDENTIFIER)) -+ goto cim_sas_get_location_exit; -+ -+ sas_info = csmisas_get_device_component_by_os(ioc, karg->bPathId, -+ karg->bTargetId); -+ if (!sas_info) -+ goto cim_sas_get_location_exit; -+ -+ /* RAID SUPPORT */ -+ if (ioc->raid_data.pIocPg2 && sas_info->is_logical_volume) { -+ for (i=0; iraid_data.pIocPg2->NumActiveVolumes; i++){ -+ if (sas_info->fw.id == -+ ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID && -+ sas_info->fw.channel == -+ ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus) { -+ if(csmisas_fill_location_data_raid(ioc, karg, -+ ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus, -+ ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID) == 0) -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_SUCCESS; -+ else -+ karg->IoctlHeader.ReturnCode = -+ CSMI_SAS_STATUS_FAILED; -+ goto cim_sas_get_location_exit; -+ } -+ } -+ } -+ -+ /* NON-RAID SUPPORT */ -+ if (sas_info->is_cached || sas_info->is_logical_volume) -+ goto cim_sas_get_location_exit; -+ -+ /* make sure there's enough room to populate the Location[] struct */ -+ if ((csmi_sas_get_location_sz - -+ offsetof(CSMI_SAS_GET_LOCATION_BUFFER,Location)) < -+ sizeof(CSMI_SAS_LOCATION_IDENTIFIER)) -+ goto cim_sas_get_location_exit; -+ -+ karg->bNumberOfLocationIdentifiers=1; -+ karg->Location[0].bLocationFlags=0; -+ if((csmisas_fill_location_data(ioc, sas_info->fw.channel, -+ sas_info->fw.id, karg->bIdentify, &karg->Location[0])) == 0) -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_SUCCESS; -+ else -+ karg->IoctlHeader.ReturnCode = CSMI_SAS_STATUS_FAILED; -+ -+ cim_sas_get_location_exit: -+ -+ /* Copy the data from kernel memory to user memory -+ */ -+ if (copy_to_user(uarg, karg, csmi_sas_get_location_sz)) { -+ printk(KERN_ERR "%s@%d::%s() - " -+ "Unable to write out csmi_sas_get_location_buffer " -+ "@ %p\n",__FILE__, 
__LINE__, __FUNCTION__, uarg); -+ free_pages((unsigned long)karg, memory_pages); -+ return -EFAULT; -+ } -+ -+ dcsmisasprintk(ioc, printk(KERN_DEBUG "%s exit.\n",__FUNCTION__)); -+ free_pages((unsigned long)karg, memory_pages); -+ return 0; -+} ---- /dev/null -+++ b/drivers/message/fusion/csmi/csmisas.h -@@ -0,0 +1,1854 @@ -+/************************************************************************** -+ -+Module Name: -+ -+ CSMISAS.H -+ -+ -+Abstract: -+ -+ This file contains constants and data structure definitions used by drivers -+ that support the Common Storage Management Interface specification for -+ SAS or SATA in either the Windows or Linux. -+ -+ This should be considered as a reference implementation only. Changes may -+ be necessary to accommodate a specific build environment or target OS. -+ -+Revision History: -+ -+ 001 SEF 8/12/03 Initial release. -+ 002 SEF 8/20/03 Cleanup to match documentation. -+ 003 SEF 9/12/03 Additional cleanup, created combined header -+ 004 SEF 9/23/03 Changed base types to match linux defaults -+ Added RAID signature -+ Added bControllerFlags to CSMI_SAS_CNTLR_CONFIG -+ Changed CSMI_SAS_BEGIN_PACK to 8 for common structures -+ Fixed other typos identified in first compilation test -+ 005 SEF 10/03/03 Additions to match first version of CSMI document -+ 006 SEF 10/14/03 Fixed typedef struct _CSMI_SAS_SMP_PASSTHRU_BUFFER -+ Added defines for bConnectionRate -+ 007 SEF 10/15/03 Added Firmware Download Control Code and support -+ Added CSMI revision support -+ 008 SEF 10/30/03 No functional change, just updated version to track -+ spec changes -+ 009 SEF 12/09/03 No functional change, just updated version to track -+ spec changes -+ 010 SEF 3/11/04 Fixed typedef struct CSMI_SAS_RAID_DRIVES to include the -+ bFirmware member that is defined in the spec, but -+ was missing in this file, -+ added CC_CSMI_SAS_TASK_MANAGEMENT -+ 011 SEF 4/02/04 No functional change, added comment line before -+ CC_CSMI_SAS_TASK_MANAGEMENT -+ 012 SEF 
4/16/04 Added IOControllerNumber to linux header, -+ Modified linux control codes to have upper word of -+ 0xCC77.... to indicate CSMI version 77 -+ Added bSignalClass to CC_CSMI_SET_PHY_INFO -+ Added CC_CSMI_SAS_PHY_CONTROL support -+ 013 SEF 5/14/04 Added CC_CSMI_SAS_GET_CONNECTOR_INFO support -+ 014 SEF 5/24/04 No functional change, just updated version to track spec -+ changes -+ 015 SEF 6/16/04 changed bPinout to uPinout to reflect proper size, -+ changed width of bLocation defines to reflect size -+ 016 SEF 6/17/04 changed bLengthOfControls in CSMI_SAS_PHY_CONTROL -+ to be proper size -+ 017 SEF 9/17/04 added CSMI_SAS_SATA_PORT_SELECTOR, -+ CSMI_SAS_LINK_VIRTUAL, CSMI_SAS_CON_NOT_PRESENT, and -+ CSMI_SAS_CON_NOT_CONNECTED -+ 018 SEF 9/20/04 added CSMI_SAS_PHY_USER_PATTERN, -+ changed definition of CSMI_SAS_PHY_FIXED_PATTERN to not -+ conflict with activate definition -+ 019 SEF 12/06/04 added CSMI_SAS_GET_LOCATION -+ added bSSPStatus to CSMI_SAS_SSP_PASSTHRU_STATUS -+ structure -+ 020 SEF 5/25/05 added CSMI_SAS_PHY_VIRTUAL_SMP, and changes to -+ CSMI_SAS_GET_LOCATION -+ 021 SEF 11/03/05 added new RAID creation functionality -+ 022 SEF 2/01/06 corrected typo bNegotitiatedLInkRate -+ Added two more RAID_TYPES, 7 and 8 -+ 023 SEF 4/04/06 added CSMI_RAID_TYPE_1E -+ changed structures that contained surface scan -+ to priority approach rather than time, causes -+ 0.89 to incompatible with 0.87, so a version -+ check is necessary when interpreting the -+ raid structures -+ Added netware section -+ 024 DRG 5/22/06 Added uFailureCode to CSMI_SAS_RAID_CONFIG and -+ CSMI_SAS_RAID_FEATURES -+ Changed __u64 fields to high and low __u32 fields in -+ order to avoid backward compatibility issues with -+ packing and alignment. -+ Fixed alignment problem in CSMI_SAS_RAID_DRIVES. -+ Added CSMI_SAS_CNTLR_SMART_ARRAY to uControllerFlags -+ Reassigned the value of CSMI_SAS_CNTLR_RAID_CFG_SUPPORT -+ to avoid a conflict. 
-+ -+**************************************************************************/ -+ -+#ifndef _CSMI_SAS_H_ -+#define _CSMI_SAS_H_ -+ -+// CSMI Specification Revision, the intent is that all versions of the -+// specification will be backward compatible after the 1.00 release. -+// Major revision number, corresponds to xxxx. of CSMI specification -+// Minor revision number, corresponds to .xxxx of CSMI specification -+#define CSMI_MAJOR_REVISION 0 -+#define CSMI_MINOR_REVISION 90 -+ -+/*************************************************************************/ -+/* PATCHES FOR TYPOS */ -+/*************************************************************************/ -+ -+#define bNegotitiatedLInkRate bNegotiatedLinkRate -+ -+/*************************************************************************/ -+/* TARGET OS LINUX SPECIFIC CODE */ -+/*************************************************************************/ -+ -+// EDM #ifdef _linux -+#ifdef __KERNEL__ -+ -+// Linux base types -+ -+#include -+ -+#define __i8 char -+ -+// pack definition -+ -+// EDM #define CSMI_SAS_BEGIN_PACK(x) pack(x) -+// EDM #define CSMI_SAS_END_PACK pack() -+ -+// IOCTL Control Codes -+// (IoctlHeader.ControlCode) -+ -+// Control Codes prior to 0.77 -+ -+// Control Codes requiring CSMI_ALL_SIGNATURE -+ -+// #define CC_CSMI_SAS_GET_DRIVER_INFO 0x12345678 -+// #define CC_CSMI_SAS_GET_CNTLR_CONFIG 0x23456781 -+// #define CC_CSMI_SAS_GET_CNTLR_STATUS 0x34567812 -+// #define CC_CSMI_SAS_FIRMWARE_DOWNLOAD 0x92345678 -+ -+// Control Codes requiring CSMI_RAID_SIGNATURE -+ -+// #define CC_CSMI_SAS_GET_RAID_INFO 0x45678123 -+// #define CC_CSMI_SAS_GET_RAID_CONFIG 0x56781234 -+ -+// Control Codes requiring CSMI_SAS_SIGNATURE -+ -+// #define CC_CSMI_SAS_GET_PHY_INFO 0x67812345 -+// #define CC_CSMI_SAS_SET_PHY_INFO 0x78123456 -+// #define CC_CSMI_SAS_GET_LINK_ERRORS 0x81234567 -+// #define CC_CSMI_SAS_SMP_PASSTHRU 0xA1234567 -+// #define CC_CSMI_SAS_SSP_PASSTHRU 0xB1234567 -+// #define 
CC_CSMI_SAS_STP_PASSTHRU 0xC1234567 -+// #define CC_CSMI_SAS_GET_SATA_SIGNATURE 0xD1234567 -+// #define CC_CSMI_SAS_GET_SCSI_ADDRESS 0xE1234567 -+// #define CC_CSMI_SAS_GET_DEVICE_ADDRESS 0xF1234567 -+// #define CC_CSMI_SAS_TASK_MANAGEMENT 0xA2345678 -+ -+// Control Codes for 0.77 and later -+ -+// Control Codes requiring CSMI_ALL_SIGNATURE -+ -+#define CC_CSMI_SAS_GET_DRIVER_INFO 0xCC770001 -+#define CC_CSMI_SAS_GET_CNTLR_CONFIG 0xCC770002 -+#define CC_CSMI_SAS_GET_CNTLR_STATUS 0xCC770003 -+#define CC_CSMI_SAS_FIRMWARE_DOWNLOAD 0xCC770004 -+ -+// Control Codes requiring CSMI_RAID_SIGNATURE -+ -+#define CC_CSMI_SAS_GET_RAID_INFO 0xCC77000A -+#define CC_CSMI_SAS_GET_RAID_CONFIG 0xCC77000B -+#define CC_CSMI_SAS_GET_RAID_FEATURES 0xCC77000C -+#define CC_CSMI_SAS_SET_RAID_CONTROL 0xCC77000D -+#define CC_CSMI_SAS_GET_RAID_ELEMENT 0xCC77000E -+#define CC_CSMI_SAS_SET_RAID_OPERATION 0xCC77000F -+ -+// Control Codes requiring CSMI_SAS_SIGNATURE -+ -+#define CC_CSMI_SAS_GET_PHY_INFO 0xCC770014 -+#define CC_CSMI_SAS_SET_PHY_INFO 0xCC770015 -+#define CC_CSMI_SAS_GET_LINK_ERRORS 0xCC770016 -+#define CC_CSMI_SAS_SMP_PASSTHRU 0xCC770017 -+#define CC_CSMI_SAS_SSP_PASSTHRU 0xCC770018 -+#define CC_CSMI_SAS_STP_PASSTHRU 0xCC770019 -+#define CC_CSMI_SAS_GET_SATA_SIGNATURE 0xCC770020 -+#define CC_CSMI_SAS_GET_SCSI_ADDRESS 0xCC770021 -+#define CC_CSMI_SAS_GET_DEVICE_ADDRESS 0xCC770022 -+#define CC_CSMI_SAS_TASK_MANAGEMENT 0xCC770023 -+#define CC_CSMI_SAS_GET_CONNECTOR_INFO 0xCC770024 -+#define CC_CSMI_SAS_GET_LOCATION 0xCC770025 -+ -+ -+// Control Codes requiring CSMI_PHY_SIGNATURE -+ -+#define CC_CSMI_SAS_PHY_CONTROL 0xCC77003C -+ -+// EDM #pragma CSMI_SAS_BEGIN_PACK(8) -+#pragma pack(8) -+ -+// IOCTL_HEADER -+typedef struct _IOCTL_HEADER { -+ __u32 IOControllerNumber; -+ __u32 Length; -+ __u32 ReturnCode; -+ __u32 Timeout; -+ __u16 Direction; -+} IOCTL_HEADER, -+ *PIOCTL_HEADER; -+ -+// EDM #pragma CSMI_SAS_END_PACK -+#pragma pack() -+ -+#endif -+ 
-+/*************************************************************************/ -+/* TARGET OS WINDOWS SPECIFIC CODE */ -+/*************************************************************************/ -+ -+#ifdef _WIN32 -+ -+// windows IOCTL definitions -+ -+#ifndef _NTDDSCSIH_ -+#include -+#endif -+ -+// pack definition -+ -+#if defined _MSC_VER -+ #define CSMI_SAS_BEGIN_PACK(x) pack(push,x) -+ #define CSMI_SAS_END_PACK pack(pop) -+#elif defined __BORLANDC__ -+ #define CSMI_SAS_BEGIN_PACK(x) option -a##x -+ #define CSMI_SAS_END_PACK option -a. -+#else -+ #error "CSMISAS.H - Must externally define a pack compiler designator." -+#endif -+ -+// base types -+ -+#define __u8 unsigned char -+#define __u16 unsigned short -+#define __u32 unsigned long -+#define __u64 unsigned __int64 -+ -+#define __i8 char -+ -+// IOCTL Control Codes -+// (IoctlHeader.ControlCode) -+ -+// Control Codes requiring CSMI_ALL_SIGNATURE -+ -+#define CC_CSMI_SAS_GET_DRIVER_INFO 1 -+#define CC_CSMI_SAS_GET_CNTLR_CONFIG 2 -+#define CC_CSMI_SAS_GET_CNTLR_STATUS 3 -+#define CC_CSMI_SAS_FIRMWARE_DOWNLOAD 4 -+ -+// Control Codes requiring CSMI_RAID_SIGNATURE -+ -+#define CC_CSMI_SAS_GET_RAID_INFO 10 -+#define CC_CSMI_SAS_GET_RAID_CONFIG 11 -+#define CC_CSMI_SAS_GET_RAID_FEATURES 12 -+#define CC_CSMI_SAS_SET_RAID_CONTROL 13 -+#define CC_CSMI_SAS_GET_RAID_ELEMENT 14 -+#define CC_CSMI_SAS_SET_RAID_OPERATION 15 -+ -+// Control Codes requiring CSMI_SAS_SIGNATURE -+ -+#define CC_CSMI_SAS_GET_PHY_INFO 20 -+#define CC_CSMI_SAS_SET_PHY_INFO 21 -+#define CC_CSMI_SAS_GET_LINK_ERRORS 22 -+#define CC_CSMI_SAS_SMP_PASSTHRU 23 -+#define CC_CSMI_SAS_SSP_PASSTHRU 24 -+#define CC_CSMI_SAS_STP_PASSTHRU 25 -+#define CC_CSMI_SAS_GET_SATA_SIGNATURE 26 -+#define CC_CSMI_SAS_GET_SCSI_ADDRESS 27 -+#define CC_CSMI_SAS_GET_DEVICE_ADDRESS 28 -+#define CC_CSMI_SAS_TASK_MANAGEMENT 29 -+#define CC_CSMI_SAS_GET_CONNECTOR_INFO 30 -+#define CC_CSMI_SAS_GET_LOCATION 31 -+ -+// Control Codes requiring CSMI_PHY_SIGNATURE -+ -+#define 
CC_CSMI_SAS_PHY_CONTROL 60 -+ -+#define IOCTL_HEADER SRB_IO_CONTROL -+#define PIOCTL_HEADER PSRB_IO_CONTROL -+ -+#endif -+ -+/*************************************************************************/ -+/* TARGET OS NETWARE SPECIFIC CODE */ -+/*************************************************************************/ -+ -+#ifdef _NETWARE -+ -+// NetWare IOCTL definitions -+ -+#define CSMI_SAS_BEGIN_PACK(x) pack(x) -+#define CSMI_SAS_END_PACK pack() -+ -+#ifndef LONG -+typedef unsigned long LONG; -+#endif -+ -+#ifndef WORD -+typedef unsigned short WORD; -+#endif -+ -+#ifndef BYTE -+typedef unsigned char BYTE; -+#endif -+ -+/* Need to have these definitions for Netware */ -+#define __u8 unsigned char -+#define __u16 unsigned short -+#define __u32 unsigned long -+#define __u64 unsigned __int64 -+ -+#define __i8 char -+ -+ -+// EDM #pragma CSMI_SAS_BEGIN_PACK(8) -+#pragma pack(8) -+ -+// IOCTL_HEADER -+typedef struct _IOCTL_HEADER { -+ __u32 Length; -+ __u32 ReturnCode; -+} IOCTL_HEADER, -+ *PIOCTL_HEADER; -+ -+// EDM #pragma CSMI_SAS_END_PACK -+#pragma pack() -+ -+// IOCTL Control Codes -+// (IoctlHeader.ControlCode) -+ -+// Control Codes requiring CSMI_ALL_SIGNATURE -+ -+#define CC_CSMI_SAS_GET_DRIVER_INFO 0x01FF0001 -+#define CC_CSMI_SAS_GET_CNTLR_CONFIG 0x01FF0002 -+#define CC_CSMI_SAS_GET_CNTLR_STATUS 0x01FF0003 -+#define CC_CSMI_SAS_FIRMWARE_DOWNLOAD 0x01FF0004 -+ -+// Control Codes requiring CSMI_RAID_SIGNATURE -+ -+#define CC_CSMI_SAS_GET_RAID_INFO 0x01FF000A -+#define CC_CSMI_SAS_GET_RAID_CONFIG 0x01FF000B -+#define CC_CSMI_SAS_GET_RAID_FEATURES 0x01FF000C -+#define CC_CSMI_SAS_SET_RAID_CONTROL 0x01FF000D -+#define CC_CSMI_SAS_GET_RAID_ELEMENT 0x01FF000E -+#define CC_CSMI_SAS_SET_RAID_OPERATION 0x01FF000F -+ -+// Control Codes requiring CSMI_SAS_SIGNATURE -+ -+#define CC_CSMI_SAS_GET_PHY_INFO 0x01FF0014 -+#define CC_CSMI_SAS_SET_PHY_INFO 0x01FF0015 -+#define CC_CSMI_SAS_GET_LINK_ERRORS 0x01FF0016 -+#define CC_CSMI_SAS_SMP_PASSTHRU 0x01FF0017 -+#define 
CC_CSMI_SAS_SSP_PASSTHRU 0x01FF0018 -+#define CC_CSMI_SAS_STP_PASSTHRU 0x01FF0019 -+#define CC_CSMI_SAS_GET_SATA_SIGNATURE 0x01FF001A -+#define CC_CSMI_SAS_GET_SCSI_ADDRESS 0x01FF001B -+#define CC_CSMI_SAS_GET_DEVICE_ADDRESS 0x01FF001C -+#define CC_CSMI_SAS_TASK_MANAGEMENT 0x01FF001D -+#define CC_CSMI_SAS_GET_CONNECTOR_INFO 0x01FF001E -+#define CC_CSMI_SAS_GET_LOCATION 0x01FF001F -+ -+// Control Codes requiring CSMI_PHY_SIGNATURE -+ -+#define CC_CSMI_SAS_PHY_CONTROL 60 -+ -+#endif -+ -+/*************************************************************************/ -+/* TARGET OS NOT DEFINED ERROR */ -+/*************************************************************************/ -+ -+// EDM -+//#if (!_WIN32 && !_linux && !_NETWARE) -+// #error "Unknown target OS." -+//#endif -+ -+/*************************************************************************/ -+/* OS INDEPENDENT CODE */ -+/*************************************************************************/ -+ -+/* * * * * * * * * * Class Independent IOCTL Constants * * * * * * * * * */ -+ -+// Return codes for all IOCTL's regardless of class -+// (IoctlHeader.ReturnCode) -+ -+#define CSMI_SAS_STATUS_SUCCESS 0 -+#define CSMI_SAS_STATUS_FAILED 1 -+#define CSMI_SAS_STATUS_BAD_CNTL_CODE 2 -+#define CSMI_SAS_STATUS_INVALID_PARAMETER 3 -+#define CSMI_SAS_STATUS_WRITE_ATTEMPTED 4 -+ -+// Signature value -+// (IoctlHeader.Signature) -+ -+#define CSMI_ALL_SIGNATURE "CSMIALL" -+ -+// Timeout value default of 60 seconds -+// (IoctlHeader.Timeout) -+ -+#define CSMI_ALL_TIMEOUT 60 -+ -+// Direction values for data flow on this IOCTL -+// (IoctlHeader.Direction, Linux only) -+#define CSMI_SAS_DATA_READ 0 -+#define CSMI_SAS_DATA_WRITE 1 -+ -+// I/O Bus Types -+// ISA and EISA bus types are not supported -+// (bIoBusType) -+ -+#define CSMI_SAS_BUS_TYPE_PCI 3 -+#define CSMI_SAS_BUS_TYPE_PCMCIA 4 -+ -+// Controller Status -+// (uStatus) -+ -+#define CSMI_SAS_CNTLR_STATUS_GOOD 1 -+#define CSMI_SAS_CNTLR_STATUS_FAILED 2 -+#define 
CSMI_SAS_CNTLR_STATUS_OFFLINE 3 -+#define CSMI_SAS_CNTLR_STATUS_POWEROFF 4 -+ -+// Offline Status Reason -+// (uOfflineReason) -+ -+#define CSMI_SAS_OFFLINE_REASON_NO_REASON 0 -+#define CSMI_SAS_OFFLINE_REASON_INITIALIZING 1 -+#define CSMI_SAS_OFFLINE_REASON_BACKSIDE_BUS_DEGRADED 2 -+#define CSMI_SAS_OFFLINE_REASON_BACKSIDE_BUS_FAILURE 3 -+ -+// Controller Class -+// (bControllerClass) -+ -+#define CSMI_SAS_CNTLR_CLASS_HBA 5 -+ -+// Controller Flag bits -+// (uControllerFlags) -+ -+#define CSMI_SAS_CNTLR_SAS_HBA 0x00000001 -+#define CSMI_SAS_CNTLR_SAS_RAID 0x00000002 -+#define CSMI_SAS_CNTLR_SATA_HBA 0x00000004 -+#define CSMI_SAS_CNTLR_SATA_RAID 0x00000008 -+#define CSMI_SAS_CNTLR_SMART_ARRAY 0x00000010 -+ -+// for firmware download -+#define CSMI_SAS_CNTLR_FWD_SUPPORT 0x00010000 -+#define CSMI_SAS_CNTLR_FWD_ONLINE 0x00020000 -+#define CSMI_SAS_CNTLR_FWD_SRESET 0x00040000 -+#define CSMI_SAS_CNTLR_FWD_HRESET 0x00080000 -+#define CSMI_SAS_CNTLR_FWD_RROM 0x00100000 -+ -+// for RAID configuration supported -+#define CSMI_SAS_CNTLR_RAID_CFG_SUPPORT 0x01000000 -+ -+// Download Flag bits -+// (uDownloadFlags) -+#define CSMI_SAS_FWD_VALIDATE 0x00000001 -+#define CSMI_SAS_FWD_SOFT_RESET 0x00000002 -+#define CSMI_SAS_FWD_HARD_RESET 0x00000004 -+ -+// Firmware Download Status -+// (usStatus) -+#define CSMI_SAS_FWD_SUCCESS 0 -+#define CSMI_SAS_FWD_FAILED 1 -+#define CSMI_SAS_FWD_USING_RROM 2 -+#define CSMI_SAS_FWD_REJECT 3 -+#define CSMI_SAS_FWD_DOWNREV 4 -+ -+// Firmware Download Severity -+// (usSeverity> -+#define CSMI_SAS_FWD_INFORMATION 0 -+#define CSMI_SAS_FWD_WARNING 1 -+#define CSMI_SAS_FWD_ERROR 2 -+#define CSMI_SAS_FWD_FATAL 3 -+ -+/* * * * * * * * * * SAS RAID Class IOCTL Constants * * * * * * * * */ -+ -+// Return codes for the RAID IOCTL's regardless of class -+// (IoctlHeader.ReturnCode) -+ -+#define CSMI_SAS_RAID_SET_OUT_OF_RANGE 1000 -+#define CSMI_SAS_RAID_SET_BUFFER_TOO_SMALL 1001 -+#define CSMI_SAS_RAID_SET_DATA_CHANGED 1002 -+ -+// Signature value -+// 
(IoctlHeader.Signature) -+ -+#define CSMI_RAID_SIGNATURE "CSMIARY" -+ -+// Timeout value default of 60 seconds -+// (IoctlHeader.Timeout) -+ -+#define CSMI_RAID_TIMEOUT 60 -+ -+// RAID Types -+// (bRaidType) -+#define CSMI_SAS_RAID_TYPE_NONE 0 -+#define CSMI_SAS_RAID_TYPE_0 1 -+#define CSMI_SAS_RAID_TYPE_1 2 -+#define CSMI_SAS_RAID_TYPE_10 3 -+#define CSMI_SAS_RAID_TYPE_5 4 -+#define CSMI_SAS_RAID_TYPE_15 5 -+#define CSMI_SAS_RAID_TYPE_6 6 -+#define CSMI_SAS_RAID_TYPE_50 7 -+#define CSMI_SAS_RAID_TYPE_VOLUME 8 -+#define CSMI_SAS_RAID_TYPE_1E 9 -+#define CSMI_SAS_RAID_TYPE_OTHER 255 -+// the last value 255 was already defined for other -+// so end is defined as 254 -+#define CSMI_SAS_RAID_TYPE_END 254 -+ -+// RAID Status -+// (bStatus) -+#define CSMI_SAS_RAID_SET_STATUS_OK 0 -+#define CSMI_SAS_RAID_SET_STATUS_DEGRADED 1 -+#define CSMI_SAS_RAID_SET_STATUS_REBUILDING 2 -+#define CSMI_SAS_RAID_SET_STATUS_FAILED 3 -+#define CSMI_SAS_RAID_SET_STATUS_OFFLINE 4 -+#define CSMI_SAS_RAID_SET_STATUS_TRANSFORMING 5 -+#define CSMI_SAS_RAID_SET_STATUS_QUEUED_FOR_REBUILD 6 -+#define CSMI_SAS_RAID_SET_STATUS_QUEUED_FOR_TRANSFORMATION 7 -+ -+// RAID Drive Count -+// (bDriveCount, 0xF1 to 0xFF are reserved) -+#define CSMI_SAS_RAID_DRIVE_COUNT_TOO_BIG 0xF1 -+#define CSMI_SAS_RAID_DRIVE_COUNT_SUPRESSED 0xF2 -+ -+// RAID Data Type -+// (bDataType) -+#define CSMI_SAS_RAID_DATA_DRIVES 0 -+#define CSMI_SAS_RAID_DATA_DEVICE_ID 1 -+#define CSMI_SAS_RAID_DATA_ADDITIONAL_DATA 2 -+ -+// RAID Drive Status -+// (bDriveStatus) -+#define CSMI_SAS_DRIVE_STATUS_OK 0 -+#define CSMI_SAS_DRIVE_STATUS_REBUILDING 1 -+#define CSMI_SAS_DRIVE_STATUS_FAILED 2 -+#define CSMI_SAS_DRIVE_STATUS_DEGRADED 3 -+#define CSMI_SAS_DRIVE_STATUS_OFFLINE 4 -+#define CSMI_SAS_DRIVE_STATUS_QUEUED_FOR_REBUILD 5 -+ -+// RAID Drive Usage -+// (bDriveUsage) -+#define CSMI_SAS_DRIVE_CONFIG_NOT_USED 0 -+#define CSMI_SAS_DRIVE_CONFIG_MEMBER 1 -+#define CSMI_SAS_DRIVE_CONFIG_SPARE 2 -+#define CSMI_SAS_DRIVE_CONFIG_SPARE_ACTIVE 3 -+ 
-+// RAID Drive Type -+// (bDriveType) -+#define CSMI_SAS_DRIVE_TYPE_UNKNOWN 0 -+#define CSMI_SAS_DRIVE_TYPE_SINGLE_PORT_SAS 1 -+#define CSMI_SAS_DRIVE_TYPE_DUAL_PORT_SAS 2 -+#define CSMI_SAS_DRIVE_TYPE_SATA 3 -+#define CSMI_SAS_DRIVE_TYPE_SATA_PS 4 -+#define CSMI_SAS_DRIVE_TYPE_OTHER 255 -+ -+// RAID Write Protect -+// (bWriteProtect) -+#define CSMI_SAS_RAID_SET_WRITE_PROTECT_UNKNOWN 0 -+#define CSMI_SAS_RAID_SET_WRITE_PROTECT_UNCHANGED 0 -+#define CSMI_SAS_RAID_SET_WRITE_PROTECT_ENABLED 1 -+#define CSMI_SAS_RAID_SET_WRITE_PROTECT_DISABLED 2 -+ -+// RAID Cache Setting -+// (bCacheSetting) -+#define CSMI_SAS_RAID_SET_CACHE_UNKNOWN 0 -+#define CSMI_SAS_RAID_SET_CACHE_UNCHANGED 0 -+#define CSMI_SAS_RAID_SET_CACHE_ENABLED 1 -+#define CSMI_SAS_RAID_SET_CACHE_DISABLED 2 -+#define CSMI_SAS_RAID_SET_CACHE_CORRUPT 3 -+ -+// RAID Features -+// (uFeatures) -+#define CSMI_SAS_RAID_FEATURE_TRANSFORMATION 0x00000001 -+#define CSMI_SAS_RAID_FEATURE_REBUILD 0x00000002 -+#define CSMI_SAS_RAID_FEATURE_SPLIT_MIRROR 0x00000004 -+#define CSMI_SAS_RAID_FEATURE_MERGE_MIRROR 0x00000008 -+#define CSMI_SAS_RAID_FEATURE_LUN_RENUMBER 0x00000010 -+#define CSMI_SAS_RAID_FEATURE_SURFACE_SCAN 0x00000020 -+#define CSMI_SAS_RAID_FEATURE_SPARES_SHARED 0x00000040 -+ -+// RAID Priority -+// (bDefaultTransformPriority, etc.) 
-+#define CSMI_SAS_PRIORITY_UNKNOWN 0 -+#define CSMI_SAS_PRIORITY_UNCHANGED 0 -+#define CSMI_SAS_PRIORITY_AUTO 1 -+#define CSMI_SAS_PRIORITY_OFF 2 -+#define CSMI_SAS_PRIORITY_LOW 3 -+#define CSMI_SAS_PRIORITY_MEDIUM 4 -+#define CSMI_SAS_PRIORITY_HIGH 5 -+ -+// RAID Transformation Rules -+// (uRaidSetTransformationRules) -+#define CSMI_SAS_RAID_RULE_AVAILABLE_MEMORY 0x00000001 -+#define CSMI_SAS_RAID_RULE_OVERLAPPED_EXTENTS 0x00000002 -+ -+// RAID Cache Ratios Supported -+// (bCacheRatiosSupported) -+// from 0 to 100 defines the write to read ratio, 0 is 100% write -+#define CSMI_SAS_RAID_CACHE_RATIO_RANGE 101 -+#define CSMI_SAS_RAID_CACHE_RATIO_FIXED 102 -+#define CSMI_SAS_RAID_CACHE_RATIO_AUTO 103 -+#define CSMI_SAS_RAID_CACHE_RATIO_END 255 -+ -+// RAID Cache Ratio Flag -+// (bCacheRatioFlag) -+#define CSMI_SAS_RAID_CACHE_RATIO_DISABLE 0 -+#define CSMI_SAS_RAID_CACHE_RATIO_ENABLE 1 -+ -+// RAID Clear Configuration Signature -+// (bClearConfiguration) -+#define CSMI_SAS_RAID_CLEAR_CONFIGURATION_SIGNATURE "RAIDCLR" -+ -+// RAID Failure Codes -+// (uFailureCode) -+#define CSMI_SAS_FAIL_CODE_OK 0 -+#define CSMI_SAS_FAIL_CODE_PARAMETER_INVALID 1000 -+#define CSMI_SAS_FAIL_CODE_TRANSFORM_PRIORITY_INVALID 1001 -+#define CSMI_SAS_FAIL_CODE_REBUILD_PRIORITY_INVALID 1002 -+#define CSMI_SAS_FAIL_CODE_CACHE_RATIO_INVALID 1003 -+#define CSMI_SAS_FAIL_CODE_SURFACE_SCAN_INVALID 1004 -+#define CSMI_SAS_FAIL_CODE_CLEAR_CONFIGURATION_INVALID 1005 -+#define CSMI_SAS_FAIL_CODE_ELEMENT_INDEX_INVALID 1006 -+#define CSMI_SAS_FAIL_CODE_SUBELEMENT_INDEX_INVALID 1007 -+#define CSMI_SAS_FAIL_CODE_EXTENT_INVALID 1008 -+#define CSMI_SAS_FAIL_CODE_BLOCK_COUNT_INVALID 1009 -+#define CSMI_SAS_FAIL_CODE_DRIVE_INDEX_INVALID 1010 -+#define CSMI_SAS_FAIL_CODE_EXISTING_LUN_INVALID 1011 -+#define CSMI_SAS_FAIL_CODE_RAID_TYPE_INVALID 1012 -+#define CSMI_SAS_FAIL_CODE_STRIPE_SIZE_INVALID 1013 -+#define CSMI_SAS_FAIL_CODE_TRANSFORMATION_INVALID 1014 -+#define CSMI_SAS_FAIL_CODE_CHANGE_COUNT_INVALID 1015 
-+#define CSMI_SAS_FAIL_CODE_ENUMERATION_TYPE_INVALID 1016 -+ -+#define CSMI_SAS_FAIL_CODE_EXCEEDED_RAID_SET_COUNT 2000 -+#define CSMI_SAS_FAIL_CODE_DUPLICATE_LUN 2001 -+ -+#define CSMI_SAS_FAIL_CODE_WAIT_FOR_OPERATION 3000 -+ -+// RAID Enumeration Types -+// (uEnumerationType) -+#define CSMI_SAS_RAID_ELEMENT_TYPE_DRIVE 0 -+#define CSMI_SAS_RAID_ELEMENT_TYPE_MODULE 1 -+#define CSMI_SAS_RAID_ELEMENT_TYPE_DRIVE_RAID_SET 2 -+#define CSMI_SAS_RAID_ELEMENT_TYPE_EXTENT_DRIVE 3 -+ -+// RAID Extent Types -+// (bExtentType) -+#define CSMI_SAS_RAID_EXTENT_RESERVED 0 -+#define CSMI_SAS_RAID_EXTENT_METADATA 1 -+#define CSMI_SAS_RAID_EXTENT_ALLOCATED 2 -+#define CSMI_SAS_RAID_EXTENT_UNALLOCATED 3 -+ -+// RAID Operation Types -+// (uOperationType) -+#define CSMI_SAS_RAID_SET_CREATE 0 -+#define CSMI_SAS_RAID_SET_LABEL 1 -+#define CSMI_SAS_RAID_SET_TRANSFORM 2 -+#define CSMI_SAS_RAID_SET_DELETE 3 -+#define CSMI_SAS_RAID_SET_WRITE_PROTECT 4 -+#define CSMI_SAS_RAID_SET_CACHE 5 -+#define CSMI_SAS_RAID_SET_ONLINE_STATE 6 -+#define CSMI_SAS_RAID_SET_SPARE 7 -+ -+// RAID Transform Types -+// (bTransformType) -+#define CSMI_SAS_RAID_SET_TRANSFORM_SPLIT_MIRROR 0 -+#define CSMI_SAS_RAID_SET_TRANSFORM_MERGE_RAID_0 1 -+#define CSMI_SAS_RAID_SET_TRANSFORM_LUN_RENUMBER 2 -+#define CSMI_SAS_RAID_SET_TRANSFORM_RAID_SET 3 -+ -+// RAID Online State -+// (bOnlineState) -+#define CSMI_SAS_RAID_SET_STATE_UNKNOWN 0 -+#define CSMI_SAS_RAID_SET_STATE_ONLINE 1 -+#define CSMI_SAS_RAID_SET_STATE_OFFLINE 2 -+ -+/* * * * * * * * * * SAS HBA Class IOCTL Constants * * * * * * * * * */ -+ -+// Return codes for SAS IOCTL's -+// (IoctlHeader.ReturnCode) -+ -+#define CSMI_SAS_PHY_INFO_CHANGED CSMI_SAS_STATUS_SUCCESS -+#define CSMI_SAS_PHY_INFO_NOT_CHANGEABLE 2000 -+#define CSMI_SAS_LINK_RATE_OUT_OF_RANGE 2001 -+ -+#define CSMI_SAS_PHY_DOES_NOT_EXIST 2002 -+#define CSMI_SAS_PHY_DOES_NOT_MATCH_PORT 2003 -+#define CSMI_SAS_PHY_CANNOT_BE_SELECTED 2004 -+#define CSMI_SAS_SELECT_PHY_OR_PORT 2005 -+#define 
CSMI_SAS_PORT_DOES_NOT_EXIST 2006 -+#define CSMI_SAS_PORT_CANNOT_BE_SELECTED 2007 -+#define CSMI_SAS_CONNECTION_FAILED 2008 -+ -+#define CSMI_SAS_NO_SATA_DEVICE 2009 -+#define CSMI_SAS_NO_SATA_SIGNATURE 2010 -+#define CSMI_SAS_SCSI_EMULATION 2011 -+#define CSMI_SAS_NOT_AN_END_DEVICE 2012 -+#define CSMI_SAS_NO_SCSI_ADDRESS 2013 -+#define CSMI_SAS_NO_DEVICE_ADDRESS 2014 -+ -+// Signature value -+// (IoctlHeader.Signature) -+ -+#define CSMI_SAS_SIGNATURE "CSMISAS" -+ -+// Timeout value default of 60 seconds -+// (IoctlHeader.Timeout) -+ -+#define CSMI_SAS_TIMEOUT 60 -+ -+// Device types -+// (bDeviceType) -+ -+#define CSMI_SAS_PHY_UNUSED 0x00 -+#define CSMI_SAS_NO_DEVICE_ATTACHED 0x00 -+#define CSMI_SAS_END_DEVICE 0x10 -+#define CSMI_SAS_EDGE_EXPANDER_DEVICE 0x20 -+#define CSMI_SAS_FANOUT_EXPANDER_DEVICE 0x30 -+ -+// Protocol options -+// (bInitiatorPortProtocol, bTargetPortProtocol) -+ -+#define CSMI_SAS_PROTOCOL_SATA 0x01 -+#define CSMI_SAS_PROTOCOL_SMP 0x02 -+#define CSMI_SAS_PROTOCOL_STP 0x04 -+#define CSMI_SAS_PROTOCOL_SSP 0x08 -+ -+// Negotiated and hardware link rates -+// (bNegotiatedLinkRate, bMinimumLinkRate, bMaximumLinkRate) -+ -+#define CSMI_SAS_LINK_RATE_UNKNOWN 0x00 -+#define CSMI_SAS_PHY_DISABLED 0x01 -+#define CSMI_SAS_LINK_RATE_FAILED 0x02 -+#define CSMI_SAS_SATA_SPINUP_HOLD 0x03 -+#define CSMI_SAS_SATA_PORT_SELECTOR 0x04 -+#define CSMI_SAS_LINK_RATE_1_5_GBPS 0x08 -+#define CSMI_SAS_LINK_RATE_3_0_GBPS 0x09 -+#define CSMI_SAS_LINK_VIRTUAL 0x10 -+ -+// Discover state -+// (bAutoDiscover) -+ -+#define CSMI_SAS_DISCOVER_NOT_SUPPORTED 0x00 -+#define CSMI_SAS_DISCOVER_NOT_STARTED 0x01 -+#define CSMI_SAS_DISCOVER_IN_PROGRESS 0x02 -+#define CSMI_SAS_DISCOVER_COMPLETE 0x03 -+#define CSMI_SAS_DISCOVER_ERROR 0x04 -+ -+// Phy features -+ -+#define CSMI_SAS_PHY_VIRTUAL_SMP 0x01 -+ -+// Programmed link rates -+// (bMinimumLinkRate, bMaximumLinkRate) -+// (bProgrammedMinimumLinkRate, bProgrammedMaximumLinkRate) -+ -+#define CSMI_SAS_PROGRAMMED_LINK_RATE_UNCHANGED 
0x00 -+#define CSMI_SAS_PROGRAMMED_LINK_RATE_1_5_GBPS 0x08 -+#define CSMI_SAS_PROGRAMMED_LINK_RATE_3_0_GBPS 0x09 -+ -+// Link rate -+// (bNegotiatedLinkRate in CSMI_SAS_SET_PHY_INFO) -+ -+#define CSMI_SAS_LINK_RATE_NEGOTIATE 0x00 -+#define CSMI_SAS_LINK_RATE_PHY_DISABLED 0x01 -+ -+// Signal class -+// (bSignalClass in CSMI_SAS_SET_PHY_INFO) -+ -+#define CSMI_SAS_SIGNAL_CLASS_UNKNOWN 0x00 -+#define CSMI_SAS_SIGNAL_CLASS_DIRECT 0x01 -+#define CSMI_SAS_SIGNAL_CLASS_SERVER 0x02 -+#define CSMI_SAS_SIGNAL_CLASS_ENCLOSURE 0x03 -+ -+// Link error reset -+// (bResetCounts) -+ -+#define CSMI_SAS_LINK_ERROR_DONT_RESET_COUNTS 0x00 -+#define CSMI_SAS_LINK_ERROR_RESET_COUNTS 0x01 -+ -+// Phy identifier -+// (bPhyIdentifier) -+ -+#define CSMI_SAS_USE_PORT_IDENTIFIER 0xFF -+ -+// Port identifier -+// (bPortIdentifier) -+ -+#define CSMI_SAS_IGNORE_PORT 0xFF -+ -+// Programmed link rates -+// (bConnectionRate) -+ -+#define CSMI_SAS_LINK_RATE_NEGOTIATED 0x00 -+#define CSMI_SAS_LINK_RATE_1_5_GBPS 0x08 -+#define CSMI_SAS_LINK_RATE_3_0_GBPS 0x09 -+ -+// Connection status -+// (bConnectionStatus) -+ -+#define CSMI_SAS_OPEN_ACCEPT 0 -+#define CSMI_SAS_OPEN_REJECT_BAD_DESTINATION 1 -+#define CSMI_SAS_OPEN_REJECT_RATE_NOT_SUPPORTED 2 -+#define CSMI_SAS_OPEN_REJECT_NO_DESTINATION 3 -+#define CSMI_SAS_OPEN_REJECT_PATHWAY_BLOCKED 4 -+#define CSMI_SAS_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED 5 -+#define CSMI_SAS_OPEN_REJECT_RESERVE_ABANDON 6 -+#define CSMI_SAS_OPEN_REJECT_RESERVE_CONTINUE 7 -+#define CSMI_SAS_OPEN_REJECT_RESERVE_INITIALIZE 8 -+#define CSMI_SAS_OPEN_REJECT_RESERVE_STOP 9 -+#define CSMI_SAS_OPEN_REJECT_RETRY 10 -+#define CSMI_SAS_OPEN_REJECT_STP_RESOURCES_BUSY 11 -+#define CSMI_SAS_OPEN_REJECT_WRONG_DESTINATION 12 -+ -+// SSP Status -+// (bSSPStatus) -+ -+#define CSMI_SAS_SSP_STATUS_UNKNOWN 0x00 -+#define CSMI_SAS_SSP_STATUS_WAITING 0x01 -+#define CSMI_SAS_SSP_STATUS_COMPLETED 0x02 -+#define CSMI_SAS_SSP_STATUS_FATAL_ERROR 0x03 -+#define CSMI_SAS_SSP_STATUS_RETRY 0x04 -+#define 
CSMI_SAS_SSP_STATUS_NO_TAG 0x05 -+ -+// SSP Flags -+// (uFlags) -+ -+#define CSMI_SAS_SSP_READ 0x00000001 -+#define CSMI_SAS_SSP_WRITE 0x00000002 -+#define CSMI_SAS_SSP_UNSPECIFIED 0x00000004 -+ -+#define CSMI_SAS_SSP_TASK_ATTRIBUTE_SIMPLE 0x00000000 -+#define CSMI_SAS_SSP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 0x00000010 -+#define CSMI_SAS_SSP_TASK_ATTRIBUTE_ORDERED 0x00000020 -+#define CSMI_SAS_SSP_TASK_ATTRIBUTE_ACA 0x00000040 -+ -+// SSP Data present -+// (bDataPresent) -+ -+#define CSMI_SAS_SSP_NO_DATA_PRESENT 0x00 -+#define CSMI_SAS_SSP_RESPONSE_DATA_PRESENT 0x01 -+#define CSMI_SAS_SSP_SENSE_DATA_PRESENT 0x02 -+ -+// STP Flags -+// (uFlags) -+ -+#define CSMI_SAS_STP_READ 0x00000001 -+#define CSMI_SAS_STP_WRITE 0x00000002 -+#define CSMI_SAS_STP_UNSPECIFIED 0x00000004 -+#define CSMI_SAS_STP_PIO 0x00000010 -+#define CSMI_SAS_STP_DMA 0x00000020 -+#define CSMI_SAS_STP_PACKET 0x00000040 -+#define CSMI_SAS_STP_DMA_QUEUED 0x00000080 -+#define CSMI_SAS_STP_EXECUTE_DIAG 0x00000100 -+#define CSMI_SAS_STP_RESET_DEVICE 0x00000200 -+ -+// Task Management Flags -+// (uFlags) -+ -+#define CSMI_SAS_TASK_IU 0x00000001 -+#define CSMI_SAS_HARD_RESET_SEQUENCE 0x00000002 -+#define CSMI_SAS_SUPPRESS_RESULT 0x00000004 -+ -+// Task Management Functions -+// (bTaskManagement) -+ -+#define CSMI_SAS_SSP_ABORT_TASK 0x01 -+#define CSMI_SAS_SSP_ABORT_TASK_SET 0x02 -+#define CSMI_SAS_SSP_CLEAR_TASK_SET 0x04 -+#define CSMI_SAS_SSP_LOGICAL_UNIT_RESET 0x08 -+#define CSMI_SAS_SSP_CLEAR_ACA 0x40 -+#define CSMI_SAS_SSP_QUERY_TASK 0x80 -+ -+// Task Management Information -+// (uInformation) -+ -+#define CSMI_SAS_SSP_TEST 1 -+#define CSMI_SAS_SSP_EXCEEDED 2 -+#define CSMI_SAS_SSP_DEMAND 3 -+#define CSMI_SAS_SSP_TRIGGER 4 -+ -+// Connector Pinout Information -+// (uPinout) -+ -+#define CSMI_SAS_CON_UNKNOWN 0x00000001 -+#define CSMI_SAS_CON_SFF_8482 0x00000002 -+#define CSMI_SAS_CON_SFF_8470_LANE_1 0x00000100 -+#define CSMI_SAS_CON_SFF_8470_LANE_2 0x00000200 -+#define CSMI_SAS_CON_SFF_8470_LANE_3 0x00000400 
-+#define CSMI_SAS_CON_SFF_8470_LANE_4 0x00000800 -+#define CSMI_SAS_CON_SFF_8484_LANE_1 0x00010000 -+#define CSMI_SAS_CON_SFF_8484_LANE_2 0x00020000 -+#define CSMI_SAS_CON_SFF_8484_LANE_3 0x00040000 -+#define CSMI_SAS_CON_SFF_8484_LANE_4 0x00080000 -+ -+// Connector Location Information -+// (bLocation) -+ -+// same as uPinout above... -+// #define CSMI_SAS_CON_UNKNOWN 0x01 -+#define CSMI_SAS_CON_INTERNAL 0x02 -+#define CSMI_SAS_CON_EXTERNAL 0x04 -+#define CSMI_SAS_CON_SWITCHABLE 0x08 -+#define CSMI_SAS_CON_AUTO 0x10 -+#define CSMI_SAS_CON_NOT_PRESENT 0x20 -+#define CSMI_SAS_CON_NOT_CONNECTED 0x80 -+ -+// Device location identification -+// (bIdentify) -+ -+#define CSMI_SAS_LOCATE_UNKNOWN 0x00 -+#define CSMI_SAS_LOCATE_FORCE_OFF 0x01 -+#define CSMI_SAS_LOCATE_FORCE_ON 0x02 -+ -+// Location Valid flags -+// (uLocationFlags) -+ -+#define CSMI_SAS_LOCATE_SAS_ADDRESS_VALID 0x00000001 -+#define CSMI_SAS_LOCATE_SAS_LUN_VALID 0x00000002 -+#define CSMI_SAS_LOCATE_ENCLOSURE_IDENTIFIER_VALID 0x00000004 -+#define CSMI_SAS_LOCATE_ENCLOSURE_NAME_VALID 0x00000008 -+#define CSMI_SAS_LOCATE_BAY_PREFIX_VALID 0x00000010 -+#define CSMI_SAS_LOCATE_BAY_IDENTIFIER_VALID 0x00000020 -+#define CSMI_SAS_LOCATE_LOCATION_STATE_VALID 0x00000040 -+ -+/* * * * * * * * SAS Phy Control Class IOCTL Constants * * * * * * * * */ -+ -+// Return codes for SAS Phy Control IOCTL's -+// (IoctlHeader.ReturnCode) -+ -+// Signature value -+// (IoctlHeader.Signature) -+ -+#define CSMI_PHY_SIGNATURE "CSMIPHY" -+ -+// Phy Control Functions -+// (bFunction) -+ -+// values 0x00 to 0xFF are consistent in definition with the SMP PHY CONTROL -+// function defined in the SAS spec -+#define CSMI_SAS_PC_NOP 0x00000000 -+#define CSMI_SAS_PC_LINK_RESET 0x00000001 -+#define CSMI_SAS_PC_HARD_RESET 0x00000002 -+#define CSMI_SAS_PC_PHY_DISABLE 0x00000003 -+// 0x04 to 0xFF reserved... 
-+#define CSMI_SAS_PC_GET_PHY_SETTINGS 0x00000100 -+ -+// Link Flags -+#define CSMI_SAS_PHY_ACTIVATE_CONTROL 0x00000001 -+#define CSMI_SAS_PHY_UPDATE_SPINUP_RATE 0x00000002 -+#define CSMI_SAS_PHY_AUTO_COMWAKE 0x00000004 -+ -+// Device Types for Phy Settings -+// (bType) -+#define CSMI_SAS_UNDEFINED 0x00 -+#define CSMI_SAS_SATA 0x01 -+#define CSMI_SAS_SAS 0x02 -+ -+// Transmitter Flags -+// (uTransmitterFlags) -+#define CSMI_SAS_PHY_PREEMPHASIS_DISABLED 0x00000001 -+ -+// Receiver Flags -+// (uReceiverFlags) -+#define CSMI_SAS_PHY_EQUALIZATION_DISABLED 0x00000001 -+ -+// Pattern Flags -+// (uPatternFlags) -+// #define CSMI_SAS_PHY_ACTIVATE_CONTROL 0x00000001 -+#define CSMI_SAS_PHY_DISABLE_SCRAMBLING 0x00000002 -+#define CSMI_SAS_PHY_DISABLE_ALIGN 0x00000004 -+#define CSMI_SAS_PHY_DISABLE_SSC 0x00000008 -+ -+#define CSMI_SAS_PHY_FIXED_PATTERN 0x00000010 -+#define CSMI_SAS_PHY_USER_PATTERN 0x00000020 -+ -+// Fixed Patterns -+// (bFixedPattern) -+#define CSMI_SAS_PHY_CJPAT 0x00000001 -+#define CSMI_SAS_PHY_ALIGN 0x00000002 -+ -+// Type Flags -+// (bTypeFlags) -+#define CSMI_SAS_PHY_POSITIVE_DISPARITY 0x01 -+#define CSMI_SAS_PHY_NEGATIVE_DISPARITY 0x02 -+#define CSMI_SAS_PHY_CONTROL_CHARACTER 0x04 -+ -+// Miscellaneous -+#define SLOT_NUMBER_UNKNOWN 0xFFFF -+ -+/*************************************************************************/ -+/* DATA STRUCTURES */ -+/*************************************************************************/ -+ -+/* * * * * * * * * * Class Independent Structures * * * * * * * * * */ -+ -+// EDM #pragma CSMI_SAS_BEGIN_PACK(8) -+#pragma pack(8) -+ -+// CC_CSMI_SAS_DRIVER_INFO -+ -+typedef struct _CSMI_SAS_DRIVER_INFO { -+ __u8 szName[81]; -+ __u8 szDescription[81]; -+ __u16 usMajorRevision; -+ __u16 usMinorRevision; -+ __u16 usBuildRevision; -+ __u16 usReleaseRevision; -+ __u16 usCSMIMajorRevision; -+ __u16 usCSMIMinorRevision; -+} CSMI_SAS_DRIVER_INFO, -+ *PCSMI_SAS_DRIVER_INFO; -+ -+typedef struct _CSMI_SAS_DRIVER_INFO_BUFFER { -+ IOCTL_HEADER 
IoctlHeader; -+ CSMI_SAS_DRIVER_INFO Information; -+} CSMI_SAS_DRIVER_INFO_BUFFER, -+ *PCSMI_SAS_DRIVER_INFO_BUFFER; -+ -+// CC_CSMI_SAS_CNTLR_CONFIGURATION -+ -+typedef struct _CSMI_SAS_PCI_BUS_ADDRESS { -+ __u8 bBusNumber; -+ __u8 bDeviceNumber; -+ __u8 bFunctionNumber; -+ __u8 bReserved; -+} CSMI_SAS_PCI_BUS_ADDRESS, -+ *PCSMI_SAS_PCI_BUS_ADDRESS; -+ -+typedef union _CSMI_SAS_IO_BUS_ADDRESS { -+ CSMI_SAS_PCI_BUS_ADDRESS PciAddress; -+ __u8 bReserved[32]; -+} CSMI_SAS_IO_BUS_ADDRESS, -+ *PCSMI_SAS_IO_BUS_ADDRESS; -+ -+typedef struct _CSMI_SAS_CNTLR_CONFIG { -+ __u32 uBaseIoAddress; -+ struct { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } BaseMemoryAddress; -+ __u32 uBoardID; -+ __u16 usSlotNumber; -+ __u8 bControllerClass; -+ __u8 bIoBusType; -+ CSMI_SAS_IO_BUS_ADDRESS BusAddress; -+ __u8 szSerialNumber[81]; -+ __u16 usMajorRevision; -+ __u16 usMinorRevision; -+ __u16 usBuildRevision; -+ __u16 usReleaseRevision; -+ __u16 usBIOSMajorRevision; -+ __u16 usBIOSMinorRevision; -+ __u16 usBIOSBuildRevision; -+ __u16 usBIOSReleaseRevision; -+ __u32 uControllerFlags; -+ __u16 usRromMajorRevision; -+ __u16 usRromMinorRevision; -+ __u16 usRromBuildRevision; -+ __u16 usRromReleaseRevision; -+ __u16 usRromBIOSMajorRevision; -+ __u16 usRromBIOSMinorRevision; -+ __u16 usRromBIOSBuildRevision; -+ __u16 usRromBIOSReleaseRevision; -+ __u8 bReserved[7]; -+} CSMI_SAS_CNTLR_CONFIG, -+ *PCSMI_SAS_CNTLR_CONFIG; -+ -+typedef struct _CSMI_SAS_CNTLR_CONFIG_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_CNTLR_CONFIG Configuration; -+} CSMI_SAS_CNTLR_CONFIG_BUFFER, -+ *PCSMI_SAS_CNTLR_CONFIG_BUFFER; -+ -+// CC_CSMI_SAS_CNTLR_STATUS -+ -+typedef struct _CSMI_SAS_CNTLR_STATUS { -+ __u32 uStatus; -+ __u32 uOfflineReason; -+ __u8 bReserved[28]; -+} CSMI_SAS_CNTLR_STATUS, -+ *PCSMI_SAS_CNTLR_STATUS; -+ -+typedef struct _CSMI_SAS_CNTLR_STATUS_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_CNTLR_STATUS Status; -+} CSMI_SAS_CNTLR_STATUS_BUFFER, -+ *PCSMI_SAS_CNTLR_STATUS_BUFFER; -+ -+// 
CC_CSMI_SAS_FIRMWARE_DOWNLOAD -+ -+typedef struct _CSMI_SAS_FIRMWARE_DOWNLOAD { -+ __u32 uBufferLength; -+ __u32 uDownloadFlags; -+ __u8 bReserved[32]; -+ __u16 usStatus; -+ __u16 usSeverity; -+} CSMI_SAS_FIRMWARE_DOWNLOAD, -+ *PCSMI_SAS_FIRMWARE_DOWNLOAD; -+ -+typedef struct _CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_FIRMWARE_DOWNLOAD Information; -+ __u8 bDataBuffer[1]; -+} CSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER, -+ *PCSMI_SAS_FIRMWARE_DOWNLOAD_BUFFER; -+ -+// CC_CSMI_SAS_RAID_INFO -+ -+typedef struct _CSMI_SAS_RAID_INFO { -+ __u32 uNumRaidSets; -+ __u32 uMaxDrivesPerSet; -+ __u32 uMaxRaidSets; -+ __u8 bMaxRaidTypes; -+ __u8 bReservedByteFields[7]; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulMinRaidSetBlocks; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulMaxRaidSetBlocks; -+ __u32 uMaxPhysicalDrives; -+ __u32 uMaxExtents; -+ __u32 uMaxModules; -+ __u32 uMaxTransformationMemory; -+ __u32 uChangeCount; -+ __u8 bReserved[44]; -+} CSMI_SAS_RAID_INFO, -+ *PCSMI_SAS_RAID_INFO; -+ -+typedef struct _CSMI_SAS_RAID_INFO_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_RAID_INFO Information; -+} CSMI_SAS_RAID_INFO_BUFFER, -+ *PCSMI_SAS_RAID_INFO_BUFFER; -+ -+// CC_CSMI_SAS_GET_RAID_CONFIG -+ -+typedef struct _CSMI_SAS_RAID_DRIVES { -+ __u8 bModel[40]; -+ __u8 bFirmware[8]; -+ __u8 bSerialNumber[40]; -+ __u8 bSASAddress[8]; -+ __u8 bSASLun[8]; -+ __u8 bDriveStatus; -+ __u8 bDriveUsage; -+ __u16 usBlockSize; -+ __u8 bDriveType; -+ __u8 bReserved[15]; -+ __u32 uDriveIndex; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulTotalUserBlocks; -+} CSMI_SAS_RAID_DRIVES, -+ *PCSMI_SAS_RAID_DRIVES; -+ -+typedef struct _CSMI_SAS_RAID_DEVICE_ID { -+ __u8 bDeviceIdentificationVPDPage[1]; -+} CSMI_SAS_RAID_DEVICE_ID, -+ *PCSMI_SAS_RAID_DEVICE_ID; -+ -+typedef struct _CSMI_SAS_RAID_SET_ADDITIONAL_DATA { -+ __u8 bLabel[16]; -+ __u8 bRaidSetLun[8]; -+ __u8 bWriteProtection; -+ __u8 bCacheSetting; -+ __u8 bCacheRatio; 
-+ __u16 usBlockSize; -+ __u8 bReservedBytes[11]; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulRaidSetExtentOffset; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulRaidSetBlocks; -+ __u32 uStripeSizeInBlocks; -+ __u32 uSectorsPerTrack; -+ __u8 bApplicationScratchPad[16]; -+ __u32 uNumberOfHeads; -+ __u32 uNumberOfTracks; -+ __u8 bReserved[24]; -+} CSMI_SAS_RAID_SET_ADDITIONAL_DATA, -+ *PCSMI_SAS_RAID_SET_ADDITIONAL_DATA; -+ -+typedef struct _CSMI_SAS_RAID_CONFIG { -+ __u32 uRaidSetIndex; -+ __u32 uCapacity; -+ __u32 uStripeSize; -+ __u8 bRaidType; -+ __u8 bStatus; -+ __u8 bInformation; -+ __u8 bDriveCount; -+ __u8 bDataType; -+ __u8 bReserved[11]; -+ __u32 uFailureCode; -+ __u32 uChangeCount; -+ union { -+ CSMI_SAS_RAID_DRIVES Drives[1]; -+ CSMI_SAS_RAID_DEVICE_ID DeviceId[1]; -+ CSMI_SAS_RAID_SET_ADDITIONAL_DATA Data[1]; -+ }; -+} CSMI_SAS_RAID_CONFIG, -+ *PCSMI_SAS_RAID_CONFIG; -+ -+typedef struct _CSMI_SAS_RAID_CONFIG_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_RAID_CONFIG Configuration; -+} CSMI_SAS_RAID_CONFIG_BUFFER, -+ *PCSMI_SAS_RAID_CONFIG_BUFFER; -+ -+// CC_CSMI_SAS_GET_RAID_FEATURES -+ -+typedef struct _CSMI_SAS_RAID_TYPE_DESCRIPTION { -+ __u8 bRaidType; -+ __u8 bReservedBytes[7]; -+ __u32 uSupportedStripeSizeMap; -+ __u8 bReserved[24]; -+} CSMI_SAS_RAID_TYPE_DESCRIPTION, -+ *PCSMI_SAS_RAID_TYPE_DESCRIPTION; -+ -+typedef struct _CSMI_SAS_RAID_FEATURES { -+ __u32 uFeatures; -+ __u8 bReservedFeatures[32]; -+ __u8 bDefaultTransformPriority; -+ __u8 bTransformPriority; -+ __u8 bDefaultRebuildPriority; -+ __u8 bRebuildPriority; -+ __u8 bDefaultSurfaceScanPriority; -+ __u8 bSurfaceScanPriority; -+ __u16 usReserved; -+ __u32 uRaidSetTransformationRules; -+ __u32 uReserved[11]; -+ CSMI_SAS_RAID_TYPE_DESCRIPTION RaidType[24]; -+ __u8 bCacheRatiosSupported[104]; -+ __u32 uChangeCount; -+ __u32 uFailureCode; -+ __u8 bReserved[120]; -+} CSMI_SAS_RAID_FEATURES, -+ *PCSMI_SAS_RAID_FEATURES; -+ -+typedef struct 
_CSMI_SAS_RAID_FEATURES_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_RAID_FEATURES Information; -+} CSMI_SAS_RAID_FEATURES_BUFFER, -+ *PCSMI_SAS_RAID_FEATURES_BUFFER; -+ -+// CC_CSMI_SAS_SET_RAID_CONTROL -+ -+typedef struct _CSMI_SAS_RAID_CONTROL { -+ __u8 bTransformPriority; -+ __u8 bRebuildPriority; -+ __u8 bCacheRatioFlag; -+ __u8 bCacheRatio; -+ __u8 bSurfaceScanPriority; -+ __u8 bReservedBytes[15]; -+ __u8 bClearConfiguration[8]; -+ __u32 uChangeCount; -+ __u8 bReserved[88]; -+ __u32 uFailureCode; -+ __u8 bFailureDescription[80]; -+} CSMI_SAS_RAID_CONTROL, -+ *PCSMI_SAS_RAID_CONTROL; -+ -+typedef struct _CSMI_SAS_RAID_CONTROL_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_RAID_CONTROL Information; -+} CSMI_SAS_RAID_CONTROL_BUFFER, -+ *PCSMI_SAS_RAID_CONTROL_BUFFER; -+ -+// CC_CSMI_SAS_GET_RAID_ELEMENT -+ -+typedef struct _CSMI_SAS_DRIVE_EXTENT_INFO { -+ __u32 uDriveIndex; -+ __u8 bExtentType; -+ __u8 bReservedBytes[7]; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulExtentOffset; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulExtentBlocks; -+ __u32 uRaidSetIndex; -+ __u8 bReserved[96]; -+} CSMI_SAS_DRIVE_EXTENT_INFO, -+ *PCSMI_SAS_DRIVE_EXTENT_INFO; -+ -+typedef struct _CSMI_SAS_RAID_MODULE_INFO { -+ __u8 bReserved[128]; -+} CSMI_SAS_RAID_MODULE_INFO, -+ *PCSMI_SAS_RAID_MODULE_INFO; -+ -+typedef struct _CSMI_SAS_DRIVE_LOCATION { -+ __u8 bConnector[16]; -+ __u8 bBoxName[16]; -+ __u32 uBay; -+ __u8 bReservedBytes[4]; -+ __u8 bAttachedSASAddress[8]; -+ __u8 bAttachedPhyIdentifier; -+ __u8 bReserved[79]; -+} CSMI_SAS_DRIVE_LOCATION, -+ *PCSMI_SAS_DRIVE_LOCATION; -+ -+typedef struct _CSMI_SAS_RAID_DRIVES_ADDITIONAL_DATA { -+ __u8 bNegotiatedLinkRate[2]; -+ __u8 bReserved[126]; -+} CSMI_SAS_RAID_DRIVES_ADDITIONAL_DATA, -+ *PCSMI_SAS_RAID_DRIVES_ADDITIONAL_DATA; -+ -+typedef struct _CSMI_SAS_DRIVE_INFO { -+ CSMI_SAS_RAID_DRIVES Device; -+ CSMI_SAS_RAID_DRIVES_ADDITIONAL_DATA Data; -+ CSMI_SAS_DRIVE_LOCATION Location; -+ __u8 
bReserved[16]; -+} CSMI_SAS_DRIVE_INFO, -+ *PCSMI_SAS_DRIVE_INFO; -+ -+typedef struct _CSMI_SAS_RAID_ELEMENT { -+ __u32 uEnumerationType; -+ __u32 uElementIndex; -+ __u32 uNumElements; -+ __u32 uChangeCount; -+ __u32 uSubElementIndex; -+ __u8 bReserved[32]; -+ __u32 uFailureCode; -+ __u8 bFailureDescription[80]; -+ union { -+ CSMI_SAS_DRIVE_INFO Drive; -+ CSMI_SAS_RAID_MODULE_INFO Module; -+ CSMI_SAS_DRIVE_EXTENT_INFO Extent; -+ } Element; -+} CSMI_SAS_RAID_ELEMENT, -+ *PCSMI_SAS_RAID_ELEMENT; -+ -+typedef struct _CSMI_SAS_RAID_ELEMENT_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_RAID_ELEMENT Information; -+} CSMI_SAS_RAID_ELEMENT_BUFFER, -+ *PCSMI_SAS_RAID_ELEMENT_BUFFER; -+ -+// CC_CSMI_SAS_SET_RAID_OPERATION -+ -+typedef struct _CSMI_SAS_RAID_SET_LIST { -+ __u32 uRaidSetIndex; -+ __u8 bExistingLun[8]; -+ __u8 bNewLun[8]; -+ __u8 bReserved[12]; -+} CSMI_SAS_RAID_SET_LIST, -+ *PCSMI_SAS_RAID_SET_LIST; -+ -+typedef struct _CSMI_SAS_RAID_SET_DRIVE_LIST { -+ __u32 uDriveIndex; -+ __u8 bDriveUsage; -+ __u8 bReserved[27]; -+} CSMI_SAS_RAID_SET_DRIVE_LIST, -+ *PCSMI_SAS_RAID_SET_DRIVE_LIST; -+ -+typedef struct _CSMI_SAS_RAID_SET_SPARE_INFO { -+ __u32 uRaidSetIndex; -+ __u32 uDriveCount; -+ __u8 bApplicationScratchPad[16]; -+ __u8 bReserved[104]; -+} CSMI_SAS_RAID_SET_SPARE_INFO, -+ *PCSMI_SAS_RAID_SET_SPARE_INFO; -+ -+typedef struct _CSMI_SAS_RAID_SET_ONLINE_STATE_INFO { -+ __u32 uRaidSetIndex; -+ __u8 bOnlineState; -+ __u8 bReserved[123]; -+} CSMI_SAS_RAID_SET_ONLINE_STATE_INFO, -+ *PCSMI_SAS_RAID_SET_ONLINE_STATE_INFO; -+ -+typedef struct _CSMI_SAS_RAID_SET_CACHE_INFO { -+ __u32 uRaidSetIndex; -+ __u8 bCacheSetting; -+ __u8 bCacheRatioFlag; -+ __u8 bCacheRatio; -+ __u8 bReserved[121]; -+} CSMI_SAS_RAID_SET_CACHE_INFO, -+ *PCSMI_SAS_RAID_SET_CACHE_INFO; -+ -+typedef struct _CSMI_SAS_RAID_SET_WRITE_PROTECT_INFO { -+ __u32 uRaidSetIndex; -+ __u8 bWriteProtectSetting; -+ __u8 bReserved[123]; -+} CSMI_SAS_RAID_SET_WRITE_PROTECT_INFO, -+ 
*PCSMI_SAS_RAID_SET_WRITE_PROTECT_INFO; -+ -+typedef struct _CSMI_SAS_RAID_SET_DELETE_INFO { -+ __u32 uRaidSetIndex; -+ __u8 bReserved[124]; -+} CSMI_SAS_RAID_SET_DELETE_INFO, -+ *PCSMI_SAS_RAID_SET_DELETE_INFO; -+ -+typedef struct _CSMI_SAS_RAID_SET_MODIFY_INFO { -+ __u8 bRaidType; -+ __u8 bReservedBytes[7]; -+ __u32 uStripeSize; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulRaidSetBlocks; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulRaidSetExtentOffset; -+ __u32 uDriveCount; -+ __u8 bReserved[96]; -+} CSMI_SAS_RAID_SET_MODIFY_INFO, -+ *PCSMI_SAS_RAID_SET_MODIFY_INFO; -+ -+typedef struct _CSMI_SAS_RAID_SET_TRANSFORM_INFO { -+ __u8 bTransformType; -+ __u8 bReservedBytes[3]; -+ __u32 uRaidSetIndex; -+ __u8 bRaidType; -+ __u8 bReservedBytes2[11]; -+ __u32 uAdditionalRaidSetIndex; -+ __u32 uRaidSetCount; -+ __u8 bApplicationScratchPad[16]; -+ CSMI_SAS_RAID_SET_MODIFY_INFO Modify; -+ __u8 bReserved[80]; -+} CSMI_SAS_RAID_SET_TRANSFORM_INFO, -+ *PCSMI_SAS_RAID_SET_TRANSFORM_INFO; -+ -+typedef struct _CSMI_SAS_RAID_SET_LABEL_INFO { -+ __u32 uRaidSetIndex; -+ __u8 bLabel[16]; -+ __u8 bReserved[108]; -+} CSMI_SAS_RAID_SET_LABEL_INFO, -+ *PCSMI_SAS_RAID_SET_LABEL_INFO; -+ -+typedef struct _CSMI_SAS_RAID_SET_CREATE_INFO { -+ __u8 bRaidType; -+ __u8 bReservedBytes[7]; -+ __u32 uStripeSize; -+ __u32 uTrackSectorCount; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulRaidSetBlocks; -+ struct -+ { -+ __u32 uLowPart; -+ __u32 uHighPart; -+ } ulRaidSetExtentOffset; -+ __u32 uDriveCount; -+ __u8 bLabel[16]; -+ __u32 uRaidSetIndex; -+ __u8 bApplicationScratchPad[16]; -+ __u32 uNumberOfHeads; -+ __u32 uNumberOfTracks; -+ __u8 bReserved[48]; -+} CSMI_SAS_RAID_SET_CREATE_INFO, -+ *PCSMI_SAS_RAID_SET_CREATE_INFO; -+ -+typedef struct _CSMI_SAS_RAID_SET_OPERATION { -+ __u32 uOperationType; -+ __u32 uChangeCount; -+ __u32 uFailureCode; -+ __u8 bFailureDescription[80]; -+ __u8 bReserved[28]; -+ union { -+ CSMI_SAS_RAID_SET_CREATE_INFO Create; -+ 
CSMI_SAS_RAID_SET_LABEL_INFO Label; -+ CSMI_SAS_RAID_SET_TRANSFORM_INFO Transform; -+ CSMI_SAS_RAID_SET_DELETE_INFO Delete; -+ CSMI_SAS_RAID_SET_WRITE_PROTECT_INFO Protect; -+ CSMI_SAS_RAID_SET_CACHE_INFO Cache; -+ CSMI_SAS_RAID_SET_ONLINE_STATE_INFO State; -+ CSMI_SAS_RAID_SET_SPARE_INFO Spare; -+ } Operation; -+ union { -+ CSMI_SAS_RAID_SET_DRIVE_LIST DriveList[1]; -+ CSMI_SAS_RAID_SET_LIST RaidSetList[1]; -+ } Parameters; -+} CSMI_SAS_RAID_SET_OPERATION, -+ *PCSMI_SAS_RAID_SET_OPERATION; -+ -+typedef struct _CSMI_SAS_RAID_SET_OPERATION_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_RAID_SET_OPERATION Information; -+} CSMI_SAS_RAID_SET_OPERATION_BUFFER, -+ *PCSMI_SAS_RAID_SET_OPERATION_BUFFER; -+ -+/* * * * * * * * * * SAS HBA Class Structures * * * * * * * * * */ -+ -+// CC_CSMI_SAS_GET_PHY_INFO -+ -+typedef struct _CSMI_SAS_IDENTIFY { -+ __u8 bDeviceType; -+ __u8 bRestricted; -+ __u8 bInitiatorPortProtocol; -+ __u8 bTargetPortProtocol; -+ __u8 bRestricted2[8]; -+ __u8 bSASAddress[8]; -+ __u8 bPhyIdentifier; -+ __u8 bSignalClass; -+ __u8 bReserved[6]; -+} CSMI_SAS_IDENTIFY, -+ *PCSMI_SAS_IDENTIFY; -+ -+typedef struct _CSMI_SAS_PHY_ENTITY { -+ CSMI_SAS_IDENTIFY Identify; -+ __u8 bPortIdentifier; -+ __u8 bNegotiatedLinkRate; -+ __u8 bMinimumLinkRate; -+ __u8 bMaximumLinkRate; -+ __u8 bPhyChangeCount; -+ __u8 bAutoDiscover; -+ __u8 bPhyFeatures; -+ __u8 bReserved; -+ CSMI_SAS_IDENTIFY Attached; -+} CSMI_SAS_PHY_ENTITY, -+ *PCSMI_SAS_PHY_ENTITY; -+ -+typedef struct _CSMI_SAS_PHY_INFO { -+ __u8 bNumberOfPhys; -+ __u8 bReserved[3]; -+ CSMI_SAS_PHY_ENTITY Phy[32]; -+} CSMI_SAS_PHY_INFO, -+ *PCSMI_SAS_PHY_INFO; -+ -+typedef struct _CSMI_SAS_PHY_INFO_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_PHY_INFO Information; -+} CSMI_SAS_PHY_INFO_BUFFER, -+ *PCSMI_SAS_PHY_INFO_BUFFER; -+ -+// CC_CSMI_SAS_SET_PHY_INFO -+ -+typedef struct _CSMI_SAS_SET_PHY_INFO { -+ __u8 bPhyIdentifier; -+ __u8 bNegotiatedLinkRate; -+ __u8 bProgrammedMinimumLinkRate; -+ __u8 
bProgrammedMaximumLinkRate; -+ __u8 bSignalClass; -+ __u8 bReserved[3]; -+} CSMI_SAS_SET_PHY_INFO, -+ *PCSMI_SAS_SET_PHY_INFO; -+ -+typedef struct _CSMI_SAS_SET_PHY_INFO_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_SET_PHY_INFO Information; -+} CSMI_SAS_SET_PHY_INFO_BUFFER, -+ *PCSMI_SAS_SET_PHY_INFO_BUFFER; -+ -+// CC_CSMI_SAS_GET_LINK_ERRORS -+ -+typedef struct _CSMI_SAS_LINK_ERRORS { -+ __u8 bPhyIdentifier; -+ __u8 bResetCounts; -+ __u8 bReserved[2]; -+ __u32 uInvalidDwordCount; -+ __u32 uRunningDisparityErrorCount; -+ __u32 uLossOfDwordSyncCount; -+ __u32 uPhyResetProblemCount; -+} CSMI_SAS_LINK_ERRORS, -+ *PCSMI_SAS_LINK_ERRORS; -+ -+typedef struct _CSMI_SAS_LINK_ERRORS_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_LINK_ERRORS Information; -+} CSMI_SAS_LINK_ERRORS_BUFFER, -+ *PCSMI_SAS_LINK_ERRORS_BUFFER; -+ -+// CC_CSMI_SAS_SMP_PASSTHRU -+ -+typedef struct _CSMI_SAS_SMP_REQUEST { -+ __u8 bFrameType; -+ __u8 bFunction; -+ __u8 bReserved[2]; -+ __u8 bAdditionalRequestBytes[1016]; -+} CSMI_SAS_SMP_REQUEST, -+ *PCSMI_SAS_SMP_REQUEST; -+ -+typedef struct _CSMI_SAS_SMP_RESPONSE { -+ __u8 bFrameType; -+ __u8 bFunction; -+ __u8 bFunctionResult; -+ __u8 bReserved; -+ __u8 bAdditionalResponseBytes[1016]; -+} CSMI_SAS_SMP_RESPONSE, -+ *PCSMI_SAS_SMP_RESPONSE; -+ -+typedef struct _CSMI_SAS_SMP_PASSTHRU { -+ __u8 bPhyIdentifier; -+ __u8 bPortIdentifier; -+ __u8 bConnectionRate; -+ __u8 bReserved; -+ __u8 bDestinationSASAddress[8]; -+ __u32 uRequestLength; -+ CSMI_SAS_SMP_REQUEST Request; -+ __u8 bConnectionStatus; -+ __u8 bReserved2[3]; -+ __u32 uResponseBytes; -+ CSMI_SAS_SMP_RESPONSE Response; -+} CSMI_SAS_SMP_PASSTHRU, -+ *PCSMI_SAS_SMP_PASSTHRU; -+ -+typedef struct _CSMI_SAS_SMP_PASSTHRU_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_SMP_PASSTHRU Parameters; -+} CSMI_SAS_SMP_PASSTHRU_BUFFER, -+ *PCSMI_SAS_SMP_PASSTHRU_BUFFER; -+ -+// CC_CSMI_SAS_SSP_PASSTHRU -+ -+typedef struct _CSMI_SAS_SSP_PASSTHRU { -+ __u8 bPhyIdentifier; -+ __u8 bPortIdentifier; -+ 
__u8 bConnectionRate; -+ __u8 bReserved; -+ __u8 bDestinationSASAddress[8]; -+ __u8 bLun[8]; -+ __u8 bCDBLength; -+ __u8 bAdditionalCDBLength; -+ __u8 bReserved2[2]; -+ __u8 bCDB[16]; -+ __u32 uFlags; -+ __u8 bAdditionalCDB[24]; -+ __u32 uDataLength; -+} CSMI_SAS_SSP_PASSTHRU, -+ *PCSMI_SAS_SSP_PASSTHRU; -+ -+typedef struct _CSMI_SAS_SSP_PASSTHRU_STATUS { -+ __u8 bConnectionStatus; -+ __u8 bSSPStatus; -+ __u8 bReserved[2]; -+ __u8 bDataPresent; -+ __u8 bStatus; -+ __u8 bResponseLength[2]; -+ __u8 bResponse[256]; -+ __u32 uDataBytes; -+} CSMI_SAS_SSP_PASSTHRU_STATUS, -+ *PCSMI_SAS_SSP_PASSTHRU_STATUS; -+ -+typedef struct _CSMI_SAS_SSP_PASSTHRU_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_SSP_PASSTHRU Parameters; -+ CSMI_SAS_SSP_PASSTHRU_STATUS Status; -+ __u8 bDataBuffer[1]; -+} CSMI_SAS_SSP_PASSTHRU_BUFFER, -+ *PCSMI_SAS_SSP_PASSTHRU_BUFFER; -+ -+// CC_CSMI_SAS_STP_PASSTHRU -+ -+typedef struct _CSMI_SAS_STP_PASSTHRU { -+ __u8 bPhyIdentifier; -+ __u8 bPortIdentifier; -+ __u8 bConnectionRate; -+ __u8 bReserved; -+ __u8 bDestinationSASAddress[8]; -+ __u8 bReserved2[4]; -+ __u8 bCommandFIS[20]; -+ __u32 uFlags; -+ __u32 uDataLength; -+} CSMI_SAS_STP_PASSTHRU, -+ *PCSMI_SAS_STP_PASSTHRU; -+ -+typedef struct _CSMI_SAS_STP_PASSTHRU_STATUS { -+ __u8 bConnectionStatus; -+ __u8 bReserved[3]; -+ __u8 bStatusFIS[20]; -+ __u32 uSCR[16]; -+ __u32 uDataBytes; -+} CSMI_SAS_STP_PASSTHRU_STATUS, -+ *PCSMI_SAS_STP_PASSTHRU_STATUS; -+ -+typedef struct _CSMI_SAS_STP_PASSTHRU_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_STP_PASSTHRU Parameters; -+ CSMI_SAS_STP_PASSTHRU_STATUS Status; -+ __u8 bDataBuffer[1]; -+} CSMI_SAS_STP_PASSTHRU_BUFFER, -+ *PCSMI_SAS_STP_PASSTHRU_BUFFER; -+ -+// CC_CSMI_SAS_GET_SATA_SIGNATURE -+ -+typedef struct _CSMI_SAS_SATA_SIGNATURE { -+ __u8 bPhyIdentifier; -+ __u8 bReserved[3]; -+ __u8 bSignatureFIS[20]; -+} CSMI_SAS_SATA_SIGNATURE, -+ *PCSMI_SAS_SATA_SIGNATURE; -+ -+typedef struct _CSMI_SAS_SATA_SIGNATURE_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ 
CSMI_SAS_SATA_SIGNATURE Signature; -+} CSMI_SAS_SATA_SIGNATURE_BUFFER, -+ *PCSMI_SAS_SATA_SIGNATURE_BUFFER; -+ -+// CC_CSMI_SAS_GET_SCSI_ADDRESS -+ -+typedef struct _CSMI_SAS_GET_SCSI_ADDRESS_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ __u8 bSASAddress[8]; -+ __u8 bSASLun[8]; -+ __u8 bHostIndex; -+ __u8 bPathId; -+ __u8 bTargetId; -+ __u8 bLun; -+} CSMI_SAS_GET_SCSI_ADDRESS_BUFFER, -+ *PCSMI_SAS_GET_SCSI_ADDRESS_BUFFER; -+ -+// CC_CSMI_SAS_GET_DEVICE_ADDRESS -+ -+typedef struct _CSMI_SAS_GET_DEVICE_ADDRESS_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ __u8 bHostIndex; -+ __u8 bPathId; -+ __u8 bTargetId; -+ __u8 bLun; -+ __u8 bSASAddress[8]; -+ __u8 bSASLun[8]; -+} CSMI_SAS_GET_DEVICE_ADDRESS_BUFFER, -+ *PCSMI_SAS_GET_DEVICE_ADDRESS_BUFFER; -+ -+// CC_CSMI_SAS_TASK_MANAGEMENT -+ -+typedef struct _CSMI_SAS_SSP_TASK_IU { -+ __u8 bHostIndex; -+ __u8 bPathId; -+ __u8 bTargetId; -+ __u8 bLun; -+ __u32 uFlags; -+ __u32 uQueueTag; -+ __u32 uReserved; -+ __u8 bTaskManagementFunction; -+ __u8 bReserved[7]; -+ __u32 uInformation; -+} CSMI_SAS_SSP_TASK_IU, -+ *PCSMI_SAS_SSP_TASK_IU; -+ -+typedef struct _CSMI_SAS_SSP_TASK_IU_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_SSP_TASK_IU Parameters; -+ CSMI_SAS_SSP_PASSTHRU_STATUS Status; -+} CSMI_SAS_SSP_TASK_IU_BUFFER, -+ *PCSMI_SAS_SSP_TASK_IU_BUFFER; -+ -+// CC_CSMI_SAS_GET_CONNECTOR_INFO -+ -+typedef struct _CSMI_SAS_GET_CONNECTOR_INFO { -+ __u32 uPinout; -+ __u8 bConnector[16]; -+ __u8 bLocation; -+ __u8 bReserved[15]; -+} CSMI_SAS_CONNECTOR_INFO, -+ *PCSMI_SAS_CONNECTOR_INFO; -+ -+typedef struct _CSMI_SAS_CONNECTOR_INFO_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ CSMI_SAS_CONNECTOR_INFO Reference[32]; -+} CSMI_SAS_CONNECTOR_INFO_BUFFER, -+ *PCSMI_SAS_CONNECTOR_INFO_BUFFER; -+ -+// CC_CSMI_SAS_GET_LOCATION -+ -+typedef struct _CSMI_SAS_LOCATION_IDENTIFIER { -+ __u32 bLocationFlags; -+ __u8 bSASAddress[8]; -+ __u8 bSASLun[8]; -+ __u8 bEnclosureIdentifier[8]; -+ __u8 bEnclosureName[32]; -+ __u8 bBayPrefix[32]; -+ __u8 bBayIdentifier; -+ 
__u8 bLocationState; -+ __u8 bReserved[2]; -+} CSMI_SAS_LOCATION_IDENTIFIER, -+ *PCSMI_SAS_LOCATION_IDENTIFIER; -+ -+typedef struct _CSMI_SAS_GET_LOCATION_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ __u8 bHostIndex; -+ __u8 bPathId; -+ __u8 bTargetId; -+ __u8 bLun; -+ __u8 bIdentify; -+ __u8 bNumberOfLocationIdentifiers; -+ __u8 bLengthOfLocationIdentifier; -+ CSMI_SAS_LOCATION_IDENTIFIER Location[1]; -+} CSMI_SAS_GET_LOCATION_BUFFER, -+ *PCSMI_SAS_GET_LOCATION_BUFFER; -+ -+// CC_CSMI_SAS_PHY_CONTROL -+ -+typedef struct _CSMI_SAS_CHARACTER { -+ __u8 bTypeFlags; -+ __u8 bValue; -+} CSMI_SAS_CHARACTER, -+ *PCSMI_SAS_CHARACTER; -+ -+typedef struct _CSMI_SAS_PHY_CONTROL { -+ __u8 bType; -+ __u8 bRate; -+ __u8 bReserved[6]; -+ __u32 uVendorUnique[8]; -+ __u32 uTransmitterFlags; -+ __i8 bTransmitAmplitude; -+ __i8 bTransmitterPreemphasis; -+ __i8 bTransmitterSlewRate; -+ __i8 bTransmitterReserved[13]; -+ __u8 bTransmitterVendorUnique[64]; -+ __u32 uReceiverFlags; -+ __i8 bReceiverThreshold; -+ __i8 bReceiverEqualizationGain; -+ __i8 bReceiverReserved[14]; -+ __u8 bReceiverVendorUnique[64]; -+ __u32 uPatternFlags; -+ __u8 bFixedPattern; -+ __u8 bUserPatternLength; -+ __u8 bPatternReserved[6]; -+ CSMI_SAS_CHARACTER UserPatternBuffer[16]; -+} CSMI_SAS_PHY_CONTROL, -+ *PCSMI_SAS_PHY_CONTROL; -+ -+typedef struct _CSMI_SAS_PHY_CONTROL_BUFFER { -+ IOCTL_HEADER IoctlHeader; -+ __u32 uFunction; -+ __u8 bPhyIdentifier; -+ __u16 usLengthOfControl; -+ __u8 bNumberOfControls; -+ __u8 bReserved[4]; -+ __u32 uLinkFlags; -+ __u8 bSpinupRate; -+ __u8 bLinkReserved[7]; -+ __u32 uVendorUnique[8]; -+ CSMI_SAS_PHY_CONTROL Control[1]; -+} CSMI_SAS_PHY_CONTROL_BUFFER, -+ *PCSMI_SAS_PHY_CONTROL_BUFFER; -+ -+//EDM #pragma CSMI_SAS_END_PACK -+#pragma pack() -+ -+#endif // _CSMI_SAS_H_ ---- a/drivers/message/fusion/lsi/mpi.h -+++ b/drivers/message/fusion/lsi/mpi.h -@@ -6,7 +6,7 @@ - * Title: MPI Message independent structures and definitions - * Creation Date: July 27, 2000 - * -- * mpi.h Version: 
01.05.16 -+ * mpi.h Version: 01.05.17 - * - * Version History - * --------------- -@@ -82,6 +82,7 @@ - * 08-07-07 01.05.14 Bumped MPI_HEADER_VERSION_UNIT. - * 01-15-08 01.05.15 Bumped MPI_HEADER_VERSION_UNIT. - * 03-28-08 01.05.16 Bumped MPI_HEADER_VERSION_UNIT. -+ * 07-11-08 01.05.17 Bumped MPI_HEADER_VERSION_UNIT. - * -------------------------------------------------------------------------- - */ - -@@ -112,7 +113,7 @@ - /* Note: The major versions of 0xe0 through 0xff are reserved */ - - /* versioning for this MPI header set */ --#define MPI_HEADER_VERSION_UNIT (0x13) -+#define MPI_HEADER_VERSION_UNIT (0x14) - #define MPI_HEADER_VERSION_DEV (0x00) - #define MPI_HEADER_VERSION_UNIT_MASK (0xFF00) - #define MPI_HEADER_VERSION_UNIT_SHIFT (8) ---- a/drivers/message/fusion/lsi/mpi_cnfg.h -+++ b/drivers/message/fusion/lsi/mpi_cnfg.h -@@ -6,7 +6,7 @@ - * Title: MPI Config message, structures, and Pages - * Creation Date: July 27, 2000 - * -- * mpi_cnfg.h Version: 01.05.18 -+ * mpi_cnfg.h Version: 01.05.19 - * - * Version History - * --------------- -@@ -322,6 +322,14 @@ - * 03-28-08 01.05.18 Defined new bits in Manufacturing Page 4 ExtFlags field - * to control coercion size and the mixing of SAS and SATA - * SSD drives. -+ * 07-11-08 01.05.19 Added defines MPI_MANPAGE4_EXTFLAGS_RAID0_SINGLE_DRIVE -+ * and MPI_MANPAGE4_EXTFLAGS_SSD_SCRUB_DISABLE for ExtFlags -+ * field of Manufacturing Page 4. -+ * Added defines for a new bit in BIOS Page 1 BiosOptions -+ * field to control adapter scan order. -+ * Added BootDeviceWaitTime field to SAS IO Unit Page 2. -+ * Added MPI_SAS_PHY0_PHYINFO_PHY_VACANT for use in PhyInfo -+ * field of SAS Expander Page 1. 
- * -------------------------------------------------------------------------- - */ - -@@ -700,6 +708,8 @@ typedef struct _CONFIG_PAGE_MANUFACTURIN - #define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA (0x01) - - /* defines for the ExtFlags field */ -+#define MPI_MANPAGE4_EXTFLAGS_RAID0_SINGLE_DRIVE (0x0400) -+#define MPI_MANPAGE4_EXTFLAGS_SSD_SCRUB_DISABLE (0x0200) - #define MPI_MANPAGE4_EXTFLAGS_MASK_COERCION_SIZE (0x0180) - #define MPI_MANPAGE4_EXTFLAGS_SHIFT_COERCION_SIZE (7) - #define MPI_MANPAGE4_EXTFLAGS_1GB_COERCION_SIZE (0) -@@ -1219,6 +1229,10 @@ typedef struct _CONFIG_PAGE_BIOS_1 - #define MPI_BIOSPAGE1_OPTIONS_SPI_ENABLE (0x00000400) - #define MPI_BIOSPAGE1_OPTIONS_FC_ENABLE (0x00000200) - #define MPI_BIOSPAGE1_OPTIONS_SAS_ENABLE (0x00000100) -+ -+#define MPI_BIOSPAGE1_OPTIONS_SCAN_HIGH_TO_LOW (0x00000002) -+#define MPI_BIOSPAGE1_OPTIONS_SCAN_LOW_TO_HIGH (0x00000000) -+ - #define MPI_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) - - /* values for the IOCSettings field */ -@@ -2712,7 +2726,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_ - { - CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */ - U8 NumDevsPerEnclosure; /* 08h */ -- U8 Reserved1; /* 09h */ -+ U8 BootDeviceWaitTime; /* 09h */ - U16 Reserved2; /* 0Ah */ - U16 MaxPersistentIDs; /* 0Ch */ - U16 NumPersistentIDsUsed; /* 0Eh */ -@@ -2722,7 +2736,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_ - } CONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2, - SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t; - --#define MPI_SASIOUNITPAGE2_PAGEVERSION (0x06) -+#define MPI_SASIOUNITPAGE2_PAGEVERSION (0x07) - - /* values for SAS IO Unit Page 2 Status field */ - #define MPI_SAS_IOUNIT2_STATUS_DEVICE_LIMIT_EXCEEDED (0x08) -@@ -2997,6 +3011,7 @@ typedef struct _CONFIG_PAGE_SAS_PHY_0 - #define MPI_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01) - - /* values for SAS PHY Page 0 PhyInfo field */ -+#define MPI_SAS_PHY0_PHYINFO_PHY_VACANT (0x80000000) - #define MPI_SAS_PHY0_PHYINFO_SATA_PORT_ACTIVE (0x00004000) - #define 
MPI_SAS_PHY0_PHYINFO_SATA_PORT_SELECTOR (0x00002000) - #define MPI_SAS_PHY0_PHYINFO_VIRTUAL_PHY (0x00001000) ---- a/drivers/message/fusion/lsi/mpi_history.txt -+++ b/drivers/message/fusion/lsi/mpi_history.txt -@@ -6,15 +6,15 @@ - Copyright (c) 2000-2008 LSI Corporation. - - --------------------------------------- -- Header Set Release Version: 01.05.19 -- Header Set Release Date: 03-28-08 -+ Header Set Release Version: 01.05.20 -+ Header Set Release Date: 07-11-08 - --------------------------------------- - - Filename Current version Prior version - ---------- --------------- ------------- -- mpi.h 01.05.16 01.05.15 -- mpi_ioc.h 01.05.16 01.05.15 -- mpi_cnfg.h 01.05.18 01.05.17 -+ mpi.h 01.05.17 01.05.16 -+ mpi_ioc.h 01.05.16 01.05.16 -+ mpi_cnfg.h 01.05.19 01.05.18 - mpi_init.h 01.05.09 01.05.09 - mpi_targ.h 01.05.06 01.05.06 - mpi_fc.h 01.05.01 01.05.01 -@@ -24,7 +24,7 @@ - mpi_inb.h 01.05.01 01.05.01 - mpi_sas.h 01.05.05 01.05.05 - mpi_type.h 01.05.02 01.05.02 -- mpi_history.txt 01.05.19 01.05.18 -+ mpi_history.txt 01.05.20 01.05.19 - - - * Date Version Description -@@ -99,6 +99,7 @@ mpi.h - * 08-07-07 01.05.14 Bumped MPI_HEADER_VERSION_UNIT. - * 01-15-08 01.05.15 Bumped MPI_HEADER_VERSION_UNIT. - * 03-28-08 01.05.16 Bumped MPI_HEADER_VERSION_UNIT. -+ * 07-11-08 01.05.17 Bumped MPI_HEADER_VERSION_UNIT. - * -------------------------------------------------------------------------- - - mpi_ioc.h -@@ -130,7 +131,7 @@ mpi_ioc.h - * 08-08-01 01.02.01 Original release for v1.2 work. - * New format for FWVersion and ProductId in - * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER. -- * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and -+ * 08-31-01 01.02.02 Addded event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and - * related structure and defines. - * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED. - * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE. -@@ -190,7 +191,7 @@ mpi_ioc.h - * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED. 
- * Added MaxInitiators field to PortFacts reply. - * Added SAS Device Status Change ReasonCode for -- * asynchronous notification. -+ * asynchronous notificaiton. - * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event - * data structure. - * Added new ImageType values for FWDownload and FWUpload -@@ -523,6 +524,14 @@ mpi_cnfg.h - * 03-28-08 01.05.18 Defined new bits in Manufacturing Page 4 ExtFlags field - * to control coercion size and the mixing of SAS and SATA - * SSD drives. -+ * 07-11-08 01.05.19 Added defines MPI_MANPAGE4_EXTFLAGS_RAID0_SINGLE_DRIVE -+ * and MPI_MANPAGE4_EXTFLAGS_SSD_SCRUB_DISABLE for ExtFlags -+ * field of Manufacturing Page 4. -+ * Added defines for a new bit in BIOS Page 1 BiosOptions -+ * field to control adapter scan order. -+ * Added BootDeviceWaitTime field to SAS IO Unit Page 2. -+ * Added MPI_SAS_PHY0_PHYINFO_PHY_VACANT for use in PhyInfo -+ * field of SAS Expander Page 1. - * -------------------------------------------------------------------------- - - mpi_init.h -@@ -623,7 +632,7 @@ mpi_fc.h - * 11-02-00 01.01.01 Original release for post 1.0 work - * 12-04-00 01.01.02 Added messages for Common Transport Send and - * Primitive Send. -- * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix -+ * 01-09-01 01.01.03 Modifed some of the new flags to have an MPI prefix - * and modified the FcPrimitiveSend flags. - * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger - * field. 
-@@ -743,20 +752,20 @@ mpi_type.h - - mpi_history.txt Parts list history - --Filename 01.05.19 01.05.18 01.05.17 01.05.16 01.05.15 ------------ -------- -------- -------- -------- -------- --mpi.h 01.05.16 01.05.15 01.05.14 01.05.13 01.05.12 --mpi_ioc.h 01.05.16 01.05.15 01.05.15 01.05.14 01.05.13 --mpi_cnfg.h 01.05.18 01.05.17 01.05.16 01.05.15 01.05.14 --mpi_init.h 01.05.09 01.05.09 01.05.09 01.05.09 01.05.09 --mpi_targ.h 01.05.06 01.05.06 01.05.06 01.05.06 01.05.06 --mpi_fc.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 --mpi_lan.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 --mpi_raid.h 01.05.05 01.05.05 01.05.04 01.05.03 01.05.03 --mpi_tool.h 01.05.03 01.05.03 01.05.03 01.05.03 01.05.03 --mpi_inb.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 --mpi_sas.h 01.05.05 01.05.05 01.05.04 01.05.04 01.05.04 --mpi_type.h 01.05.02 01.05.02 01.05.02 01.05.02 01.05.02 -+Filename 01.05.20 01.05.19 01.05.18 01.05.17 01.05.16 01.05.15 -+---------- -------- -------- -------- -------- -------- -------- -+mpi.h 01.05.17 01.05.16 01.05.15 01.05.14 01.05.13 01.05.12 -+mpi_ioc.h 01.05.16 01.05.16 01.05.15 01.05.15 01.05.14 01.05.13 -+mpi_cnfg.h 01.05.19 01.05.18 01.05.17 01.05.16 01.05.15 01.05.14 -+mpi_init.h 01.05.09 01.05.09 01.05.09 01.05.09 01.05.09 01.05.09 -+mpi_targ.h 01.05.06 01.05.06 01.05.06 01.05.06 01.05.06 01.05.06 -+mpi_fc.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 -+mpi_lan.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 -+mpi_raid.h 01.05.05 01.05.05 01.05.05 01.05.04 01.05.03 01.05.03 -+mpi_tool.h 01.05.03 01.05.03 01.05.03 01.05.03 01.05.03 01.05.03 -+mpi_inb.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 -+mpi_sas.h 01.05.05 01.05.05 01.05.05 01.05.04 01.05.04 01.05.04 -+mpi_type.h 01.05.02 01.05.02 01.05.02 01.05.02 01.05.02 01.05.02 - - Filename 01.05.14 01.05.13 01.05.12 01.05.11 01.05.10 01.05.09 - ---------- -------- -------- -------- -------- -------- -------- ---- a/drivers/message/fusion/lsi/mpi_log_sas.h -+++ 
b/drivers/message/fusion/lsi/mpi_log_sas.h -@@ -161,11 +161,10 @@ - - #define PL_LOGINFO_SUB_CODE_INVALID_SGL (0x00000200) - #define PL_LOGINFO_SUB_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00000300) --#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400) --/* Bits 0-3 encode Transport Status Register (offset 0x08) */ --/* Bit 0 is Status Bit 0: FrameXferErr */ --/* Bit 1 & 2 are Status Bits 16 and 17: FrameXmitErrStatus */ --/* Bit 3 is Status Bit 18 WriteDataLenghtGTDataLengthErr */ -+#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400) /* Bits 0-3 encode Transport Status Register (offset 0x08) */ -+ /* Bit 0 is Status Bit 0: FrameXferErr */ -+ /* Bit 1 & 2 are Status Bits 16 and 17: FrameXmitErrStatus */ -+ /* Bit 3 is Status Bit 18 WriteDataLenghtGTDataLengthErr */ - - #define PL_LOGINFO_SUB_CODE_TX_FM_CONNECTED_LOW (0x00000500) - #define PL_LOGINFO_SUB_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00000600) -@@ -180,8 +179,7 @@ - #define PL_LOGINFO_SUB_CODE_DISCOVERY_REMOTE_SEP_RESET (0x00000E01) - #define PL_LOGINFO_SUB_CODE_SECOND_OPEN (0x00000F00) - #define PL_LOGINFO_SUB_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00001000) --#define PL_LOGINFO_SUB_CODE_BREAK_ON_SATA_CONNECTION (0x00002000) --/* not currently used in mainline */ -+#define PL_LOGINFO_SUB_CODE_BREAK_ON_SATA_CONNECTION (0x00002000) /* not currently used in mainline */ - #define PL_LOGINFO_SUB_CODE_BREAK_ON_STUCK_LINK (0x00003000) - #define PL_LOGINFO_SUB_CODE_BREAK_ON_STUCK_LINK_AIP (0x00004000) - #define PL_LOGINFO_SUB_CODE_BREAK_ON_INCOMPLETE_BREAK_RCVD (0x00005000) -@@ -309,6 +307,8 @@ - #define IR_LOGINFO_DEV_FW_UPDATE_ERR_PORT_IO_TIMEOUTS_REQUIRED (0x00010055) - /* Device Firmware Update: Unable to allocate memory for page */ - #define IR_LOGINFO_DEV_FW_UPDATE_ERR_ALLOC_CFG_PAGE (0x00010056) -+/* Device Firmware Update: */ -+//#define IR_LOGINFO_DEV_FW_UPDATE_ERR_ (0x00010054) - - - /****************************************************************************/ ---- 
a/drivers/message/fusion/lsi/mpi_type.h -+++ b/drivers/message/fusion/lsi/mpi_type.h -@@ -20,6 +20,7 @@ - * 08-08-01 01.02.01 Original release for v1.2 work. - * 05-11-04 01.03.01 Original release for MPI v1.3. - * 08-19-04 01.05.01 Original release for MPI v1.5. -+ * 08-30-05 01.05.02 Added PowerPC option to #ifdef's. - * -------------------------------------------------------------------------- - */ - -@@ -49,8 +50,18 @@ typedef signed short S16; - typedef unsigned short U16; - - --typedef int32_t S32; --typedef u_int32_t U32; -+#if defined(unix) || defined(__arm) || defined(ALPHA) || defined(__PPC__) || defined(__ppc) -+ -+ typedef signed int S32; -+ typedef unsigned int U32; -+ -+#else -+ -+ typedef signed long S32; -+ typedef unsigned long U32; -+ -+#endif -+ - - typedef struct _S64 - { ---- a/drivers/message/fusion/mptbase.c -+++ b/drivers/message/fusion/mptbase.c -@@ -58,6 +58,7 @@ - #include - #include /* needed for in_interrupt() proto */ - #include -+#include - #include - #ifdef CONFIG_MTRR - #include -@@ -100,12 +101,13 @@ static int mpt_channel_mapping; - module_param(mpt_channel_mapping, int, 0); - MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)"); - --static int mpt_debug_level; -+int mpt_debug_level; - static int mpt_set_debug_level(const char *val, struct kernel_param *kp); - module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int, - &mpt_debug_level, 0600); - MODULE_PARM_DESC(mpt_debug_level, " debug level - refer to mptdebug.h \ - - (default=0)"); -+EXPORT_SYMBOL(mpt_debug_level); - - int mpt_fwfault_debug; - EXPORT_SYMBOL(mpt_fwfault_debug); -@@ -126,7 +128,7 @@ static int mfcounter = 0; - * Public data... 
- */ - --static struct proc_dir_entry *mpt_proc_root_dir; -+struct proc_dir_entry *mpt_proc_root_dir; - - #define WHOINIT_UNKNOWN 0xAA - -@@ -144,7 +146,7 @@ static int MptDriverClass[MPT_MAX_PRO - static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS]; - /* Reset handler lookup table */ - static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS]; --static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS]; -+static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS]; - - - /* -@@ -157,7 +159,6 @@ static u8 last_drv_idx; - /* - * Forward protos... - */ --static irqreturn_t mpt_interrupt(int irq, void *bus_id); - static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, - MPT_FRAME_HDR *reply); - static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, -@@ -188,8 +189,8 @@ static int GetIoUnitPage2(MPT_ADAPTER *i - int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); - static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum); - static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); --static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); --static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc); -+static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); -+static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc); - static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc); - static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, - int sleepFlag); -@@ -220,11 +221,11 @@ static void mpt_inactive_raid_list_free( - static int __init fusion_init (void); - static void __exit fusion_exit (void); - --#define CHIPREG_READ32(addr) readl_relaxed(addr) -+#define CHIPREG_READ32(addr) readl_relaxed(addr) - #define CHIPREG_READ32_dmasync(addr) readl(addr) --#define CHIPREG_WRITE32(addr,val) writel(val, addr) -+#define CHIPREG_WRITE32(addr,val) writel(val, addr) - #define CHIPREG_PIO_WRITE32(addr,val) outl(val, (unsigned long)addr) --#define CHIPREG_PIO_READ32(addr) inl((unsigned 
long)addr) -+#define CHIPREG_PIO_READ32(addr) inl((unsigned long)addr) - - static void - pci_disable_io_access(struct pci_dev *pdev) -@@ -246,6 +247,15 @@ pci_enable_io_access(struct pci_dev *pde - pci_write_config_word(pdev, PCI_COMMAND, command_reg); - } - -+/** -+ * mpt_set_debug_level - global setting of the mpt_debug_level -+ * found via /sys/module/mptbase/parameters/mpt_debug_level -+ * @val: -+ * @kp: -+ * -+ * Returns -+ **/ -+ - static int mpt_set_debug_level(const char *val, struct kernel_param *kp) - { - int ret = param_set_int(val, kp); -@@ -492,6 +502,9 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa) - mpt_sas_log_info(ioc, log_info); - } - -+ /* TODO - add shost_attrs, or command line option, and -+ * extend this to SAS/FC -+ */ - if (ioc_stat & MPI_IOCSTATUS_MASK) - mpt_iocstatus_info(ioc, (u32)ioc_stat, mf); - -@@ -782,6 +795,8 @@ mpt_device_driver_register(struct mpt_pc - - /* call per pci device probe entry point */ - list_for_each_entry(ioc, &ioc_list, list) { -+ if (!pci_get_drvdata(ioc->pcidev)) -+ continue; - id = ioc->pcidev->driver ? - ioc->pcidev->driver->id_table : NULL; - if (dd_cbfunc->probe) -@@ -914,7 +929,8 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER - - DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf); - -- mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx]; -+ mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | -+ ioc->RequestNB[req_idx]; - dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d " - "RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx, - ioc->RequestNB[req_idx])); -@@ -1023,7 +1039,8 @@ mpt_add_sge_64bit(void *pAddr, u32 flags - } - - /** -- * mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr (1078 workaround). -+ * mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr -+ * (1078 workaround). 
- * @pAddr: virtual address for SGE - * @flagslength: SGE flags and data transfer length - * @dma_addr: Physical address -@@ -1140,7 +1157,7 @@ mpt_send_handshake_request(u8 cb_idx, MP - * is in proper (pre-alloc'd) request buffer range... - */ - ii = MFPTR_2_MPT_INDEX(ioc,(MPT_FRAME_HDR*)req); -- if (reqBytes >= 12 && ii >= 0 && ii < ioc->req_depth) { -+ if (ii >= 0 && ii < ioc->req_depth) { - MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req; - mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii); - mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; -@@ -1582,6 +1599,7 @@ mpt_get_product_name(u16 vendor, u16 dev - * @ioc: Pointer to pointer to IOC adapter - * - **/ -+#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) - static int - mpt_mapresources(MPT_ADAPTER *ioc) - { -@@ -1591,9 +1609,9 @@ mpt_mapresources(MPT_ADAPTER *ioc) - unsigned long port; - u32 msize; - u32 psize; -- u8 revision; - int r = -ENODEV; - struct pci_dev *pdev; -+ struct sysinfo s; - - pdev = ioc->pcidev; - ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); -@@ -1608,22 +1626,21 @@ mpt_mapresources(MPT_ADAPTER *ioc) - return r; - } - -- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); -- - if (sizeof(dma_addr_t) > 4) { -- const uint64_t required_mask = dma_get_required_mask -- (&pdev->dev); -+ uint64_t required_mask; -+ -+ required_mask = dma_get_required_mask(&pdev->dev); -+ - if (required_mask > DMA_BIT_MASK(32) - && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) - && !pci_set_consistent_dma_mask(pdev, -- DMA_BIT_MASK(64))) { -+ DMA_BIT_MASK(64))) { - ioc->dma_mask = DMA_BIT_MASK(64); - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", - ioc->name)); - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) -- && !pci_set_consistent_dma_mask(pdev, -- DMA_BIT_MASK(32))) { -+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { - ioc->dma_mask = DMA_BIT_MASK(32); - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", 
-@@ -1635,8 +1652,7 @@ mpt_mapresources(MPT_ADAPTER *ioc) - } - } else { - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) -- && !pci_set_consistent_dma_mask(pdev, -- DMA_BIT_MASK(32))) { -+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { - ioc->dma_mask = DMA_BIT_MASK(32); - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", -@@ -1648,6 +1664,11 @@ mpt_mapresources(MPT_ADAPTER *ioc) - } - } - -+ si_meminfo(&s); -+ printk(MYIOC_s_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, " -+ "total memory = %ld kB\n", -+ ioc->name, ioc->dma_mask == DMA_BIT_MASK(64) ? "64" : "32", -+ convert_to_kb(s.totalram)); - mem_phys = msize = 0; - port = psize = 0; - for (ii = 0; ii < DEVICE_COUNT_RESOURCE; ii++) { -@@ -1804,7 +1825,6 @@ mpt_attach(struct pci_dev *pdev, const s - /* Find lookup slot. */ - INIT_LIST_HEAD(&ioc->list); - -- - /* Initialize workqueue */ - INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work); - -@@ -1841,14 +1861,14 @@ mpt_attach(struct pci_dev *pdev, const s - case MPI_MANUFACTPAGE_DEVICEID_FC929X: - if (revision < XL_929) { - /* 929X Chip Fix. Set Split transactions level -- * for PCIX. Set MOST bits to zero. -- */ -+ * for PCIX. Set MOST bits to zero. -+ */ - pci_read_config_byte(pdev, 0x6a, &pcixcmd); - pcixcmd &= 0x8F; - pci_write_config_byte(pdev, 0x6a, pcixcmd); - } else { - /* 929XL Chip Fix. Set MMRBC to 0x08. 
-- */ -+ */ - pci_read_config_byte(pdev, 0x6a, &pcixcmd); - pcixcmd |= 0x08; - pci_write_config_byte(pdev, 0x6a, pcixcmd); -@@ -1948,7 +1968,6 @@ mpt_attach(struct pci_dev *pdev, const s - iounmap(ioc->memmap); - if (r != -5) - pci_release_selected_regions(pdev, ioc->bars); -- - destroy_workqueue(ioc->reset_work_q); - ioc->reset_work_q = NULL; - -@@ -2000,7 +2019,7 @@ mpt_attach(struct pci_dev *pdev, const s - void - mpt_detach(struct pci_dev *pdev) - { -- MPT_ADAPTER *ioc = pci_get_drvdata(pdev); -+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - char pname[32]; - u8 cb_idx; - unsigned long flags; -@@ -2273,6 +2292,15 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u3 - /* hard_reset_done = 0 if a soft reset was performed - * and 1 if a hard reset was performed. - */ -+ if (!hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) { -+ /* (re)Enable alt-IOC! (reply interrupt, FreeQ) */ -+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": alt-ioc reply irq re-enabled\n", -+ ioc->alt_ioc->name)); -+ CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM); -+ ioc->alt_ioc->active = 1; -+ reset_alt_ioc_active = 0; -+ } -+ - if (hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) { - if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0) - alt_ioc_ready = 1; -@@ -2472,7 +2500,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u3 - /* - * Initalize link list for inactive raid volumes. 
- */ -- mutex_init(&ioc->raid_data.inactive_list_mutex); -+ init_MUTEX(&ioc->raid_data.inactive_list_mutex); - INIT_LIST_HEAD(&ioc->raid_data.inactive_list); - - switch (ioc->bus_type) { -@@ -2641,13 +2669,12 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) - if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) - printk(MYIOC_s_ERR_FMT "%s: IOC msg unit " - "reset failed to put ioc in ready state!\n", -- ioc->name, __func__); -+ ioc->name, __FUNCTION__); - } else - printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset " -- "failed!\n", ioc->name, __func__); -+ "failed!\n", ioc->name, __FUNCTION__); - } - -- - /* Disable adapter interrupts! */ - synchronize_irq(ioc->pcidev->irq); - CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); -@@ -2690,8 +2717,10 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) - mpt_inactive_raid_list_free(ioc); - kfree(ioc->raid_data.pIocPg2); - kfree(ioc->raid_data.pIocPg3); -+ kfree(ioc->raid_data.pIocPg6); - ioc->spi_data.nvram = NULL; - ioc->raid_data.pIocPg3 = NULL; -+ ioc->raid_data.pIocPg6 = NULL; - - if (ioc->spi_data.pIocPg4 != NULL) { - sz = ioc->spi_data.IocPg4Sz; -@@ -2882,6 +2911,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force - */ - if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) { - statefault = 2; -+ ioc->is_fault = 1; - printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n", - ioc->name); - printk(MYIOC_s_WARN_FMT " FAULT code = %04xh\n", -@@ -3066,6 +3096,8 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepF - } - - facts->MsgVersion = le16_to_cpu(facts->MsgVersion); -+ if (facts->MsgVersion == MPI_VERSION_01_05) -+ facts->HeaderVersion = le16_to_cpu(facts->HeaderVersion); - facts->MsgContext = le32_to_cpu(facts->MsgContext); - facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions); - facts->IOCStatus = le16_to_cpu(facts->IOCStatus); -@@ -3301,7 +3333,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepF - if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) { - // set MsgVersion and HeaderVersion host driver was built with - ioc_init.MsgVersion = 
cpu_to_le16(MPI_VERSION); -- ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION); -+ ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION); - - if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) { - ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE; -@@ -3516,13 +3548,15 @@ mpt_do_upload(MPT_ADAPTER *ioc, int slee - u32 flagsLength; - int ii, sz, reply_sz; - int cmdStatus; -- int request_size; -+ int request_size; -+ - /* If the image size is 0, we are done. - */ -- if ((sz = ioc->facts.FWImageSize) == 0) -+ sz = ioc->facts.FWImageSize; -+ if (!sz) - return 0; - -- if (mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize) != 0) -+ if (mpt_alloc_fw_memory(ioc, sz) != 0) - return -ENOMEM; - - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Image @ %p[%p], sz=%d[%x] bytes\n", -@@ -3557,7 +3591,7 @@ mpt_do_upload(MPT_ADAPTER *ioc, int slee - ioc->SGE_size; - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload " - " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest, -- ioc->facts.FWImageSize, request_size)); -+ sz, request_size)); - DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest); - - ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest, -@@ -3574,9 +3608,9 @@ mpt_do_upload(MPT_ADAPTER *ioc, int slee - int status; - status = le16_to_cpu(preply->IOCStatus) & - MPI_IOCSTATUS_MASK; -- if (status == MPI_IOCSTATUS_SUCCESS && -- ioc->facts.FWImageSize == -- le32_to_cpu(preply->ActualImageSize)) -+ if ((status == MPI_IOCSTATUS_SUCCESS) && -+ (ioc->facts.FWImageSize == -+ le32_to_cpu(preply->ActualImageSize))) - cmdStatus = 0; - } - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n", -@@ -3618,7 +3652,7 @@ mpt_downloadboot(MPT_ADAPTER *ioc, MpiFw - u32 diagRwData; - u32 nextImage; - u32 load_addr; -- u32 ioc_state=0; -+ u32 doorbell; - - ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n", - ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, 
pFwHeader)); -@@ -3672,6 +3706,7 @@ mpt_downloadboot(MPT_ADAPTER *ioc, MpiFw - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); - - /* Set the DiagRwEn and Disable ARM bits */ -+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); - CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM)); - - fwSize = (pFwHeader->ImageSize + 3)/4; -@@ -3713,11 +3748,13 @@ mpt_downloadboot(MPT_ADAPTER *ioc, MpiFw - } - - /* Write the IopResetVectorRegAddr */ -- ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Addr=%x! \n", ioc->name, pFwHeader->IopResetRegAddr)); -+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Addr=%x! \n", -+ ioc->name, pFwHeader->IopResetRegAddr)); - CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->IopResetRegAddr); - - /* Write the IopResetVectorValue */ -- ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Value=%x! \n", ioc->name, pFwHeader->IopResetVectorValue)); -+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Value=%x! 
\n", -+ ioc->name, pFwHeader->IopResetVectorValue)); - CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, pFwHeader->IopResetVectorValue); - - /* Clear the internal flash bad bit - autoincrementing register, -@@ -3734,17 +3771,6 @@ mpt_downloadboot(MPT_ADAPTER *ioc, MpiFw - CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); - CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData); - -- } else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ { -- diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); -- CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | -- MPI_DIAG_CLEAR_FLASH_BAD_SIG); -- -- /* wait 1 msec */ -- if (sleepFlag == CAN_SLEEP) { -- msleep (1); -- } else { -- mdelay (1); -- } - } - - if (ioc->errata_flag_1064) -@@ -3754,51 +3780,64 @@ mpt_downloadboot(MPT_ADAPTER *ioc, MpiFw - ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot diag0val=%x, " - "turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n", - ioc->name, diag0val)); -- diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE); -+ diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM); - ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot now diag0val=%x\n", - ioc->name, diag0val)); - CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); - -- /* Write 0xFF to reset the sequencer */ -- CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); -+ if (ioc->bus_type == SAS ) { -+ /* wait 1 sec */ -+ if (sleepFlag == CAN_SLEEP) -+ msleep(1000); -+ else -+ mdelay(1000); - -- if (ioc->bus_type == SAS) { -- ioc_state = mpt_GetIocState(ioc, 0); -- if ( (GetIocFacts(ioc, sleepFlag, -- MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) { -- ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "GetIocFacts failed: IocState=%x\n", -- ioc->name, ioc_state)); -- return -EFAULT; -+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); -+ ddlprintk(ioc, printk (MYIOC_s_DEBUG_FMT -+ "diag0val=%x, turning off RW_ENABLE\n", ioc->name, -+ diag0val)); -+ diag0val &= ~(MPI_DIAG_RW_ENABLE); -+ 
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "now diag0val=%x\n", ioc->name, diag0val)); -+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); -+ -+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); -+ if (diag0val & MPI_DIAG_FLASH_BAD_SIG) { -+ diag0val |= MPI_DIAG_CLEAR_FLASH_BAD_SIG; -+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); -+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); - } -+ diag0val &= ~(MPI_DIAG_DISABLE_ARM); -+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); -+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); -+ CHIPREG_WRITE32(&ioc->chip->DiagRwAddress, 0x3f000004); - } - -- for (count=0; countname, count, ioc_state)); -- if (ioc->bus_type == SAS) { -+ /* Write 0xFF to reset the sequencer */ -+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); -+ -+ for (count = 0; count < 30; count ++) { -+ doorbell = CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_IOC_STATE_MASK; -+ if (doorbell == MPI_IOC_STATE_READY) { -+ if (ioc->bus_type == SAS) - return 0; -- } - if ((SendIocInit(ioc, sleepFlag)) != 0) { -- ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "downloadboot: SendIocInit failed\n", -- ioc->name)); -+ ddlprintk(ioc, printk(MYIOC_s_WARN_FMT -+ "SendIocInit failed\n", ioc->name)); - return -EFAULT; - } - ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "downloadboot: SendIocInit successful\n", -- ioc->name)); -+ "SendIocInit successful\n", ioc->name)); - return 0; - } -- if (sleepFlag == CAN_SLEEP) { -- msleep (10); -- } else { -- mdelay (10); -- } -+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "looking for READY STATE:" -+ " doorbell=%x count=%d\n", ioc->name, doorbell, count)); -+ if (sleepFlag == CAN_SLEEP) -+ msleep(1000); -+ else -+ mdelay(1000); - } -- ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "downloadboot failed! IocState=%x\n",ioc->name, ioc_state)); -+ ddlprintk(ioc, printk(MYIOC_s_WARN_FMT "downloadboot failed! 
count=%d\n", ioc->name, count)); - return -EFAULT; - } - -@@ -3853,6 +3892,7 @@ KickStart(MPT_ADAPTER *ioc, int force, i - if (hard_reset_done < 0) - return hard_reset_done; - -+ /* may not have worked but hard_reset_done doesn't always signal failure */ - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset successful!\n", - ioc->name)); - -@@ -3861,7 +3901,7 @@ KickStart(MPT_ADAPTER *ioc, int force, i - ioc_state = mpt_GetIocState(ioc, 1); - if ((ioc_state == MPI_IOC_STATE_READY) || (ioc_state == MPI_IOC_STATE_OPERATIONAL)) { - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStart successful! (cnt=%d)\n", -- ioc->name, cnt)); -+ ioc->name, cnt)); - return hard_reset_done; - } - if (sleepFlag == CAN_SLEEP) { -@@ -3899,7 +3939,7 @@ static int - mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) - { - u32 diag0val; -- u32 doorbell; -+ u32 doorbell = 0; - int hard_reset_done = 0; - int count = 0; - u32 diag1val = 0; -@@ -3931,8 +3971,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ign - */ - for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { - if (MptResetHandlers[cb_idx]) -- (*(MptResetHandlers[cb_idx]))(ioc, -- MPT_IOC_PRE_RESET); -+ (*(MptResetHandlers[cb_idx]))(ioc, MPT_IOC_PRE_RESET); - } - - for (count = 0; count < 60; count ++) { -@@ -3941,29 +3980,39 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ign - - drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "looking for READY STATE: doorbell=%x" -- " count=%d\n", -+ " count=%d\n", - ioc->name, doorbell, count)); - - if (doorbell == MPI_IOC_STATE_READY) { - return 1; - } - -+ /* -+ * Early out for hard fault -+ */ -+ if (count && doorbell == MPI_IOC_STATE_FAULT) -+ break; -+ - /* wait 1 sec */ - if (sleepFlag == CAN_SLEEP) - msleep(1000); - else - mdelay(1000); - } -+ -+ if (doorbell != MPI_IOC_STATE_READY) -+ printk(MYIOC_s_ERR_FMT "Failed to come READY after " -+ "reset! IocState=%x", ioc->name, doorbell); - return -1; - } - - /* Use "Diagnostic reset" method! (only thing available!) 
*/ - diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); - -- if (ioc->debug_level & MPT_DEBUG) { -+ if (ioc->debug_level & MPT_DEBUG_RESET) { - if (ioc->alt_ioc) - diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); -- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG1: diag0=%08x, diag1=%08x\n", -+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG1: diag0=%08x, diag1=%08x\n", - ioc->name, diag0val, diag1val)); - } - -@@ -3999,14 +4048,14 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ign - - diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); - -- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wrote magic DiagWriteEn sequence (%x)\n", -+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wrote magic DiagWriteEn sequence (%x)\n", - ioc->name, diag0val)); - } - -- if (ioc->debug_level & MPT_DEBUG) { -+ if (ioc->debug_level & MPT_DEBUG_RESET) { - if (ioc->alt_ioc) - diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); -- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG2: diag0=%08x, diag1=%08x\n", -+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG2: diag0=%08x, diag1=%08x\n", - ioc->name, diag0val, diag1val)); - } - /* -@@ -4022,7 +4071,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ign - */ - CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER); - hard_reset_done = 1; -- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset performed\n", -+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset performed\n", - ioc->name)); - - /* -@@ -4033,11 +4082,9 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ign - */ - for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { - if (MptResetHandlers[cb_idx]) { -- mpt_signal_reset(cb_idx, -- ioc, MPT_IOC_PRE_RESET); -+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET); - if (ioc->alt_ioc) { -- mpt_signal_reset(cb_idx, -- ioc->alt_ioc, MPT_IOC_PRE_RESET); -+ mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_PRE_RESET); - } - } - } -@@ -4059,7 +4106,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ign - break; - } - -- dprintk(ioc, 
printk(MYIOC_s_DEBUG_FMT "cached_fw: diag0val=%x count=%d\n", -+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "cached_fw: diag0val=%x count=%d\n", - ioc->name, diag0val, count)); - /* wait 1 sec */ - if (sleepFlag == CAN_SLEEP) { -@@ -4092,6 +4139,12 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ign - break; - } - -+ /* -+ * Early out for hard fault -+ */ -+ if (count && doorbell == MPI_IOC_STATE_FAULT) -+ break; -+ - /* wait 1 sec */ - if (sleepFlag == CAN_SLEEP) { - msleep (1000); -@@ -4108,10 +4161,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ign - } - - diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); -- if (ioc->debug_level & MPT_DEBUG) { -+ if (ioc->debug_level & MPT_DEBUG_RESET) { - if (ioc->alt_ioc) - diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); -- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG3: diag0=%08x, diag1=%08x\n", -+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG3: diag0=%08x, diag1=%08x\n", - ioc->name, diag0val, diag1val)); - } - -@@ -4167,11 +4220,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ign - return -3; - } - -- if (ioc->debug_level & MPT_DEBUG) { -+ if (ioc->debug_level & MPT_DEBUG_RESET) { - if (ioc->alt_ioc) - diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); -- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG4: diag0=%08x, diag1=%08x\n", -- ioc->name, diag0val, diag1val)); -+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG4: diag0=%08x, diag1=%08x\n", -+ ioc->name, diag0val, diag1val)); - } - - /* -@@ -4207,7 +4260,7 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_ - drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOC reset(0x%02x)!\n", - ioc->name, reset_type)); - CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<ReqToChain = (int *) mem; - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReqToChain alloc @ %p, sz=%d bytes\n", -- ioc->name, mem, sz)); -+ ioc->name, mem, sz)); - mem = kmalloc(sz, GFP_ATOMIC); - if (mem == NULL) - return -1; - - ioc->RequestNB = (int *) mem; - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestNB alloc @ %p, sz=%d 
bytes\n", -- ioc->name, mem, sz)); -+ ioc->name, mem, sz)); - } - for (ii = 0; ii < ioc->req_depth; ii++) { - ioc->ReqToChain[ii] = MPT_HOST_NO_CHAIN; -@@ -4345,7 +4398,7 @@ initChainBuffers(MPT_ADAPTER *ioc) - - ioc->ChainToChain = (int *) mem; - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainToChain alloc @ %p, sz=%d bytes\n", -- ioc->name, mem, sz)); -+ ioc->name, mem, sz)); - } else { - mem = (u8 *) ioc->ChainToChain; - } -@@ -4411,22 +4464,22 @@ PrimeIocFifos(MPT_ADAPTER *ioc) - - total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth); - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n", -- ioc->name, ioc->reply_sz, ioc->reply_depth)); -+ ioc->name, ioc->reply_sz, ioc->reply_depth)); - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d[%x] bytes\n", -- ioc->name, reply_sz, reply_sz)); -+ ioc->name, reply_sz, reply_sz)); - - sz = (ioc->req_sz * ioc->req_depth); - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d bytes, RequestDepth=%d\n", -- ioc->name, ioc->req_sz, ioc->req_depth)); -+ ioc->name, ioc->req_sz, ioc->req_depth)); - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d[%x] bytes\n", -- ioc->name, sz, sz)); -+ ioc->name, sz, sz)); - total_size += sz; - - sz = num_chain * ioc->req_sz; /* chain buffer pool size */ - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d bytes, ChainDepth=%d\n", -- ioc->name, ioc->req_sz, num_chain)); -+ ioc->name, ioc->req_sz, num_chain)); - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d[%x] bytes num_chain=%d\n", -- ioc->name, sz, sz, num_chain)); -+ ioc->name, sz, sz, num_chain)); - - total_size += sz; - mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma); -@@ -4437,7 +4490,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc) - } - - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Total alloc @ %p[%p], sz=%d[%x] bytes\n", -- ioc->name, mem, (void *)(ulong)alloc_dma, total_size, total_size)); -+ ioc->name, mem, (void 
*)(ulong)alloc_dma, total_size, total_size)); - - memset(mem, 0, total_size); - ioc->alloc_total += total_size; -@@ -4448,7 +4501,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc) - ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF); - - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n", -- ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma)); -+ ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma)); - - alloc_dma += reply_sz; - mem += reply_sz; -@@ -4459,7 +4512,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc) - ioc->req_frames_dma = alloc_dma; - - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffers @ %p[%p]\n", -- ioc->name, mem, (void *)(ulong)alloc_dma)); -+ ioc->name, mem, (void *)(ulong)alloc_dma)); - - ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF); - -@@ -4487,13 +4540,11 @@ PrimeIocFifos(MPT_ADAPTER *ioc) - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffers @ %p(%p)\n", - ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA)); - -- /* Initialize the free chain Q. -- */ -+ /* Initialize the free chain Q. */ - - INIT_LIST_HEAD(&ioc->FreeChainQ); - -- /* Post the chain buffers to the FreeChainQ. -- */ -+ /* Post the chain buffers to the FreeChainQ. 
*/ - mem = (u8 *)ioc->ChainBuffer; - for (i=0; i < num_chain; i++) { - mf = (MPT_FRAME_HDR *) mem; -@@ -4530,15 +4581,14 @@ PrimeIocFifos(MPT_ADAPTER *ioc) - ioc->sense_buf_low_dma = (u32) (ioc->sense_buf_pool_dma & 0xFFFFFFFF); - ioc->alloc_total += sz; - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SenseBuffers @ %p[%p]\n", -- ioc->name, ioc->sense_buf_pool, (void *)(ulong)ioc->sense_buf_pool_dma)); -+ ioc->name, ioc->sense_buf_pool, (void *)(ulong)ioc->sense_buf_pool_dma)); - - } - -- /* Post Reply frames to FIFO -- */ -+ /* Post Reply frames to FIFO */ - alloc_dma = ioc->alloc_dma; - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n", -- ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma)); -+ ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma)); - - for (i = 0; i < ioc->reply_depth; i++) { - /* Write each address to the IOC! */ -@@ -4767,18 +4817,18 @@ WaitForDoorbellInt(MPT_ADAPTER *ioc, int - cntdn = 1000 * howlong; - if (sleepFlag == CAN_SLEEP) { - while (--cntdn) { -+ msleep(1); - intstat = CHIPREG_READ32(&ioc->chip->IntStatus); - if (intstat & MPI_HIS_DOORBELL_INTERRUPT) - break; -- msleep(1); - count++; - } - } else { - while (--cntdn) { -+ udelay (1000); - intstat = CHIPREG_READ32(&ioc->chip->IntStatus); - if (intstat & MPI_HIS_DOORBELL_INTERRUPT) - break; -- udelay (1000); - count++; - } - } -@@ -5015,7 +5065,7 @@ mptbase_sas_persist_operation(MPT_ADAPTE - MPT_FRAME_HDR *mf = NULL; - MPIHeader_t *mpi_hdr; - int ret = 0; -- unsigned long timeleft; -+ unsigned long timeleft; - - mutex_lock(&ioc->mptbase_cmds.mutex); - -@@ -5044,7 +5094,7 @@ mptbase_sas_persist_operation(MPT_ADAPTE - printk(KERN_DEBUG "%s: no msg frames!\n", __func__); - ret = -1; - goto out; -- } -+ } - - mpi_hdr = (MPIHeader_t *) mf; - sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf; -@@ -5063,7 +5113,8 @@ mptbase_sas_persist_operation(MPT_ADAPTE - if (!timeleft) { - printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n", - ioc->name, __func__); -- 
mpt_HardResetHandler(ioc, CAN_SLEEP); -+ if (mpt_SoftResetHandler(ioc, CAN_SLEEP) != 0) -+ mpt_HardResetHandler(ioc, CAN_SLEEP); - mpt_free_msg_frame(ioc, mf); - } - goto out; -@@ -5097,12 +5148,12 @@ static void - mptbase_raid_process_event_data(MPT_ADAPTER *ioc, - MpiEventDataRaid_t * pRaidEventData) - { -- int volume; -- int reason; -- int disk; -- int status; -- int flags; -- int state; -+ int volume; -+ int reason; -+ int disk; -+ int status; -+ int flags; -+ int state; - - volume = pRaidEventData->VolumeID; - reason = pRaidEventData->ReasonCode; -@@ -5198,8 +5249,8 @@ mptbase_raid_process_event_data(MPT_ADAP - : state == MPI_PHYSDISK0_STATUS_FAILED_REQUESTED - ? "failed requested" - : state == MPI_PHYSDISK0_STATUS_OTHER_OFFLINE -- ? "offline" -- : "state unknown", -+ ? "offline" -+ : "state unknown", - flags & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC - ? ", out of sync" : "", - flags & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED -@@ -5477,7 +5528,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc - */ - ioc->spi_data.bus_reset = - (le32_to_cpu(pPP2->PortFlags) & -- MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET) ? -+ MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET) ? - 0 : 1 ; - - /* Save the Port Page 2 data -@@ -5556,6 +5607,69 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTE - return 0; - } - -+static void -+mpt_read_ioc_pg_6(MPT_ADAPTER *ioc) -+{ -+ CONFIGPARMS cfg; -+ ConfigPageHeader_t header; -+ IOCPage6_t *pIoc6=NULL; -+ dma_addr_t ioc6_dma; -+ int iocpage6sz; -+ void *mem; -+ -+ /* Free the old page -+ */ -+ if (ioc->raid_data.pIocPg6) { -+ kfree(ioc->raid_data.pIocPg6); -+ ioc->raid_data.pIocPg6 = NULL; -+ } -+ -+ /* There is at least one physical disk. 
-+ * Read and save IOC Page 3 -+ */ -+ header.PageVersion = 0; -+ header.PageLength = 0; -+ header.PageNumber = 6; -+ header.PageType = MPI_CONFIG_PAGETYPE_IOC; -+ cfg.cfghdr.hdr = &header; -+ cfg.physAddr = -1; -+ cfg.pageAddr = 0; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.dir = 0; -+ cfg.timeout = 0; -+ if (mpt_config(ioc, &cfg) != 0) -+ goto out; -+ -+ if (header.PageLength == 0) -+ goto out; -+ -+ /* Read Header good, alloc memory -+ */ -+ iocpage6sz = header.PageLength * 4; -+ pIoc6 = pci_alloc_consistent(ioc->pcidev, iocpage6sz, &ioc6_dma); -+ if (!pIoc6) -+ goto out; -+ -+ /* Read the Page and save the data -+ * into malloc'd memory. -+ */ -+ cfg.physAddr = ioc6_dma; -+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -+ if (mpt_config(ioc, &cfg) != 0) -+ goto out; -+ -+ mem = kmalloc(iocpage6sz, GFP_ATOMIC); -+ if (!mem) -+ goto out; -+ -+ memcpy(mem, pIoc6, iocpage6sz); -+ ioc->raid_data.pIocPg6 = mem; -+ -+ out: -+ if (pIoc6) -+ pci_free_consistent(ioc->pcidev, iocpage6sz, pIoc6, ioc6_dma); -+} -+ - /** - * mpt_inactive_raid_list_free - This clears this link list. 
- * @ioc : pointer to per adapter structure -@@ -5568,13 +5682,13 @@ mpt_inactive_raid_list_free(MPT_ADAPTER - if (list_empty(&ioc->raid_data.inactive_list)) - return; - -- mutex_lock(&ioc->raid_data.inactive_list_mutex); -+ down(&ioc->raid_data.inactive_list_mutex); - list_for_each_entry_safe(component_info, pNext, - &ioc->raid_data.inactive_list, list) { - list_del(&component_info->list); - kfree(component_info); - } -- mutex_unlock(&ioc->raid_data.inactive_list_mutex); -+ up(&ioc->raid_data.inactive_list_mutex); - } - - /** -@@ -5591,10 +5705,12 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *i - ConfigPageHeader_t hdr; - dma_addr_t dma_handle; - pRaidVolumePage0_t buffer = NULL; -- int i; -- RaidPhysDiskPage0_t phys_disk; -+ int i, j; -+ RaidPhysDiskPage0_t phys_disk; -+ RaidPhysDiskPage1_t *phys_disk_1; - struct inactive_raid_component_info *component_info; - int handle_inactive_volumes; -+ int num_paths, device_is_online; - - memset(&cfg, 0 , sizeof(CONFIGPARMS)); - memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); -@@ -5633,12 +5749,35 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *i - if (!handle_inactive_volumes) - goto out; - -- mutex_lock(&ioc->raid_data.inactive_list_mutex); -+ down(&ioc->raid_data.inactive_list_mutex); - for (i = 0; i < buffer->NumPhysDisks; i++) { - if(mpt_raid_phys_disk_pg0(ioc, - buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0) - continue; - -+ if (phys_disk.PhysDiskStatus.State != -+ MPI_PHYSDISK0_STATUS_ONLINE) -+ continue; -+ -+ /* check to see if device is online by checking phys_disk_pg1 */ -+ device_is_online = 0; -+ num_paths = mpt_raid_phys_disk_get_num_paths(ioc, -+ buffer->PhysDisk[i].PhysDiskNum); -+ if (num_paths < 2) -+ continue; -+ phys_disk_1 = kzalloc(offsetof(RaidPhysDiskPage1_t,Path) + -+ (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); -+ if (!phys_disk_1) -+ continue; -+ mpt_raid_phys_disk_pg1(ioc, buffer->PhysDisk[i].PhysDiskNum, -+ phys_disk_1); -+ for (j = 0; j < num_paths && !device_is_online; j++) -+ if 
(!phys_disk_1->Path[j].Flags) -+ device_is_online = 1; -+ kfree(phys_disk_1); -+ if (!device_is_online) -+ continue; -+ - if ((component_info = kmalloc(sizeof (*component_info), - GFP_KERNEL)) == NULL) - continue; -@@ -5653,7 +5792,7 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *i - list_add_tail(&component_info->list, - &ioc->raid_data.inactive_list); - } -- mutex_unlock(&ioc->raid_data.inactive_list_mutex); -+ up(&ioc->raid_data.inactive_list_mutex); - - out: - if (buffer) -@@ -5743,8 +5882,8 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, - int - mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num) - { -- CONFIGPARMS cfg; -- ConfigPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ ConfigPageHeader_t hdr; - dma_addr_t dma_handle; - pRaidPhysDiskPage1_t buffer = NULL; - int rc; -@@ -5795,7 +5934,6 @@ mpt_raid_phys_disk_get_num_paths(MPT_ADA - - return rc; - } --EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths); - - /** - * mpt_raid_phys_disk_pg1 - returns phys disk page 1 -@@ -5809,11 +5947,10 @@ EXPORT_SYMBOL(mpt_raid_phys_disk_get_num - * -ENOMEM if pci_alloc failed - **/ - int --mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num, -- RaidPhysDiskPage1_t *phys_disk) -+mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num, RaidPhysDiskPage1_t *phys_disk) - { -- CONFIGPARMS cfg; -- ConfigPageHeader_t hdr; -+ CONFIGPARMS cfg; -+ ConfigPageHeader_t hdr; - dma_addr_t dma_handle; - pRaidPhysDiskPage1_t buffer = NULL; - int rc; -@@ -5863,17 +6000,14 @@ mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, - for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) { - phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID; - phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus; -- phys_disk->Path[i].OwnerIdentifier = -- buffer->Path[i].OwnerIdentifier; -+ phys_disk->Path[i].OwnerIdentifier = buffer->Path[i].OwnerIdentifier; - phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags); - memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64)); - sas_address = 
le64_to_cpu(sas_address); - memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64)); -- memcpy(&sas_address, -- &buffer->Path[i].OwnerWWID, sizeof(__le64)); -+ memcpy(&sas_address, &buffer->Path[i].OwnerWWID, sizeof(__le64)); - sas_address = le64_to_cpu(sas_address); -- memcpy(&phys_disk->Path[i].OwnerWWID, -- &sas_address, sizeof(__le64)); -+ memcpy(&phys_disk->Path[i].OwnerWWID, &sas_address, sizeof(__le64)); - } - - out: -@@ -5884,8 +6018,33 @@ mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, - - return rc; - } --EXPORT_SYMBOL(mpt_raid_phys_disk_pg1); - -+/** -+ * mpt_sort_ioc_pg2 - compare function for sorting volumes -+ * in ascending order -+ * @a: ioc_pg2 raid volume page -+ * @b: ioc_pg2 raid volume page -+ * -+ * Return: -+ * 0 same, 1 (a is bigger), -1 (b is bigger) -+ **/ -+static int -+mpt_sort_ioc_pg2(const void *a, const void *b) -+{ -+ ConfigPageIoc2RaidVol_t * volume_a = (ConfigPageIoc2RaidVol_t *)a; -+ ConfigPageIoc2RaidVol_t * volume_b = (ConfigPageIoc2RaidVol_t *)b; -+ -+ if (volume_a->VolumeBus == volume_b->VolumeBus) { -+ if (volume_a->VolumeID == volume_b->VolumeID) -+ return 0; -+ if (volume_a->VolumeID < volume_b->VolumeID) -+ return -1; -+ return 1; -+ } -+ if (volume_a->VolumeBus < volume_b->VolumeBus) -+ return -1; -+ return 1; -+} - - /** - * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes -@@ -5949,16 +6108,22 @@ mpt_findImVolumes(MPT_ADAPTER *ioc) - if (!mem) - goto out; - -+ /* -+ * sort volumes in ascending order -+ */ -+ sort(pIoc2->RaidVolume, pIoc2->NumActiveVolumes, -+ sizeof(ConfigPageIoc2RaidVol_t), mpt_sort_ioc_pg2, NULL); - memcpy(mem, (u8 *)pIoc2, iocpage2sz); - ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem; - -- mpt_read_ioc_pg_3(ioc); -- - for (i = 0; i < pIoc2->NumActiveVolumes ; i++) - mpt_inactive_raid_volumes(ioc, - pIoc2->RaidVolume[i].VolumeBus, - pIoc2->RaidVolume[i].VolumeID); - -+ mpt_read_ioc_pg_3(ioc); -+ mpt_read_ioc_pg_6(ioc); -+ - out: - pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, 
ioc2_dma); - -@@ -6118,6 +6283,9 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc) - cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - if (mpt_config(ioc, &cfg) == 0) { - -+#if defined(CPQ_CIM) -+ ioc->pci_slot_number = pIoc1->PCISlotNum; -+#endif - tmp = le32_to_cpu(pIoc1->Flags) & MPI_IOCPAGE1_REPLY_COALESCING; - if (tmp == MPI_IOCPAGE1_REPLY_COALESCING) { - tmp = le32_to_cpu(pIoc1->CoalescingTimeout); -@@ -6294,9 +6462,9 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS - long timeout; - int ret; - u8 page_type = 0, extend_page; -- unsigned long timeleft; -+ unsigned long timeleft; - unsigned long flags; -- int in_isr; -+ int in_isr; - u8 issue_hard_reset = 0; - u8 retry_count = 0; - -@@ -6308,7 +6476,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS - dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n", - ioc->name)); - return -EPERM; -- } -+ } - - /* don't send a config page during diag reset */ - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -@@ -6447,7 +6615,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS - dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n", - ret, le32_to_cpu(pReply->IOCLogInfo))); - --out: -+ out: - - CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status) - mutex_unlock(&ioc->mptbase_cmds.mutex); -@@ -6455,7 +6623,8 @@ out: - issue_hard_reset = 0; - printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", - ioc->name, __func__); -- mpt_HardResetHandler(ioc, CAN_SLEEP); -+ if (mpt_SoftResetHandler(ioc, CAN_SLEEP) != 0) -+ mpt_HardResetHandler(ioc, CAN_SLEEP); - mpt_free_msg_frame(ioc, mf); - /* attempt one retry for a timed out command */ - if (!retry_count) { -@@ -6469,7 +6638,6 @@ out: - } - } - return ret; -- - } - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -@@ -6823,7 +6991,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, - *size = y; - } - /** -- * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management -+ * mpt_set_taskmgmt_in_progress_flag - set flags 
associated with task managment - * @ioc: Pointer to MPT_ADAPTER structure - * - * Returns 0 for SUCCESS or -1 if FAILED. -@@ -6840,6 +7008,7 @@ mpt_set_taskmgmt_in_progress_flag(MPT_AD - if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress || - (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) { - retval = -1; -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); - goto out; - } - retval = 0; -@@ -6849,14 +7018,14 @@ mpt_set_taskmgmt_in_progress_flag(MPT_AD - ioc->alt_ioc->taskmgmt_in_progress = 1; - ioc->alt_ioc->taskmgmt_quiesce_io = 1; - } -- out: - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -+ -+ out: - return retval; - } --EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag); - - /** -- * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management -+ * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task managment - * @ioc: Pointer to MPT_ADAPTER structure - * - **/ -@@ -6874,8 +7043,6 @@ mpt_clear_taskmgmt_in_progress_flag(MPT_ - } - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); - } --EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag); -- - - /** - * mpt_halt_firmware - Halts the firmware if it is operational and panic -@@ -6893,20 +7060,173 @@ mpt_halt_firmware(MPT_ADAPTER *ioc) - if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) { - printk(MYIOC_s_ERR_FMT "IOC is in FAULT state (%04xh)!!!\n", - ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK); -+ if(mpt_fwfault_debug == 2) -+ for(;;); -+ else - panic("%s: IOC Fault (%04xh)!!!\n", ioc->name, - ioc_raw_state & MPI_DOORBELL_DATA_MASK); - } else { - CHIPREG_WRITE32(&ioc->chip->Doorbell, 0xC0FFEE00); -- panic("%s: Firmware is halted due to command timeout\n", -- ioc->name); -+ if(mpt_fwfault_debug == 2) { -+ printk("%s: Firmware is halted due to command timeout\n" -+ ,ioc->name); -+ for(;;); -+ } -+ else -+ panic("%s: Firmware is halted due to command timeout\n", -+ ioc->name); - } - } --EXPORT_SYMBOL(mpt_halt_firmware); - 
--/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ --/* -- * Reset Handling -- */ -+/** -+ * mpt_SoftResetHandler - Issues a less expensive reset -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @sleepFlag: Indicates if sleep or schedule must be called. -+ -+ * -+ * Returns 0 for SUCCESS or -1 if FAILED. -+ * -+ * Message Unit Reset - instructs the IOC to reset the Reply Post and -+ * Free FIFO's. All the Message Frames on Reply Free FIFO are discarded. -+ * All posted buffers are freed, and event notification is turned off. -+ * IOC doesnt reply to any outstanding request. This will transfer IOC -+ * to READY state. -+ **/ -+int -+mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag) -+{ -+ int rc; -+ int ii; -+ u8 cb_idx; -+ unsigned long flags; -+ u32 ioc_state; -+ unsigned long time_count; -+ int i; -+ -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler Entered!\n", ioc->name)); -+ -+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK; -+ -+ if(mpt_fwfault_debug) -+ mpt_halt_firmware(ioc); -+ -+ if (ioc_state == MPI_IOC_STATE_FAULT || ioc_state == MPI_IOC_STATE_RESET) { -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "skipping, either in FAULT or RESET state!\n", ioc->name)); -+ return -1; -+ } -+ -+ if (ioc->bus_type == FC) { -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "skipping, because the bus type is FC!\n", ioc->name)); -+ return -1; -+ } -+ -+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -+ if (ioc->ioc_reset_in_progress) { -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -+ return -1; -+ } -+ ioc->ioc_reset_in_progress = 1; -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -+ -+ rc = -1; -+ -+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { -+ if (MptResetHandlers[cb_idx]) -+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET); -+ } -+ -+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -+ if (ioc->taskmgmt_in_progress) { -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -+ return 
-1; -+ } -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -+ /* Disable reply interrupts (also blocks FreeQ) */ -+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); -+ ioc->active = 0; -+ time_count = jiffies; -+ -+ rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag); -+ -+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { -+ if (MptResetHandlers[cb_idx]) -+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET); -+ } -+ -+ if (rc) -+ goto out; -+ -+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK; -+ if (ioc_state != MPI_IOC_STATE_READY) -+ goto out; -+ -+ for (ii = 0; ii < 5; ii++) { -+ /* Get IOC facts! Allow 5 retries */ -+ if ((rc = GetIocFacts(ioc, sleepFlag, -+ MPT_HOSTEVENT_IOC_RECOVER)) == 0) -+ break; -+ if (sleepFlag == CAN_SLEEP) { -+ msleep(100); -+ } else { -+ mdelay(100); -+ } -+ } -+ if (ii == 5) -+ goto out; -+ -+ if ((rc = PrimeIocFifos(ioc)) != 0) -+ goto out; -+ -+ if ((rc = SendIocInit(ioc, sleepFlag)) != 0) -+ goto out; -+ -+ if ((rc = SendEventNotification(ioc, 1, sleepFlag)) != 0) -+ goto out; -+ -+ if (ioc->hard_resets < -1) -+ ioc->hard_resets++; -+ -+ /* -+ * At this point, we know soft reset succeeded. 
-+ */ -+ -+ ioc->active = 1; -+ CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); -+ -+ out: -+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -+ ioc->ioc_reset_in_progress = 0; -+ ioc->taskmgmt_quiesce_io = 0; -+ ioc->taskmgmt_in_progress = 0; -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -+ -+ if (ioc->active) { /* otherwise, hard reset coming */ -+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { -+ if (MptResetHandlers[cb_idx]) -+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET); -+ } -+ } -+ /* -+ * Cleanup diag buffer allocated memory -+ */ -+ for (i = 0; i < MPI_DIAG_BUF_TYPE_COUNT; i++) { -+ if (ioc->DiagBuffer[i] == NULL) -+ continue; -+ pci_free_consistent(ioc->pcidev, ioc->DiagBuffer_sz[i], -+ ioc->DiagBuffer[i], ioc->DiagBuffer_dma[i]); -+ ioc->DiagBuffer[i] = NULL; -+ ioc->DiagBuffer_Status[i] = 0; -+ } -+ -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler: completed (%d seconds): %s\n", -+ ioc->name, jiffies_to_msecs(jiffies - time_count)/1000, -+ ((rc == 0) ? 
"SUCCESS" : "FAILED"))); -+ -+ return rc; -+} -+ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /** - * mpt_HardResetHandler - Generic reset handler -@@ -6931,7 +7251,7 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, i - u8 cb_idx; - unsigned long flags; - unsigned long time_count; -- -+ int i; - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name)); - #ifdef MFCNT - printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name); -@@ -6969,22 +7289,23 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, i - } - - time_count = jiffies; -- rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag); -- if (rc != 0) { -- printk(KERN_WARNING MYNAM -- ": WARNING - (%d) Cannot recover %s\n", rc, ioc->name); -+ if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) { -+ printk(KERN_WARNING MYNAM ": WARNING - (%d) Cannot recover %s\n", -+ rc, ioc->name); - } else { - if (ioc->hard_resets < -1) - ioc->hard_resets++; - } - - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -- ioc->ioc_reset_in_progress = 0; -+ if (ioc->is_fault == 1) -+ ioc->is_fault = 2; - ioc->taskmgmt_quiesce_io = 0; -+ ioc->ioc_reset_in_progress = 0; - ioc->taskmgmt_in_progress = 0; - if (ioc->alt_ioc) { -- ioc->alt_ioc->ioc_reset_in_progress = 0; - ioc->alt_ioc->taskmgmt_quiesce_io = 0; -+ ioc->alt_ioc->ioc_reset_in_progress = 0; - ioc->alt_ioc->taskmgmt_in_progress = 0; - } - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -@@ -6993,11 +7314,22 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, i - if (MptResetHandlers[cb_idx]) { - mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET); - if (ioc->alt_ioc) -- mpt_signal_reset(cb_idx, -- ioc->alt_ioc, MPT_IOC_POST_RESET); -+ mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_POST_RESET); - } - } - -+ /* -+ * Cleanup diag buffer allocated memory -+ */ -+ for (i = 0; i < MPI_DIAG_BUF_TYPE_COUNT; i++) { -+ if (ioc->DiagBuffer[i] == NULL) -+ continue; -+ pci_free_consistent(ioc->pcidev, 
ioc->DiagBuffer_sz[i], -+ ioc->DiagBuffer[i], ioc->DiagBuffer_dma[i]); -+ ioc->DiagBuffer[i] = NULL; -+ ioc->DiagBuffer_Status[i] = 0; -+ } -+ - dtmprintk(ioc, - printk(MYIOC_s_DEBUG_FMT - "HardResetHandler: completed (%d seconds): %s\n", ioc->name, -@@ -7150,6 +7482,11 @@ mpt_display_event_info(MPT_ADAPTER *ioc, - "SAS Device Status Change: Internal Device " - "Reset : id=%d channel=%d", id, channel); - break; -+ case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: -+ snprintf(evStr, EVENT_DESCR_STR_SZ, -+ "SAS Device Status Change: Internal Device " -+ "Reset Completed: id=%d channel=%d", id, channel); -+ break; - case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS Device Status Change: Internal Task " -@@ -7170,6 +7507,11 @@ mpt_display_event_info(MPT_ADAPTER *ioc, - "SAS Device Status Change: Internal Query " - "Task : id=%d channel=%d", id, channel); - break; -+ case MPI_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: -+ snprintf(evStr, EVENT_DESCR_STR_SZ, -+ "SAS Device Status Change: Async Notification " -+ "Task : id=%d channel=%d", id, channel); -+ break; - default: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS Device Status Change: Unknown: " -@@ -7336,12 +7678,28 @@ mpt_display_event_info(MPT_ADAPTER *ioc, - { - u8 phy_num = (u8)(evData0); - u8 port_num = (u8)(evData0 >> 8); -- u8 port_width = (u8)(evData0 >> 16); -+ u8 num_phys = (u8)(evData0 >> 16); - u8 primative = (u8)(evData0 >> 24); -+ char *primative_str = NULL; -+ -+ switch (primative) { -+ case MPI_EVENT_PRIMITIVE_CHANGE: -+ primative_str = "change"; -+ break; -+ case MPI_EVENT_PRIMITIVE_EXPANDER: -+ primative_str = "expander"; -+ break; -+ case MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT: -+ primative_str = "asyn event"; -+ break; -+ default: -+ primative_str = "reserved"; -+ break; -+ } - snprintf(evStr, EVENT_DESCR_STR_SZ, -- "SAS Broadcase Primative: phy=%d port=%d " -- "width=%d primative=0x%02x", -- phy_num, port_num, port_width, primative); -+ "SAS 
Broadcast Primative: phy=%d port=%d " -+ "num_phys=%d primative=%s (0x%02x)", -+ phy_num, port_num, num_phys, primative_str, primative); - break; - } - -@@ -7712,7 +8070,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 l - "IO Not Yet Executed", /* 13h */ - "IO Executed", /* 14h */ - "Persistent Reservation Out Not Affiliation " -- "Owner", /* 15h */ -+ "Owner", /* 15h */ - "Open Transmit DMA Abort", /* 16h */ - "IO Device Missing Delay Retry", /* 17h */ - "IO Cancelled Due to Recieve Error", /* 18h */ -@@ -7737,19 +8095,19 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 l - NULL /* 07h */ - }; - static char *raid_sub_code_str[] = { -- NULL, /* 00h */ -+ NULL, /* 00h */ - "Volume Creation Failed: Data Passed too " -- "Large", /* 01h */ -+ "Large", /* 01h */ - "Volume Creation Failed: Duplicate Volumes " -- "Attempted", /* 02h */ -+ "Attempted", /* 02h */ - "Volume Creation Failed: Max Number " - "Supported Volumes Exceeded", /* 03h */ - "Volume Creation Failed: DMA Error", /* 04h */ - "Volume Creation Failed: Invalid Volume Type", /* 05h */ - "Volume Creation Failed: Error Reading " -- "MFG Page 4", /* 06h */ -+ "MFG Page 4", /* 06h */ - "Volume Creation Failed: Creating Internal " -- "Structures", /* 07h */ -+ "Structures", /* 07h */ - NULL, /* 08h */ - NULL, /* 09h */ - NULL, /* 0Ah */ -@@ -7758,12 +8116,12 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 l - NULL, /* 0Dh */ - NULL, /* 0Eh */ - NULL, /* 0Fh */ -- "Activation failed: Already Active Volume", /* 10h */ -- "Activation failed: Unsupported Volume Type", /* 11h */ -- "Activation failed: Too Many Active Volumes", /* 12h */ -- "Activation failed: Volume ID in Use", /* 13h */ -- "Activation failed: Reported Failure", /* 14h */ -- "Activation failed: Importing a Volume", /* 15h */ -+ "Activation failed: Already Active Volume", /* 10h */ -+ "Activation failed: Unsupported Volume Type", /* 11h */ -+ "Activation failed: Too Many Active Volumes", /* 12h */ -+ "Activation failed: Volume ID in Use", /* 13h */ -+ "Activation 
failed: Reported Failure", /* 14h */ -+ "Activation failed: Importing a Volume", /* 15h */ - NULL, /* 16h */ - NULL, /* 17h */ - NULL, /* 18h */ -@@ -7774,12 +8132,12 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 l - NULL, /* 1Dh */ - NULL, /* 1Eh */ - NULL, /* 1Fh */ -- "Phys Disk failed: Too Many Phys Disks", /* 20h */ -+ "Phys Disk failed: Too Many Phys Disks", /* 20h */ - "Phys Disk failed: Data Passed too Large", /* 21h */ -- "Phys Disk failed: DMA Error", /* 22h */ -- "Phys Disk failed: Invalid ", /* 23h */ -+ "Phys Disk failed: DMA Error", /* 22h */ -+ "Phys Disk failed: Invalid ", /* 23h */ - "Phys Disk failed: Creating Phys Disk Config " -- "Page", /* 24h */ -+ "Page", /* 24h */ - NULL, /* 25h */ - NULL, /* 26h */ - NULL, /* 27h */ -@@ -7797,22 +8155,22 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 l - "Device ", /* 32h */ - "Compatibility Error: Removable Device Found", /* 33h */ - "Compatibility Error: Device SCSI Version not " -- "2 or Higher", /* 34h */ -+ "2 or Higher", /* 34h */ - "Compatibility Error: SATA Device, 48 BIT LBA " -- "not Supported", /* 35h */ -+ "not Supported", /* 35h */ - "Compatibility Error: Device doesn't have " -- "512 Byte Block Sizes", /* 36h */ -+ "512 Byte Block Sizes", /* 36h */ - "Compatibility Error: Volume Type Check Failed", /* 37h */ - "Compatibility Error: Volume Type is " -- "Unsupported by FW", /* 38h */ -+ "Unsupported by FW", /* 38h */ - "Compatibility Error: Disk Drive too Small for " -- "use in Volume", /* 39h */ -+ "use in Volume", /* 39h */ - "Compatibility Error: Phys Disk for Create " -- "Volume not Found", /* 3Ah */ -+ "Volume not Found", /* 3Ah */ - "Compatibility Error: Too Many or too Few " -- "Disks for Volume Type", /* 3Bh */ -+ "Disks for Volume Type", /* 3Bh */ - "Compatibility Error: Disk stripe Sizes " -- "Must be 64KB", /* 3Ch */ -+ "Must be 64KB", /* 3Ch */ - "Compatibility Error: IME Size Limited to < 2TB", /* 3Dh */ - }; - -@@ -8210,6 +8568,7 @@ EXPORT_SYMBOL(mpt_resume); - EXPORT_SYMBOL(mpt_suspend); - 
#endif - EXPORT_SYMBOL(ioc_list); -+EXPORT_SYMBOL(mpt_proc_root_dir); - EXPORT_SYMBOL(mpt_register); - EXPORT_SYMBOL(mpt_deregister); - EXPORT_SYMBOL(mpt_event_register); -@@ -8227,13 +8586,18 @@ EXPORT_SYMBOL(mpt_verify_adapter); - EXPORT_SYMBOL(mpt_GetIocState); - EXPORT_SYMBOL(mpt_print_ioc_summary); - EXPORT_SYMBOL(mpt_HardResetHandler); -+EXPORT_SYMBOL(mpt_SoftResetHandler); - EXPORT_SYMBOL(mpt_config); - EXPORT_SYMBOL(mpt_findImVolumes); - EXPORT_SYMBOL(mpt_alloc_fw_memory); - EXPORT_SYMBOL(mpt_free_fw_memory); - EXPORT_SYMBOL(mptbase_sas_persist_operation); - EXPORT_SYMBOL(mpt_raid_phys_disk_pg0); -- -+EXPORT_SYMBOL(mpt_raid_phys_disk_pg1); -+EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths); -+EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag); -+EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag); -+EXPORT_SYMBOL(mpt_halt_firmware); - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /** - * fusion_init - Fusion MPT base driver initialization routine. 
---- a/drivers/message/fusion/mptbase.h -+++ b/drivers/message/fusion/mptbase.h -@@ -49,10 +49,6 @@ - #define MPTBASE_H_INCLUDED - /*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - --#include --#include --#include -- - #include "lsi/mpi_type.h" - #include "lsi/mpi.h" /* Fusion MPI(nterface) basic defs */ - #include "lsi/mpi_ioc.h" /* Fusion MPT IOC(ontroller) defs */ -@@ -76,9 +72,13 @@ - #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR - #endif - --#define MPT_LINUX_VERSION_COMMON "3.04.13" --#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.13" -+#define MPT_LINUX_VERSION_COMMON "4.22.00.00" -+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-4.22.00.00" - #define WHAT_MAGIC_STRING "@" "(" "#" ")" -+#define MPT_LINUX_MAJOR_VERSION 4 -+#define MPT_LINUX_MINOR_VERSION 22 -+#define MPT_LINUX_BUILD_VERSION 00 -+#define MPT_LINUX_RELEASE_VERSION 00 - - #define show_mptmod_ver(s,ver) \ - printk(KERN_INFO "%s %s\n", s, ver); -@@ -87,6 +87,8 @@ - /* - * Fusion MPT(linux) driver configurable stuff... - */ -+#define MPT_POLLING_INTERVAL 1000 /* in milliseconds */ -+ - #define MPT_MAX_ADAPTERS 18 - #define MPT_MAX_PROTOCOL_DRIVERS 16 - #define MPT_MAX_BUS 1 /* Do not change */ -@@ -135,7 +137,6 @@ - - #define MPT_COALESCING_TIMEOUT 0x10 - -- - /* - * SCSI transfer rate defines. - */ -@@ -164,10 +165,10 @@ - /* - * Set the MAX_SGE value based on user input. 
- */ --#ifdef CONFIG_FUSION_MAX_SGE --#if CONFIG_FUSION_MAX_SGE < 16 -+#ifdef CONFIG_FUSION_MAX_SGE -+#if CONFIG_FUSION_MAX_SGE < 16 - #define MPT_SCSI_SG_DEPTH 16 --#elif CONFIG_FUSION_MAX_SGE > 128 -+#elif CONFIG_FUSION_MAX_SGE > 128 - #define MPT_SCSI_SG_DEPTH 128 - #else - #define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE -@@ -176,10 +177,10 @@ - #define MPT_SCSI_SG_DEPTH 40 - #endif - --#ifdef CONFIG_FUSION_MAX_FC_SGE --#if CONFIG_FUSION_MAX_FC_SGE < 16 -+#ifdef CONFIG_FUSION_MAX_FC_SGE -+#if CONFIG_FUSION_MAX_FC_SGE < 16 - #define MPT_SCSI_FC_SG_DEPTH 16 --#elif CONFIG_FUSION_MAX_FC_SGE > 256 -+#elif CONFIG_FUSION_MAX_FC_SGE > 256 - #define MPT_SCSI_FC_SG_DEPTH 256 - #else - #define MPT_SCSI_FC_SG_DEPTH CONFIG_FUSION_MAX_FC_SGE -@@ -189,9 +190,8 @@ - #endif - - /* debug print string length used for events and iocstatus */ --# define EVENT_DESCR_STR_SZ 100 -+# define EVENT_DESCR_STR_SZ 100 - --#define MPT_POLLING_INTERVAL 1000 /* in milliseconds */ - - #ifdef __KERNEL__ /* { */ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -@@ -239,7 +239,6 @@ typedef struct _ATTO_CONFIG_PAGE_SCSI_PO - } fATTO_CONFIG_PAGE_SCSI_PORT_2, MPI_POINTER PTR_ATTO_CONFIG_PAGE_SCSI_PORT_2, - ATTO_SCSIPortPage2_t, MPI_POINTER pATTO_SCSIPortPage2_t; - -- - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /* - * MPT protocol driver defs... -@@ -369,6 +368,31 @@ typedef struct _SYSIF_REGS - * in conjunction with SYSIF_REGS accesses! 
- */ - -+/* -+ * End to End Data Protection Support -+ */ -+#define EEDP_SUPPORT -+#ifdef EEDP_SUPPORT -+ -+#define PRO_R MPI_SCSIIO32_EEDPFLAGS_CHKRM_OP -+#define PRO_W MPI_SCSIIO32_EEDPFLAGS_INSERT_OP -+#define PRO_V MPI_SCSIIO32_EEDPFLAGS_INSERT_OP -+ -+/* the read capacity 16 byte parameter block - defined in SBC-3 */ -+struct read_cap_parameter{ -+ u64 logical_block_addr; -+ u32 logical_block_length; -+ u8 prot_en:1; -+ u8 p_type:3; -+ u8 reserved0:4; -+ u8 logical_blocks_per_phyical_block:4; -+ u8 reserved1:4; -+ u16 lowest_aligned_log_block_address:14; -+ u16 reserved2:2; -+ u8 reserved3[16]; -+}; -+#endif -+ - - /* - * Dynamic Multi-Pathing specific stuff... -@@ -378,7 +402,7 @@ typedef struct _SYSIF_REGS - #define MPT_TARGET_NO_NEGO_WIDE 0x01 - #define MPT_TARGET_NO_NEGO_SYNC 0x02 - #define MPT_TARGET_NO_NEGO_QAS 0x04 --#define MPT_TAPE_NEGO_IDP 0x08 -+#define MPT_TAPE_NEGO_IDP 0x08 - - /* - * VirtDevice - FC LUN device or SCSI target device -@@ -387,8 +411,8 @@ typedef struct _VirtTarget { - struct scsi_target *starget; - u8 tflags; - u8 ioc_id; -- u8 id; -- u8 channel; -+ u8 id; /* logical target id */ -+ u8 channel; /* logical channel number */ - u8 minSyncFactor; /* 0xFF is async */ - u8 maxOffset; /* 0 if async */ - u8 maxWidth; /* 0 if narrow, 1 if wide */ -@@ -396,13 +420,18 @@ typedef struct _VirtTarget { - u8 raidVolume; /* set, if RAID Volume */ - u8 type; /* byte 0 of Inquiry data */ - u8 deleted; /* target in process of being removed */ -- u32 num_luns; -+ int num_luns; - } VirtTarget; - - typedef struct _VirtDevice { - VirtTarget *vtarget; - u8 configured_lun; - int lun; -+#ifdef EEDP_SUPPORT -+ u8 eedp_enable; -+ u8 eedp_type; -+ u32 eedp_block_length; -+#endif - } VirtDevice; - - /* -@@ -442,21 +471,14 @@ do { \ - } while (0) - - --/* -- * IOCTL structure and associated defines -- */ -- --#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */ -- - #define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */ - #define 
MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */ - #define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */ --#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred -- on the current*/ -+#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred on the current*/ - #define MPT_MGMT_STATUS_SENSE_VALID 0x10 /* valid sense info */ - #define MPT_MGMT_STATUS_TIMER_ACTIVE 0x20 /* obsolete */ --#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from -- complete routine */ -+#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from complete routine */ -+ - - #define INITIALIZE_MGMT_STATUS(status) \ - status = MPT_MGMT_STATUS_PENDING; -@@ -475,7 +497,7 @@ typedef struct _MPT_MGMT { - u8 status; /* current command status */ - int completion_code; - u32 msg_context; --} MPT_MGMT; -+}MPT_MGMT; - - /* - * Event Structure and define -@@ -538,7 +560,7 @@ typedef struct _SasCfgData { - * @inactive_list - */ - struct inactive_raid_component_info { -- struct list_head list; -+ struct list_head list; - u8 volumeID; /* volume target id */ - u8 volumeBus; /* volume channel */ - IOC_3_PHYS_DISK d; /* phys disk info */ -@@ -547,7 +569,8 @@ struct inactive_raid_component_info { - typedef struct _RaidCfgData { - IOCPage2_t *pIocPg2; /* table of Raid Volumes */ - IOCPage3_t *pIocPg3; /* table of physical disks */ -- struct mutex inactive_list_mutex; -+ IOCPage6_t *pIocPg6; /* table of IR static data */ -+ struct semaphore inactive_list_mutex; - struct list_head inactive_list; /* link list for physical - disk that belong in - inactive volumes */ -@@ -578,8 +601,8 @@ struct mptfc_rport_info - }; - - typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr); --typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length, -- dma_addr_t dma_addr); -+typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr); -+typedef void (*MPT_SCHEDULE_TARGET_RESET)(void *ioc); - - /* - * Adapter Structure - pci_dev specific. 
Maximum: MPT_MAX_ADAPTERS -@@ -591,8 +614,7 @@ typedef struct _MPT_ADAPTER - char name[MPT_NAME_LENGTH]; /* "iocN" */ - char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */ - #ifdef CONFIG_FUSION_LOGGING -- /* used in mpt_display_event_info */ -- char evStr[EVENT_DESCR_STR_SZ]; -+ char evStr[EVENT_DESCR_STR_SZ]; /* used in mpt_display_event_info */ - #endif - char board_name[16]; - char board_assembly[16]; -@@ -605,8 +627,8 @@ typedef struct _MPT_ADAPTER - SYSIF_REGS __iomem *chip; /* == c8817000 (mmap) */ - SYSIF_REGS __iomem *pio_chip; /* Programmed IO (downloadboot) */ - u8 bus_type; -- u32 mem_phys; /* == f4020000 (mmap) */ -- u32 pio_mem_phys; /* Programmed IO (downloadboot) */ -+ unsigned long mem_phys; /* == f4020000 (mmap) */ -+ unsigned long pio_mem_phys; /* Programmed IO (downloadboot) */ - int mem_size; /* mmap memory size */ - int number_of_buses; - int devices_per_bus; -@@ -621,10 +643,8 @@ typedef struct _MPT_ADAPTER - int reply_depth; /* Num Allocated reply frames */ - int reply_sz; /* Reply frame size */ - int num_chain; /* Number of chain buffers */ -- MPT_ADD_SGE add_sge; /* Pointer to add_sge -- function */ -- MPT_ADD_CHAIN add_chain; /* Pointer to add_chain -- function */ -+ MPT_ADD_SGE add_sge; /* Pointer to add_sge function */ -+ MPT_ADD_CHAIN add_chain; /* Pointer to add_chain function */ - /* Pool of buffers for chaining. ReqToChain - * and ChainToChain track index of chain buffers. - * ChainBuffer (DMA) virt/phys addresses. 
-@@ -653,18 +673,18 @@ typedef struct _MPT_ADAPTER - dma_addr_t sense_buf_pool_dma; - u32 sense_buf_low_dma; - u8 *HostPageBuffer; /* SAS - host page buffer support */ -- u32 HostPageBuffer_sz; -- dma_addr_t HostPageBuffer_dma; -+ u32 HostPageBuffer_sz; -+ dma_addr_t HostPageBuffer_dma; - int mtrr_reg; - struct pci_dev *pcidev; /* struct pci_dev pointer */ -- int bars; /* bitmask of BAR's that must be configured */ -- int msi_enable; -+ int bars; /* bitmask of BAR's that must be configured */ -+ int msi_enable; - u8 __iomem *memmap; /* mmap address */ - struct Scsi_Host *sh; /* Scsi Host pointer */ -- SpiCfgData spi_data; /* Scsi config. data */ -- RaidCfgData raid_data; /* Raid config. data */ -- SasCfgData sas_data; /* Sas config. data */ -- FcCfgData fc_data; /* Fc config. data */ -+ SpiCfgData spi_data; /* Scsi config. data */ -+ RaidCfgData raid_data; /* Raid config. data */ -+ SasCfgData sas_data; /* Sas config. data */ -+ FcCfgData fc_data; /* Fc config. data */ - struct proc_dir_entry *ioc_dentry; - struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ - u32 biosVersion; /* BIOS version from IO Unit Page 2 */ -@@ -673,7 +693,7 @@ typedef struct _MPT_ADAPTER - int eventLogSize; /* Max number of cached events */ - struct _mpt_ioctl_events *events; /* pointer to event log */ - u8 *cached_fw; /* Pointer to FW */ -- dma_addr_t cached_fw_dma; -+ dma_addr_t cached_fw_dma; - int hs_reply_idx; - #ifndef MFCNT - u32 pad0; -@@ -688,8 +708,14 @@ typedef struct _MPT_ADAPTER - FCPortPage0_t fc_port_page0[2]; - LANPage0_t lan_cnfg_page0; - LANPage1_t lan_cnfg_page1; -+#if defined(CPQ_CIM) -+ u32 csmi_change_count; /* count to track all IR -+ events for CSMI */ -+ u8 pci_slot_number; /* ioc page 1 - pci slot number */ -+#endif - - u8 ir_firmware; /* =1 if IR firmware detected */ -+ - /* - * Description: errata_flag_1064 - * If a PCIX read occurs within 1 or 2 cycles after the chip receives -@@ -701,7 +727,6 @@ typedef struct _MPT_ADAPTER - u8 FirstWhoInit; - u8 
upload_fw; /* If set, do a fw upload */ - u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */ -- u8 pad1[4]; - u8 DoneCtx; - u8 TaskCtx; - u8 InternalCtx; -@@ -709,37 +734,39 @@ typedef struct _MPT_ADAPTER - struct net_device *netdev; - struct list_head sas_topology; - struct mutex sas_topology_mutex; -+ u8 disable_hotplug_remove; - -- struct workqueue_struct *fw_event_q; -+ struct workqueue_struct *fw_event_q; - struct list_head fw_event_list; - spinlock_t fw_event_lock; - u8 fw_events_off; /* if '1', then ignore events */ -- char fw_event_q_name[MPT_KOBJ_NAME_LEN]; -- -- struct mutex sas_discovery_mutex; -- u8 sas_discovery_runtime; -- u8 sas_discovery_ignore_events; -+ char fw_event_q_name[MPT_KOBJ_NAME_LEN]; - -- /* port_info object for the host */ -- struct mptsas_portinfo *hba_port_info; -+ struct mptsas_portinfo *hba_port_info; /* port_info object for the host */ - u64 hba_port_sas_addr; - u16 hba_port_num_phy; - struct list_head sas_device_info_list; -- struct mutex sas_device_info_mutex; -+ struct semaphore sas_device_info_mutex; - u8 old_sas_discovery_protocal; - u8 sas_discovery_quiesce_io; - int sas_index; /* index refrencing */ - MPT_MGMT sas_mgmt; -- MPT_MGMT mptbase_cmds; /* for sending config pages */ - MPT_MGMT internal_cmds; -+ MPT_MGMT mptbase_cmds; /* for sending config pages */ - MPT_MGMT taskmgmt_cmds; -- MPT_MGMT ioctl_cmds; -- spinlock_t taskmgmt_lock; /* diagnostic reset lock */ -+ MPT_MGMT ioctl_cmds; /* ioctl data pointer */ -+ spinlock_t taskmgmt_lock; /* diagnostic reset lock */ - int taskmgmt_in_progress; - u8 taskmgmt_quiesce_io; - u8 ioc_reset_in_progress; -- struct work_struct sas_persist_task; -+ MPT_SCHEDULE_TARGET_RESET schedule_target_reset; -+#if defined(CPQ_CIM) -+ u8 num_ports; -+#endif - -+ char reset_work_q_name[MPT_KOBJ_NAME_LEN]; -+ struct workqueue_struct *reset_work_q; -+ struct delayed_work fault_reset_work; - struct work_struct fc_setup_reset_work; - struct list_head fc_rports; - struct work_struct 
fc_lsc_work; -@@ -748,26 +775,32 @@ typedef struct _MPT_ADAPTER - struct work_struct fc_rescan_work; - char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN]; - struct workqueue_struct *fc_rescan_work_q; -- -- /* driver forced bus resets count */ -- unsigned long hard_resets; -- /* fw/external bus resets count */ -- unsigned long soft_resets; -- /* cmd timeouts */ -- unsigned long timeouts; -- -+ unsigned long hard_resets; /* driver forced bus resets count */ -+ unsigned long soft_resets; /* fw/external bus resets count */ -+ unsigned long timeouts; /* cmd timeouts */ - struct scsi_cmnd **ScsiLookup; - spinlock_t scsi_lookup_lock; -- u64 dma_mask; -+ int sdev_queue_depth; /* sdev queue depth */ -+ u64 dma_mask; - u32 broadcast_aen_busy; -- char reset_work_q_name[MPT_KOBJ_NAME_LEN]; -- struct workqueue_struct *reset_work_q; -- struct delayed_work fault_reset_work; -- -+#if defined(DIAG_BUFFER_SUPPORT) -+ u8 *DiagBuffer[MPI_DIAG_BUF_TYPE_COUNT]; -+ u32 DataSize[MPI_DIAG_BUF_TYPE_COUNT]; -+ u32 DiagBuffer_sz[MPI_DIAG_BUF_TYPE_COUNT]; -+ dma_addr_t DiagBuffer_dma[MPI_DIAG_BUF_TYPE_COUNT]; -+ u8 TraceLevel[MPI_DIAG_BUF_TYPE_COUNT]; -+ u8 DiagBuffer_Status[MPI_DIAG_BUF_TYPE_COUNT]; -+ u32 UniqueId[MPI_DIAG_BUF_TYPE_COUNT]; -+ u32 ExtendedType[MPI_DIAG_BUF_TYPE_COUNT]; -+ u32 ProductSpecific[MPI_DIAG_BUF_TYPE_COUNT][4]; -+#endif - u8 sg_addr_size; -- u8 in_rescan; - u8 SGE_size; -- -+ u8 in_rescan; -+ /* diag buffer bits for sysfs */ -+ u8 is_fault; -+ u32 ring_buffer_offset; -+ u32 ring_buffer_sz; - } MPT_ADAPTER; - - /* -@@ -804,11 +837,9 @@ typedef struct _mpt_sge { - dma_addr_t Address; - } MptSge_t; - -- - #define mpt_msg_flags(ioc) \ -- (ioc->sg_addr_size == sizeof(u64)) ? \ -- MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \ -- MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 -+ (ioc->sg_addr_size == sizeof(u64)) ? 
MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \ -+ MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 - - #define MPT_SGE_FLAGS_64_BIT_ADDRESSING \ - (MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT) -@@ -830,26 +861,10 @@ typedef struct _mpt_sge { - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - --#define SCSI_STD_SENSE_BYTES 18 --#define SCSI_STD_INQUIRY_BYTES 36 --#define SCSI_MAX_INQUIRY_BYTES 96 -- - /* - * MPT_SCSI_HOST defines - Used by the IOCTL and the SCSI drivers - * Private to the driver. - */ --/* LOCAL structure and fields used when processing -- * internally generated commands. These include: -- * bus scan, dv and config requests. -- */ --typedef struct _MPT_LOCAL_REPLY { -- ConfigPageHeader_t header; -- int completion; -- u8 sense[SCSI_STD_SENSE_BYTES]; -- u8 scsiStatus; -- u8 skip; -- u32 pad; --} MPT_LOCAL_REPLY; - - #define MPT_HOST_BUS_UNKNOWN (0xFF) - #define MPT_HOST_TOO_MANY_TM (0x05) -@@ -865,13 +880,6 @@ typedef struct _MPT_LOCAL_REPLY { - #define MPT_NVRAM_WIDE_DISABLE (0x00100000) - #define MPT_NVRAM_BOOT_CHOICE (0x00200000) - --/* The TM_STATE variable is used to provide strict single threading of TM -- * requests as well as communicate TM error conditions. -- */ --#define TM_STATE_NONE (0) --#define TM_STATE_IN_PROGRESS (1) --#define TM_STATE_ERROR (2) -- - typedef enum { - FC, - SPI, -@@ -881,7 +889,7 @@ typedef enum { - typedef struct _MPT_SCSI_HOST { - MPT_ADAPTER *ioc; - ushort sel_timeout[MPT_MAX_FC_DEVICES]; -- char *info_kbuf; -+ char *info_kbuf; - long last_queue_full; - u16 spi_pending; - struct list_head target_reset_list; -@@ -889,14 +897,6 @@ typedef struct _MPT_SCSI_HOST { - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /* -- * More Dynamic Multi-Pathing stuff... 
-- */ -- --/* Forward decl, a strange C thing, to prevent gcc compiler warnings */ --struct scsi_cmnd; -- --/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ --/* - * Generic structure passed to the base mpt_config function. - */ - typedef struct _x_config_parms { -@@ -934,37 +934,37 @@ extern MPT_FRAME_HDR *mpt_get_msg_frame( - extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); - extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); - extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); -- - extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag); - extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp); - extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked); - extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan); - extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); -+extern int mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag); - extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg); - extern int mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size); - extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); - extern int mpt_findImVolumes(MPT_ADAPTER *ioc); - extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); - extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk); --extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num, -- pRaidPhysDiskPage1_t phys_disk); --extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, -- u8 phys_disk_num); -+extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage1_t phys_disk); -+extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num); -+ - extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); - extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER 
*ioc); --extern void mpt_halt_firmware(MPT_ADAPTER *ioc); -- -+extern void mpt_halt_firmware(MPT_ADAPTER *ioc); - - /* - * Public data decl's... - */ - extern struct list_head ioc_list; -+extern struct proc_dir_entry *mpt_proc_root_dir; -+extern int mpt_debug_level; - extern int mpt_fwfault_debug; - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - #endif /* } __KERNEL__ */ - --#ifdef CONFIG_64BIT -+#if defined(__alpha__) || defined(__sparc_v9__) || defined(__ia64__) || defined(__x86_64__) || defined(__powerpc64__) - #define CAST_U32_TO_PTR(x) ((void *)(u64)x) - #define CAST_PTR_TO_U32(x) ((u32)(u64)x) - #else ---- a/drivers/message/fusion/mptctl.c -+++ b/drivers/message/fusion/mptctl.c -@@ -71,6 +71,15 @@ - #include "mptbase.h" - #include "mptctl.h" - -+#if defined(CPQ_CIM) -+#include "mptsas.h" -+#include "csmi/csmisas.h" -+#endif // CPQ_CIM -+ -+#if defined(DIAG_BUFFER_SUPPORT) -+#include "rejected_ioctls/diag_buffer.h" -+#endif -+ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - #define my_NAME "Fusion MPT misc device (ioctl) driver" - #define my_VERSION MPT_LINUX_VERSION_COMMON -@@ -113,6 +122,42 @@ static int mptctl_do_reset(unsigned long - static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd); - static int mptctl_hp_targetinfo(unsigned long arg); - -+#if defined(CPQ_CIM) -+/* csmisas proto's*/ -+static int csmisas_get_driver_info(unsigned long arg); -+static int csmisas_get_cntlr_status(unsigned long arg); -+static int csmisas_get_cntlr_config(unsigned long arg); -+static int csmisas_get_phy_info(unsigned long arg); -+static int csmisas_get_scsi_address(unsigned long arg); -+static int csmisas_get_link_errors(unsigned long arg); -+static int csmisas_smp_passthru(unsigned long arg); -+static int csmisas_firmware_download(unsigned long arg); -+static int csmisas_get_raid_info(unsigned long arg); -+static int csmisas_get_raid_config(unsigned long arg); -+static int 
csmisas_get_raid_features(unsigned long arg); -+static int csmisas_set_raid_control(unsigned long arg); -+static int csmisas_get_raid_element(unsigned long arg); -+static int csmisas_set_raid_operation(unsigned long arg); -+static int csmisas_set_phy_info(unsigned long arg); -+static int csmisas_ssp_passthru(unsigned long arg); -+static int csmisas_stp_passthru(unsigned long arg); -+static int csmisas_get_sata_signature(unsigned long arg); -+static int csmisas_get_device_address(unsigned long arg); -+static int csmisas_task_managment(unsigned long arg); -+static int csmisas_phy_control(unsigned long arg); -+static int csmisas_get_connector_info(unsigned long arg); -+static int csmisas_get_location(unsigned long arg); -+#endif // CPQ_CIM -+ -+#if defined(DIAG_BUFFER_SUPPORT) -+/* diag_buffer proto's */ -+static int mptctl_register_diag_buffer(unsigned long arg); -+static int mptctl_release_diag_buffer(unsigned long arg); -+static int mptctl_unregister_diag_buffer(unsigned long arg); -+static int mptctl_query_diag_buffer(unsigned long arg); -+static int mptctl_read_diag_buffer(unsigned long arg); -+#endif // DIAG_BUFFER_SUPPORT -+ - static int mptctl_probe(struct pci_dev *, const struct pci_device_id *); - static void mptctl_remove(struct pci_dev *); - -@@ -128,7 +173,6 @@ static MptSge_t *kbuf_alloc_2_sgl(int by - struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); - static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, - struct buflist *buflist, MPT_ADAPTER *ioc); --static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function); - - /* - * Reset Handler cleanup function -@@ -234,8 +278,7 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME - le32_to_cpu(reply->u.reply.IOCLogInfo))); - - if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) || -- (req->u.hdr.Function == -- MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { -+ (req->u.hdr.Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { - - if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState) - 
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT -@@ -246,8 +289,7 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME - le16_to_cpu(reply->u.sreply.TaskTag), - le32_to_cpu(reply->u.sreply.TransferCount))); - -- if (reply->u.sreply.SCSIState & -- MPI_SCSI_STATE_AUTOSENSE_VALID) { -+ if (reply->u.sreply.SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { - sz = req->u.scsireq.SenseBufferLength; - req_index = - le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); -@@ -262,10 +304,16 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME - /* We are done, issue wake up - */ - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { -- if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) -+ if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) { - mpt_clear_taskmgmt_in_progress_flag(ioc); -- ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; -- complete(&ioc->ioctl_cmds.done); -+ ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; -+ complete(&ioc->ioctl_cmds.done); -+ if (ioc->bus_type == SAS) -+ ioc->schedule_target_reset(ioc); -+ } else { -+ ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; -+ complete(&ioc->ioctl_cmds.done); -+ } - } - - out_continuation: -@@ -275,55 +323,14 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME - return 1; - } - --/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ --/* mptctl_timeout_expired -- * -- * Expecting an interrupt, however timed out. 
-- * -- */ --static void --mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) --{ -- unsigned long flags; -- -- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n", -- ioc->name, __func__)); -- -- if (mpt_fwfault_debug) -- mpt_halt_firmware(ioc); -- -- spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -- if (ioc->ioc_reset_in_progress) { -- spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -- CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) -- mpt_free_msg_frame(ioc, mf); -- return; -- } -- spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -- -- -- if (!mptctl_bus_reset(ioc, mf->u.hdr.Function)) -- return; -- -- /* Issue a reset for this device. -- * The IOC is not responding. -- */ -- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", -- ioc->name)); -- CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) -- mpt_HardResetHandler(ioc, CAN_SLEEP); -- mpt_free_msg_frame(ioc, mf); --} -- - static int - mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) - { - if (!mf) - return 0; - -- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "TaskMgmt completed (mf=%p, mr=%p)\n", -- ioc->name, mf, mr)); -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed (mf=%p, mr=%p)\n", -+ ioc->name, mf, mr)); - - ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; - -@@ -338,17 +345,15 @@ mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, - mpt_clear_taskmgmt_in_progress_flag(ioc); - ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; - complete(&ioc->taskmgmt_cmds.done); -+ if (ioc->bus_type == SAS) -+ ioc->schedule_target_reset(ioc); - return 1; - } - return 0; - } - --/* mptctl_bus_reset -- * -- * Bus reset code. 
-- * -- */ --static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function) -+static int -+mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id) - { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; -@@ -359,13 +364,6 @@ static int mptctl_bus_reset(MPT_ADAPTER - unsigned long time_count; - u16 iocstatus; - -- /* bus reset is only good for SCSI IO, RAID PASSTHRU */ -- if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) || -- (function == MPI_FUNCTION_SCSI_IO_REQUEST)) { -- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT -- "TaskMgmt, not SCSI_IO!!\n", ioc->name)); -- return -EPERM; -- } - - mutex_lock(&ioc->taskmgmt_cmds.mutex); - if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { -@@ -375,15 +373,13 @@ static int mptctl_bus_reset(MPT_ADAPTER - - retval = 0; - -- /* Send request -- */ -- mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc); -- if (mf == NULL) { -- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT -- "TaskMgmt, no msg frames!!\n", ioc->name)); -+ if ((mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc)) == NULL) { -+ dtmprintk(ioc, -+ printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n", -+ ioc->name)); - mpt_clear_taskmgmt_in_progress_flag(ioc); - retval = -ENOMEM; -- goto mptctl_bus_reset_done; -+ goto tm_done; - } - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", -@@ -392,10 +388,13 @@ static int mptctl_bus_reset(MPT_ADAPTER - pScsiTm = (SCSITaskMgmt_t *) mf; - memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t)); - pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; -- pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; -- pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; -- pScsiTm->TargetID = 0; -- pScsiTm->Bus = 0; -+ pScsiTm->TaskType = tm_type; -+ if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) && -+ (ioc->bus_type == FC)) -+ pScsiTm->MsgFlags = -+ MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; -+ pScsiTm->TargetID = target_id; -+ pScsiTm->Bus = bus_id; - pScsiTm->ChainOffset = 0; - pScsiTm->Reserved = 0; - 
pScsiTm->Reserved1 = 0; -@@ -406,43 +405,45 @@ static int mptctl_bus_reset(MPT_ADAPTER - pScsiTm->Reserved2[ii] = 0; - - switch (ioc->bus_type) { -- case FC: -- timeout = 40; -- break; -- case SAS: -- timeout = 30; -- break; -- case SPI: -- default: -- timeout = 2; -- break; -+ case FC: -+ timeout = 40; -+ break; -+ case SAS: -+ timeout = 30; -+ break; -+ case SPI: -+ default: -+ timeout = 10; -+ break; - } - -- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "TaskMgmt type=%d timeout=%ld\n", -- ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout)); -+ dtmprintk(ioc, -+ printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n", -+ ioc->name, tm_type, timeout)); - - INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status) -- CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) - time_count = jiffies; - if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && - (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) - mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf); - else { - retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc, -- sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP); -+ sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); - if (retval != 0) { -- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT -+ dfailprintk(ioc, -+ printk(MYIOC_s_ERR_FMT - "TaskMgmt send_handshake FAILED!" 
- " (ioc %p, mf %p, rc=%d) \n", ioc->name, - ioc, mf, retval)); -+ mpt_free_msg_frame(ioc, mf); - mpt_clear_taskmgmt_in_progress_flag(ioc); -- goto mptctl_bus_reset_done; -+ goto tm_done; - } - } - - /* Now wait for the command to complete */ - ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ); -+ - if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt failed\n", ioc->name)); -@@ -452,14 +453,14 @@ static int mptctl_bus_reset(MPT_ADAPTER - retval = 0; - else - retval = -1; /* return failure */ -- goto mptctl_bus_reset_done; -+ goto tm_done; - } - - if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt failed\n", ioc->name)); - retval = -1; /* return failure */ -- goto mptctl_bus_reset_done; -+ goto tm_done; - } - - pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply; -@@ -467,7 +468,7 @@ static int mptctl_bus_reset(MPT_ADAPTER - "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, " - "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, " - "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus, -- pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, -+ pScsiTmReply->TargetID, tm_type, - le16_to_cpu(pScsiTmReply->IOCStatus), - le32_to_cpu(pScsiTmReply->IOCLogInfo), - pScsiTmReply->ResponseCode, -@@ -485,14 +486,67 @@ static int mptctl_bus_reset(MPT_ADAPTER - retval = -1; /* return failure */ - } - -- -- mptctl_bus_reset_done: -+ tm_done: - mutex_unlock(&ioc->taskmgmt_cmds.mutex); - CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) - return retval; - } - - -+static void -+mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) -+{ -+ unsigned long flags; -+ int ret_val = -1; -+ SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf; -+ u8 function = mf->u.hdr.Function; -+ -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n", -+ ioc->name, __FUNCTION__)); -+ -+ if(mpt_fwfault_debug) -+ 
mpt_halt_firmware(ioc); -+ -+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -+ if (ioc->ioc_reset_in_progress) { -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) -+ mpt_free_msg_frame(ioc, mf); -+ return; -+ } -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -+ -+ -+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) -+ -+ if (ioc->bus_type == SAS) { -+ if (function == MPI_FUNCTION_SCSI_IO_REQUEST) -+ ret_val = mptctl_do_taskmgmt(ioc, -+ MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, -+ scsi_req->Bus, scsi_req->TargetID); -+ else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) -+ ret_val = mptctl_do_taskmgmt(ioc, -+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, -+ scsi_req->Bus, 0); -+ if (!ret_val) -+ return; -+ } else { -+ if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) || -+ (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) -+ ret_val = mptctl_do_taskmgmt(ioc, -+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, -+ scsi_req->Bus, 0); -+ if (!ret_val) -+ return; -+ } -+ -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! 
\n", -+ ioc->name)); -+ if (mpt_SoftResetHandler(ioc, CAN_SLEEP) != 0) -+ mpt_HardResetHandler(ioc, CAN_SLEEP); -+ mpt_free_msg_frame(ioc, mf); -+} -+ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /* mptctl_ioc_reset - * -@@ -580,15 +634,17 @@ static int - mptctl_fasync(int fd, struct file *filep, int mode) - { - MPT_ADAPTER *ioc; -- int ret; - -- lock_kernel(); - list_for_each_entry(ioc, &ioc_list, list) - ioc->aen_event_read_flag=0; - -- ret = fasync_helper(fd, filep, mode, &async_queue); -- unlock_kernel(); -- return ret; -+ return fasync_helper(fd, filep, mode, &async_queue); -+} -+ -+static int -+mptctl_release(struct inode *inode, struct file *filep) -+{ -+ return fasync_helper(-1, filep, 0, &async_queue); - } - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -@@ -608,6 +664,7 @@ __mptctl_ioctl(struct file *file, unsign - int ret; - MPT_ADAPTER *iocp = NULL; - -+ - if (copy_from_user(&khdr, uhdr, sizeof(khdr))) { - printk(KERN_ERR MYNAM "%s::mptctl_ioctl() @%d - " - "Unable to copy mpt_ioctl_header data @ %p\n", -@@ -624,12 +681,6 @@ __mptctl_ioctl(struct file *file, unsign - (iocp == NULL)) - return -ENODEV; - -- if (!iocp->active) { -- printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n", -- __FILE__, __LINE__); -- return -EFAULT; -- } -- - /* Handle those commands that are just returning - * information stored in the driver. 
- * These commands should never time out and are unaffected -@@ -649,6 +700,25 @@ __mptctl_ioctl(struct file *file, unsign - return mptctl_eventreport(arg); - } else if (cmd == MPTFWREPLACE) { - return mptctl_replace_fw(arg); -+#if defined(DIAG_BUFFER_SUPPORT) -+/* diag_buffer static data calls*/ -+ } else if (cmd == MPTDIAGQUERY) { -+ return mptctl_query_diag_buffer(arg); -+ } else if (cmd == MPTDIAGUNREGISTER) { -+ return mptctl_unregister_diag_buffer(arg); -+#endif -+ -+#if defined(CPQ_CIM) -+/* csmisas static data calls*/ -+ } else if (cmd == CC_CSMI_SAS_GET_DRIVER_INFO) { -+ return csmisas_get_driver_info(arg); -+ } else if (cmd == CC_CSMI_SAS_GET_CNTLR_STATUS) { -+ return csmisas_get_cntlr_status(arg); -+ } else if (cmd == CC_CSMI_SAS_GET_SCSI_ADDRESS) { -+ return csmisas_get_scsi_address(arg); -+ } else if (cmd == CC_CSMI_SAS_GET_DEVICE_ADDRESS){ -+ return csmisas_get_device_address(arg); -+#endif // CPQ_CIM - } - - /* All of these commands require an interrupt or -@@ -657,6 +727,8 @@ __mptctl_ioctl(struct file *file, unsign - if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) - return ret; - -+// dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT ": mptctl_ioctl()\n", iocp->name)); -+ - if (cmd == MPTFWDOWNLOAD) - ret = mptctl_fw_download(arg); - else if (cmd == MPTCOMMAND) -@@ -667,6 +739,57 @@ __mptctl_ioctl(struct file *file, unsign - ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd)); - else if (cmd == HP_GETTARGETINFO) - ret = mptctl_hp_targetinfo(arg); -+#if defined(CPQ_CIM) -+/* csmisas requiring fw calls*/ -+ else if (cmd == CC_CSMI_SAS_GET_CNTLR_CONFIG) -+ ret = csmisas_get_cntlr_config(arg); -+ else if (cmd == CC_CSMI_SAS_GET_PHY_INFO) -+ ret = csmisas_get_phy_info(arg); -+ else if (cmd == CC_CSMI_SAS_GET_SATA_SIGNATURE) -+ ret = csmisas_get_sata_signature(arg); -+ else if (cmd == CC_CSMI_SAS_GET_LINK_ERRORS) -+ ret = csmisas_get_link_errors(arg); -+ else if (cmd == CC_CSMI_SAS_SMP_PASSTHRU) -+ ret = csmisas_smp_passthru(arg); -+ else if (cmd == 
CC_CSMI_SAS_SSP_PASSTHRU) -+ ret = csmisas_ssp_passthru(arg); -+ else if (cmd == CC_CSMI_SAS_FIRMWARE_DOWNLOAD) -+ ret = csmisas_firmware_download(arg); -+ else if (cmd == CC_CSMI_SAS_GET_RAID_INFO) -+ ret = csmisas_get_raid_info(arg); -+ else if (cmd == CC_CSMI_SAS_GET_RAID_CONFIG) -+ ret = csmisas_get_raid_config(arg); -+ else if (cmd == CC_CSMI_SAS_GET_RAID_FEATURES) -+ ret = csmisas_get_raid_features(arg); -+ else if (cmd == CC_CSMI_SAS_SET_RAID_CONTROL) -+ ret = csmisas_set_raid_control(arg); -+ else if (cmd == CC_CSMI_SAS_GET_RAID_ELEMENT) -+ ret = csmisas_get_raid_element(arg); -+ else if (cmd == CC_CSMI_SAS_SET_RAID_OPERATION) -+ ret = csmisas_set_raid_operation(arg); -+ else if (cmd == CC_CSMI_SAS_SET_PHY_INFO) -+ ret = csmisas_set_phy_info(arg); -+ else if (cmd == CC_CSMI_SAS_STP_PASSTHRU) -+ ret = csmisas_stp_passthru(arg); -+ else if (cmd == CC_CSMI_SAS_TASK_MANAGEMENT) -+ ret = csmisas_task_managment(arg); -+ else if (cmd == CC_CSMI_SAS_PHY_CONTROL) -+ ret = csmisas_phy_control(arg); -+ else if (cmd == CC_CSMI_SAS_GET_CONNECTOR_INFO) -+ ret = csmisas_get_connector_info(arg); -+ else if (cmd == CC_CSMI_SAS_GET_LOCATION) -+ ret = csmisas_get_location(arg); -+#endif // CPQ_CIM -+ -+#if defined(DIAG_BUFFER_SUPPORT) -+/* diag_buffer requiring fw calls*/ -+ else if (cmd == MPTDIAGREGISTER) -+ ret = mptctl_register_diag_buffer(arg); -+ else if (cmd == MPTDIAGRELEASE) -+ ret = mptctl_release_diag_buffer(arg); -+ else if (cmd == MPTDIAGREADBUFFER) -+ ret = mptctl_read_diag_buffer(arg); -+#endif // DIAG_BUFFER_SUPPORT - else - ret = -EINVAL; - -@@ -699,6 +822,7 @@ static int mptctl_do_reset(unsigned long - } - - if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n", - __FILE__, __LINE__, krinfo.hdr.iocnum); - return -ENODEV; /* (-6) No such device or address */ -@@ -789,13 +913,13 @@ mptctl_do_fw_download(int ioc, char __us - unsigned long 
timeleft; - - if (mpt_verify_adapter(ioc, &iocp) < 0) { -- printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", -- ioc); -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) -+ printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", ioc); - return -ENODEV; /* (-6) No such device or address */ - } else { - - /* Valid device. Get a message frame and construct the FW download message. -- */ -+ */ - if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) - return -EAGAIN; - } -@@ -870,8 +994,7 @@ mptctl_do_fw_download(int ioc, char __us - * 96 8 - * 64 4 - */ -- maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - -- sizeof(FWDownloadTCSGE_t)) -+ maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) - / iocp->SGE_size; - if (numfrags > maxfrags) { - ret = -EMLINK; -@@ -904,8 +1027,8 @@ mptctl_do_fw_download(int ioc, char __us - n++; - if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { - printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " -- "Unable to copy f/w buffer hunk#%d @ %p\n", -- iocp->name, __FILE__, __LINE__, n, ufwbuf); -+ "Unable to copy f/w buffer hunk#%d @ %p\n", -+ iocp->name, __FILE__, __LINE__, n, ufwbuf); - goto fwdl_out; - } - fw_bytes_copied += bl->len; -@@ -930,7 +1053,7 @@ retry_wait: - timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60); - if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - ret = -ETIME; -- printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); -+ printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __FUNCTION__); - if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { - mpt_free_msg_frame(iocp, mf); - goto fwdl_out; -@@ -943,7 +1066,7 @@ retry_wait: - } - - if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { -- printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); -+ printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __FUNCTION__); - mpt_free_msg_frame(iocp, mf); - ret = -ENODATA; - goto fwdl_out; -@@ -955,21 +1078,21 @@ retry_wait: - 
ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply; - iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; - if (iocstat == MPI_IOCSTATUS_SUCCESS) { -- printk(MYIOC_s_INFO_FMT "F/W update successfull!\n", iocp->name); -+ printk(MYIOC_s_INFO_FMT ": F/W update successfully sent!\n", iocp->name); - return 0; - } else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) { -- printk(MYIOC_s_WARN_FMT "Hmmm... F/W download not supported!?!\n", -- iocp->name); -+ printk(MYIOC_s_WARN_FMT "Hmmm... doesn't support F/W download?\n", -+ iocp->name); - printk(MYIOC_s_WARN_FMT "(time to go bang on somebodies door)\n", -- iocp->name); -+ iocp->name); - return -EBADRQC; - } else if (iocstat == MPI_IOCSTATUS_BUSY) { - printk(MYIOC_s_WARN_FMT "IOC_BUSY!\n", iocp->name); - printk(MYIOC_s_WARN_FMT "(try again later?)\n", iocp->name); - return -EBUSY; - } else { -- printk(MYIOC_s_WARN_FMT "ioctl_fwdl() returned [bad] status = %04xh\n", -- iocp->name, iocstat); -+ printk(MYIOC_s_WARN_FMT "returned [bad] status = %04xh\n", -+ iocp->name, iocstat); - printk(MYIOC_s_WARN_FMT "(bad VooDoo)\n", iocp->name); - return -ENOMSG; - } -@@ -979,7 +1102,7 @@ fwdl_out: - - CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status); - SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0); -- kfree_sgl(sgl, sgl_dma, buflist, iocp); -+ kfree_sgl(sgl, sgl_dma, buflist, iocp); - return ret; - } - -@@ -1061,9 +1184,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, i - alloc_sz = alloc_sz / 2; - if (alloc_sz == 0) { - printk(MYIOC_s_WARN_FMT "-SG: No can do - " -- "not enough memory! :-(\n", ioc->name); -+ "not enough memory! 
:-(\n", ioc->name); - printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n", -- ioc->name, numfrags); -+ ioc->name, numfrags); - goto free_and_fail; - } - continue; -@@ -1072,8 +1195,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, i - - bytes_allocd += this_alloc; - sgl->FlagsLength = (0x10000000|sgdir|this_alloc); -- dma_addr = pci_map_single(ioc->pcidev, -- buflist[buflist_ent].kptr, this_alloc, dir); -+ if (ioc->sg_addr_size == sizeof(u64)) -+ sgl->FlagsLength |= MPT_SGE_FLAGS_64_BIT_ADDRESSING; -+ dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir); - sgl->Address = dma_addr; - - fragcnt++; -@@ -1087,8 +1211,8 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, i - - /* Need to chain? */ - if (fragcnt == sg_spill) { -- printk(MYIOC_s_WARN_FMT -- "-SG: No can do - " "Chain required! :-(\n", ioc->name); -+ printk(MYIOC_s_WARN_FMT "-SG: No can do - " -+ "Chain required! :-(\n", ioc->name); - printk(MYIOC_s_WARN_FMT "(freeing %d frags)\n", ioc->name, numfrags); - goto free_and_fail; - } -@@ -1097,9 +1221,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, i - if (numfrags*8 > MAX_SGL_BYTES){ - /* GRRRRR... */ - printk(MYIOC_s_WARN_FMT "-SG: No can do - " -- "too many SG frags! :-(\n", ioc->name); -+ "too many SG frags! 
:-(\n", ioc->name); - printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n", -- ioc->name, numfrags); -+ ioc->name, numfrags); - goto free_and_fail; - } - } -@@ -1221,7 +1345,7 @@ mptctl_getiocinfo (unsigned long arg, un - unsigned int port; - int cim_rev; - u8 revision; -- struct scsi_device *sdev; -+ struct scsi_device *sdev; - VirtDevice *vdevice; - - /* Add of PCI INFO results in unaligned access for -@@ -1256,6 +1380,7 @@ mptctl_getiocinfo (unsigned long arg, un - - if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - kfree(karg); -@@ -1265,8 +1390,8 @@ mptctl_getiocinfo (unsigned long arg, un - /* Verify the data transfer size is correct. */ - if (karg->hdr.maxDataSize != data_size) { - printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " -- "Structure size mismatch. Command not completed.\n", -- ioc->name, __FILE__, __LINE__); -+ "Structure size mismatch. 
Command not completed.\n", -+ ioc->name, __FILE__, __LINE__); - kfree(karg); - return -EFAULT; - } -@@ -1318,6 +1443,8 @@ mptctl_getiocinfo (unsigned long arg, un - if (ioc->sh) { - shost_for_each_device(sdev, ioc->sh) { - vdevice = sdev->hostdata; -+ if (vdevice == NULL || vdevice->vtarget == NULL) -+ continue; - if (vdevice->vtarget->tflags & - MPT_TARGET_FLAGS_RAID_COMPONENT) - continue; -@@ -1378,7 +1505,7 @@ mptctl_gettargetinfo (unsigned long arg) - int maxWordsLeft; - int numBytes; - u8 port; -- struct scsi_device *sdev; -+ struct scsi_device *sdev; - - if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) { - printk(KERN_ERR MYNAM "%s@%d::mptctl_gettargetinfo - " -@@ -1389,6 +1516,7 @@ mptctl_gettargetinfo (unsigned long arg) - - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; -@@ -1405,8 +1533,8 @@ mptctl_gettargetinfo (unsigned long arg) - port = karg.hdr.port; - - if (maxWordsLeft <= 0) { -- printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n", -- ioc->name, __FILE__, __LINE__); -+ printk(MYIOC_s_ERR_FMT "%s::mptctl_gettargetinfo() @%d - no memory available!\n", -+ ioc->name, __FILE__, __LINE__); - return -ENOMEM; - } - -@@ -1426,8 +1554,8 @@ mptctl_gettargetinfo (unsigned long arg) - */ - pmem = kzalloc(numBytes, GFP_KERNEL); - if (!pmem) { -- printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n", -- ioc->name, __FILE__, __LINE__); -+ printk(MYIOC_s_ERR_FMT "%s::mptctl_gettargetinfo() @%d - no memory available!\n", -+ ioc->name, __FILE__, __LINE__); - return -ENOMEM; - } - pdata = (int *) pmem; -@@ -1439,6 +1567,8 @@ mptctl_gettargetinfo (unsigned long arg) - if (!maxWordsLeft) - continue; - vdevice = sdev->hostdata; -+ if (vdevice == NULL || vdevice->vtarget == NULL) -+ continue; 
- if (vdevice->vtarget->tflags & - MPT_TARGET_FLAGS_RAID_COMPONENT) - continue; -@@ -1503,6 +1633,7 @@ mptctl_readtest (unsigned long arg) - - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; -@@ -1564,6 +1695,7 @@ mptctl_eventquery (unsigned long arg) - - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; -@@ -1603,6 +1735,7 @@ mptctl_eventenable (unsigned long arg) - - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; -@@ -1616,15 +1749,14 @@ mptctl_eventenable (unsigned long arg) - int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS); - ioc->events = kzalloc(sz, GFP_KERNEL); - if (!ioc->events) { -- printk(MYIOC_s_ERR_FMT -- ": ERROR - Insufficient memory to add adapter!\n", -+ printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n", - ioc->name); - return -ENOMEM; - } - ioc->alloc_total += sz; - - ioc->eventContext = 0; -- } -+ } - - /* Update the IOC event logging flag. 
- */ -@@ -1652,13 +1784,14 @@ mptctl_eventreport (unsigned long arg) - - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } -+ - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n", - ioc->name)); -- - numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); - maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); - -@@ -1706,6 +1839,7 @@ mptctl_replace_fw (unsigned long arg) - - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; -@@ -1737,8 +1871,8 @@ mptctl_replace_fw (unsigned long arg) - */ - if (copy_from_user(ioc->cached_fw, uarg->newImage, newFwSize)) { - printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_replace_fw - " -- "Unable to read in mpt_ioctl_replace_fw image " -- "@ %p\n", ioc->name, __FILE__, __LINE__, uarg); -+ "Unable to read in mpt_ioctl_replace_fw image " -+ "@ %p\n", ioc->name, __FILE__, __LINE__, uarg); - mpt_free_fw_memory(ioc); - return -EFAULT; - } -@@ -1755,7 +1889,7 @@ mptctl_replace_fw (unsigned long arg) - * - * Outputs: None. - * Return: 0 if successful -- * -EBUSY if previous command timeout and IOC reset is not complete. -+ * -EBUSY if previous command timout and IOC reset is not complete. 
- * -EFAULT if data unavailable - * -ENODEV if no such device/adapter - * -ETIME if timer expires -@@ -1780,6 +1914,7 @@ mptctl_mpt_command (unsigned long arg) - - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; -@@ -1795,7 +1930,7 @@ mptctl_mpt_command (unsigned long arg) - * - * Outputs: None. - * Return: 0 if successful -- * -EBUSY if previous command timeout and IOC reset is not complete. -+ * -EBUSY if previous command timout and IOC reset is not complete. - * -EFAULT if data unavailable - * -ENODEV if no such device/adapter - * -ETIME if timer expires -@@ -1818,7 +1953,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - int sz, rc = 0; - int msgContext; - u16 req_idx; -- ulong timeout; -+ unsigned long timeout; - unsigned long timeleft; - struct scsi_device *sdev; - unsigned long flags; -@@ -1831,6 +1966,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; -@@ -1862,8 +1998,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - - /* Get a free request frame and save the message context. - */ -- if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) -- return -EAGAIN; -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) -+ return -EAGAIN; - - hdr = (MPIHeader_t *) mf; - msgContext = le32_to_cpu(hdr->MsgContext); -@@ -1884,11 +2020,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - hdr->MsgContext = cpu_to_le32(msgContext); - function = hdr->Function; - -- - /* Verify that this request is allowed. 
- */ - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n", -- ioc->name, hdr->Function, mf)); -+ ioc->name, function, mf)); - - switch (function) { - case MPI_FUNCTION_IOC_FACTS: -@@ -1967,6 +2102,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - struct scsi_target *starget = scsi_target(sdev); - VirtTarget *vtarget = starget->hostdata; - -+ if (vtarget == NULL) -+ continue; - if ((pScsiReq->TargetID == vtarget->id) && - (pScsiReq->Bus == vtarget->channel) && - (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)) -@@ -1987,7 +2124,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - pScsiReq->Control = cpu_to_le32(scsidir | qtag); - pScsiReq->DataLength = cpu_to_le32(dataSize); - -- - } else { - printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " - "SCSI driver is not loaded. \n", -@@ -2000,7 +2136,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - case MPI_FUNCTION_SMP_PASSTHROUGH: - /* Check mf->PassthruFlags to determine if - * transfer is ImmediateMode or not. -- * Immediate mode returns data in the ReplyFrame. -+ * Immediate mode returns data in the reply. - * Else, we are sending request and response data - * in two SGLs at the end of the mf. - */ -@@ -2077,12 +2213,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - { - SCSITaskMgmt_t *pScsiTm; - pScsiTm = (SCSITaskMgmt_t *)mf; -- dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "\tTaskType=0x%x MsgFlags=0x%x " -- "TaskMsgContext=0x%x id=%d channel=%d\n", -- ioc->name, pScsiTm->TaskType, le32_to_cpu -- (pScsiTm->TaskMsgContext), pScsiTm->MsgFlags, -- pScsiTm->TargetID, pScsiTm->Bus)); -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tTaskType=0x%x MsgFlags=0x%x " -+ "TaskMsgContext=0x%x id=%d channel=%d\n", ioc->name, pScsiTm->TaskType, -+ le32_to_cpu(pScsiTm->TaskMsgContext), pScsiTm->MsgFlags, -+ pScsiTm->TargetID, pScsiTm->Bus)); - break; - } - -@@ -2094,7 +2228,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - /* Verify that all entries in the IOC INIT match - * existing setup (and in LE format). 
- */ -- if (sizeof(dma_addr_t) == sizeof(u64)) { -+ if (ioc->sg_addr_size == sizeof(u64)) { - high_addr = cpu_to_le32((u32)((u64)ioc->req_frames_dma >> 32)); - sense_high= cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32)); - } else { -@@ -2102,6 +2236,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - sense_high= 0; - } - -+ if (!pInit->MaxDevices && !pInit->MaxBuses) { -+ pInit->MaxDevices = ioc->facts.MaxDevices; -+ pInit->MaxBuses = ioc->facts.MaxBuses; -+ } -+ - if ((pInit->Flags != 0) || (pInit->MaxDevices != ioc->facts.MaxDevices) || - (pInit->MaxBuses != ioc->facts.MaxBuses) || - (pInit->ReplyFrameSize != cpu_to_le16(ioc->reply_sz)) || -@@ -2137,12 +2276,12 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - MPI_FUNCTION_FC_ABORT - MPI_FUNCTION_LAN_SEND - MPI_FUNCTION_LAN_RECEIVE -- MPI_FUNCTION_LAN_RESET -+ MPI_FUNCTION_LAN_RESET - */ - - printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " - "Illegal request (function 0x%x) \n", -- ioc->name, __FILE__, __LINE__, hdr->Function); -+ ioc->name, __FILE__, __LINE__, function); - rc = -EFAULT; - goto done_free_mem; - } -@@ -2168,7 +2307,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - if (karg.dataInSize > 0) { - flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | - MPI_SGE_FLAGS_END_OF_BUFFER | -- MPI_SGE_FLAGS_DIRECTION) -+ MPI_SGE_FLAGS_DIRECTION ) - << MPI_SGE_FLAGS_SHIFT; - } else { - flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; -@@ -2230,7 +2369,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - - SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext); - INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) -- if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { -+ if (function == MPI_FUNCTION_SCSI_TASK_MGMT) { - - mutex_lock(&ioc->taskmgmt_cmds.mutex); - if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { -@@ -2244,8 +2383,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) - mpt_put_msg_frame_hi_pri(mptctl_id, ioc, mf); - else { -- rc 
=mpt_send_handshake_request(mptctl_id, ioc, -- sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); -+ rc = mpt_send_handshake_request(mptctl_id, ioc, -+ sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); - if (rc != 0) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "send_handshake FAILED! (ioc %p, mf %p)\n", -@@ -2256,19 +2395,17 @@ mptctl_do_mpt_command (struct mpt_ioctl_ - goto done_free_mem; - } - } -- - } else - mpt_put_msg_frame(mptctl_id, ioc, mf); - - /* Now wait for the command to complete */ - timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; - retry_wait: -- timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, -- HZ*timeout); -+ timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, HZ*timeout); - if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - rc = -ETIME; - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n", -- ioc->name, __func__)); -+ ioc->name, __FUNCTION__)); - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { - if (function == MPI_FUNCTION_SCSI_TASK_MGMT) - mutex_unlock(&ioc->taskmgmt_cmds.mutex); -@@ -2279,7 +2416,8 @@ retry_wait: - mutex_unlock(&ioc->taskmgmt_cmds.mutex); - mptctl_timeout_expired(ioc, mf); - mf = NULL; -- } else -+ } -+ else - goto retry_wait; - goto done_free_mem; - } -@@ -2287,7 +2425,6 @@ retry_wait: - if (function == MPI_FUNCTION_SCSI_TASK_MGMT) - mutex_unlock(&ioc->taskmgmt_cmds.mutex); - -- - mf = NULL; - - /* If a valid reply frame, copy to the user. 
-@@ -2295,8 +2432,7 @@ retry_wait: - */ - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) { - if (karg.maxReplyBytes < ioc->reply_sz) { -- sz = min(karg.maxReplyBytes, -- 4*ioc->ioctl_cmds.reply[2]); -+ sz = min(karg.maxReplyBytes, 4*ioc->ioctl_cmds.reply[2]); - } else { - sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]); - } -@@ -2318,8 +2454,7 @@ retry_wait: - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) { - sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); - if (sz > 0) { -- if (copy_to_user(karg.senseDataPtr, -- ioc->ioctl_cmds.sense, sz)) { -+ if (copy_to_user(karg.senseDataPtr, ioc->ioctl_cmds.sense, sz)) { - printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " - "Unable to write sense data to user %p\n", - ioc->name, __FILE__, __LINE__, -@@ -2334,8 +2469,7 @@ retry_wait: - * to user. - */ - if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) && -- (karg.dataInSize > 0) && (bufIn.kptr)) { -- -+ (karg.dataInSize > 0) && (bufIn.kptr)) { - if (copy_to_user(karg.dataInBufPtr, - bufIn.kptr, karg.dataInSize)) { - printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " -@@ -2378,7 +2512,7 @@ done_free_mem: - * Outputs: None. - * Return: 0 if successful - * -EFAULT if data unavailable -- * -EBUSY if previous command timeout and IOC reset is not complete. -+ * -EBUSY if previous command timout and IOC reset is not complete. 
- * -ENODEV if no such device/adapter - * -ETIME if timer expires - * -ENOMEM if memory allocation error -@@ -2389,18 +2523,17 @@ mptctl_hp_hostinfo(unsigned long arg, un - hp_host_info_t __user *uarg = (void __user *) arg; - MPT_ADAPTER *ioc; - struct pci_dev *pdev; -- char *pbuf=NULL; -+ char *pbuf=NULL; - dma_addr_t buf_dma; - hp_host_info_t karg; -- CONFIGPARMS cfg; -- ConfigPageHeader_t hdr; - int iocnum; -- int rc, cim_rev; -+ int cim_rev; - ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; - MPT_FRAME_HDR *mf = NULL; - MPIHeader_t *mpi_hdr; - unsigned long timeleft; - int retval; -+ u32 MsgContext; - - /* Reset long to int. Should affect IA64 and SPARC only - */ -@@ -2420,13 +2553,14 @@ mptctl_hp_hostinfo(unsigned long arg, un - - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } -+ - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n", - ioc->name)); -- - /* Fill in the data and return the structure to the calling - * program - */ -@@ -2466,42 +2600,9 @@ mptctl_hp_hostinfo(unsigned long arg, un - karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0'; - karg.fw_version[11] = '\0'; - -- /* Issue a config request to get the device serial number -- */ -- hdr.PageVersion = 0; -- hdr.PageLength = 0; -- hdr.PageNumber = 0; -- hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING; -- cfg.cfghdr.hdr = &hdr; -- cfg.physAddr = -1; -- cfg.pageAddr = 0; -- cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -- cfg.dir = 0; /* read */ -- cfg.timeout = 10; -+ strncpy(karg.serial_number, ioc->board_tracer, 16); - -- strncpy(karg.serial_number, " ", 24); -- if (mpt_config(ioc, &cfg) == 0) { -- if (cfg.cfghdr.hdr->PageLength > 0) { -- /* Issue the second config page request */ -- cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; -- -- pbuf = 
pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); -- if (pbuf) { -- cfg.physAddr = buf_dma; -- if (mpt_config(ioc, &cfg) == 0) { -- ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf; -- if (strlen(pdata->BoardTracerNumber) > 1) { -- strncpy(karg.serial_number, pdata->BoardTracerNumber, 24); -- karg.serial_number[24-1]='\0'; -- } -- } -- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); -- pbuf = NULL; -- } -- } -- } -- rc = mpt_GetIocState(ioc, 1); -- switch (rc) { -+ switch (mpt_GetIocState(ioc, 1)) { - case MPI_IOC_STATE_OPERATIONAL: - karg.ioc_status = HP_STATUS_OK; - break; -@@ -2537,21 +2638,23 @@ mptctl_hp_hostinfo(unsigned long arg, un - } - } - -- /* -+ /* - * Gather ISTWI(Industry Standard Two Wire Interface) Data - */ - if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { -- dfailprintk(ioc, printk(MYIOC_s_WARN_FMT -- "%s, no msg frames!!\n", ioc->name, __func__)); -+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", -+ ioc->name,__FUNCTION__)); -+ retval = -ENOMEM; - goto out; - } - - IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf; - mpi_hdr = (MPIHeader_t *) mf; -+ MsgContext = mpi_hdr->MsgContext; - memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t)); - IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX; - IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; -- IstwiRWRequest->MsgContext = mpi_hdr->MsgContext; -+ IstwiRWRequest->MsgContext = MsgContext; - IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ; - IstwiRWRequest->NumAddressBytes = 0x01; - IstwiRWRequest->DataLength = cpu_to_le16(0x04); -@@ -2561,23 +2664,21 @@ mptctl_hp_hostinfo(unsigned long arg, un - IstwiRWRequest->DeviceAddr = 0xB0; - - pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); -- if (!pbuf) -+ if (!pbuf) { -+ retval = -ENOMEM; - goto out; -- ioc->add_sge((char *)&IstwiRWRequest->SGL, -- (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); -+ } -+ ioc->add_sge((char *)&IstwiRWRequest->SGL, 
(MPT_SGE_FLAGS_SSIMPLE_READ|4),buf_dma); - - retval = 0; -- SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, -- IstwiRWRequest->MsgContext); -+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, IstwiRWRequest->MsgContext); - INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) - mpt_put_msg_frame(mptctl_id, ioc, mf); -- - retry_wait: -- timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, -- HZ*MPT_IOCTL_DEFAULT_TIMEOUT); -+ timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, HZ*MPT_IOCTL_DEFAULT_TIMEOUT); - if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - retval = -ETIME; -- printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__); -+ printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __FUNCTION__); - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { - mpt_free_msg_frame(ioc, mf); - goto out; -@@ -2617,7 +2718,7 @@ retry_wait: - return -EFAULT; - } - -- return 0; -+ return retval; - - } - -@@ -2627,7 +2728,7 @@ retry_wait: - * Outputs: None. - * Return: 0 if successful - * -EFAULT if data unavailable -- * -EBUSY if previous command timeout and IOC reset is not complete. -+ * -EBUSY if previous command timout and IOC reset is not complete. 
- * -ENODEV if no such device/adapter - * -ETIME if timer expires - * -ENOMEM if memory allocation error -@@ -2639,12 +2740,12 @@ mptctl_hp_targetinfo(unsigned long arg) - SCSIDevicePage0_t *pg0_alloc; - SCSIDevicePage3_t *pg3_alloc; - MPT_ADAPTER *ioc; -- MPT_SCSI_HOST *hd = NULL; -+ MPT_SCSI_HOST *hd = NULL; - hp_target_info_t karg; - int iocnum; - int data_sz; - dma_addr_t page_dma; -- CONFIGPARMS cfg; -+ CONFIGPARMS cfg; - ConfigPageHeader_t hdr; - int tmp, np, rc = 0; - -@@ -2657,13 +2758,14 @@ mptctl_hp_targetinfo(unsigned long arg) - - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } -- dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", -- ioc->name)); - -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_targetinfo called.\n", -+ ioc->name)); - /* There is nothing to do for FCP parts. 
- */ - if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) -@@ -2773,10 +2875,11 @@ mptctl_hp_targetinfo(unsigned long arg) - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - --static const struct file_operations mptctl_fops = { -+static struct file_operations mptctl_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, -- .fasync = mptctl_fasync, -+ .release = mptctl_release, -+ .fasync = mptctl_fasync, - .unlocked_ioctl = mptctl_ioctl, - #ifdef CONFIG_COMPAT - .compat_ioctl = compat_mpctl_ioctl, -@@ -2812,8 +2915,9 @@ compat_mptfwxfer_ioctl(struct file *filp - iocnumX = kfw32.iocnum & 0xFF; - if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || - (iocp == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n", -- __LINE__, iocnumX); -+ __LINE__, iocnumX); - return -ENODEV; - } - -@@ -2852,8 +2956,9 @@ compat_mpt_command(struct file *filp, un - iocnumX = karg32.hdr.iocnum & 0xFF; - if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || - (iocp == NULL)) { -+ if (mpt_debug_level & MPT_DEBUG_IOCTL) - printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n", -- __LINE__, iocnumX); -+ __LINE__, iocnumX); - return -ENODEV; - } - -@@ -2902,6 +3007,31 @@ static long compat_mpctl_ioctl(struct fi - case MPTHARDRESET: - case HP_GETHOSTINFO: - case HP_GETTARGETINFO: -+#if defined(CPQ_CIM) -+ case CC_CSMI_SAS_GET_DRIVER_INFO: -+ case CC_CSMI_SAS_GET_CNTLR_CONFIG: -+ case CC_CSMI_SAS_GET_CNTLR_STATUS: -+ case CC_CSMI_SAS_GET_SCSI_ADDRESS: -+ case CC_CSMI_SAS_GET_DEVICE_ADDRESS: -+ case CC_CSMI_SAS_GET_PHY_INFO: -+ case CC_CSMI_SAS_GET_SATA_SIGNATURE: -+ case CC_CSMI_SAS_GET_LINK_ERRORS: -+ case CC_CSMI_SAS_SMP_PASSTHRU: -+ case CC_CSMI_SAS_SSP_PASSTHRU: -+ case CC_CSMI_SAS_FIRMWARE_DOWNLOAD: -+ case CC_CSMI_SAS_GET_RAID_INFO: -+ case CC_CSMI_SAS_GET_RAID_CONFIG: -+ case CC_CSMI_SAS_GET_RAID_FEATURES: -+ case CC_CSMI_SAS_SET_RAID_CONTROL: -+ 
case CC_CSMI_SAS_GET_RAID_ELEMENT: -+ case CC_CSMI_SAS_SET_RAID_OPERATION: -+ case CC_CSMI_SAS_SET_PHY_INFO: -+ case CC_CSMI_SAS_STP_PASSTHRU: -+ case CC_CSMI_SAS_TASK_MANAGEMENT: -+ case CC_CSMI_SAS_PHY_CONTROL: -+ case CC_CSMI_SAS_GET_CONNECTOR_INFO: -+ case CC_CSMI_SAS_GET_LOCATION: -+#endif /* CPQ_CIM */ - case MPTTEST: - ret = __mptctl_ioctl(f, cmd, arg); - break; -@@ -2938,6 +3068,7 @@ mptctl_probe(struct pci_dev *pdev, const - - mutex_init(&ioc->ioctl_cmds.mutex); - init_completion(&ioc->ioctl_cmds.done); -+ - return 0; - } - -@@ -2951,6 +3082,22 @@ mptctl_probe(struct pci_dev *pdev, const - static void - mptctl_remove(struct pci_dev *pdev) - { -+#if defined(DIAG_BUFFER_SUPPORT) -+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev); -+ int i; -+ -+ /* -+ * Cleanup diag buffer allocated memory -+ */ -+ for (i = 0; i < MPI_DIAG_BUF_TYPE_COUNT; i++) { -+ if (ioc->DiagBuffer[i] == NULL) -+ continue; -+ pci_free_consistent(ioc->pcidev, ioc->DiagBuffer_sz[i], -+ ioc->DiagBuffer[i], ioc->DiagBuffer_dma[i]); -+ ioc->DiagBuffer[i] = NULL; -+ ioc->DiagBuffer_Status[i] = 0; -+ } -+#endif - } - - static struct mpt_pci_driver mptctl_driver = { -@@ -3012,16 +3159,23 @@ static void mptctl_exit(void) - - /* De-register reset handler from base module */ - mpt_reset_deregister(mptctl_id); -+ mpt_reset_deregister(mptctl_taskmgmt_id); - - /* De-register callback handler from base module */ - mpt_deregister(mptctl_id); -- mpt_reset_deregister(mptctl_taskmgmt_id); -- -- mpt_device_driver_deregister(MPTCTL_DRIVER); - -+ mpt_device_driver_deregister(MPTCTL_DRIVER); - } - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - -+#if defined(CPQ_CIM) -+#include "csmi/csmisas.c" -+#endif // CPQ_CIM -+ -+#if defined(DIAG_BUFFER_SUPPORT) -+#include "rejected_ioctls/diag_buffer.c" -+#endif -+ - module_init(mptctl_init); - module_exit(mptctl_exit); ---- a/drivers/message/fusion/mptctl.h -+++ b/drivers/message/fusion/mptctl.h -@@ -1,5 +1,5 @@ - /* -- * 
linux/drivers/message/fusion/mptioctl.h -+ * linux/drivers/message/fusion/mptctl.h - * Fusion MPT misc device (ioctl) driver. - * For use with PCI chip/adapter(s): - * LSIFC9xx/LSI409xx Fibre Channel -@@ -460,8 +460,5 @@ typedef struct _hp_target_info { - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - -- --/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -- - #endif - ---- a/drivers/message/fusion/mptdebug.h -+++ b/drivers/message/fusion/mptdebug.h -@@ -17,6 +17,10 @@ - * - * Example: (programming for MPT_DEBUG_EVENTS on host 5) - * -+ * global setting: -+ * echo 8 > /sys/module/mptbase/parameters/mpt_debug_level -+ * -+ * per host setting: - * echo 8 > /sys/class/scsi_host/host5/debug_level - * - * -------------------------------------------------------- -@@ -55,10 +59,11 @@ - #define MPT_DEBUG_RESET 0x00008000 - #define MPT_DEBUG_SCSI 0x00010000 - #define MPT_DEBUG_IOCTL 0x00020000 -+#define MPT_DEBUG_CSMISAS 0x00040000 - #define MPT_DEBUG_FC 0x00080000 - #define MPT_DEBUG_SAS 0x00100000 - #define MPT_DEBUG_SAS_WIDE 0x00200000 --#define MPT_DEBUG_36GB_MEM 0x00400000 -+#define MPT_DEBUG_36GB_MEM 0x00400000 - - /* - * CONFIG_FUSION_LOGGING - enabled in Kconfig -@@ -127,6 +132,9 @@ - #define dctlprintk(IOC, CMD) \ - MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL) - -+#define dcsmisasprintk(IOC, CMD) \ -+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CSMISAS) -+ - #define dfcprintk(IOC, CMD) \ - MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FC) - -@@ -139,7 +147,6 @@ - #define d36memprintk(IOC, CMD) \ - MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM) - -- - /* - * Verbose logging - */ ---- a/drivers/message/fusion/mptfc.c -+++ b/drivers/message/fusion/mptfc.c -@@ -43,6 +43,7 @@ - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+#include - #include - #include - #include -@@ -52,8 +53,10 @@ - #include /* for 
mdelay */ - #include /* needed for in_interrupt() proto */ - #include /* notifier code */ -+#include - #include - #include -+#include - - #include - #include -@@ -80,10 +83,18 @@ MODULE_VERSION(my_VERSION); - static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */ - module_param(mptfc_dev_loss_tmo, int, 0); - MODULE_PARM_DESC(mptfc_dev_loss_tmo, " Initial time the driver programs the " -- " transport to wait for an rport to " -+ " transport to wait for an rport to " - " return following a device loss event." - " Default=60."); - -+static int mpt_sdev_queue_depth = MPT_SCSI_CMD_PER_DEV_HIGH; -+static int mptfc_set_sdev_queue_depth(const char *val, struct kernel_param *kp); -+module_param_call(mpt_sdev_queue_depth, mptfc_set_sdev_queue_depth, -+ param_get_int, &mpt_sdev_queue_depth, 0600); -+MODULE_PARM_DESC(mpt_sdev_queue_depth, -+ " Max Device Queue Depth (default=" -+ __MODULE_STRING(MPT_SCSI_CMD_PER_DEV_HIGH) ")"); -+ - /* scsi-mid layer global parmeter is max_report_luns, which is 511 */ - #define MPTFC_MAX_LUN (16895) - static int max_lun = MPTFC_MAX_LUN; -@@ -118,7 +129,7 @@ static struct scsi_host_template mptfc_d - .slave_configure = mptscsih_slave_configure, - .target_destroy = mptfc_target_destroy, - .slave_destroy = mptscsih_slave_destroy, -- .change_queue_depth = mptscsih_change_queue_depth, -+ .change_queue_depth = mptscsih_change_queue_depth, - .eh_abort_handler = mptfc_abort, - .eh_device_reset_handler = mptfc_dev_reset, - .eh_bus_reset_handler = mptfc_bus_reset, -@@ -183,6 +194,35 @@ static struct fc_function_template mptfc - .show_host_symbolic_name = 1, - }; - -+/** -+ * mptfc_set_sdev_queue_depth - global setting of the mpt_sdev_queue_depth -+ * found via /sys/module/mptfc/parameters/mpt_sdev_queue_depth -+ * @val: -+ * @kp: -+ * -+ * Returns -+ **/ -+static int -+mptfc_set_sdev_queue_depth(const char *val, struct kernel_param *kp) -+{ -+ int ret = param_set_int(val, kp); -+ MPT_ADAPTER *ioc; -+ struct scsi_device *sdev; -+ -+ 
if (ret) -+ return ret; -+ -+ list_for_each_entry(ioc, &ioc_list, list) { -+ if (ioc->bus_type != FC) -+ continue; -+ shost_for_each_device(sdev, ioc->sh) -+ mptscsih_change_queue_depth(sdev, mpt_sdev_queue_depth, -+ SCSI_QDEPTH_DEFAULT); -+ ioc->sdev_queue_depth = mpt_sdev_queue_depth; -+ } -+ return 0; -+} -+ - static int - mptfc_block_error_handler(struct scsi_cmnd *SCpnt, - int (*func)(struct scsi_cmnd *SCpnt), -@@ -194,7 +234,7 @@ mptfc_block_error_handler(struct scsi_cm - struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - unsigned long flags; - int ready; -- MPT_ADAPTER *ioc; -+ MPT_ADAPTER *ioc; - - hd = shost_priv(SCpnt->device->host); - ioc = hd->ioc; -@@ -231,28 +271,28 @@ static int - mptfc_abort(struct scsi_cmnd *SCpnt) - { - return -- mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__); -+ mptfc_block_error_handler(SCpnt, mptscsih_abort, __FUNCTION__); - } - - static int - mptfc_dev_reset(struct scsi_cmnd *SCpnt) - { - return -- mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__); -+ mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __FUNCTION__); - } - - static int - mptfc_bus_reset(struct scsi_cmnd *SCpnt) - { - return -- mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__); -+ mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __FUNCTION__); - } - - static int - mptfc_host_reset(struct scsi_cmnd *SCpnt) - { - return -- mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__); -+ mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __FUNCTION__); - } - - static void -@@ -335,7 +375,7 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, in - - data_sz = hdr.PageLength * 4; - ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, -- &page0_dma); -+ &page0_dma); - rc = -ENOMEM; - if (!ppage0_alloc) - break; -@@ -371,7 +411,7 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, in - *p_pp0++ = p_p0++; /* save addr */ - } - pci_free_consistent(ioc->pcidev, data_sz, -- (u8 *) ppage0_alloc, page0_dma); -+ (u8 *) 
ppage0_alloc, page0_dma); - if (rc != 0) - break; - -@@ -476,6 +516,7 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int - if (vtarget) { - vtarget->id = pg0->CurrentTargetID; - vtarget->channel = pg0->CurrentBus; -+ vtarget->deleted = 0; - } - } - *((struct mptfc_rport_info **)rport->dd_data) = ri; -@@ -513,6 +554,7 @@ mptfc_target_destroy(struct scsi_target - struct fc_rport *rport; - struct mptfc_rport_info *ri; - -+ printk("%s - starget=%p\n", __FUNCTION__, starget); - rport = starget_to_rport(starget); - if (rport) { - ri = *((struct mptfc_rport_info **)rport->dd_data); -@@ -560,6 +602,7 @@ mptfc_target_alloc(struct scsi_target *s - - return rc; - } -+ - /* - * mptfc_dump_lun_info - * @ioc -@@ -589,7 +632,6 @@ mptfc_dump_lun_info(MPT_ADAPTER *ioc, st - (unsigned long long)nn)); - } - -- - /* - * OS entry point to allow host driver to alloc memory - * for each scsi device. Called once per device the bus scan. -@@ -604,7 +646,7 @@ mptfc_slave_alloc(struct scsi_device *sd - VirtDevice *vdevice; - struct scsi_target *starget; - struct fc_rport *rport; -- MPT_ADAPTER *ioc; -+ MPT_ADAPTER *ioc; - - starget = scsi_target(sdev); - rport = starget_to_rport(starget); -@@ -614,11 +656,10 @@ mptfc_slave_alloc(struct scsi_device *sd - - hd = shost_priv(sdev->host); - ioc = hd->ioc; -- - vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL); - if (!vdevice) { - printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n", -- ioc->name, sizeof(VirtDevice)); -+ ioc->name, sizeof(VirtDevice)); - return -ENOMEM; - } - -@@ -635,10 +676,7 @@ mptfc_slave_alloc(struct scsi_device *sd - vdevice->lun = sdev->lun; - - vtarget->num_luns++; -- -- - mptfc_dump_lun_info(ioc, rport, sdev, vtarget); -- - return 0; - } - -@@ -944,11 +982,12 @@ start_over: - return rc; - } - --static void -+static int - mptfc_SetFcPortPage1_defaults(MPT_ADAPTER *ioc) - { - int ii; - FCPortPage1_t *pp1; -+ int rc; - - #define MPTFC_FW_DEVICE_TIMEOUT (1) - #define MPTFC_FW_IO_PEND_TIMEOUT (1) -@@ -956,8 +995,8 @@ 
mptfc_SetFcPortPage1_defaults(MPT_ADAPTE - #define OFF_FLAGS (MPI_FCPORTPAGE1_FLAGS_VERBOSE_RESCAN_EVENTS) - - for (ii=0; iifacts.NumberOfPorts; ii++) { -- if (mptfc_GetFcPortPage1(ioc, ii) != 0) -- continue; -+ if ((rc = mptfc_GetFcPortPage1(ioc, ii)) < 0) -+ return rc; - pp1 = ioc->fc_data.fc_port_page1[ii].data; - if ((pp1->InitiatorDeviceTimeout == MPTFC_FW_DEVICE_TIMEOUT) - && (pp1->InitiatorIoPendTimeout == MPTFC_FW_IO_PEND_TIMEOUT) -@@ -968,8 +1007,10 @@ mptfc_SetFcPortPage1_defaults(MPT_ADAPTE - pp1->InitiatorIoPendTimeout = MPTFC_FW_IO_PEND_TIMEOUT; - pp1->Flags &= ~OFF_FLAGS; - pp1->Flags |= ON_FLAGS; -- mptfc_WriteFcPortPage1(ioc, ii); -+ if ((rc = mptfc_WriteFcPortPage1(ioc, ii)) < 0) -+ return rc; - } -+ return 0; - } - - -@@ -1003,10 +1044,10 @@ mptfc_init_host_attr(MPT_ADAPTER *ioc,in - fc_host_maxframe_size(sh) = pp0->MaxFrameSize; - - fc_host_node_name(sh) = -- (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low; -+ (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low; - - fc_host_port_name(sh) = -- (u64)pp0->WWPN.High << 32 | (u64)pp0->WWPN.Low; -+ (u64)pp0->WWPN.High << 32 | (u64)pp0->WWPN.Low; - - fc_host_port_id(sh) = pp0->PortIdentifier; - -@@ -1082,10 +1123,13 @@ mptfc_link_status_change(struct work_str - static void - mptfc_setup_reset(struct work_struct *work) - { -- MPT_ADAPTER *ioc = -+ MPT_ADAPTER *ioc = - container_of(work, MPT_ADAPTER, fc_setup_reset_work); - u64 pn; - struct mptfc_rport_info *ri; -+ struct scsi_target *starget; -+ VirtTarget *vtarget; -+ - - /* reset about to happen, delete (block) all rports */ - list_for_each_entry(ri, &ioc->fc_rports, list) { -@@ -1093,6 +1137,12 @@ mptfc_setup_reset(struct work_struct *wo - ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED; - fc_remote_port_delete(ri->rport); /* won't sleep */ - ri->rport = NULL; -+ starget = ri->starget; -+ if (starget) { -+ vtarget = starget->hostdata; -+ if (vtarget) -+ vtarget->deleted = 1; -+ } - - pn = (u64)ri->pg0.WWPN.High << 32 | - (u64)ri->pg0.WWPN.Low; -@@ -1111,8 
+1161,22 @@ mptfc_rescan_devices(struct work_struct - MPT_ADAPTER *ioc = - container_of(work, MPT_ADAPTER, fc_rescan_work); - int ii; -+ int rc; - u64 pn; - struct mptfc_rport_info *ri; -+ struct scsi_target *starget; -+ VirtTarget *vtarget; -+ -+ /* -+ * if cannot set defaults, something's really wrong, bail out -+ */ -+ -+ if ((rc = mptfc_SetFcPortPage1_defaults(ioc)) < 0) { -+ dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT -+ "mptfc_rescan.%d: unable to set PP1 defaults, rc %d.\n", -+ ioc->name, ioc->sh->host_no, rc)); -+ return; -+ } - - /* start by tagging all ports as missing */ - list_for_each_entry(ri, &ioc->fc_rports, list) { -@@ -1140,6 +1204,12 @@ mptfc_rescan_devices(struct work_struct - MPT_RPORT_INFO_FLAGS_MISSING); - fc_remote_port_delete(ri->rport); /* won't sleep */ - ri->rport = NULL; -+ starget = ri->starget; -+ if (starget) { -+ vtarget = starget->hostdata; -+ if (vtarget) -+ vtarget->deleted = 1; -+ } - - pn = (u64)ri->pg0.WWPN.High << 32 | - (u64)ri->pg0.WWPN.Low; -@@ -1157,7 +1227,7 @@ mptfc_probe(struct pci_dev *pdev, const - { - struct Scsi_Host *sh; - MPT_SCSI_HOST *hd; -- MPT_ADAPTER *ioc; -+ MPT_ADAPTER *ioc; - unsigned long flags; - int ii; - int numSGE = 0; -@@ -1215,7 +1285,7 @@ mptfc_probe(struct pci_dev *pdev, const - ioc->name); - error = -1; - goto out_mptfc_probe; -- } -+ } - - spin_lock_init(&ioc->fc_rescan_work_lock); - INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices); -@@ -1238,6 +1308,10 @@ mptfc_probe(struct pci_dev *pdev, const - sh->max_id = ioc->pfacts->MaxDevices; - sh->max_lun = max_lun; - -+ sh->this_id = ioc->pfacts[0].PortSCSIID; -+ -+ ioc->sdev_queue_depth = mpt_sdev_queue_depth; -+ - /* Required entry. 
- */ - sh->unique_id = ioc->id; -@@ -1300,8 +1374,8 @@ mptfc_probe(struct pci_dev *pdev, const - - /* initialize workqueue */ - -- snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name), -- "mptfc_wq_%d", sh->host_no); -+ snprintf(ioc->fc_rescan_work_q_name, MPT_KOBJ_NAME_LEN, "mptfc_wq_%d", -+ sh->host_no); - ioc->fc_rescan_work_q = - create_singlethread_workqueue(ioc->fc_rescan_work_q_name); - if (!ioc->fc_rescan_work_q) -@@ -1314,7 +1388,6 @@ mptfc_probe(struct pci_dev *pdev, const - for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { - (void) mptfc_GetFcPortPage0(ioc, ii); - } -- mptfc_SetFcPortPage1_defaults(ioc); - - /* - * scan for rports - -@@ -1352,8 +1425,8 @@ mptfc_event_process(MPT_ADAPTER *ioc, Ev - unsigned long flags; - int rc=1; - -- devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", -- ioc->name, event)); -+ if (ioc->bus_type != FC) -+ return 0; - - if (ioc->sh == NULL || - ((hd = shost_priv(ioc->sh)) == NULL)) -@@ -1390,45 +1463,45 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int re - unsigned long flags; - - rc = mptscsih_ioc_reset(ioc,reset_phase); -- if (rc == 0) -+ if ((ioc->bus_type != FC) || (!rc)) - return rc; - -- -- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- ": IOC %s_reset routed to FC host driver!\n",ioc->name, -- reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( -- reset_phase==MPT_IOC_PRE_RESET ? 
"pre" : "post"))); -- -- if (reset_phase == MPT_IOC_SETUP_RESET) { -+ switch(reset_phase) { -+ case MPT_IOC_SETUP_RESET: -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __FUNCTION__)); - spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); - if (ioc->fc_rescan_work_q) { - queue_work(ioc->fc_rescan_work_q, - &ioc->fc_setup_reset_work); - } - spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); -- } -- -- else if (reset_phase == MPT_IOC_PRE_RESET) { -- } -- -- else { /* MPT_IOC_POST_RESET */ -- mptfc_SetFcPortPage1_defaults(ioc); -+ break; -+ case MPT_IOC_PRE_RESET: -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "%s: MPT_IOC_PRE_RESET\n", ioc->name, __FUNCTION__)); -+ break; -+ case MPT_IOC_POST_RESET: -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "%s: MPT_IOC_POST_RESET\n", ioc->name, __FUNCTION__)); - spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); - if (ioc->fc_rescan_work_q) { - queue_work(ioc->fc_rescan_work_q, - &ioc->fc_rescan_work); - } - spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); -+ break; -+ default: -+ break; - } - return 1; - } - --/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /** - * mptfc_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer. - * - * Returns 0 for success, non-zero for failure. 
-- */ -+ **/ - static int __init - mptfc_init(void) - { -@@ -1460,12 +1533,11 @@ mptfc_init(void) - return error; - } - --/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /** - * mptfc_remove - Remove fc infrastructure for devices - * @pdev: Pointer to pci_dev structure - * -- */ -+ **/ - static void __devexit - mptfc_remove(struct pci_dev *pdev) - { -@@ -1475,6 +1547,8 @@ mptfc_remove(struct pci_dev *pdev) - unsigned long flags; - int ii; - -+ printk("%s -pdev=%p\n", __FUNCTION__, pdev); -+ - /* destroy workqueue */ - if ((work_q=ioc->fc_rescan_work_q)) { - spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); -@@ -1517,7 +1591,6 @@ mptfc_exit(void) - - mpt_reset_deregister(mptfcDoneCtx); - mpt_event_deregister(mptfcDoneCtx); -- - mpt_deregister(mptfcInternalCtx); - mpt_deregister(mptfcTaskCtx); - mpt_deregister(mptfcDoneCtx); ---- a/drivers/message/fusion/mptlan.c -+++ b/drivers/message/fusion/mptlan.c -@@ -6,7 +6,6 @@ - * - * Copyright (c) 2000-2008 LSI Corporation - * (mailto:DL-MPTFusionLinux@lsi.com) -- * - */ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /* -@@ -78,6 +77,12 @@ MODULE_VERSION(my_VERSION); - * Fusion MPT LAN private structures - */ - -+struct NAA_Hosed { -+ u16 NAA; -+ u8 ieee[FC_ALEN]; -+ struct NAA_Hosed *next; -+}; -+ - struct BufferControl { - struct sk_buff *skb; - dma_addr_t dma; -@@ -107,6 +112,7 @@ struct mpt_lan_priv { - - u32 total_posted; - u32 total_received; -+ struct net_device_stats stats; /* Per device statistics */ - - struct delayed_work post_buckets_task; - struct net_device *dev; -@@ -153,6 +159,16 @@ static u8 LanCtx = MPT_MAX_PROTOCOL_DRIV - static u32 max_buckets_out = 127; - static u32 tx_max_out_p = 127 - 16; - -+#ifdef QLOGIC_NAA_WORKAROUND -+static struct NAA_Hosed *mpt_bad_naa = NULL; -+DEFINE_RWLOCK(bad_naa_lock); -+#endif -+ -+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+/* -+ * Fusion MPT LAN external data 
-+ */ -+ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /** - * lan_reply - Handle all data sent from the hardware. -@@ -179,8 +195,7 @@ lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_H - u32 tmsg = CAST_PTR_TO_U32(reply); - - dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n", -- IOC_AND_NETDEV_NAMES_s_s(dev), -- tmsg)); -+ IOC_AND_NETDEV_NAMES_s_s(dev), tmsg)); - - switch (GET_LAN_FORM(tmsg)) { - -@@ -429,6 +444,7 @@ mpt_lan_open(struct net_device *dev) - dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); - - mpt_lan_post_receive_buckets(priv); -+ - printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", - IOC_AND_NETDEV_NAMES_s_s(dev)); - -@@ -572,7 +588,6 @@ mpt_lan_tx_timeout(struct net_device *de - } - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ --//static inline int - static int - mpt_lan_send_turbo(struct net_device *dev, u32 tmsg) - { -@@ -585,12 +600,12 @@ mpt_lan_send_turbo(struct net_device *de - ctx = GET_LAN_BUFFER_CONTEXT(tmsg); - sent = priv->SendCtl[ctx].skb; - -- dev->stats.tx_packets++; -- dev->stats.tx_bytes += sent->len; -+ priv->stats.tx_packets++; -+ priv->stats.tx_bytes += sent->len; - - dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n", - IOC_AND_NETDEV_NAMES_s_s(dev), -- __func__, sent)); -+ __FUNCTION__, sent)); - - priv->SendCtl[ctx].skb = NULL; - pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma, -@@ -627,7 +642,7 @@ mpt_lan_send_reply(struct net_device *de - - switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) { - case MPI_IOCSTATUS_SUCCESS: -- dev->stats.tx_packets += count; -+ priv->stats.tx_packets += count; - break; - - case MPI_IOCSTATUS_LAN_CANCELED: -@@ -635,13 +650,13 @@ mpt_lan_send_reply(struct net_device *de - break; - - case MPI_IOCSTATUS_INVALID_SGL: -- dev->stats.tx_errors += count; -+ priv->stats.tx_errors += count; - printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n", - 
IOC_AND_NETDEV_NAMES_s_s(dev)); - goto out; - - default: -- dev->stats.tx_errors += count; -+ priv->stats.tx_errors += count; - break; - } - -@@ -652,11 +667,11 @@ mpt_lan_send_reply(struct net_device *de - ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext)); - - sent = priv->SendCtl[ctx].skb; -- dev->stats.tx_bytes += sent->len; -+ priv->stats.tx_bytes += sent->len; - - dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n", - IOC_AND_NETDEV_NAMES_s_s(dev), -- __func__, sent)); -+ __FUNCTION__, sent)); - - priv->SendCtl[ctx].skb = NULL; - pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma, -@@ -695,7 +710,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, s - u16 cur_naa = 0x1000; - - dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n", -- __func__, skb)); -+ __FUNCTION__, skb)); - - spin_lock_irqsave(&priv->txfidx_lock, flags); - if (priv->mpt_txfidx_tail < 0) { -@@ -703,8 +718,8 @@ mpt_lan_sdu_send (struct sk_buff *skb, s - spin_unlock_irqrestore(&priv->txfidx_lock, flags); - - printk (KERN_ERR "%s: no tx context available: %u\n", -- __func__, priv->mpt_txfidx_tail); -- return NETDEV_TX_BUSY; -+ __FUNCTION__, priv->mpt_txfidx_tail); -+ return 1; - } - - mf = mpt_get_msg_frame(LanCtx, mpt_dev); -@@ -713,8 +728,8 @@ mpt_lan_sdu_send (struct sk_buff *skb, s - spin_unlock_irqrestore(&priv->txfidx_lock, flags); - - printk (KERN_ERR "%s: Unable to alloc request frame\n", -- __func__); -- return NETDEV_TX_BUSY; -+ __FUNCTION__); -+ return 1; - } - - ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--]; -@@ -731,7 +746,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, s - skb_reset_mac_header(skb); - skb_pull(skb, 12); - -- dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len, -+ dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len, - PCI_DMA_TODEVICE); - - priv->SendCtl[ctx].skb = skb; -@@ -761,6 +776,32 @@ mpt_lan_sdu_send (struct sk_buff *skb, s - - mac = skb_mac_header(skb); - -+#ifdef QLOGIC_NAA_WORKAROUND -+{ -+ struct NAA_Hosed *nh; -+ -+ /* Munge the NAA 
for Tx packets to QLogic boards, which don't follow -+ RFC 2625. The longer I look at this, the more my opinion of Qlogic -+ drops. */ -+ read_lock_irq(&bad_naa_lock); -+ for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) { -+ if ((nh->ieee[0] == mac[0]) && -+ (nh->ieee[1] == mac[1]) && -+ (nh->ieee[2] == mac[2]) && -+ (nh->ieee[3] == mac[3]) && -+ (nh->ieee[4] == mac[4]) && -+ (nh->ieee[5] == mac[5])) { -+ cur_naa = nh->NAA; -+ dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " -+ "= %04x.\n", cur_naa)); -+ break; -+ } -+ } -+ read_unlock_irq(&bad_naa_lock); -+} -+#endif -+ -+ - pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | - (mac[0] << 8) | - (mac[1] << 0)); -@@ -784,7 +825,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, s - MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) | - skb->len); - pSimple->Address.Low = cpu_to_le32((u32) dma); -- if (sizeof(dma_addr_t) > sizeof(u32)) -+ if (mpt_dev->sg_addr_size > sizeof(u32)) - pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32)); - else - pSimple->Address.High = 0; -@@ -796,7 +837,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, s - IOC_AND_NETDEV_NAMES_s_s(dev), - le32_to_cpu(pSimple->FlagsLength))); - -- return NETDEV_TX_OK; -+ return 0; - } - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -@@ -807,7 +848,7 @@ mpt_lan_wake_post_buckets_task(struct ne - */ - { - struct mpt_lan_priv *priv = netdev_priv(dev); -- -+ - if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { - if (priority) { - schedule_delayed_work(&priv->post_buckets_task, 0); -@@ -816,7 +857,7 @@ mpt_lan_wake_post_buckets_task(struct ne - dioprintk((KERN_INFO MYNAM ": post_buckets queued on " - "timer.\n")); - } -- dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n", -+ dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n", - IOC_AND_NETDEV_NAMES_s_s(dev) )); - } - } -@@ -833,8 +874,8 @@ mpt_lan_receive_skb(struct net_device *d - "delivered to upper level.\n", - 
IOC_AND_NETDEV_NAMES_s_s(dev), skb->len)); - -- dev->stats.rx_bytes += skb->len; -- dev->stats.rx_packets++; -+ priv->stats.rx_bytes += skb->len; -+ priv->stats.rx_packets++; - - skb->dev = dev; - netif_rx(skb); -@@ -1073,7 +1114,6 @@ mpt_lan_receive_post_reply(struct net_de - PCI_DMA_FROMDEVICE); - - skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); -- - pci_dma_sync_single_for_device(mpt_dev->pcidev, - priv->RcvCtl[ctx].dma, - priv->RcvCtl[ctx].len, -@@ -1121,22 +1161,20 @@ mpt_lan_receive_post_reply(struct net_de - "(priv->buckets_out = %d)\n", - IOC_AND_NETDEV_NAMES_s_s(dev), - remaining, atomic_read(&priv->buckets_out)); -- -+ - if ((remaining < priv->bucketthresh) && - ((atomic_read(&priv->buckets_out) - remaining) > - MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) { -- - printk (KERN_WARNING MYNAM " Mismatch between driver's " - "buckets_out count and fw's BucketsRemaining " - "count has crossed the threshold, issuing a " - "LanReset to clear the fw's hashtable. You may " - "want to check your /var/log/messages for \"CRC " - "error\" event notifications.\n"); -- - mpt_lan_reset(dev); - mpt_lan_wake_post_buckets_task(dev, 0); - } -- -+ - return mpt_lan_receive_skb(dev, skb); - } - -@@ -1164,7 +1202,7 @@ mpt_lan_post_receive_buckets(struct mpt_ - - dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n", - IOC_AND_NETDEV_NAMES_s_s(dev), -- __func__, buckets, curr)); -+ __FUNCTION__, buckets, curr)); - - max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) / - (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t)); -@@ -1173,9 +1211,9 @@ mpt_lan_post_receive_buckets(struct mpt_ - mf = mpt_get_msg_frame(LanCtx, mpt_dev); - if (mf == NULL) { - printk (KERN_ERR "%s: Unable to alloc request frame\n", -- __func__); -+ __FUNCTION__); - dioprintk((KERN_ERR "%s: %u buckets remaining\n", -- __func__, buckets)); -+ __FUNCTION__, buckets)); - goto out; - } - pRecvReq = (LANReceivePostRequest_t *) mf; -@@ -1200,7 +1238,7 @@ 
mpt_lan_post_receive_buckets(struct mpt_ - spin_lock_irqsave(&priv->rxfidx_lock, flags); - if (priv->mpt_rxfidx_tail < 0) { - printk (KERN_ERR "%s: Can't alloc context\n", -- __func__); -+ __FUNCTION__); - spin_unlock_irqrestore(&priv->rxfidx_lock, - flags); - break; -@@ -1223,7 +1261,7 @@ mpt_lan_post_receive_buckets(struct mpt_ - if (skb == NULL) { - printk (KERN_WARNING - MYNAM "/%s: Can't alloc skb\n", -- __func__); -+ __FUNCTION__); - priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; - spin_unlock_irqrestore(&priv->rxfidx_lock, flags); - break; -@@ -1251,7 +1289,7 @@ mpt_lan_post_receive_buckets(struct mpt_ - MPI_SGE_FLAGS_SIMPLE_ELEMENT | - MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len); - pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma); -- if (sizeof(dma_addr_t) > sizeof(u32)) -+ if (mpt_dev->sg_addr_size > sizeof(u32)) - pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32)); - else - pSimple->Address.High = 0; -@@ -1261,7 +1299,7 @@ mpt_lan_post_receive_buckets(struct mpt_ - - if (pSimple == NULL) { - /**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n", --/**/ __func__); -+/**/ __FUNCTION__); - mpt_free_msg_frame(mpt_dev, mf); - goto out; - } -@@ -1285,9 +1323,9 @@ mpt_lan_post_receive_buckets(struct mpt_ - - out: - dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n", -- __func__, buckets, atomic_read(&priv->buckets_out))); -+ __FUNCTION__, buckets, atomic_read(&priv->buckets_out))); - dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n", -- __func__, priv->total_posted, priv->total_received)); -+ __FUNCTION__, priv->total_posted, priv->total_received)); - - clear_bit(0, &priv->post_buckets_active); - } -@@ -1296,7 +1334,7 @@ static void - mpt_lan_post_receive_buckets_work(struct work_struct *work) - { - mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv, -- post_buckets_task.work)); -+ post_buckets_task.work)); - } - - static 
const struct net_device_ops mpt_netdev_ops = { -@@ -1311,11 +1349,10 @@ static const struct net_device_ops mpt_n - static struct net_device * - mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) - { -- struct net_device *dev; -- struct mpt_lan_priv *priv; -+ struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv)); -+ struct mpt_lan_priv *priv = NULL; - u8 HWaddr[FC_ALEN], *a; - -- dev = alloc_fcdev(sizeof(struct mpt_lan_priv)); - if (!dev) - return NULL; - -@@ -1327,8 +1364,9 @@ mpt_register_lan_device (MPT_ADAPTER *mp - priv->mpt_dev = mpt_dev; - priv->pnum = pnum; - -+ memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task)); - INIT_DELAYED_WORK(&priv->post_buckets_task, -- mpt_lan_post_receive_buckets_work); -+ mpt_lan_post_receive_buckets_work); - priv->post_buckets_active = 0; - - dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", -@@ -1351,6 +1389,8 @@ mpt_register_lan_device (MPT_ADAPTER *mp - spin_lock_init(&priv->txfidx_lock); - spin_lock_init(&priv->rxfidx_lock); - -+ memset(&priv->stats, 0, sizeof(priv->stats)); -+ - /* Grab pre-fetched LANPage1 stuff. :-) */ - a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow; - -@@ -1372,6 +1412,8 @@ mpt_register_lan_device (MPT_ADAPTER *mp - tx_max_out_p : MPT_TX_MAX_OUT_LIM; - - dev->netdev_ops = &mpt_netdev_ops; -+ -+/* Not in 2.3.42. 
Need 2.3.45+ */ - dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT; - - dlprintk((KERN_INFO MYNAM ": Finished registering dev " -@@ -1387,7 +1429,7 @@ mpt_register_lan_device (MPT_ADAPTER *mp - static int - mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id) - { -- MPT_ADAPTER *ioc = pci_get_drvdata(pdev); -+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - struct net_device *dev; - int i; - -@@ -1414,14 +1456,16 @@ mptlan_probe(struct pci_dev *pdev, const - ioc->pfacts[i].PortNumber); - continue; - } -- -+ - printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device " - "registered as '%s'\n", ioc->name, dev->name); - printk(KERN_INFO MYNAM ": %s/%s: " -- "LanAddr = %pM\n", -+ "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", - IOC_AND_NETDEV_NAMES_s_s(dev), -- dev->dev_addr); -- -+ dev->dev_addr[0], dev->dev_addr[1], -+ dev->dev_addr[2], dev->dev_addr[3], -+ dev->dev_addr[4], dev->dev_addr[5]); -+ - ioc->netdev = dev; - - return 0; -@@ -1433,7 +1477,7 @@ mptlan_probe(struct pci_dev *pdev, const - static void - mptlan_remove(struct pci_dev *pdev) - { -- MPT_ADAPTER *ioc = pci_get_drvdata(pdev); -+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - struct net_device *dev = ioc->netdev; - - if(dev != NULL) { -@@ -1466,7 +1510,6 @@ static int __init mpt_lan_init (void) - } - - dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); -- - mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER); - return 0; - } -@@ -1505,8 +1548,9 @@ mpt_lan_type_trans(struct sk_buff *skb, - - printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n", - NETDEV_PTR_TO_IOC_NAME_s(dev)); -- printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n", -- fch->saddr); -+ printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", -+ fch->saddr[0], fch->saddr[1], fch->saddr[2], -+ fch->saddr[3], fch->saddr[4], fch->saddr[5]); - } - - if (*fch->daddr & 1) { -@@ -1525,6 +1569,80 @@ mpt_lan_type_trans(struct sk_buff *skb, 
- - fcllc = (struct fcllc *)skb->data; - -+#ifdef QLOGIC_NAA_WORKAROUND -+{ -+ u16 source_naa = fch->stype, found = 0; -+ -+ /* Workaround for QLogic not following RFC 2625 in regards to the NAA -+ value. */ -+ -+ if ((source_naa & 0xF000) == 0) -+ source_naa = swab16(source_naa); -+ -+ if (fcllc->ethertype == htons(ETH_P_ARP)) -+ dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of " -+ "%04x.\n", source_naa)); -+ -+ if ((fcllc->ethertype == htons(ETH_P_ARP)) && -+ ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){ -+ struct NAA_Hosed *nh, *prevnh; -+ int i; -+ -+ dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from " -+ "system with non-RFC 2625 NAA value (%04x).\n", -+ source_naa)); -+ -+ write_lock_irq(&bad_naa_lock); -+ for (prevnh = nh = mpt_bad_naa; nh != NULL; -+ prevnh=nh, nh=nh->next) { -+ if ((nh->ieee[0] == fch->saddr[0]) && -+ (nh->ieee[1] == fch->saddr[1]) && -+ (nh->ieee[2] == fch->saddr[2]) && -+ (nh->ieee[3] == fch->saddr[3]) && -+ (nh->ieee[4] == fch->saddr[4]) && -+ (nh->ieee[5] == fch->saddr[5])) { -+ found = 1; -+ dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re" -+ "q/Rep w/ bad NAA from system already" -+ " in DB.\n")); -+ break; -+ } -+ } -+ -+ if ((!found) && (nh == NULL)) { -+ -+ nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL); -+ dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/" -+ " bad NAA from system not yet in DB.\n")); -+ -+ if (nh != NULL) { -+ nh->next = NULL; -+ if (!mpt_bad_naa) -+ mpt_bad_naa = nh; -+ if (prevnh) -+ prevnh->next = nh; -+ -+ nh->NAA = source_naa; /* Set the S_NAA value. 
*/ -+ for (i = 0; i < FC_ALEN; i++) -+ nh->ieee[i] = fch->saddr[i]; -+ dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:" -+ "%02x:%02x with non-compliant S_NAA value.\n", -+ fch->saddr[0], fch->saddr[1], fch->saddr[2], -+ fch->saddr[3], fch->saddr[4],fch->saddr[5])); -+ } else { -+ printk (KERN_ERR "mptlan/type_trans: Unable to" -+ " kmalloc a NAA_Hosed struct.\n"); -+ } -+ } else if (!found) { -+ printk (KERN_ERR "mptlan/type_trans: found not" -+ " set, but nh isn't null. Evil " -+ "funkiness abounds.\n"); -+ } -+ write_unlock_irq(&bad_naa_lock); -+ } -+} -+#endif -+ - /* Strip the SNAP header from ARP packets since we don't - * pass them through to the 802.2/SNAP layers. - */ ---- a/drivers/message/fusion/mptlan.h -+++ b/drivers/message/fusion/mptlan.h -@@ -6,7 +6,6 @@ - * - * Copyright (c) 2000-2008 LSI Corporation - * (mailto:DL-MPTFusionLinux@lsi.com) -- * - */ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /* -@@ -73,6 +72,7 @@ - - #include - #include -+#include - - /* Override mptbase.h by pre-defining these! 
*/ - #define MODULEAUTHOR "LSI Corporation" ---- a/drivers/message/fusion/mptsas.c -+++ b/drivers/message/fusion/mptsas.c -@@ -47,9 +47,11 @@ - #include - #include - #include --#include -+#include - #include -+#include - #include /* for mdelay */ -+#include - - #include - #include -@@ -62,7 +64,6 @@ - #include "mptscsih.h" - #include "mptsas.h" - -- - #define my_NAME "Fusion MPT SAS Host driver" - #define my_VERSION MPT_LINUX_VERSION_COMMON - #define MYNAM "mptsas" -@@ -73,6 +74,7 @@ - #define MPTSAS_RAID_CHANNEL 1 - - #define SAS_CONFIG_PAGE_TIMEOUT 30 -+ - MODULE_AUTHOR(MODULEAUTHOR); - MODULE_DESCRIPTION(my_NAME); - MODULE_LICENSE("GPL"); -@@ -84,6 +86,25 @@ MODULE_PARM_DESC(mpt_pt_clear, - " Clear persistency table: enable=1 " - "(default=MPTSCSIH_PT_CLEAR=0)"); - -+static int mpt_cmd_retry_count = 300; -+module_param(mpt_cmd_retry_count, int, 0); -+MODULE_PARM_DESC(mpt_cmd_retry_count, -+ " Device discovery TUR command retry count: default=300"); -+ -+static int mpt_disable_hotplug_remove = 0; -+module_param(mpt_disable_hotplug_remove, int, 0); -+MODULE_PARM_DESC(mpt_disable_hotplug_remove, -+ " Disable hotpug remove events: default=0"); -+ -+static int mpt_sdev_queue_depth = MPT_SCSI_CMD_PER_DEV_HIGH; -+static int mptsas_set_sdev_queue_depth(const char *val, -+ struct kernel_param *kp); -+module_param_call(mpt_sdev_queue_depth, mptsas_set_sdev_queue_depth, -+ param_get_int, &mpt_sdev_queue_depth, 0600); -+MODULE_PARM_DESC(mpt_sdev_queue_depth, -+ " Max Device Queue Depth (default=" -+ __MODULE_STRING(MPT_SCSI_CMD_PER_DEV_HIGH) ")"); -+ - /* scsi-mid layer global parmeter is max_report_luns, which is 511 */ - #define MPTSAS_MAX_LUN (16895) - static int max_lun = MPTSAS_MAX_LUN; -@@ -96,7 +117,6 @@ static u8 mptsasInternalCtx = MPT_MAX_PR - static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS; - static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS; - --static void mptsas_firmware_event_work(struct work_struct *work); - static void 
mptsas_send_sas_event(struct fw_event_work *fw_event); - static void mptsas_send_raid_event(struct fw_event_work *fw_event); - static void mptsas_send_ir2_event(struct fw_event_work *fw_event); -@@ -126,6 +146,39 @@ static void mptsas_broadcast_primative_w - static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event); - static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id); - -+ -+void mptsas_schedule_target_reset(void *ioc); -+static void mptsas_firmware_event_work(struct work_struct *work); -+ -+/** -+ * mptsas_set_sdev_queue_depth - global setting of the mpt_sdev_queue_depth -+ * found via /sys/module/mptsas/parameters/mpt_sdev_queue_depth -+ * @val: -+ * @kp: -+ * -+ * Returns -+ **/ -+static int -+mptsas_set_sdev_queue_depth(const char *val, struct kernel_param *kp) -+{ -+ int ret = param_set_int(val, kp); -+ MPT_ADAPTER *ioc; -+ struct scsi_device *sdev; -+ -+ if (ret) -+ return ret; -+ -+ list_for_each_entry(ioc, &ioc_list, list) { -+ if (ioc->bus_type != SAS) -+ continue; -+ shost_for_each_device(sdev, ioc->sh) -+ mptscsih_change_queue_depth(sdev, mpt_sdev_queue_depth, -+ SCSI_QDEPTH_DEFAULT); -+ ioc->sdev_queue_depth = mpt_sdev_queue_depth; -+ } -+ return 0; -+} -+ - static void mptsas_print_phy_data(MPT_ADAPTER *ioc, - MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) - { -@@ -279,6 +332,10 @@ mptsas_add_fw_event(MPT_ADAPTER *ioc, st - { - unsigned long flags; - -+#if defined(CPQ_CIM) -+ ioc->csmi_change_count++; -+#endif -+ - spin_lock_irqsave(&ioc->fw_event_lock, flags); - list_add_tail(&fw_event->list, &ioc->fw_event_list); - INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work); -@@ -297,7 +354,7 @@ mptsas_requeue_fw_event(MPT_ADAPTER *ioc - unsigned long flags; - spin_lock_irqsave(&ioc->fw_event_lock, flags); - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task " -- "(fw_event=0x%p)\n", ioc->name, __func__, fw_event)); -+ "(fw_event=0x%p)\n", ioc->name,__FUNCTION__, fw_event)); - fw_event->retries++; - 
queue_delayed_work(ioc->fw_event_q, &fw_event->work, - msecs_to_jiffies(delay)); -@@ -325,7 +382,7 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *i - { - struct fw_event_work *fw_event, *next; - struct mptsas_target_reset_event *target_reset_list, *n; -- MPT_SCSI_HOST *hd = shost_priv(ioc->sh); -+ MPT_SCSI_HOST *hd = shost_priv(ioc->sh); - - /* flush the target_reset_list */ - if (!list_empty(&hd->target_reset_list)) { -@@ -436,7 +493,14 @@ mptsas_is_end_device(struct mptsas_devin - return 0; - } - --/* no mutex */ -+/** -+ * mptsas_port_delete - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @port_details: -+ * -+ * (no mutex) -+ * -+ **/ - static void - mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details) - { -@@ -465,6 +529,11 @@ mptsas_port_delete(MPT_ADAPTER *ioc, str - kfree(port_details); - } - -+/** -+ * mptsas_get_rphy - -+ * @phy_info: -+ * -+ **/ - static inline struct sas_rphy * - mptsas_get_rphy(struct mptsas_phyinfo *phy_info) - { -@@ -474,6 +543,13 @@ mptsas_get_rphy(struct mptsas_phyinfo *p - return NULL; - } - -+/** -+ * mptsas_set_rphy - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @phy_info: -+ * @rphy: -+ * -+ **/ - static inline void - mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) - { -@@ -491,6 +567,11 @@ mptsas_set_rphy(MPT_ADAPTER *ioc, struct - } - } - -+/** -+ * mptsas_get_port - -+ * @phy_info: -+ * -+ **/ - static inline struct sas_port * - mptsas_get_port(struct mptsas_phyinfo *phy_info) - { -@@ -500,6 +581,13 @@ mptsas_get_port(struct mptsas_phyinfo *p - return NULL; - } - -+/** -+ * mptsas_set_port - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @phy_info: -+ * @port: -+ * -+ **/ - static inline void - mptsas_set_port(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_port *port) - { -@@ -514,6 +602,11 @@ mptsas_set_port(MPT_ADAPTER *ioc, struct - } - } - -+/** -+ * mptsas_get_starget - -+ * @phy_info: -+ * -+ **/ - static inline struct 
scsi_target * - mptsas_get_starget(struct mptsas_phyinfo *phy_info) - { -@@ -523,6 +616,12 @@ mptsas_get_starget(struct mptsas_phyinfo - return NULL; - } - -+/** -+ * mptsas_set_starget - -+ * @phy_info: -+ * @starget: -+ * -+ **/ - static inline void - mptsas_set_starget(struct mptsas_phyinfo *phy_info, struct scsi_target * - starget) -@@ -544,15 +643,15 @@ static void - mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id, - u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id) - { -- struct mptsas_device_info *sas_info, *next; -+ struct sas_device_info *sas_info, *next; - struct scsi_device *sdev; - struct scsi_target *starget; -- struct sas_rphy *rphy; -+ struct sas_rphy *rphy; - - /* - * Delete all matching devices out of the list - */ -- mutex_lock(&ioc->sas_device_info_mutex); -+ down(&ioc->sas_device_info_mutex); - list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, - list) { - if (!sas_info->is_logical_volume && -@@ -564,7 +663,7 @@ mptsas_add_device_component(MPT_ADAPTER - } - } - -- sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL); -+ sas_info = kzalloc(sizeof(struct sas_device_info), GFP_KERNEL); - if (!sas_info) - goto out; - -@@ -594,7 +693,7 @@ mptsas_add_device_component(MPT_ADAPTER - } - - out: -- mutex_unlock(&ioc->sas_device_info_mutex); -+ up(&ioc->sas_device_info_mutex); - return; - } - -@@ -631,23 +730,23 @@ mptsas_add_device_component_by_fw(MPT_AD - } - - /** -- * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list -+ * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding -+ * each individual device to list - * @ioc: Pointer to MPT_ADAPTER structure - * @channel: fw mapped id's - * @id: - * - **/ - static void --mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc, -- struct scsi_target *starget) -+mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc, struct scsi_target *starget) - { - CONFIGPARMS cfg; - 
ConfigPageHeader_t hdr; - dma_addr_t dma_handle; - pRaidVolumePage0_t buffer = NULL; - int i; -- RaidPhysDiskPage0_t phys_disk; -- struct mptsas_device_info *sas_info, *next; -+ RaidPhysDiskPage0_t phys_disk; -+ struct sas_device_info *sas_info, *next; - - memset(&cfg, 0 , sizeof(CONFIGPARMS)); - memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); -@@ -684,14 +783,14 @@ mptsas_add_device_component_starget_ir(M - */ - for (i = 0; i < buffer->NumPhysDisks; i++) { - -- if (mpt_raid_phys_disk_pg0(ioc, -+ if(mpt_raid_phys_disk_pg0(ioc, - buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0) - continue; - - mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus, - phys_disk.PhysDiskID); - -- mutex_lock(&ioc->sas_device_info_mutex); -+ down(&ioc->sas_device_info_mutex); - list_for_each_entry(sas_info, &ioc->sas_device_info_list, - list) { - if (!sas_info->is_logical_volume && -@@ -701,14 +800,13 @@ mptsas_add_device_component_starget_ir(M - sas_info->volume_id = starget->id; - } - } -- mutex_unlock(&ioc->sas_device_info_mutex); -- -+ up(&ioc->sas_device_info_mutex); - } - - /* - * Delete all matching devices out of the list - */ -- mutex_lock(&ioc->sas_device_info_mutex); -+ down(&ioc->sas_device_info_mutex); - list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, - list) { - if (sas_info->is_logical_volume && sas_info->fw.id == -@@ -718,7 +816,7 @@ mptsas_add_device_component_starget_ir(M - } - } - -- sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL); -+ sas_info = kzalloc(sizeof(struct sas_device_info), GFP_KERNEL); - if (sas_info) { - sas_info->fw.id = starget->id; - sas_info->os.id = starget->id; -@@ -727,7 +825,7 @@ mptsas_add_device_component_starget_ir(M - INIT_LIST_HEAD(&sas_info->list); - list_add_tail(&sas_info->list, &ioc->sas_device_info_list); - } -- mutex_unlock(&ioc->sas_device_info_mutex); -+ up(&ioc->sas_device_info_mutex); - - out: - if (buffer) -@@ -770,7 +868,8 @@ mptsas_add_device_component_starget(MPT_ - } - - /** -- * 
mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached -+ * mptsas_del_device_component_by_os - Once a device has been removed, we -+ * mark the entry in the list as being cached - * @ioc: Pointer to MPT_ADAPTER structure - * @channel: os mapped id's - * @id: -@@ -779,7 +878,7 @@ mptsas_add_device_component_starget(MPT_ - static void - mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id) - { -- struct mptsas_device_info *sas_info, *next; -+ struct sas_device_info *sas_info, *next; - - /* - * Set is_cached flag -@@ -799,20 +898,24 @@ mptsas_del_device_component_by_os(MPT_AD - static void - mptsas_del_device_components(MPT_ADAPTER *ioc) - { -- struct mptsas_device_info *sas_info, *next; -+ struct sas_device_info *sas_info, *next; - -- mutex_lock(&ioc->sas_device_info_mutex); -+ down(&ioc->sas_device_info_mutex); - list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, - list) { - list_del(&sas_info->list); - kfree(sas_info); - } -- mutex_unlock(&ioc->sas_device_info_mutex); -+ up(&ioc->sas_device_info_mutex); - } - - - /* - * mptsas_setup_wide_ports -+ * configuration -+ * in the sas_topology -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @port_info: - * - * Updates for new and existing narrow/wide port configuration - * in the sas_topology -@@ -836,13 +939,14 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc - continue; - if (port_details->num_phys < 2) - continue; -+ - /* - * Removing a phy from a port, letting the last - * phy be removed by firmware events. 
- */ - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "%s: [%p]: deleting phy = %d\n", -- ioc->name, __func__, port_details, i)); -+ "%s: [%p]: deleting phy = %d\n", -+ ioc->name, __FUNCTION__, port_details, i)); - port_details->num_phys--; - port_details->phy_bitmask &= ~ (1 << phy_info->phy_id); - memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); -@@ -945,19 +1049,18 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc - } - - /** -- * csmisas_find_vtarget -- * -- * @ioc -- * @volume_id -- * @volume_bus -+ * mptsas_find_vtarget - obtain vtarget object for non-raid devices -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @channel: -+ * @id: - * - **/ - static VirtTarget * - mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id) - { -- struct scsi_device *sdev; -+ struct scsi_device *sdev; - VirtDevice *vdevice; -- VirtTarget *vtarget = NULL; -+ VirtTarget *vtarget = NULL; - - shost_for_each_device(sdev, ioc->sh) { - vdevice = sdev->hostdata; -@@ -1017,16 +1120,14 @@ mptsas_queue_rescan(MPT_ADAPTER *ioc) - - - /** -- * mptsas_target_reset -- * -- * Issues TARGET_RESET to end device using handshaking method -- * -- * @ioc -- * @channel -- * @id -+ * mptsas_target_reset - Issues TARGET_RESET to end device using -+ * handshaking method -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @channel: -+ * @id: - * -- * Returns (1) success -- * (0) failure -+ * Returns (1) success -+ * (0) failure - * - **/ - static int -@@ -1034,6 +1135,7 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 - { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; -+ - if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) - return 0; - -@@ -1075,16 +1177,27 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 - return 0; - } - -+static void -+mptsas_block_io_sdev(struct scsi_device *sdev, void *data) -+{ -+ scsi_device_set_state(sdev, SDEV_BLOCK); -+} -+ -+static void -+mptsas_block_io_starget(struct scsi_target *starget) -+{ -+ if (starget) -+ starget_for_each_device(starget, NULL, mptsas_block_io_sdev); -+} -+ 
- /** -- * mptsas_target_reset_queue -- * -- * Receive request for TARGET_RESET after recieving an firmware -- * event NOT_RESPONDING_EVENT, then put command in link list -- * and queue if task_queue already in use. -- * -- * @ioc -- * @sas_event_data -+ * mptsas_target_reset_queue - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @sas_event_data: - * -+ * Receive request for TARGET_RESET after -+ * recieving an firmware event NOT_RESPONDING_EVENT, then put command in -+ * link list and queue if task_queue already in use. - **/ - static void - mptsas_target_reset_queue(MPT_ADAPTER *ioc, -@@ -1098,10 +1211,12 @@ mptsas_target_reset_queue(MPT_ADAPTER *i - id = sas_event_data->TargetID; - channel = sas_event_data->Bus; - -- if (!(vtarget = mptsas_find_vtarget(ioc, channel, id))) -- return; -- -- vtarget->deleted = 1; /* block IO */ -+ if ((vtarget = mptsas_find_vtarget(ioc, channel, id))) { -+ if (!ioc->disable_hotplug_remove) { -+ mptsas_block_io_starget(vtarget->starget); -+ vtarget->deleted = 1; /* block IO */ -+ } -+ } - - target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event), - GFP_ATOMIC); -@@ -1124,20 +1239,57 @@ mptsas_target_reset_queue(MPT_ADAPTER *i - } - - /** -- * mptsas_taskmgmt_complete - complete SAS task management function -+ * mptsas_schedule_target_reset- send pending target reset -+ * @iocp: per adapter object -+ * -+ * This function will delete scheduled target reset from the list and -+ * try to send next target reset. This will be called from completion -+ * context of any Task managment command. 
-+ */ -+ -+void -+mptsas_schedule_target_reset(void *iocp) -+{ -+ MPT_ADAPTER *ioc = (MPT_ADAPTER*)(iocp); -+ MPT_SCSI_HOST *hd = shost_priv(ioc->sh); -+ struct list_head *head = &hd->target_reset_list; -+ struct mptsas_target_reset_event *target_reset_list; -+ u8 id, channel; -+ /* -+ * issue target reset to next device in the queue -+ */ -+ -+ head = &hd->target_reset_list; -+ if (list_empty(head)) -+ return; -+ -+ target_reset_list = list_entry(head->next, -+ struct mptsas_target_reset_event, list); -+ -+ id = target_reset_list->sas_event_data.TargetID; -+ channel = target_reset_list->sas_event_data.Bus; -+ target_reset_list->time_count = jiffies; -+ -+ if (mptsas_target_reset(ioc, channel, id)) -+ target_reset_list->target_reset_issued = 1; -+ return; -+} -+ -+ -+/** -+ * mptsas_taskmgmt_complete - Completion for TARGET_RESET after -+ * NOT_RESPONDING_EVENT, enable work queue to finish off removing device -+ * from upper layers. then send next TARGET_RESET in the queue. - * @ioc: Pointer to MPT_ADAPTER structure - * -- * Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work -- * queue to finish off removing device from upper layers. then send next -- * TARGET_RESET in the queue. 
- **/ - static int - mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) - { - MPT_SCSI_HOST *hd = shost_priv(ioc->sh); -- struct list_head *head = &hd->target_reset_list; -- u8 id, channel; -+ struct list_head *head = &hd->target_reset_list; - struct mptsas_target_reset_event *target_reset_list; -+ u8 id, channel; - SCSITaskMgmtReply_t *pScsiTmReply; - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: " -@@ -1212,32 +1364,15 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *io - &target_reset_list->sas_event_data); - - -- /* -- * issue target reset to next device in the queue -- */ -- -- head = &hd->target_reset_list; -- if (list_empty(head)) -- return 1; -- -- target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, -- list); -- -- id = target_reset_list->sas_event_data.TargetID; -- channel = target_reset_list->sas_event_data.Bus; -- target_reset_list->time_count = jiffies; -- -- if (mptsas_target_reset(ioc, channel, id)) -- target_reset_list->target_reset_issued = 1; -+ ioc->schedule_target_reset(ioc); - - return 1; - } - - /** -- * mptscsih_ioc_reset -- * -- * @ioc -- * @reset_phase -+ * mptsas_ioc_reset - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @reset_phase: - * - **/ - static int -@@ -1282,7 +1417,6 @@ mptsas_ioc_reset(MPT_ADAPTER *ioc, int r - return rc; - } - -- - /** - * enum device_state - - * @DEVICE_RETRY: need to retry the TUR -@@ -1296,6 +1430,15 @@ enum device_state{ - DEVICE_READY, - }; - -+ -+/** -+ * mptsas_sas_enclosure_pg0 - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @enclosure: -+ * @form: -+ * @form_specific: -+ * -+ **/ - static int - mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, - u32 form, u32 form_specific) -@@ -1440,7 +1583,8 @@ mptsas_add_end_device(MPT_ADAPTER *ioc, - } - - /** -- * mptsas_del_end_device - report a deleted end device to sas transport layer -+ * mptsas_del_end_device - report a deleted end device to sas transport -+ * 
layer - * @ioc: Pointer to MPT_ADAPTER structure - * @phy_info: decribes attached device - * -@@ -1638,13 +1782,297 @@ mptsas_firmware_event_work(struct work_s - - - -+/** -+ * mptsas_get_lun_number - returns the first entry in report_luns table -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @channel: -+ * @id: -+ * @lun: -+ * -+ */ -+static int -+mptsas_get_lun_number(MPT_ADAPTER *ioc, u8 channel, u8 id, int *lun) -+{ -+ INTERNAL_CMD *iocmd; -+ struct scsi_lun *lun_data; -+ dma_addr_t lun_data_dma; -+ u32 lun_data_len; -+ u8 *data; -+ MPT_SCSI_HOST *hd; -+ int rc; -+ u32 length, num_luns; -+ -+ iocmd = NULL; -+ hd = shost_priv(ioc->sh); -+ lun_data_len = (255 * sizeof(struct scsi_lun)); -+ lun_data = pci_alloc_consistent(ioc->pcidev, lun_data_len, -+ &lun_data_dma); -+ if (!lun_data) { -+ printk(MYIOC_s_ERR_FMT "%s: pci_alloc_consistent(%d) FAILED!\n", -+ ioc->name, __FUNCTION__, lun_data_len); -+ rc = -ENOMEM; -+ goto out; -+ } -+ -+ iocmd = kzalloc(sizeof(INTERNAL_CMD), GFP_KERNEL); -+ if (!iocmd) { -+ printk(MYIOC_s_ERR_FMT "%s: kzalloc(%zd) FAILED!\n", -+ ioc->name, __FUNCTION__, sizeof(INTERNAL_CMD)); -+ rc = -ENOMEM; -+ goto out; -+ } -+ -+ /* -+ * Report Luns -+ */ -+ iocmd->cmd = REPORT_LUNS; -+ iocmd->data_dma = lun_data_dma; -+ iocmd->data = (u8 *)lun_data; -+ iocmd->size = lun_data_len; -+ iocmd->channel = channel; -+ iocmd->id = id; -+ -+ if ((rc = mptscsih_do_cmd(hd, iocmd)) < 0) { -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d: " -+ "report_luns failed due to rc=0x%x\n", ioc->name, -+ __FUNCTION__, channel, id, rc); -+ goto out; -+ } -+ -+ if (rc != MPT_SCANDV_GOOD) { -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d: " -+ "report_luns failed due to rc=0x%x\n", ioc->name, -+ __FUNCTION__, channel, id, rc); -+ rc = -rc; -+ goto out; -+ } -+ -+ data = (u8 *)lun_data; -+ length = ((data[0] << 24) | (data[1] << 16) | -+ (data[2] << 8) | (data[3] << 0)); -+ -+ num_luns = (length / sizeof(struct scsi_lun)); -+ if (!num_luns) -+ goto out; -+ /* 
return 1st lun in the list */ -+ *lun = scsilun_to_int(&lun_data[1]); -+ -+#if 0 -+ /* some debugging, left commented out */ -+ { -+ struct scsi_lun *lunp; -+ for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) -+ printk("%x\n", scsilun_to_int(lunp)); -+ } -+#endif -+ -+ out: -+ if (lun_data) -+ pci_free_consistent(ioc->pcidev, lun_data_len, lun_data, -+ lun_data_dma); -+ kfree(iocmd); -+ return rc; -+} -+ -+/** -+ * mptsas_test_unit_ready - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @channel: -+ * @id: -+ * @count: retry count -+ * -+ */ -+enum device_state -+mptsas_test_unit_ready(MPT_ADAPTER *ioc, u8 channel, u8 id, u16 count) -+{ -+ INTERNAL_CMD *iocmd; -+ MPT_SCSI_HOST *hd = shost_priv(ioc->sh); -+ enum device_state state; -+ int rc; -+ u8 skey, asc, ascq; -+ u8 retry_ua; -+ -+ if (count >= mpt_cmd_retry_count) -+ return DEVICE_ERROR; -+ -+ retry_ua = 0; -+ iocmd = kzalloc(sizeof(INTERNAL_CMD), GFP_KERNEL); -+ if (!iocmd) { -+ printk(MYIOC_s_ERR_FMT "%s: kzalloc(%zd) FAILED!\n", -+ __FUNCTION__, ioc->name, sizeof(INTERNAL_CMD)); -+ return DEVICE_ERROR; -+ } -+ -+ state = DEVICE_ERROR; -+ iocmd->cmd = TEST_UNIT_READY; -+ iocmd->data_dma = -1; -+ iocmd->data = NULL; -+ -+ if (mptscsih_is_phys_disk(ioc, channel, id)) { -+ iocmd->flags |= MPT_ICFLAG_PHYS_DISK; -+ iocmd->physDiskNum = mptscsih_raid_id_to_num(ioc, channel, id); -+ iocmd->id = id; -+ } -+ iocmd->channel = channel; -+ iocmd->id = id; -+ -+ retry: -+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_channel=%d " -+ "fw_id=%d retry=%d\n", ioc->name, __FUNCTION__, channel, id, count)); -+ rc = mptscsih_do_cmd(hd, iocmd); -+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rc=0x%02x\n", -+ ioc->name, __FUNCTION__, rc)); -+ if (rc < 0) { -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d: " -+ "tur failed due to timeout\n", ioc->name, -+ __FUNCTION__, channel, id); -+ goto tur_done; -+ } -+ -+ switch(rc) { -+ case MPT_SCANDV_GOOD: -+ state = DEVICE_READY; -+ goto tur_done; -+ case 
MPT_SCANDV_BUSY: -+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: " -+ "fw_channel=%d fw_id=%d : device busy\n", -+ ioc->name, __FUNCTION__, channel, id)); -+ state = DEVICE_RETRY; -+ break; -+ case MPT_SCANDV_DID_RESET: -+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: " -+ "fw_channel=%d fw_id=%d : did reset\n", -+ ioc->name, __FUNCTION__, channel, id)); -+ state = DEVICE_RETRY; -+ break; -+ case MPT_SCANDV_SENSE: -+ skey = ioc->internal_cmds.sense[2] & 0x0F; -+ asc = ioc->internal_cmds.sense[12]; -+ ascq = ioc->internal_cmds.sense[13]; -+ -+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: " -+ "fw_channel=%d fw_id=%d : [sense_key,asc," -+ "ascq]: [0x%02x,0x%02x,0x%02x]\n", ioc->name, -+ __FUNCTION__, channel, id, skey, asc, ascq)); -+ -+ if (skey == UNIT_ATTENTION) { -+ state = DEVICE_RETRY; -+ break; -+ } else if (skey == NOT_READY) { -+ /* -+ * medium isn't present -+ */ -+ if (asc == 0x3a) { -+ state = DEVICE_READY; -+ goto tur_done; -+ } -+ /* -+ * LU becoming ready, or -+ * LU hasn't self-configured yet -+ */ -+ if ((asc == 0x04 && ascq == 0x01) || -+ (asc == 0x04 && ascq == 0x11) || -+ asc == 0x3e) { -+ state = DEVICE_RETRY; -+ break; -+ } -+ } else if (skey == ILLEGAL_REQUEST) { -+ /* try sending a tur to a non-zero lun number */ -+ if (!iocmd->lun && !mptsas_get_lun_number(ioc, -+ channel, id, &iocmd->lun) && iocmd->lun) -+ goto retry; -+ } -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d : " -+ "tur failed due to [sense_key,asc,ascq]: " -+ "[0x%02x,0x%02x,0x%02x]\n", ioc->name, -+ __FUNCTION__, channel, id, skey, asc, ascq); -+ goto tur_done; -+ case MPT_SCANDV_SELECTION_TIMEOUT: -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d: " -+ "tur failed due to no device\n", ioc->name, -+ __FUNCTION__, channel, -+ id); -+ goto tur_done; -+ case MPT_SCANDV_SOME_ERROR: -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d: " -+ "tur failed due to some error\n", ioc->name, -+ __FUNCTION__, -+ channel, id); -+ goto tur_done; -+ default: -+ 
printk(MYIOC_s_ERR_FMT -+ "%s: fw_channel=%d fw_id=%d: tur failed due to " -+ "unknown rc=0x%02x\n", ioc->name, __FUNCTION__, -+ channel, id, rc ); -+ goto tur_done; -+ } -+ tur_done: -+ kfree(iocmd); -+ return state; -+} -+ -+/** -+ * mptsas_issue_tlr - Enabling Transport Layer Retries -+ * @hd: -+ * @sdev: -+ * -+ **/ -+static void -+mptsas_issue_tlr(MPT_SCSI_HOST *hd, struct scsi_device *sdev) -+{ -+ INTERNAL_CMD *iocmd; -+ VirtDevice *vdevice = sdev->hostdata; -+ u8 retries; -+ u8 rc; -+ MPT_ADAPTER *ioc = hd->ioc; -+ -+ if ( sdev->inquiry[8] == 'H' && -+ sdev->inquiry[9] == 'P' && -+ sdev->inquiry[10] == ' ' && -+ sdev->inquiry[11] == ' ' && -+ sdev->inquiry[12] == ' ' && -+ sdev->inquiry[13] == ' ' && -+ sdev->inquiry[14] == ' ' && -+ sdev->inquiry[15] == ' ' ) { -+ -+ iocmd = kzalloc(sizeof(INTERNAL_CMD), GFP_KERNEL); -+ if (!iocmd) { -+ printk(MYIOC_s_ERR_FMT "%s: kzalloc(%zd) FAILED!\n", -+ __FUNCTION__, ioc->name, sizeof(INTERNAL_CMD)); -+ return; -+ } -+ iocmd->id = vdevice->vtarget->id; -+ iocmd->channel = vdevice->vtarget->channel; -+ iocmd->lun = vdevice->lun; -+ iocmd->physDiskNum = -1; -+ iocmd->cmd = TRANSPORT_LAYER_RETRIES; -+ iocmd->data_dma = -1; -+ for (retries = 0, rc = -1; retries < 3; retries++) { -+ rc = mptscsih_do_cmd(hd, iocmd); -+ if (!rc) -+ break; -+ } -+ if (rc != 0) -+ printk(MYIOC_s_DEBUG_FMT "unable to enable TLR on" -+ " fw_channel %d, fw_id %d, lun=%d\n", -+ ioc->name, vdevice->vtarget->channel, -+ vdevice->vtarget->id, sdev->lun); -+ kfree(iocmd); -+ } -+} -+ -+/** -+ * mptsas_slave_configure - -+ * @sdev: -+ * -+ **/ - static int - mptsas_slave_configure(struct scsi_device *sdev) - { - struct Scsi_Host *host = sdev->host; -- MPT_SCSI_HOST *hd = shost_priv(host); -- MPT_ADAPTER *ioc = hd->ioc; -- VirtDevice *vdevice = sdev->hostdata; -+ MPT_SCSI_HOST *hd = shost_priv(host); -+ MPT_ADAPTER *ioc = hd->ioc; -+ VirtDevice *vdevice = sdev->hostdata; - - if (vdevice->vtarget->deleted) { - sdev_printk(KERN_INFO, sdev, "clearing 
deleted flag\n"); -@@ -1664,10 +2092,19 @@ mptsas_slave_configure(struct scsi_devic - - mptsas_add_device_component_starget(ioc, scsi_target(sdev)); - -+ if (sdev->type == TYPE_TAPE && -+ (ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_TLR )) -+ mptsas_issue_tlr(hd, sdev); - out: -+ - return mptscsih_slave_configure(sdev); - } - -+/** -+ * mptsas_target_alloc - -+ * @starget: -+ * -+ **/ - static int - mptsas_target_alloc(struct scsi_target *starget) - { -@@ -1677,7 +2114,7 @@ mptsas_target_alloc(struct scsi_target * - u8 id, channel; - struct sas_rphy *rphy; - struct mptsas_portinfo *p; -- int i; -+ int i; - MPT_ADAPTER *ioc = hd->ioc; - - vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL); -@@ -1698,13 +2135,9 @@ mptsas_target_alloc(struct scsi_target * - kfree(vtarget); - return -ENXIO; - } -- for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { -- if (id == ioc->raid_data.pIocPg2-> -- RaidVolume[i].VolumeID) { -- channel = ioc->raid_data.pIocPg2-> -- RaidVolume[i].VolumeBus; -- } -- } -+ for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) -+ if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID) -+ channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus; - vtarget->raidVolume = 1; - goto out; - } -@@ -1720,6 +2153,12 @@ mptsas_target_alloc(struct scsi_target * - channel = p->phy_info[i].attached.channel; - mptsas_set_starget(&p->phy_info[i], starget); - -+ starget_printk(KERN_INFO, starget, MYIOC_s_FMT -+ "add device: fw_channel %d, fw_id %d, phy %d, sas_addr 0x%llx\n", -+ ioc->name, p->phy_info[i].attached.channel, -+ p->phy_info[i].attached.id, p->phy_info[i].attached.phy_id, -+ (unsigned long long)p->phy_info[i].attached.sas_address); -+ - /* - * Exposing hidden raid components - */ -@@ -1746,6 +2185,11 @@ mptsas_target_alloc(struct scsi_target * - return 0; - } - -+/** -+ * mptsas_target_destroy - -+ * @starget: -+ * -+ **/ - static void - mptsas_target_destroy(struct scsi_target *starget) - { -@@ -1753,7 +2197,7 @@ 
mptsas_target_destroy(struct scsi_target - MPT_SCSI_HOST *hd = shost_priv(host); - struct sas_rphy *rphy; - struct mptsas_portinfo *p; -- int i; -+ int i; - MPT_ADAPTER *ioc = hd->ioc; - VirtTarget *vtarget; - -@@ -1765,7 +2209,6 @@ mptsas_target_destroy(struct scsi_target - mptsas_del_device_component_by_os(ioc, starget->channel, - starget->id); - -- - if (starget->channel == MPTSAS_RAID_CHANNEL) - goto out; - -@@ -1794,7 +2237,11 @@ mptsas_target_destroy(struct scsi_target - starget->hostdata = NULL; - } - -- -+/** -+ * mptsas_slave_alloc - -+ * @sdev: -+ * -+ **/ - static int - mptsas_slave_alloc(struct scsi_device *sdev) - { -@@ -1803,8 +2250,8 @@ mptsas_slave_alloc(struct scsi_device *s - struct sas_rphy *rphy; - struct mptsas_portinfo *p; - VirtDevice *vdevice; -- struct scsi_target *starget; -- int i; -+ struct scsi_target *starget; -+ int i; - MPT_ADAPTER *ioc = hd->ioc; - - vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL); -@@ -1816,6 +2263,9 @@ mptsas_slave_alloc(struct scsi_device *s - starget = scsi_target(sdev); - vdevice->vtarget = starget->hostdata; - -+ /* -+ * RAID volumes placed beyond the last expected port. 
-+ */ - if (sdev->channel == MPTSAS_RAID_CHANNEL) - goto out; - -@@ -1849,6 +2299,12 @@ mptsas_slave_alloc(struct scsi_device *s - return 0; - } - -+/** -+ * mptsas_qcmd - -+ * @SCpnt: -+ * @done: -+ * -+ **/ - static int - mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) - { -@@ -1868,7 +2324,8 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, voi - if (ioc->sas_discovery_quiesce_io) - return SCSI_MLQUEUE_HOST_BUSY; - --// scsi_print_command(SCpnt); -+ if (ioc->debug_level & MPT_DEBUG_SCSI) -+ scsi_print_command(SCpnt); - - return mptscsih_qcmd(SCpnt,done); - } -@@ -1878,7 +2335,7 @@ static struct scsi_host_template mptsas_ - .module = THIS_MODULE, - .proc_name = "mptsas", - .proc_info = mptscsih_proc_info, -- .name = "MPT SPI Host", -+ .name = "MPT SAS Host", - .info = mptscsih_info, - .queuecommand = mptsas_qcmd, - .target_alloc = mptsas_target_alloc, -@@ -1886,7 +2343,7 @@ static struct scsi_host_template mptsas_ - .slave_configure = mptsas_slave_configure, - .target_destroy = mptsas_target_destroy, - .slave_destroy = mptscsih_slave_destroy, -- .change_queue_depth = mptscsih_change_queue_depth, -+ .change_queue_depth = mptscsih_change_queue_depth, - .eh_abort_handler = mptscsih_abort, - .eh_device_reset_handler = mptscsih_dev_reset, - .eh_bus_reset_handler = mptscsih_bus_reset, -@@ -1901,6 +2358,11 @@ static struct scsi_host_template mptsas_ - .shost_attrs = mptscsih_host_attrs, - }; - -+/** -+ * mptsas_get_linkerrors - -+ * @phy: -+ * -+ **/ - static int mptsas_get_linkerrors(struct sas_phy *phy) - { - MPT_ADAPTER *ioc = phy_to_ioc(phy); -@@ -1963,6 +2425,13 @@ static int mptsas_get_linkerrors(struct - return error; - } - -+/** -+ * mptsas_mgmt_done - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @req: -+ * @reply: -+ * -+ **/ - static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, - MPT_FRAME_HDR *reply) - { -@@ -1981,6 +2450,12 @@ static int mptsas_mgmt_done(MPT_ADAPTER - return 0; - } - -+/** -+ * mptsas_phy_reset - -+ * @phy: -+ 
* @hard_reset: -+ * -+ **/ - static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) - { - MPT_ADAPTER *ioc = phy_to_ioc(phy); -@@ -2019,14 +2494,16 @@ static int mptsas_phy_reset(struct sas_p - - INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status) - mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); -- -- timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, -- 10 * HZ); -- if (!timeleft) { -- /* On timeout reset the board */ -+ timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10*HZ); -+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -+ error = -ETIME; - mpt_free_msg_frame(ioc, mf); -- mpt_HardResetHandler(ioc, CAN_SLEEP); -- error = -ETIMEDOUT; -+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET) -+ goto out_unlock; -+ if (!timeleft) { -+ if (mpt_SoftResetHandler(ioc, CAN_SLEEP) != 0) -+ mpt_HardResetHandler(ioc, CAN_SLEEP); -+ } - goto out_unlock; - } - -@@ -2055,6 +2532,12 @@ static int mptsas_phy_reset(struct sas_p - return error; - } - -+/** -+ * mptsas_get_enclosure_identifier - -+ * @rphy: -+ * @identifier: -+ * -+ **/ - static int - mptsas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) - { -@@ -2089,6 +2572,11 @@ mptsas_get_enclosure_identifier(struct s - return error; - } - -+/** -+ * mptsas_get_bay_identifier - -+ * @rphy: -+ * -+ **/ - static int - mptsas_get_bay_identifier(struct sas_rphy *rphy) - { -@@ -2176,10 +2664,14 @@ static int mptsas_smp_handler(struct Scs - (((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4)); - - /* request */ -- flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT | -- MPI_SGE_FLAGS_END_OF_BUFFER | -- MPI_SGE_FLAGS_DIRECTION) -- << MPI_SGE_FLAGS_SHIFT; -+ -+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT | -+ MPI_SGE_FLAGS_SYSTEM_ADDRESS | -+ MPI_SGE_FLAGS_HOST_TO_IOC | -+ MPI_SGE_FLAGS_END_OF_BUFFER; -+ -+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; -+ - flagsLength |= (blk_rq_bytes(req) - 4); - - dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), -@@ -2200,20 
+2692,27 @@ static int mptsas_smp_handler(struct Scs - dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), - blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); - if (!dma_addr_in) -- goto unmap; -+ goto out_unmap; -+ - ioc->add_sge(psge, flagsLength, dma_addr_in); - - INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status) - mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); - - timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ); -- if (!timeleft) { -- printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__); -- /* On timeout reset the board */ -- mpt_HardResetHandler(ioc, CAN_SLEEP); -- ret = -ETIMEDOUT; -- goto unmap; -+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -+ ret = -ETIME; -+ mpt_free_msg_frame(ioc, mf); -+ mf = NULL; -+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET) -+ goto out_unmap; -+ if (!timeleft) { -+ if (mpt_SoftResetHandler(ioc, CAN_SLEEP) != 0) -+ mpt_HardResetHandler(ioc, CAN_SLEEP); -+ } -+ goto out_unmap; - } -+ - mf = NULL; - - if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) { -@@ -2230,7 +2729,7 @@ static int mptsas_smp_handler(struct Scs - ioc->name, __func__); - ret = -ENXIO; - } --unmap: -+out_unmap: - if (dma_addr_out) - pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req), - PCI_DMA_BIDIRECTIONAL); -@@ -2247,6 +2746,7 @@ out: - return ret; - } - -+ - static struct sas_function_template mptsas_transport_functions = { - .get_linkerrors = mptsas_get_linkerrors, - .get_enclosure_identifier = mptsas_get_enclosure_identifier, -@@ -2257,6 +2757,12 @@ static struct sas_function_template mpts - - static struct scsi_transport_template *mptsas_transport_template; - -+/** -+ * mptsas_sas_io_unit_pg0 - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @port_info: -+ * -+ **/ - static int - mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) - { -@@ -2305,7 +2811,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, - - port_info->num_phys = buffer->NumPhys; - port_info->phy_info = 
kcalloc(port_info->num_phys, -- sizeof(struct mptsas_phyinfo), GFP_KERNEL); -+ sizeof(struct mptsas_phyinfo),GFP_KERNEL); - if (!port_info->phy_info) { - error = -ENOMEM; - goto out_free_consistent; -@@ -2335,6 +2841,11 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, - return error; - } - -+/** -+ * mptsas_sas_io_unit_pg1 - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * -+ **/ - static int - mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc) - { -@@ -2350,11 +2861,11 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc) - - cfg.cfghdr.ehdr = &hdr; - cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -- cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; - cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED; - cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; - cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION; - cfg.cfghdr.ehdr->PageNumber = 1; -+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; - - error = mpt_config(ioc, &cfg); - if (error) -@@ -2392,6 +2903,14 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc) - return error; - } - -+/** -+ * mptsas_sas_phy_pg0 - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @phy_info: -+ * @form: -+ * @form_specific: -+ * -+ **/ - static int - mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, - u32 form, u32 form_specific) -@@ -2412,12 +2931,12 @@ mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, str - - cfg.cfghdr.ehdr = &hdr; - cfg.dir = 0; /* read */ -- cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; - - /* Get Phy Pg 0 for each Phy. 
*/ - cfg.physAddr = -1; - cfg.pageAddr = form + form_specific; - cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; - - error = mpt_config(ioc, &cfg); - if (error) -@@ -2456,6 +2975,14 @@ mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, str - return error; - } - -+/** -+ * mptsas_sas_device_pg0 - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @device_info: -+ * @form: -+ * @form_specific: -+ * -+ **/ - static int - mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info, - u32 form, u32 form_specific) -@@ -2482,7 +3009,6 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, - cfg.dir = 0; /* read */ - cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; - -- memset(device_info, 0, sizeof(struct mptsas_devinfo)); - error = mpt_config(ioc, &cfg); - if (error) - goto out; -@@ -2502,6 +3028,12 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, - cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - - error = mpt_config(ioc, &cfg); -+ -+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) { -+ error = -ENODEV; -+ goto out_free_consistent; -+ } -+ - if (error) - goto out_free_consistent; - -@@ -2530,6 +3062,14 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, - return error; - } - -+/** -+ * mptsas_sas_expander_pg0 - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @port_info: -+ * @form: -+ * @form_specific: -+ * -+ **/ - static int - mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info, - u32 form, u32 form_specific) -@@ -2557,7 +3097,6 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc - cfg.dir = 0; /* read */ - cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; - -- memset(port_info, 0, sizeof(struct mptsas_portinfo)); - error = mpt_config(ioc, &cfg); - if (error) - goto out; -@@ -2578,18 +3117,18 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc - cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - - error = mpt_config(ioc, &cfg); -- if (error) -- goto out_free_consistent; -- -- if (!buffer->NumPhys) { -+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) { - error = 
-ENODEV; - goto out_free_consistent; - } - -+ if (error) -+ goto out_free_consistent; -+ - /* save config data */ - port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1; - port_info->phy_info = kcalloc(port_info->num_phys, -- sizeof(struct mptsas_phyinfo), GFP_KERNEL); -+ sizeof(struct mptsas_phyinfo),GFP_KERNEL); - if (!port_info->phy_info) { - error = -ENOMEM; - goto out_free_consistent; -@@ -2613,6 +3152,14 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc - return error; - } - -+/** -+ * mptsas_sas_expander_pg1 - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @phy_info: -+ * @form: -+ * @form_specific: -+ * -+ **/ - static int - mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, - u32 form, u32 form_specific) -@@ -2658,7 +3205,6 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc - cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - - error = mpt_config(ioc, &cfg); -- - if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) { - error = -ENODEV; - goto out; -@@ -2686,6 +3232,199 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc - return error; - } - -+struct rep_manu_request{ -+ u8 smp_frame_type; -+ u8 function; -+ u8 reserved; -+ u8 request_length; -+}; -+ -+struct rep_manu_reply{ -+ u8 smp_frame_type; /* 0x41 */ -+ u8 function; /* 0x01 */ -+ u8 function_result; -+ u8 response_length; -+ u16 expander_change_count; -+ u8 reserved0[2]; -+ u8 sas_format:1; -+ u8 reserved1:7; -+ u8 reserved2[3]; -+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; -+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; -+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; -+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; -+ u16 component_id; -+ u8 component_revision_id; -+ u8 reserved3; -+ u8 vendor_specific[8]; -+}; -+ -+/** -+ * mptsas_exp_repmanufacture_info - -+ * @ioc: per adapter object -+ * @sas_address: expander sas address -+ * @edev: the sas_expander_device object -+ * -+ * Fills in the sas_expander_device object when SMP port is created. 
-+ * -+ * Returns 0 for success, non-zero for failure. -+ */ -+static int -+mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc, -+ u64 sas_address, struct sas_expander_device *edev) -+{ -+ MPT_FRAME_HDR *mf; -+ SmpPassthroughRequest_t *smpreq; -+ SmpPassthroughReply_t *smprep; -+ struct rep_manu_reply *manufacture_reply; -+ struct rep_manu_request *manufacture_request; -+ int ret; -+ int flagsLength; -+ unsigned long timeleft; -+ char *psge; -+ unsigned long flags; -+ void *data_out = NULL; -+ dma_addr_t data_out_dma = 0; -+ u32 sz; -+ -+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -+ if (ioc->ioc_reset_in_progress) { -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -+ printk(MYIOC_s_INFO_FMT "%s: host reset in progress!\n", -+ __func__, ioc->name); -+ return -EFAULT; -+ } -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -+ -+ ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex); -+ if (ret) -+ goto out; -+ -+ mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc); -+ if (!mf) { -+ ret = -ENOMEM; -+ goto out_unlock; -+ } -+ -+ smpreq = (SmpPassthroughRequest_t *)mf; -+ memset(smpreq, 0, sizeof(*smpreq)); -+ -+ sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply); -+ -+ data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma); -+ if (!data_out) { -+ printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n", -+ __FILE__, __LINE__, __func__); -+ ret = -ENOMEM; -+ goto put_mf; -+ } -+ -+ manufacture_request = data_out; -+ manufacture_request->smp_frame_type = 0x40; -+ manufacture_request->function = 1; -+ manufacture_request->reserved = 0; -+ manufacture_request->request_length = 0; -+ -+ smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; -+ smpreq->PhysicalPort = 0xFF; -+ *((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address); -+ smpreq->RequestDataLength = sizeof(struct rep_manu_request); -+ -+ psge = (char *) -+ (((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4)); -+ -+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT | -+ 
MPI_SGE_FLAGS_SYSTEM_ADDRESS | -+ MPI_SGE_FLAGS_HOST_TO_IOC | -+ MPI_SGE_FLAGS_END_OF_BUFFER; -+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; -+ flagsLength |= sizeof(struct rep_manu_request); -+ -+ ioc->add_sge(psge, flagsLength, data_out_dma); -+ psge += ioc->SGE_size; -+ -+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT | -+ MPI_SGE_FLAGS_SYSTEM_ADDRESS | -+ MPI_SGE_FLAGS_IOC_TO_HOST | -+ MPI_SGE_FLAGS_END_OF_BUFFER; -+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; -+ flagsLength |= sizeof(struct rep_manu_reply); -+ ioc->add_sge(psge, flagsLength, data_out_dma + -+ sizeof(struct rep_manu_request)); -+ -+ INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status) -+ mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); -+ -+ timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ); -+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -+ ret = -ETIME; -+ mpt_free_msg_frame(ioc, mf); -+ mf = NULL; -+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET) -+ goto out_free; -+ if (!timeleft) { -+ if (mpt_SoftResetHandler(ioc, CAN_SLEEP) != 0) -+ mpt_HardResetHandler(ioc, CAN_SLEEP); -+ } -+ goto out_free; -+ } -+ -+ mf = NULL; -+ -+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) { -+ u8 *tmp; -+ -+ smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; -+ if (le16_to_cpu(smprep->ResponseDataLength) != -+ sizeof(struct rep_manu_reply)) -+ goto out_free; -+ -+ manufacture_reply = data_out + sizeof(struct rep_manu_request); -+ strncpy(edev->vendor_id, manufacture_reply->vendor_id, -+ SAS_EXPANDER_VENDOR_ID_LEN); -+ strncpy(edev->product_id, manufacture_reply->product_id, -+ SAS_EXPANDER_PRODUCT_ID_LEN); -+ strncpy(edev->product_rev, manufacture_reply->product_rev, -+ SAS_EXPANDER_PRODUCT_REV_LEN); -+ edev->level = manufacture_reply->sas_format; -+ if (manufacture_reply->sas_format) { -+ strncpy(edev->component_vendor_id, -+ manufacture_reply->component_vendor_id, -+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); -+ tmp = (u8 *)&manufacture_reply->component_id; -+ 
edev->component_id = tmp[0] << 8 | tmp[1]; -+ edev->component_revision_id = -+ manufacture_reply->component_revision_id; -+ } -+ -+ } else { -+ printk(MYIOC_s_ERR_FMT -+ "%s: smp passthru reply failed to be returned\n", -+ ioc->name, __func__); -+ ret = -ENXIO; -+ } -+ -+out_free: -+ if (data_out_dma) -+ pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma); -+put_mf: -+ if (mf) -+ mpt_free_msg_frame(ioc, mf); -+out_unlock: -+ CLEAR_MGMT_STATUS(ioc->sas_mgmt.status) -+ mutex_unlock(&ioc->sas_mgmt.mutex); -+out: -+ return ret; -+ -+} -+ -+ -+/** -+ * mptsas_parse_device_info - -+ * @identify: -+ * @device_info: -+ * -+ **/ - static void - mptsas_parse_device_info(struct sas_identify *identify, - struct mptsas_devinfo *device_info) -@@ -2745,6 +3484,13 @@ mptsas_parse_device_info(struct sas_iden - } - } - -+/** -+ * mptsas_probe_one_phy - -+ * @dev: -+ * @phy_info: -+ * @local: -+ * -+ **/ - static int mptsas_probe_one_phy(struct device *dev, - struct mptsas_phyinfo *phy_info, int index, int local) - { -@@ -2868,7 +3614,6 @@ static int mptsas_probe_one_phy(struct d - ioc = phy_to_ioc(phy_info->phy); - - if (phy_info->sas_port_add_phy) { -- - if (!port) { - port = sas_port_alloc_num(dev); - if (!port) { -@@ -2886,20 +3631,18 @@ static int mptsas_probe_one_phy(struct d - devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev, - MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n", - ioc->name, port->port_identifier, -- (unsigned long long)phy_info-> -- attached.sas_address)); -+ (unsigned long long)phy_info->attached.sas_address)); - } -- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "sas_port_add_phy: phy_id=%d\n", -- ioc->name, phy_info->phy_id)); -+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_port_add_phy: phy_id=%d\n", -+ ioc->name, phy_info->phy_id)); - sas_port_add_phy(port, phy_info->phy); - phy_info->sas_port_add_phy = 0; - devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev, - MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name, - 
phy_info->phy_id, phy_info->phy)); - } -- if (!mptsas_get_rphy(phy_info) && port && !port->rphy) { - -+ if (!mptsas_get_rphy(phy_info) && port && !port->rphy) { - struct sas_rphy *rphy; - struct device *parent; - struct sas_identify identify; -@@ -2967,12 +3710,23 @@ static int mptsas_probe_one_phy(struct d - goto out; - } - mptsas_set_rphy(ioc, phy_info, rphy); -+ if (identify.device_type == SAS_EDGE_EXPANDER_DEVICE || -+ identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) -+ mptsas_exp_repmanufacture_info(ioc, -+ identify.sas_address, -+ rphy_to_expander_device(rphy)); - } - - out: - return error; - } - -+/** -+ * mptsas_probe_hba_phys - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @handle: -+ * -+ **/ - static int - mptsas_probe_hba_phys(MPT_ADAPTER *ioc) - { -@@ -3403,13 +4157,12 @@ mptsas_send_link_status_event(struct fw_ - if (!port_info) { - if (ioc->old_sas_discovery_protocal) { - port_info = mptsas_expander_add(ioc, -- le16_to_cpu(link_data->DevHandle)); -+ le16_to_cpu(link_data->DevHandle)); - if (port_info) - goto out; - } - goto out; - } -- - if (port_info == ioc->hba_port_info) - mptsas_probe_hba_phys(ioc); - else -@@ -3434,7 +4187,7 @@ static void - mptsas_not_responding_devices(MPT_ADAPTER *ioc) - { - struct mptsas_portinfo buffer, *port_info; -- struct mptsas_device_info *sas_info; -+ struct sas_device_info *sas_info; - struct mptsas_devinfo sas_device; - u32 handle; - VirtTarget *vtarget = NULL; -@@ -3443,6 +4196,9 @@ mptsas_not_responding_devices(MPT_ADAPTE - int retval, retry_count; - unsigned long flags; - -+ if (ioc->disable_hotplug_remove) -+ return; -+ - mpt_findImVolumes(ioc); - - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -@@ -3456,74 +4212,66 @@ mptsas_not_responding_devices(MPT_ADAPTE - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); - - /* devices, logical volumes */ -- mutex_lock(&ioc->sas_device_info_mutex); - redo_device_scan: - list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) { - if (sas_info->is_cached) - 
continue; - if (!sas_info->is_logical_volume) { -- sas_device.handle = 0; -- retry_count = 0; -+ sas_device.handle = 0; -+ retry_count = 0; - retry_page: -- retval = mptsas_sas_device_pg0(ioc, &sas_device, -+ retval = mptsas_sas_device_pg0(ioc, &sas_device, - (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID - << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), - (sas_info->fw.channel << 8) + - sas_info->fw.id); - -- if (sas_device.handle) -- continue; -- if (retval == -EBUSY) { -- spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -- if (ioc->ioc_reset_in_progress) { -- dfailprintk(ioc, -- printk(MYIOC_s_DEBUG_FMT -- "%s: exiting due to reset\n", -- ioc->name, __func__)); -- spin_unlock_irqrestore -- (&ioc->taskmgmt_lock, flags); -- mutex_unlock(&ioc-> -- sas_device_info_mutex); -- return; -- } -- spin_unlock_irqrestore(&ioc->taskmgmt_lock, -- flags); -+ if (sas_device.handle) -+ continue; -+ if (retval == -EBUSY) { -+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags); -+ if (ioc->ioc_reset_in_progress) { -+ dfailprintk(ioc, -+ printk(MYIOC_s_DEBUG_FMT -+ "%s: exiting due to reset\n", -+ ioc->name, __func__)); -+ spin_unlock_irqrestore -+ (&ioc->taskmgmt_lock, flags); -+ return; - } -+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, -+ flags); -+ } - -- if (retval && (retval != -ENODEV)) { -- if (retry_count < 10) { -- retry_count++; -- goto retry_page; -- } else { -- devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "%s: Config page retry exceeded retry " -- "count deleting device 0x%llx\n", -- ioc->name, __func__, -- sas_info->sas_address)); -- } -+ if (retval && (retval != -ENODEV)) { -+ if (retry_count < 10) { -+ retry_count++; -+ goto retry_page; -+ } else { -+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "%s: Config page retry exceeded retry " -+ "count deleting device 0x%llx\n", -+ ioc->name, __func__, -+ sas_info->sas_address)); - } -+ } - -- /* delete device */ -- vtarget = mptsas_find_vtarget(ioc, -+ /* delete device */ -+ vtarget = mptsas_find_vtarget(ioc, - sas_info->fw.channel, sas_info->fw.id); 
-- -- if (vtarget) -- vtarget->deleted = 1; -- -- phy_info = mptsas_find_phyinfo_by_sas_address(ioc, -- sas_info->sas_address); -- -- if (phy_info) { -- mptsas_del_end_device(ioc, phy_info); -- goto redo_device_scan; -- } -+ if (vtarget) -+ vtarget->deleted = 1; -+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc, -+ sas_info->sas_address); -+ if (phy_info) { -+ mptsas_del_end_device(ioc, phy_info); -+ goto redo_device_scan; -+ } - } else - mptsas_volume_delete(ioc, sas_info->fw.id); - } -- mutex_unlock(&ioc->sas_device_info_mutex); - - /* expanders */ -- mutex_lock(&ioc->sas_topology_mutex); - redo_expander_scan: - list_for_each_entry(port_info, &ioc->sas_topology, list) { - -@@ -3551,7 +4299,6 @@ retry_page: - goto redo_expander_scan; - } - } -- mutex_unlock(&ioc->sas_topology_mutex); - } - - /** -@@ -3563,7 +4310,7 @@ static void - mptsas_probe_expanders(MPT_ADAPTER *ioc) - { - struct mptsas_portinfo buffer, *port_info; -- u32 handle; -+ u32 handle; - int i; - - handle = 0xFFFF; -@@ -3611,9 +4358,11 @@ mptsas_probe_expanders(MPT_ADAPTER *ioc) - static void - mptsas_probe_devices(MPT_ADAPTER *ioc) - { -+ u16 retry_count; - u16 handle; - struct mptsas_devinfo sas_device; - struct mptsas_phyinfo *phy_info; -+ enum device_state state; - - handle = 0xFFFF; - while (!(mptsas_sas_device_pg0(ioc, &sas_device, -@@ -3634,7 +4383,17 @@ mptsas_probe_devices(MPT_ADAPTER *ioc) - if (mptsas_get_rphy(phy_info)) - continue; - -- mptsas_add_end_device(ioc, phy_info); -+ state = DEVICE_RETRY; -+ retry_count = 0; -+ while(state == DEVICE_RETRY) { -+ state = mptsas_test_unit_ready(ioc, sas_device.channel, -+ sas_device.id, retry_count++); -+ ssleep(1); -+ } -+ if (state == DEVICE_READY) -+ mptsas_add_end_device(ioc, phy_info); -+ else -+ memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); - } - } - -@@ -3676,13 +4435,14 @@ mptsas_scan_sas_topology(MPT_ADAPTER *io - } - - -+ - static void - mptsas_handle_queue_full_event(struct fw_event_work *fw_event) - { - MPT_ADAPTER 
*ioc; - EventDataQueueFull_t *qfull_data; -- struct mptsas_device_info *sas_info; -- struct scsi_device *sdev; -+ struct sas_device_info *sas_info; -+ struct scsi_device *sdev; - int depth; - int id = -1; - int channel = -1; -@@ -3697,7 +4457,7 @@ mptsas_handle_queue_full_event(struct fw - current_depth = le16_to_cpu(qfull_data->CurrentDepth); - - /* if hidden raid component, look for the volume id */ -- mutex_lock(&ioc->sas_device_info_mutex); -+ down(&ioc->sas_device_info_mutex); - if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) { - list_for_each_entry(sas_info, &ioc->sas_device_info_list, - list) { -@@ -3730,7 +4490,7 @@ mptsas_handle_queue_full_event(struct fw - } - - out: -- mutex_unlock(&ioc->sas_device_info_mutex); -+ up(&ioc->sas_device_info_mutex); - - if (id != -1) { - shost_for_each_device(sdev, ioc->sh) { -@@ -3764,6 +4524,12 @@ mptsas_handle_queue_full_event(struct fw - } - - -+/** -+ * mptsas_find_phyinfo_by_sas_address - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @sas_address: -+ * -+ **/ - static struct mptsas_phyinfo * - mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) - { -@@ -3788,6 +4554,7 @@ mptsas_find_phyinfo_by_sas_address(MPT_A - return phy_info; - } - -+ - /** - * mptsas_find_phyinfo_by_phys_disk_num - - * @ioc: Pointer to MPT_ADAPTER structure -@@ -3814,7 +4581,7 @@ mptsas_find_phyinfo_by_phys_disk_num(MPT - num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num); - if (!num_paths) - goto out; -- phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) + -+ phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t,Path) + - (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); - if (!phys_disk) - goto out; -@@ -3827,8 +4594,7 @@ mptsas_find_phyinfo_by_phys_disk_num(MPT - (channel == phys_disk->Path[i].PhysDiskBus)) { - memcpy(&sas_address, &phys_disk->Path[i].WWID, - sizeof(u64)); -- phy_info = mptsas_find_phyinfo_by_sas_address(ioc, -- sas_address); -+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc, 
sas_address); - goto out; - } - } -@@ -3850,11 +4616,9 @@ mptsas_find_phyinfo_by_phys_disk_num(MPT - continue; - if (port_info->phy_info[i].attached.phys_disk_num == ~0) - continue; -- if ((port_info->phy_info[i].attached.phys_disk_num == -- phys_disk_num) && -- (port_info->phy_info[i].attached.id == id) && -- (port_info->phy_info[i].attached.channel == -- channel)) -+ if (port_info->phy_info[i].attached.phys_disk_num == phys_disk_num && -+ port_info->phy_info[i].attached.id == id && -+ port_info->phy_info[i].attached.channel == channel) - phy_info = &port_info->phy_info[i]; - } - } -@@ -3862,6 +4626,12 @@ mptsas_find_phyinfo_by_phys_disk_num(MPT - return phy_info; - } - -+/** -+ * mptsas_reprobe_lun - -+ * @sdev: -+ * @data: -+ * -+ **/ - static void - mptsas_reprobe_lun(struct scsi_device *sdev, void *data) - { -@@ -3871,6 +4641,12 @@ mptsas_reprobe_lun(struct scsi_device *s - rc = scsi_device_reprobe(sdev); - } - -+/** -+ * mptsas_reprobe_target - -+ * @starget: -+ * @uld_attach: -+ * -+ **/ - static void - mptsas_reprobe_target(struct scsi_target *starget, int uld_attach) - { -@@ -3878,6 +4654,15 @@ mptsas_reprobe_target(struct scsi_target - mptsas_reprobe_lun); - } - -+/** -+ * mptsas_adding_inactive_raid_components - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @channel: -+ * @id: -+ * -+ * -+ * TODO: check for hotspares -+ **/ - static void - mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id) - { -@@ -3885,7 +4670,7 @@ mptsas_adding_inactive_raid_components(M - ConfigPageHeader_t hdr; - dma_addr_t dma_handle; - pRaidVolumePage0_t buffer = NULL; -- RaidPhysDiskPage0_t phys_disk; -+ RaidPhysDiskPage0_t phys_disk; - int i; - struct mptsas_phyinfo *phy_info; - struct mptsas_devinfo sas_device; -@@ -3896,6 +4681,7 @@ mptsas_adding_inactive_raid_components(M - cfg.pageAddr = (channel << 8) + id; - cfg.cfghdr.hdr = &hdr; - cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; -+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; - - if (mpt_config(ioc, &cfg) 
!= 0) - goto out; -@@ -3956,6 +4742,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, st - struct scsi_target * starget; - struct mptsas_devinfo sas_device; - VirtTarget *vtarget; -+ enum device_state state; - int i; - - switch (hot_plug_info->event_type) { -@@ -3995,13 +4782,27 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, st - if (mptsas_get_rphy(phy_info)) - break; - -- mptsas_add_end_device(ioc, phy_info); -+ state = mptsas_test_unit_ready(ioc, phy_info->attached.channel, -+ phy_info->attached.id, fw_event->retries); -+ -+ if (state == DEVICE_RETRY && !ioc->fw_events_off) { -+ mptsas_requeue_fw_event(ioc, fw_event, 1000); -+ return; -+ } -+ -+ if (state == DEVICE_READY) -+ mptsas_add_end_device(ioc, phy_info); -+ else -+ memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); - break; - - case MPTSAS_DEL_DEVICE: -- phy_info = mptsas_find_phyinfo_by_sas_address(ioc, -- hot_plug_info->sas_address); -- mptsas_del_end_device(ioc, phy_info); -+ -+ if (!ioc->disable_hotplug_remove) { -+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc, -+ hot_plug_info->sas_address); -+ mptsas_del_end_device(ioc, phy_info); -+ } - break; - - case MPTSAS_DEL_PHYSDISK: -@@ -4009,9 +4810,8 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, st - mpt_findImVolumes(ioc); - - phy_info = mptsas_find_phyinfo_by_phys_disk_num( -- ioc, hot_plug_info->phys_disk_num, -- hot_plug_info->channel, -- hot_plug_info->id); -+ ioc, hot_plug_info->phys_disk_num, hot_plug_info->channel, -+ hot_plug_info->id); - mptsas_del_end_device(ioc, phy_info); - break; - -@@ -4162,6 +4962,14 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, st - mptsas_free_fw_event(ioc, fw_event); - } - -+/** -+ * mptsas_send_sas_event -+ * -+ * -+ * @ioc -+ * @sas_event_data -+ * -+ **/ - static void - mptsas_send_sas_event(struct fw_event_work *fw_event) - { -@@ -4228,6 +5036,15 @@ mptsas_send_sas_event(struct fw_event_wo - } - } - -+ -+/** -+ * mptsas_send_raid_event -+ * -+ * -+ * @ioc -+ * @raid_event_data -+ * -+ **/ - static void - 
mptsas_send_raid_event(struct fw_event_work *fw_event) - { -@@ -4347,19 +5164,19 @@ mptsas_send_raid_event(struct fw_event_w - /** - * mptsas_issue_tm - send mptsas internal tm request - * @ioc: Pointer to MPT_ADAPTER structure -- * @type: Task Management type -- * @channel: channel number for task management -- * @id: Logical Target ID for reset (if appropriate) -- * @lun: Logical unit for reset (if appropriate) -- * @task_context: Context for the task to be aborted -- * @timeout: timeout for task management control -+ * @type -+ * @channel -+ * @id -+ * @lun -+ * @task_context -+ * @timeout - * -- * return 0 on success and -1 on failure: -+ * return: - * -- */ -+ **/ - static int --mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun, -- int task_context, ulong timeout, u8 *issue_reset) -+mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun, int task_context, ulong timeout, -+ u8 *issue_reset) - { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; -@@ -4367,8 +5184,7 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 typ - unsigned long timeleft; - - *issue_reset = 0; -- mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc); -- if (mf == NULL) { -+ if ((mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc)) == NULL) { - retval = -1; /* return failure */ - dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no " - "msg frames!!\n", ioc->name)); -@@ -4426,20 +5242,20 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 typ - } - - /** -- * mptsas_broadcast_primative_work - Handle broadcast primitives -+ * mptsas_broadcast_primative_work - Work queue thread to handle -+ * broadcast primitive events - * @work: work queue payload containing info describing the event - * -- * this will be handled in workqueue context. 
-- */ -+ **/ - static void - mptsas_broadcast_primative_work(struct fw_event_work *fw_event) - { - MPT_ADAPTER *ioc = fw_event->ioc; -- MPT_FRAME_HDR *mf; -- VirtDevice *vdevice; -+ MPT_FRAME_HDR *mf; -+ VirtDevice *vdevice; - int ii; - struct scsi_cmnd *sc; -- SCSITaskMgmtReply_t *pScsiTmReply; -+ SCSITaskMgmtReply_t * pScsiTmReply; - u8 issue_reset; - int task_context; - u8 channel, id; -@@ -4448,7 +5264,7 @@ mptsas_broadcast_primative_work(struct f - u32 query_count; - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "%s - enter\n", ioc->name, __func__)); -+ "%s - enter\n", ioc->name, __FUNCTION__)); - - mutex_lock(&ioc->taskmgmt_cmds.mutex); - if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { -@@ -4506,7 +5322,7 @@ mptsas_broadcast_primative_work(struct f - out: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s - exit, query_count = %d termination_count = %d\n", -- ioc->name, __func__, query_count, termination_count)); -+ ioc->name, __FUNCTION__, query_count, termination_count)); - - ioc->broadcast_aen_busy = 0; - mpt_clear_taskmgmt_in_progress_flag(ioc); -@@ -4514,20 +5330,19 @@ mptsas_broadcast_primative_work(struct f - - if (issue_reset) { - printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", -- ioc->name, __func__); -- mpt_HardResetHandler(ioc, CAN_SLEEP); -+ ioc->name, __FUNCTION__); -+ if (mpt_SoftResetHandler(ioc, CAN_SLEEP)) -+ mpt_HardResetHandler(ioc, CAN_SLEEP); - } - mptsas_free_fw_event(ioc, fw_event); - } - --/* -- * mptsas_send_ir2_event - handle exposing hidden disk when -- * an inactive raid volume is added -- * -- * @ioc: Pointer to MPT_ADAPTER structure -- * @ir2_data -+/** -+ * mptsas_send_ir2_event - handle exposing hidden disk when an inactive raid volume is added -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @ir2_data: - * -- */ -+ **/ - static void - mptsas_send_ir2_event(struct fw_event_work *fw_event) - { -@@ -4569,6 +5384,13 @@ mptsas_send_ir2_event(struct fw_event_wo - mptsas_hotplug_work(ioc, fw_event, &hot_plug_info); - } - -+ -+/** 
-+ * mptsas_event_process - -+ * @ioc: Pointer to MPT_ADAPTER structure -+ * @reply: -+ * -+ **/ - static int - mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) - { -@@ -4577,6 +5399,9 @@ mptsas_event_process(MPT_ADAPTER *ioc, E - struct fw_event_work *fw_event; - unsigned long delay; - -+ if (ioc->bus_type != SAS) -+ return 0; -+ - /* events turned off due to host reset or driver unloading */ - if (ioc->fw_events_off) - return 0; -@@ -4659,6 +5484,7 @@ mptsas_event_process(MPT_ADAPTER *ioc, E - return 0; - } - -+ - /* Delete a volume when no longer listed in ioc pg2 - */ - static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id) -@@ -4678,18 +5504,24 @@ static void mptsas_volume_delete(MPT_ADA - goto release_sdev; - out: - printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, " -- "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id); -+ "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,id); - scsi_remove_device(sdev); - release_sdev: - scsi_device_put(sdev); - } - -+/** -+ * mptsas_probe - -+ * @pdev: -+ * @id: -+ * -+ **/ - static int - mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) - { - struct Scsi_Host *sh; - MPT_SCSI_HOST *hd; -- MPT_ADAPTER *ioc; -+ MPT_ADAPTER *ioc; - unsigned long flags; - int ii; - int numSGE = 0; -@@ -4707,7 +5539,7 @@ mptsas_probe(struct pci_dev *pdev, const - ioc->DoneCtx = mptsasDoneCtx; - ioc->TaskCtx = mptsasTaskCtx; - ioc->InternalCtx = mptsasInternalCtx; -- -+ ioc->schedule_target_reset = &mptsas_schedule_target_reset; - /* Added sanity check on readiness of the MPT adapter. 
- */ - if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) { -@@ -4748,7 +5580,7 @@ mptsas_probe(struct pci_dev *pdev, const - ioc->name); - error = -1; - goto out_mptsas_probe; -- } -+ } - - spin_lock_irqsave(&ioc->FreeQlock, flags); - -@@ -4773,10 +5605,10 @@ mptsas_probe(struct pci_dev *pdev, const - - INIT_LIST_HEAD(&ioc->sas_topology); - mutex_init(&ioc->sas_topology_mutex); -- mutex_init(&ioc->sas_discovery_mutex); - mutex_init(&ioc->sas_mgmt.mutex); - init_completion(&ioc->sas_mgmt.done); - -+ - /* Verify that we won't exceed the maximum - * number of chain buffers - * We can optimize: ZZ = req_sz/sizeof(SGE) -@@ -4786,6 +5618,7 @@ mptsas_probe(struct pci_dev *pdev, const - * A slightly different algorithm is required for - * 64bit SGEs. - */ -+ - scale = ioc->req_sz/ioc->SGE_size; - if (ioc->sg_addr_size == sizeof(u64)) { - numSGE = (scale - 1) * -@@ -4822,12 +5655,16 @@ mptsas_probe(struct pci_dev *pdev, const - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n", - ioc->name, ioc->ScsiLookup)); - -+ ioc->sdev_queue_depth = mpt_sdev_queue_depth; - ioc->sas_data.ptClear = mpt_pt_clear; -- - hd->last_queue_full = 0; -+ ioc->disable_hotplug_remove = mpt_disable_hotplug_remove; -+ if (ioc->disable_hotplug_remove) -+ printk(MYIOC_s_INFO_FMT "disabling hotplug remove\n", ioc->name); -+ - INIT_LIST_HEAD(&hd->target_reset_list); - INIT_LIST_HEAD(&ioc->sas_device_info_list); -- mutex_init(&ioc->sas_device_info_mutex); -+ init_MUTEX(&ioc->sas_device_info_mutex); - - spin_unlock_irqrestore(&ioc->FreeQlock, flags); - -@@ -4865,7 +5702,14 @@ mptsas_shutdown(struct pci_dev *pdev) - mptsas_cleanup_fw_event_q(ioc); - } - --static void __devexit mptsas_remove(struct pci_dev *pdev) -+ -+/** -+ * mptsas_remove - -+ * @pdev: -+ * -+ **/ -+static void __devexit -+mptsas_remove(struct pci_dev *pdev) - { - MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - struct mptsas_portinfo *p, *n; -@@ -4875,7 +5719,6 @@ static void __devexit mptsas_remove(stru - - 
mptsas_del_device_components(ioc); - -- ioc->sas_discovery_ignore_events = 1; - sas_remove_host(ioc->sh); - - mutex_lock(&ioc->sas_topology_mutex); -@@ -4920,6 +5763,10 @@ static struct pci_driver mptsas_driver = - #endif - }; - -+/** -+ * mptsas_init - -+ * -+ **/ - static int __init - mptsas_init(void) - { -@@ -4950,6 +5797,10 @@ mptsas_init(void) - return error; - } - -+/** -+ * mptsas_exit - -+ * -+ **/ - static void __exit - mptsas_exit(void) - { ---- a/drivers/message/fusion/mptsas.h -+++ b/drivers/message/fusion/mptsas.h -@@ -50,8 +50,8 @@ - /*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - - struct mptsas_target_reset_event { -- struct list_head list; -- EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data; -+ struct list_head list; -+ MpiEventDataSasDeviceStatusChange_t sas_event_data; - u8 target_reset_issued; - unsigned long time_count; - }; -@@ -61,34 +61,32 @@ enum mptsas_hotplug_action { - MPTSAS_DEL_DEVICE, - MPTSAS_ADD_RAID, - MPTSAS_DEL_RAID, -+ MPTSAS_ADD_INACTIVE_VOLUME, - MPTSAS_ADD_PHYSDISK, - MPTSAS_ADD_PHYSDISK_REPROBE, - MPTSAS_DEL_PHYSDISK, - MPTSAS_DEL_PHYSDISK_REPROBE, -- MPTSAS_ADD_INACTIVE_VOLUME, -+ MPTSAS_REQUEUE_EVENT, - MPTSAS_IGNORE_EVENT, - }; - --struct mptsas_mapping{ -+struct sas_mapping{ - u8 id; - u8 channel; - }; - --struct mptsas_device_info { -- struct list_head list; -- struct mptsas_mapping os; /* operating system mapping*/ -- struct mptsas_mapping fw; /* firmware mapping */ -+struct sas_device_info { -+ struct list_head list; -+ struct sas_mapping os; /* operating system mapping*/ -+ struct sas_mapping fw; /* firmware mapping */ - u64 sas_address; - u32 device_info; /* specific bits for devices */ - u16 slot; /* enclosure slot id */ - u64 enclosure_logical_id; /*enclosure address */ - u8 is_logical_volume; /* is this logical volume */ -- /* this belongs to volume */ -- u8 is_hidden_raid_component; -- /* this valid when is_hidden_raid_component set */ -- u8 volume_id; -- /* cached data for a 
removed device */ -- u8 is_cached; -+ u8 is_hidden_raid_component; /* this belongs to volume */ -+ u8 volume_id; /* this valid when is_hidden_raid_component set */ -+ u8 is_cached; /* cached data for a removed device */ - }; - - struct mptsas_hotplug_event { -@@ -104,19 +102,23 @@ struct mptsas_hotplug_event { - struct scsi_device *sdev; - }; - -+ - struct fw_event_work { -- struct list_head list; -+ struct list_head list; - struct delayed_work work; -- MPT_ADAPTER *ioc; -+ MPT_ADAPTER *ioc; - u32 event; - u8 retries; - u8 event_data[1]; - }; - --struct mptsas_discovery_event { -+#if 0 -+struct mptsas_link_status_event { - struct work_struct work; -+ MpiEventDataSasPhyLinkStatus_t link_data; - MPT_ADAPTER *ioc; - }; -+#endif - - /* - * SAS topology structures -@@ -146,20 +148,20 @@ struct mptsas_devinfo { - * Specific details on ports, wide/narrow - */ - struct mptsas_portinfo_details{ -- u16 num_phys; /* number of phys belong to this port */ -- u64 phy_bitmask; /* TODO, extend support for 255 phys */ -- struct sas_rphy *rphy; /* transport layer rphy object */ -+ u16 num_phys; /* number of phys beloing to this port */ -+ u64 phy_bitmask; /* this needs extending to support 128 phys */ -+ struct sas_rphy *rphy; /* rphy for end devices */ - struct sas_port *port; /* transport layer port object */ - struct scsi_target *starget; - struct mptsas_portinfo *port_info; - }; - - struct mptsas_phyinfo { -- u16 handle; /* unique id to address this */ -- u8 phy_id; /* phy index */ -- u8 port_id; /* firmware port identifier */ -+ u16 handle; /* handle for this phy */ -+ u8 phy_id; /* phy index */ -+ u8 port_id; /* port number this phy is part of */ - u8 negotiated_link_rate; /* nego'd link rate for this phy */ -- u8 hw_link_rate; /* hardware max/min phys link rate */ -+ u8 hw_link_rate; /* hardware max/min phys link rate */ - u8 programmed_link_rate; /* programmed max/min phy link rate */ - u8 sas_port_add_phy; /* flag to request sas_port_add_phy*/ - struct mptsas_devinfo 
identify; /* point to phy device info */ -@@ -171,7 +173,7 @@ struct mptsas_phyinfo { - - struct mptsas_portinfo { - struct list_head list; -- u16 num_phys; /* number of phys */ -+ u16 num_phys; /* number of phys */ - struct mptsas_phyinfo *phy_info; - }; - -@@ -186,6 +188,12 @@ struct mptsas_enclosure { - u8 sep_id; /* SEP device logical target id */ - u8 sep_channel; /* SEP channel logical channel id */ - }; -- -+#if 0 -+struct mptsas_broadcast_primative_event { -+ struct delayed_work aen_work; -+ MPT_ADAPTER *ioc; -+}; -+#endif - /*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - #endif -+ ---- a/drivers/message/fusion/mptscsih.c -+++ b/drivers/message/fusion/mptscsih.c -@@ -53,7 +53,9 @@ - #include /* for mdelay */ - #include /* needed for in_interrupt() proto */ - #include /* notifier code */ -+#include - #include -+#include - - #include - #include -@@ -77,10 +79,15 @@ MODULE_LICENSE("GPL"); - MODULE_VERSION(my_VERSION); - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+typedef struct _BIG_SENSE_BUF { -+ u8 data[MPT_SENSE_BUFFER_ALLOC]; -+} BIG_SENSE_BUF; -+ -+ - /* - * Other private/forward protos... 
- */ --struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); -+struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); - static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i); - static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd); - static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd); -@@ -93,32 +100,20 @@ static int mptscsih_AddSGE(MPT_ADAPTER * - static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx); - static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); - --int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, -- int lun, int ctx2abort, ulong timeout); -- - int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); - int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); -+static void mptscsih_synchronize_cache(struct scsi_device *sdev, MPT_SCSI_HOST *hd, VirtDevice *vdevice); - --void --mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code); --static int mptscsih_get_completion_code(MPT_ADAPTER *ioc, -- MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); --int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); --static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); --static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice); -- --static int --mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type, -- SCSITaskMgmtReply_t *pScsiTmReply); --void mptscsih_remove(struct pci_dev *); --void mptscsih_shutdown(struct pci_dev *); -+void mptscsih_remove(struct pci_dev *); -+void mptscsih_shutdown(struct pci_dev *); - #ifdef CONFIG_PM --int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state); --int mptscsih_resume(struct pci_dev *pdev); -+int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state); -+int mptscsih_resume(struct pci_dev *pdev); - #endif -- 
--#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE -- -+static int mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type, -+ SCSITaskMgmtReply_t *pScsiTmReply); -+static int mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, -+ MPT_FRAME_HDR *reply); - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /* -@@ -178,7 +173,7 @@ static int - mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt, - SCSIIORequest_t *pReq, int req_idx) - { -- char *psge; -+ char *psge; - char *chainSge; - struct scatterlist *sg; - int frm_sz; -@@ -193,16 +188,29 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct - dma_addr_t v2; - u32 RequestNB; - -- sgdir = le32_to_cpu(pReq->Control) & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; -+#ifdef EEDP_SUPPORT -+ if (pReq->Function == MPI_FUNCTION_SCSI_IO_32) { -+ SCSIIO32Request_t *mpi_request = (SCSIIO32Request_t *)pReq; -+ -+ sgdir = le32_to_cpu(mpi_request->Control) -+ & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; -+ psge = (char *) &mpi_request->SGL; -+ } else { -+#endif -+ sgdir = le32_to_cpu(pReq->Control) & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; -+ psge = (char *) &pReq->SGL; -+#ifdef EEDP_SUPPORT -+ } -+#endif - if (sgdir == MPI_SCSIIO_CONTROL_WRITE) { - sgdir = MPT_TRANSFER_HOST_TO_IOC; - } else { - sgdir = MPT_TRANSFER_IOC_TO_HOST; - } - -- psge = (char *) &pReq->SGL; - frm_sz = ioc->req_sz; - -+ - /* Map the data portion, if any. - * sges_left = 0 if no data transfer. 
- */ -@@ -214,7 +222,7 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct - */ - sg = scsi_sglist(SCpnt); - sg_done = 0; -- sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION); -+ sgeOffset = psge - (char *) pReq; - chainSge = NULL; - - /* Prior to entering this loop - the following must be set -@@ -237,7 +245,7 @@ nextSGEset: - thisxfer = sg_dma_len(sg); - if (thisxfer == 0) { - /* Get next SG element from the OS */ -- sg = sg_next(sg); -+ sg = mpt_sg_next(sg); - sg_done++; - continue; - } -@@ -246,7 +254,7 @@ nextSGEset: - ioc->add_sge(psge, sgflags | thisxfer, v2); - - /* Get next SG element from the OS */ -- sg = sg_next(sg); -+ sg = mpt_sg_next(sg); - psge += ioc->SGE_size; - sgeOffset += ioc->SGE_size; - sg_done++; -@@ -346,7 +354,7 @@ nextSGEset: - if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED) { - dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "getFreeChainBuffer FAILED SCSI cmd=%02x (%p)\n", -- ioc->name, pReq->CDB[0], SCpnt)); -+ ioc->name, pReq->CDB[0], SCpnt)); - return FAILED; - } - -@@ -392,7 +400,7 @@ mptscsih_issue_sep_command(MPT_ADAPTER * - U32 SlotStatus) - { - MPT_FRAME_HDR *mf; -- SEPRequest_t *SEPMsg; -+ SEPRequest_t *SEPMsg; - - if (ioc->bus_type != SAS) - return; -@@ -598,14 +606,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F - req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); - req_idx_MR = (mr != NULL) ? - le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx; -- -- /* Special case, where already freed message frame is received from -- * Firmware. It happens with Resetting IOC. -- * Return immediately. 
Do not care -- */ - if ((req_idx != req_idx_MR) || -- (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf)) -+ (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf)) { -+ printk(MYIOC_s_ERR_FMT "Received a mf that was already freed\n", -+ ioc->name); -+ printk (MYIOC_s_ERR_FMT -+ "req_idx=%x req_idx_MR=%x mf=%p mr=%p sc=%p\n", -+ ioc->name, req_idx, req_idx_MR, mf, mr, -+ mptscsih_get_scsi_lookup(ioc, req_idx_MR)); - return 0; -+ } - - sc = mptscsih_getclear_scsi_lookup(ioc, req_idx); - if (sc == NULL) { -@@ -658,6 +668,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F - ; - } else { - u32 xfer_cnt; -+ u32 difftransfer; - u16 status; - u8 scsi_state, scsi_status; - u32 log_info; -@@ -668,6 +679,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F - xfer_cnt = le32_to_cpu(pScsiReply->TransferCount); - scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt); - log_info = le32_to_cpu(pScsiReply->IOCLogInfo); -+ vdevice = sc->device->hostdata; - - /* - * if we get a data underrun indication, yet no data was -@@ -685,18 +697,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F - if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) - mptscsih_copy_sense_data(sc, hd, mf, pScsiReply); - -- /* -- * Look for + dump FCP ResponseInfo[]! 
-- */ -- if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID && -- pScsiReply->ResponseInfo) { -- printk(MYIOC_s_NOTE_FMT "[%d:%d:%d:%d] " -- "FCP_ResponseInfo=%08xh\n", ioc->name, -- sc->device->host->host_no, sc->device->channel, -- sc->device->id, sc->device->lun, -- le32_to_cpu(pScsiReply->ResponseInfo)); -- } -- - switch(status) { - case MPI_IOCSTATUS_BUSY: /* 0x0002 */ - case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */ -@@ -724,7 +724,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F - if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) - hd->sel_timeout[pScsiReq->TargetID]++; - -- vdevice = sc->device->hostdata; - if (!vdevice) - break; - vtarget = vdevice->vtarget; -@@ -769,7 +768,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F - sc->result = DID_RESET << 16; - - case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ -- if (ioc->bus_type == FC) -+ if ( ioc->bus_type == FC ) - sc->result = DID_ERROR << 16; - else - sc->result = DID_RESET << 16; -@@ -830,7 +829,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F - } - if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) { - /* What to do? -- */ -+ */ - sc->result = DID_SOFT_ERROR << 16; - } - else if (scsi_state & MPI_SCSI_STATE_TERMINATED) { -@@ -923,7 +922,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F - - } - else if (scsi_state & -- (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS) -+ (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS) - ) { - /* - * What to do? 
-@@ -954,6 +953,13 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F - case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */ - sc->result = DID_SOFT_ERROR << 16; - break; -+#ifdef EEDP_SUPPORT -+ case MPI_IOCSTATUS_EEDP_GUARD_ERROR: -+ case MPI_IOCSTATUS_EEDP_REF_TAG_ERROR: -+ case MPI_IOCSTATUS_EEDP_APP_TAG_ERROR: -+ sc->result = DID_PARITY << 16; -+ break; -+#endif /* EEDP Support */ - - case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */ - case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */ -@@ -1143,8 +1149,8 @@ mptscsih_report_queue_full(struct scsi_c - void - mptscsih_remove(struct pci_dev *pdev) - { -- MPT_ADAPTER *ioc = pci_get_drvdata(pdev); -- struct Scsi_Host *host = ioc->sh; -+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev); -+ struct Scsi_Host *host = ioc->sh; - MPT_SCSI_HOST *hd; - int sz1; - -@@ -1204,7 +1210,7 @@ mptscsih_shutdown(struct pci_dev *pdev) - int - mptscsih_suspend(struct pci_dev *pdev, pm_message_t state) - { -- MPT_ADAPTER *ioc = pci_get_drvdata(pdev); -+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - - scsi_block_requests(ioc->sh); - flush_scheduled_work(); -@@ -1221,7 +1227,7 @@ mptscsih_suspend(struct pci_dev *pdev, p - int - mptscsih_resume(struct pci_dev *pdev) - { -- MPT_ADAPTER *ioc = pci_get_drvdata(pdev); -+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - int rc; - - rc = mpt_resume(pdev); -@@ -1280,13 +1286,13 @@ mptscsih_copy_mem_info(struct info_str * - } - - if (info->pos < info->offset) { -- data += (info->offset - info->pos); -- len -= (info->offset - info->pos); -+ data += (info->offset - info->pos); -+ len -= (info->offset - info->pos); - } - - if (len > 0) { -- memcpy(info->buffer + info->pos, data, len); -- info->pos += len; -+ memcpy(info->buffer + info->pos, data, len); -+ info->pos += len; - } - } - -@@ -1326,12 +1332,12 @@ mptscsih_host_info(MPT_ADAPTER *ioc, cha - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /** - * mptscsih_proc_info - Return information about MPT adapter -- * @host: scsi host struct -- 
* @buffer: if write, user data; if read, buffer for user -+ * @host: scsi host struct -+ * @buffer: if write, user data; if read, buffer for user - * @start: returns the buffer address -- * @offset: if write, 0; if read, the current offset into the buffer from -- * the previous read. -- * @length: if write, return length; -+ * @offset: if write, 0; if read, the current offset into the buffer from -+ * the previous read. -+ * @length: if write, return length; - * @func: write = 1; read = 0 - * - * (linux scsi_host_template.info routine) -@@ -1358,6 +1364,103 @@ mptscsih_proc_info(struct Scsi_Host *hos - return size; - } - -+#ifdef EEDP_SUPPORT -+u8 opcode_protection[256] = { -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, -+ 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; -+ -+/** -+ * _scsih_setup_eedp - setup MPI request for EEDP transfer -+ * @ioc: -+ * @scmd: pointer to scsi command object -+ * @mpi_request: pointer to the SCSI_IO reqest message frame -+ * -+ * Supporting protection 1 only. 
-+ * -+ * Returns nothing -+ */ -+static int -+_scsih_setup_eedp(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd, SCSIIO32Request_t *mpi_request) -+{ -+ VirtDevice *vdevice = scmd->device->hostdata; -+ u16 eedp_flags; -+ u8 scsi_opcode; -+ int lba_byte; -+ u32 *lba32; -+ -+ vdevice = scmd->device->hostdata; -+ if (!vdevice->eedp_enable) -+ return -1; -+ -+ /* protection type 1 support only */ -+ if (vdevice->eedp_type != 0) -+ return -1; -+ -+ /* check whether scsi opcode supports eedp transfer */ -+ scsi_opcode = scmd->cmnd[0]; -+ eedp_flags = opcode_protection[scsi_opcode]; -+ if (!eedp_flags) -+ return -1; -+ -+ /* -+ * enable ref/app/guard checking -+ * auto increment ref and app tag -+ */ -+ mpi_request->EEDPFlags = eedp_flags | -+ MPI_SCSIIO32_EEDPFLAGS_INC_PRI_REFTAG | -+ MPI_SCSIIO32_EEDPFLAGS_T10_CHK_REFTAG | -+ MPI_SCSIIO32_EEDPFLAGS_T10_CHK_LBATAG | -+ MPI_SCSIIO32_EEDPFLAGS_T10_CHK_GUARD; -+ -+ /* set block size */ -+ mpi_request->EEDPBlockSize = vdevice->eedp_block_length; -+ mpi_request->EEDPBlockSize += 8; -+ memset(mpi_request->CDB.CDB32, 0, 32); -+ -+ mpi_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xFFFF; -+ -+ /* set reference tag to low 32bit lba */ -+ lba_byte = (scmd->cmd_len == 16) ? 
6 : 2; -+ lba32 = (u32 *)&scmd->cmnd[lba_byte]; -+ mpi_request->CDB.EEDP32.PrimaryReferenceTag = *lba32; -+ -+ /* set RDPROTECT, WRPROTECT, VRPROTECT bits to (001b) */ -+ scmd->cmnd[1] = (scmd->cmnd[1] & 0x1F) | 0x20; -+ -+ /* add the rest of the bits */ -+ mpi_request->Port = 0; -+ mpi_request->Flags = MPI_SCSIIO32_FLAGS_FORM_SCSIID; -+ mpi_request->DeviceAddress.SCSIID.TargetID = vdevice->vtarget->id; -+ mpi_request->DeviceAddress.SCSIID.Bus = vdevice->vtarget->channel; -+ mpi_request->ChainOffset = 0; -+ mpi_request->Function = MPI_FUNCTION_SCSI_IO_32; -+ mpi_request->CDBLength = scmd->cmd_len; -+ mpi_request->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; -+ mpi_request->MsgFlags = mpt_msg_flags(ioc); -+ int_to_scsilun(scmd->device->lun, (struct scsi_lun *)mpi_request->LUN); -+ memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); -+ mpi_request->SGLOffset0 = offsetof(SCSIIO32Request_t, SGL) / 4; -+ mpi_request->SGLOffset1 = 0; -+ mpi_request->SGLOffset2 = 0; -+ mpi_request->SGLOffset3 = 0; -+ return 0; -+} -+#endif /* EEDP Support */ -+ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - #define ADD_INDEX_LOG(req_ent) do { } while(0) - -@@ -1444,6 +1547,36 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, v - - /* Use the above information to set up the message frame - */ -+#ifdef EEDP_SUPPORT -+ if (_scsih_setup_eedp(ioc, SCpnt, (SCSIIO32Request_t *)mf) == 0) { -+ SCSIIO32Request_t *mpi_request = (SCSIIO32Request_t *)mf; -+ -+ /* finish off setting the rest of the SCSIIO32 */ -+ mpi_request->Control = cpu_to_le32(scsictl); -+ mpi_request->DataLength = cpu_to_le32(datalen); -+ -+ /* SenseBuffer low address */ -+ mpi_request->SenseBufferLowAddr = -+ cpu_to_le32(ioc->sense_buf_low_dma -+ + (my_idx * MPT_SENSE_BUFFER_ALLOC)); -+ -+ /* Now add the SG list -+ * Always have a SGE even if null length. 
-+ */ -+ if (datalen == 0) { -+ /* Add a NULL SGE */ -+ ioc->add_sge((char *)&mpi_request->SGL, -+ MPT_SGE_FLAGS_SSIMPLE_READ | 0, -+ (dma_addr_t) -1); -+ } else { -+ /* Add a 32 or 64 bit SGE */ -+ if (mptscsih_AddSGE(ioc, SCpnt, -+ pScsiReq, my_idx) != SUCCESS) -+ goto fail; -+ } -+ goto send_mf; -+ } -+#endif - pScsiReq->TargetID = (u8) vdevice->vtarget->id; - pScsiReq->Bus = vdevice->vtarget->channel; - pScsiReq->ChainOffset = 0; -@@ -1489,6 +1622,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, v - goto fail; - } - -+#ifdef EEDP_SUPPORT -+ send_mf: -+#endif - SCpnt->host_scribble = (unsigned char *)mf; - mptscsih_set_scsi_lookup(ioc, my_idx, SCpnt); - -@@ -1560,6 +1696,137 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *i - */ - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+ -+static int -+mptscsih_scandv_bus_reset(MPT_ADAPTER *ioc) -+{ -+ MPT_FRAME_HDR *mf; -+ SCSITaskMgmt_t *pScsiTm; -+ SCSITaskMgmtReply_t *pScsiTmReply; -+ int ii; -+ int retval; -+ unsigned long timeout; -+ unsigned long time_count; -+ u16 iocstatus; -+ -+ mutex_lock(&ioc->taskmgmt_cmds.mutex); -+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { -+ mutex_unlock(&ioc->taskmgmt_cmds.mutex); -+ return -EPERM; -+ } -+ -+ /* Send request -+ */ -+ if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) { -+ dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n", -+ ioc->name)); -+ mpt_clear_taskmgmt_in_progress_flag(ioc); -+ retval = -ENOMEM; -+ goto out; -+ } -+ -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", -+ ioc->name, mf)); -+ -+ pScsiTm = (SCSITaskMgmt_t *) mf; -+ memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t)); -+ pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; -+ pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; -+ pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; -+ pScsiTm->TargetID = 0; -+ pScsiTm->Bus = 0; -+ pScsiTm->ChainOffset = 0; -+ pScsiTm->Reserved = 0; -+ pScsiTm->Reserved1 = 0; -+ 
pScsiTm->TaskMsgContext = 0; -+ for (ii= 0; ii < 8; ii++) -+ pScsiTm->LUN[ii] = 0; -+ for (ii=0; ii < 7; ii++) -+ pScsiTm->Reserved2[ii] = 0; -+ -+ switch (ioc->bus_type) { -+ case FC: -+ timeout = 40; -+ break; -+ case SAS: -+ timeout = 30; -+ break; -+ case SPI: -+ default: -+ timeout = 2; -+ break; -+ } -+ -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n", -+ ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout)); -+ -+ INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status) -+ CLEAR_MGMT_STATUS(ioc->internal_cmds.status) -+ retval = 0; -+ time_count = jiffies; -+ if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && -+ (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) -+ mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); -+ else { -+ retval = mpt_send_handshake_request(ioc->TaskCtx, ioc, -+ sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); -+ if (retval != 0) { -+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "TaskMgmt send_handshake FAILED!" -+ " (ioc %p, mf %p, rc=%d) \n", ioc->name, -+ ioc, mf, retval)); -+ mpt_clear_taskmgmt_in_progress_flag(ioc); -+ goto out; -+ } -+ } -+ -+ /* Now wait for the command to complete */ -+ ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ); -+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "TaskMgmt failed\n", ioc->name)); -+ mpt_free_msg_frame(ioc, mf); -+ mpt_clear_taskmgmt_in_progress_flag(ioc); -+ retval = -1; /* return failure */ -+ goto out; -+ } -+ -+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "TaskMgmt failed\n", ioc->name)); -+ retval = -1; /* return failure */ -+ goto out; -+ } -+ -+ pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply; -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n" -+ "\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n" -+ 
"\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus, -+ pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, -+ le16_to_cpu(pScsiTmReply->IOCStatus), -+ le32_to_cpu(pScsiTmReply->IOCLogInfo), -+ pScsiTmReply->ResponseCode, -+ le32_to_cpu(pScsiTmReply->TerminationCount))); -+ -+ iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; -+ -+ if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED || -+ iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED || -+ iocstatus == MPI_IOCSTATUS_SUCCESS) -+ retval = 0; -+ else { -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "TaskMgmt failed\n", ioc->name)); -+ retval = -1; /* return failure */ -+ } -+ -+ out: -+ mutex_unlock(&ioc->taskmgmt_cmds.mutex); -+ CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) -+ return retval; -+} -+ -+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /** - * mptscsih_IssueTaskMgmt - Generic send Task Management function. - * @hd: Pointer to MPT_SCSI_HOST structure -@@ -1579,14 +1846,13 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *i - * - **/ - int --mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, -- int ctx2abort, ulong timeout) -+mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout) - { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; - int ii; - int retval; -- MPT_ADAPTER *ioc = hd->ioc; -+ MPT_ADAPTER *ioc = hd->ioc; - unsigned long timeleft; - u8 issue_hard_reset; - u32 ioc_raw_state; -@@ -1646,7 +1912,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd - pScsiTm->TaskType = type; - pScsiTm->Reserved1 = 0; - pScsiTm->MsgFlags = (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) -- ? MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION : 0; -+ ? 
MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION : 0; - - int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN); - -@@ -1705,7 +1971,8 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd - if (issue_hard_reset) { - printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", - ioc->name, __func__); -- retval = mpt_HardResetHandler(ioc, CAN_SLEEP); -+ if ((retval = mpt_SoftResetHandler(ioc, CAN_SLEEP)) != 0) -+ retval = mpt_HardResetHandler(ioc, CAN_SLEEP); - mpt_free_msg_frame(ioc, mf); - } - -@@ -1713,7 +1980,6 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd - mutex_unlock(&ioc->taskmgmt_cmds.mutex); - return retval; - } --EXPORT_SYMBOL(mptscsih_IssueTaskMgmt); - - static int - mptscsih_get_tm_timeout(MPT_ADAPTER *ioc) -@@ -1722,6 +1988,7 @@ mptscsih_get_tm_timeout(MPT_ADAPTER *ioc - case FC: - return 40; - case SAS: -+ return 30; - case SPI: - default: - return 10; -@@ -1746,7 +2013,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) - int scpnt_idx; - int retval; - VirtDevice *vdevice; -- ulong sn = SCpnt->serial_number; -+ ulong sn = SCpnt->serial_number; - MPT_ADAPTER *ioc; - - /* If we can't locate our host adapter structure, return FAILED status. -@@ -1771,7 +2038,21 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) - ioc->name, SCpnt)); - SCpnt->result = DID_NO_CONNECT << 16; - SCpnt->scsi_done(SCpnt); -- retval = 0; -+ retval = SUCCESS; -+ goto out; -+ } -+ -+ /* Find this command -+ */ -+ if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(ioc, SCpnt)) < 0) { -+ /* Cmd not found in ScsiLookup. -+ * Do OS callback. -+ */ -+ SCpnt->result = DID_RESET << 16; -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "task abort: command not in the active list! (sc=%p)\n", -+ ioc->name, SCpnt)); -+ retval = SUCCESS; - goto out; - } - -@@ -1786,25 +2067,23 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) - goto out; - } - -- /* Find this command -+ /* Task aborts are not supported for volumes. - */ -- if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(ioc, SCpnt)) < 0) { -- /* Cmd not found in ScsiLookup. -- * Do OS callback. 
-- */ -+ if (vdevice->vtarget->raidVolume) { -+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -+ "task abort: raid volume (sc=%p)\n", -+ ioc->name, SCpnt)); - SCpnt->result = DID_RESET << 16; -- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " -- "Command not in the active list! (sc=%p)\n", ioc->name, -- SCpnt)); -- retval = SUCCESS; -+ retval = FAILED; - goto out; - } - -+ if (mpt_fwfault_debug) -+ mpt_halt_firmware(ioc); -+ - if (ioc->timeouts < -1) - ioc->timeouts++; - -- if (mpt_fwfault_debug) -- mpt_halt_firmware(ioc); - - /* Most important! Set TaskMsgContext to SCpnt's MsgContext! - * (the IO to be ABORT'd) -@@ -1821,6 +2100,10 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) - vdevice->vtarget->id, vdevice->lun, - ctx2abort, mptscsih_get_tm_timeout(ioc)); - -+ -+ /* check to see whether command actually completed and/or -+ * terminated -+ */ - if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx && - SCpnt->serial_number == sn) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT -@@ -1836,7 +2119,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) - - out: - printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", -- ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); -+ ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED" ), SCpnt); - - return retval; - } -@@ -1964,7 +2247,7 @@ int - mptscsih_host_reset(struct scsi_cmnd *SCpnt) - { - MPT_SCSI_HOST * hd; -- int status = SUCCESS; -+ int status = SUCCESS; - MPT_ADAPTER *ioc; - int retval; - -@@ -1975,9 +2258,6 @@ mptscsih_host_reset(struct scsi_cmnd *SC - return FAILED; - } - -- /* make sure we have no outstanding commands at this stage */ -- mptscsih_flush_running_cmds(hd); -- - ioc = hd->ioc; - printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n", - ioc->name, SCpnt); -@@ -1985,7 +2265,9 @@ mptscsih_host_reset(struct scsi_cmnd *SC - /* If our attempts to reset the host failed, then return a failed - * status. The host will be taken off line by the SCSI mid-layer. 
- */ -- retval = mpt_HardResetHandler(ioc, CAN_SLEEP); -+ if ((retval = mpt_SoftResetHandler(ioc, CAN_SLEEP)) != 0) -+ retval = mpt_HardResetHandler(ioc, CAN_SLEEP); -+ - if (retval < 0) - status = FAILED; - else -@@ -1997,6 +2279,7 @@ mptscsih_host_reset(struct scsi_cmnd *SC - return status; - } - -+ - static int - mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type, - SCSITaskMgmtReply_t *pScsiTmReply) -@@ -2083,7 +2366,6 @@ mptscsih_taskmgmt_response_code(MPT_ADAP - printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n", - ioc->name, response_code, desc); - } --EXPORT_SYMBOL(mptscsih_taskmgmt_response_code); - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /** -@@ -2119,6 +2401,8 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER * - mpt_clear_taskmgmt_in_progress_flag(ioc); - ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; - complete(&ioc->taskmgmt_cmds.done); -+ if (ioc->bus_type == SAS) -+ ioc->schedule_target_reset(ioc); - return 1; - } - return 0; -@@ -2135,7 +2419,7 @@ mptscsih_bios_param(struct scsi_device * - int heads; - int sectors; - sector_t cylinders; -- ulong dummy; -+ ulong dummy; - - heads = 64; - sectors = 32; -@@ -2164,14 +2448,15 @@ mptscsih_bios_param(struct scsi_device * - return 0; - } - --/* Search IOC page 3 to determine if this is hidden physical disk -+/** -+ * Search IOC page 3 to determine if this is hidden physical disk - * - */ - int - mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id) - { - struct inactive_raid_component_info *component_info; -- int i, j; -+ u8 i, j; - RaidPhysDiskPage1_t *phys_disk; - int rc = 0; - int num_paths; -@@ -2197,7 +2482,7 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, - ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum); - if (num_paths < 2) - continue; -- phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) + -+ phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t,Path) + - (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); - if (!phys_disk) - continue; -@@ 
-2224,21 +2509,20 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, - kfree(phys_disk); - } - -- - /* - * Check inactive list for matching phys disks - */ - if (list_empty(&ioc->raid_data.inactive_list)) - goto out; - -- mutex_lock(&ioc->raid_data.inactive_list_mutex); -+ down(&ioc->raid_data.inactive_list_mutex); - list_for_each_entry(component_info, &ioc->raid_data.inactive_list, - list) { - if ((component_info->d.PhysDiskID == id) && - (component_info->d.PhysDiskBus == channel)) - rc = 1; - } -- mutex_unlock(&ioc->raid_data.inactive_list_mutex); -+ up(&ioc->raid_data.inactive_list_mutex); - - out: - return rc; -@@ -2249,10 +2533,10 @@ u8 - mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id) - { - struct inactive_raid_component_info *component_info; -- int i, j; -+ int i,j; - RaidPhysDiskPage1_t *phys_disk; - int rc = -ENXIO; -- int num_paths; -+ u8 num_paths; - - if (!ioc->raid_data.pIocPg3) - goto out; -@@ -2275,7 +2559,7 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc - ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum); - if (num_paths < 2) - continue; -- phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) + -+ phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t,Path) + - (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); - if (!phys_disk) - continue; -@@ -2308,14 +2592,14 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc - if (list_empty(&ioc->raid_data.inactive_list)) - goto out; - -- mutex_lock(&ioc->raid_data.inactive_list_mutex); -+ down(&ioc->raid_data.inactive_list_mutex); - list_for_each_entry(component_info, &ioc->raid_data.inactive_list, - list) { - if ((component_info->d.PhysDiskID == id) && - (component_info->d.PhysDiskBus == channel)) - rc = component_info->d.PhysDiskNum; - } -- mutex_unlock(&ioc->raid_data.inactive_list_mutex); -+ up(&ioc->raid_data.inactive_list_mutex); - - out: - return rc; -@@ -2333,15 +2617,17 @@ mptscsih_slave_destroy(struct scsi_devic - MPT_SCSI_HOST *hd = shost_priv(host); - VirtTarget *vtarget; - VirtDevice *vdevice; -- 
struct scsi_target *starget; -+ struct scsi_target *starget; - - starget = scsi_target(sdev); - vtarget = starget->hostdata; -+ vtarget->num_luns--; - vdevice = sdev->hostdata; -+ if (!vdevice) -+ return; - - mptscsih_search_running_cmds(hd, vdevice); -- vtarget->num_luns--; -- mptscsih_synchronize_cache(hd, vdevice); -+ mptscsih_synchronize_cache(sdev, hd, vdevice); - kfree(vdevice); - sdev->hostdata = NULL; - } -@@ -2359,8 +2645,8 @@ int - mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) - { - MPT_SCSI_HOST *hd = shost_priv(sdev->host); -- VirtTarget *vtarget; -- struct scsi_target *starget; -+ VirtTarget *vtarget; -+ struct scsi_target *starget; - int max_depth; - int tagged; - MPT_ADAPTER *ioc = hd->ioc; -@@ -2372,15 +2658,13 @@ mptscsih_change_queue_depth(struct scsi_ - return -EOPNOTSUPP; - - if (ioc->bus_type == SPI) { -- if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)) -- max_depth = 1; -- else if (sdev->type == TYPE_DISK && -- vtarget->minSyncFactor <= MPT_ULTRA160) -+ if (sdev->type == TYPE_DISK && -+ vtarget->minSyncFactor <= MPT_ULTRA160) - max_depth = MPT_SCSI_CMD_PER_DEV_HIGH; - else - max_depth = MPT_SCSI_CMD_PER_DEV_LOW; - } else -- max_depth = ioc->sh->can_queue; -+ max_depth = ioc->sh->can_queue; - - if (!sdev->tagged_supported) - max_depth = 1; -@@ -2393,9 +2677,120 @@ mptscsih_change_queue_depth(struct scsi_ - tagged = MSG_SIMPLE_TAG; - - scsi_adjust_queue_depth(sdev, tagged, qdepth); -+ -+ if (sdev->inquiry_len > 7) -+ sdev_printk(KERN_INFO, sdev, MYIOC_s_FMT "qdepth=%d, " -+ "tagged=%d, simple=%d, ordered=%d, scsi_level=%d, " -+ "cmd_que=%d\n", ioc->name, sdev->queue_depth, -+ sdev->tagged_supported, sdev->simple_tags, -+ sdev->ordered_tags, sdev->scsi_level, -+ (sdev->inquiry[7] & 2) >> 1); -+ - return sdev->queue_depth; - } - -+#ifdef EEDP_SUPPORT -+/** -+ * _scsih_read_capacity_16 - send READ_CAPACITY_16 to target -+ * -+ */ -+static int -+_scsih_read_capacity_16(MPT_SCSI_HOST *hd, int id, int channel, u32 lun, 
-+ void *data, u32 length) -+{ -+ INTERNAL_CMD iocmd; -+ dma_addr_t data_dma; -+ struct read_cap_parameter *parameter_data; -+ u32 data_length; -+ MPT_ADAPTER *ioc = hd->ioc; -+ int rc; -+ int count; -+ u8 skey; -+ u8 asc; -+ u8 ascq; -+ -+ data_length = sizeof(struct read_cap_parameter); -+ parameter_data = pci_alloc_consistent(ioc->pcidev, -+ data_length, &data_dma); -+ if (!parameter_data) { -+ printk(MYIOC_s_ERR_FMT "failure at %s:%d/%s()!\n", -+ ioc->name, __FILE__, __LINE__, __func__); -+ return -1; -+ } -+ -+ iocmd.cmd = SERVICE_ACTION_IN; -+ iocmd.data_dma = data_dma; -+ iocmd.data = (u8 *)parameter_data; -+ iocmd.size = data_length; -+ iocmd.channel = channel; -+ iocmd.id = id; -+ iocmd.lun = lun; -+ -+ for (count=0; count < 4; count++) { -+ rc = mptscsih_do_cmd(hd, &iocmd); -+ -+ if(rc == MPT_SCANDV_GOOD) { -+ memcpy(data, parameter_data, -+ min_t(u32, data_length, length)); -+ break; -+ } else if(rc == MPT_SCANDV_BUSY) { -+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: " -+ "fw_channel=%d fw_id=%d : device busy\n", -+ ioc->name, __FUNCTION__, channel, id)); -+ continue; -+ } else if(rc == MPT_SCANDV_DID_RESET) { -+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: " -+ "fw_channel=%d fw_id=%d : did reset\n", -+ ioc->name, __FUNCTION__, channel, id)); -+ continue; -+ } else if(rc == MPT_SCANDV_SENSE) { -+ skey = ioc->internal_cmds.sense[2] & 0x0F; -+ asc = ioc->internal_cmds.sense[12]; -+ ascq = ioc->internal_cmds.sense[13]; -+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: " -+ "fw_channel=%d fw_id=%d : [sense_key,arc,ascq]: " -+ "[0x%02x,0x%02x,0x%02x]\n", ioc->name, -+ __FUNCTION__, channel, id, skey, asc, ascq)); -+ if( skey == UNIT_ATTENTION || -+ skey == NOT_READY || -+ skey == ILLEGAL_REQUEST ) { -+ continue; -+ } else { -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d : " -+ "tur failed due to [sense_key,asc,ascq]: " -+ "[0x%02x,0x%02x,0x%02x]\n", ioc->name, -+ __FUNCTION__, channel, id, skey, asc, ascq); -+ break; -+ } -+ } else if(rc == 
MPT_SCANDV_SELECTION_TIMEOUT) { -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d: " -+ "read capacity failed due to no device\n", ioc->name, -+ __FUNCTION__, channel, id); -+ break; -+ } else if(rc == MPT_SCANDV_SOME_ERROR) { -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d: " -+ "read capacity failed due to some error\n", ioc->name, -+ __FUNCTION__, channel, id); -+ break; -+ } else { -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d: " -+ "read capacity failed due to some error\n", ioc->name, -+ __FUNCTION__, channel, id); -+ break; -+ } -+ } -+ -+ if(count > 4 && rc != 0) { -+ printk(MYIOC_s_ERR_FMT "%s: fw_channel=%d fw_id=%d: " -+ "read capacity failed to many times\n", ioc->name, -+ __FUNCTION__, channel, id); -+ } -+ -+ pci_free_consistent(ioc->pcidev, data_length, parameter_data, data_dma); -+ return rc; -+} -+#endif -+ - /* - * OS entry point to adjust the queue_depths on a per-device basis. - * Called once per device the bus scan. Use it to force the queue_depth -@@ -2408,7 +2803,7 @@ mptscsih_slave_configure(struct scsi_dev - struct Scsi_Host *sh = sdev->host; - VirtTarget *vtarget; - VirtDevice *vdevice; -- struct scsi_target *starget; -+ struct scsi_target *starget; - MPT_SCSI_HOST *hd = shost_priv(sh); - MPT_ADAPTER *ioc = hd->ioc; - -@@ -2416,6 +2811,45 @@ mptscsih_slave_configure(struct scsi_dev - vtarget = starget->hostdata; - vdevice = sdev->hostdata; - -+#ifdef EEDP_SUPPORT -+ if ((!(vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) && -+ (!(vdevice->vtarget->raidVolume))) { -+ -+ struct read_cap_parameter data; -+ memset(&data, 0, sizeof(struct read_cap_parameter)); -+ -+ /* -+ * check PROTECT bit -+ * -+ * NOTE: The crack monkey target mode driver doesn't -+ * set this bit(bug has been reported). -+ * The cm_target command line option is a work around. 
-+ */ -+ if (!(sdev->inquiry[5] & 1)) -+ goto out; -+ -+ if ((ioc->bus_type == FC) && -+ (_scsih_read_capacity_16(hd, vtarget->id, -+ vtarget->channel, sdev->lun, &data, -+ sizeof(struct read_cap_parameter)) == 0)) { -+ vdevice->eedp_enable = data.prot_en; -+ vdevice->eedp_type = data.p_type; -+ vdevice->eedp_block_length = -+ be32_to_cpu(data.logical_block_length); -+ -+ if (!vdevice->eedp_enable) -+ goto out; -+ -+ sdev_printk(KERN_INFO, sdev, "EEDP enabled: " -+ "protection_type(%d), block_length(%d)\n", -+ vdevice->eedp_type+1, -+ vdevice->eedp_block_length); -+ } -+ } -+ out: -+#endif /* EEDP Support */ -+ -+ - dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "device @ %p, channel=%d, id=%d, lun=%d\n", - ioc->name, sdev, sdev->channel, sdev->id, sdev->lun)); -@@ -2427,6 +2861,11 @@ mptscsih_slave_configure(struct scsi_dev - - vdevice->configured_lun = 1; - -+ if ((ioc->bus_type != SAS) && (sdev->id > sh->max_id)) { -+ /* error case, should never happen */ -+ scsi_adjust_queue_depth(sdev, 0, 1); -+ goto slave_configure_exit; -+ } - dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Queue depth=%d, tflags=%x\n", - ioc->name, sdev->queue_depth, vtarget->tflags)); -@@ -2437,8 +2876,10 @@ mptscsih_slave_configure(struct scsi_dev - ioc->name, vtarget->negoFlags, vtarget->maxOffset, - vtarget->minSyncFactor)); - -- mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH, -+ mptscsih_change_queue_depth(sdev, ioc->sdev_queue_depth, - SCSI_QDEPTH_DEFAULT); -+ -+slave_configure_exit: - dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "tagged %d, simple %d, ordered %d\n", - ioc->name,sdev->tagged_supported, sdev->simple_tags, -@@ -2463,7 +2904,7 @@ mptscsih_copy_sense_data(struct scsi_cmn - VirtDevice *vdevice; - SCSIIORequest_t *pReq; - u32 sense_count = le32_to_cpu(pScsiReply->SenseCount); -- MPT_ADAPTER *ioc = hd->ioc; -+ MPT_ADAPTER *ioc = hd->ioc; - - /* Get target structure - */ -@@ -2477,8 +2918,11 @@ mptscsih_copy_sense_data(struct scsi_cmn - /* Copy the sense received into the scsi 
command block. */ - req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); - sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); -- memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc)); - -+ if (sense_count > SCSI_SENSE_BUFFERSIZE) -+ sense_count = SCSI_SENSE_BUFFERSIZE; -+ -+ memcpy(sc->sense_buffer, sense_data, sense_count); - /* Log SMART data (asc = 0x5D, non-IM case only) if required. - */ - if ((ioc->events) && (ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) { -@@ -2496,12 +2940,10 @@ mptscsih_copy_sense_data(struct scsi_cmn - ioc->events[idx].data[1] = (sense_data[13] << 8) | sense_data[12]; - - ioc->eventContext++; -- if (ioc->pcidev->vendor == -- PCI_VENDOR_ID_IBM) { -- mptscsih_issue_sep_command(ioc, -- vdevice->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); -- vdevice->vtarget->tflags |= -- MPT_TARGET_FLAGS_LED_ON; -+ if (ioc->pcidev->vendor == PCI_VENDOR_ID_IBM) { -+ mptscsih_issue_sep_command(ioc, vdevice->vtarget, -+ MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); -+ vdevice->vtarget->tflags |= MPT_TARGET_FLAGS_LED_ON; - } - } - } -@@ -2512,12 +2954,16 @@ mptscsih_copy_sense_data(struct scsi_cmn - } - - /** -- * mptscsih_get_scsi_lookup - retrieves scmd entry -+ * mptscsih_get_scsi_lookup -+ * -+ * retrieves scmd entry from ScsiLookup[] array list -+ * - * @ioc: Pointer to MPT_ADAPTER structure - * @i: index into the array - * - * Returns the scsi_cmd pointer -- */ -+ * -+ **/ - struct scsi_cmnd * - mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) - { -@@ -2530,10 +2976,12 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *io - - return scmd; - } --EXPORT_SYMBOL(mptscsih_get_scsi_lookup); - - /** -- * mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list -+ * mptscsih_getclear_scsi_lookup -+ * -+ * retrieves and clears scmd entry from ScsiLookup[] array list -+ * - * @ioc: Pointer to MPT_ADAPTER structure - * @i: index into the array - * -@@ -2604,7 +3052,7 @@ 
mptscsih_ioc_reset(MPT_ADAPTER *ioc, int - { - MPT_SCSI_HOST *hd; - -- if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL) -+ if ((ioc->sh == NULL) || (ioc->sh->hostdata == NULL)) - return 0; - - hd = shost_priv(ioc->sh); -@@ -2684,7 +3132,7 @@ mptscsih_scandv_complete(MPT_ADAPTER *io - SCSIIOReply_t *pReply; - u8 cmd; - u16 req_idx; -- u8 *sense_data; -+ u8 *sense_data; - int sz; - - ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; -@@ -2722,8 +3170,8 @@ mptscsih_scandv_complete(MPT_ADAPTER *io - /** - * mptscsih_get_completion_code - - * @ioc: Pointer to MPT_ADAPTER structure -- * @req: Pointer to original MPT request frame -- * @reply: Pointer to MPT reply frame (NULL if TurboReply) -+ * @reply: -+ * @cmd: - * - **/ - static int -@@ -2829,7 +3277,7 @@ mptscsih_get_completion_code(MPT_ADAPTER - * - * > 0 if command complete but some type of completion error. - */ --static int -+int - mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) - { - MPT_FRAME_HDR *mf; -@@ -2849,7 +3297,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTER - if (ioc->ioc_reset_in_progress) { - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); - dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "%s: busy with host reset\n", ioc->name, __func__)); -+ "%s: busy with host reset\n", ioc->name, __FUNCTION__)); - return MPT_SCANDV_BUSY; - } - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -@@ -2945,6 +3393,35 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTER - timeout = 10; - break; - -+ case REPORT_LUNS: -+ cmdLen = 12; -+ dir = MPI_SCSIIO_CONTROL_READ; -+ CDB[0] = cmd; -+ CDB[6] = (io->size >> 24) & 0xFF; -+ CDB[7] = (io->size >> 16) & 0xFF; -+ CDB[8] = (io->size >> 8) & 0xFF; -+ CDB[9] = io->size & 0xFF; -+ timeout = 10; -+ break; -+ -+ case TRANSPORT_LAYER_RETRIES: -+ CDB[0] = cmd; -+ CDB[1] = 0x01; -+ cmdLen = 6; -+ dir = MPI_SCSIIO_CONTROL_READ; -+ timeout = 10; -+ break; -+#ifdef EEDP_SUPPORT -+ case SERVICE_ACTION_IN: -+ CDB[0] = cmd; -+ CDB[1] = 0x10; -+ CDB[13] = io->size & 0xFF; -+ dir 
= MPI_SCSIIO_CONTROL_READ; -+ timeout = 10; -+ cmdLen = 16; -+ break; -+#endif -+ - default: - /* Error Case */ - ret = -EFAULT; -@@ -3032,9 +3509,12 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTER - goto out; - } - if (!timeleft) { -+ if (!mptscsih_scandv_bus_reset(ioc)) -+ goto out; - printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", - ioc->name, __func__); -- mpt_HardResetHandler(ioc, CAN_SLEEP); -+ if (mpt_SoftResetHandler(ioc, CAN_SLEEP) != 0) -+ mpt_HardResetHandler(ioc, CAN_SLEEP); - mpt_free_msg_frame(ioc, mf); - } - goto out; -@@ -3050,6 +3530,73 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTER - return ret; - } - -+int -+mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id) -+{ -+ MPT_ADAPTER *ioc = hd->ioc; -+ MpiRaidActionRequest_t *pReq; -+ MPT_FRAME_HDR *mf; -+ int ret; -+ unsigned long timeleft; -+ -+ mutex_lock(&ioc->internal_cmds.mutex); -+ -+ /* Get and Populate a free Frame -+ */ -+ if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { -+ dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!\n", -+ ioc->name, __FUNCTION__)); -+ ret = -EAGAIN; -+ goto out; -+ } -+ pReq = (MpiRaidActionRequest_t *)mf; -+ if (quiesce) -+ pReq->Action = MPI_RAID_ACTION_QUIESCE_PHYS_IO; -+ else -+ pReq->Action = MPI_RAID_ACTION_ENABLE_PHYS_IO; -+ pReq->Reserved1 = 0; -+ pReq->ChainOffset = 0; -+ pReq->Function = MPI_FUNCTION_RAID_ACTION; -+ pReq->VolumeID = id; -+ pReq->VolumeBus = channel; -+ pReq->PhysDiskNum = 0; -+ pReq->MsgFlags = 0; -+ pReq->Reserved2 = 0; -+ pReq->ActionDataWord = 0; /* Reserved for this action */ -+ -+ ioc->add_sge((char *)&pReq->ActionDataSGE, -+ MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); -+ -+ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n", -+ ioc->name, pReq->Action, channel, id)); -+ -+ INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status) -+ mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); -+ timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ); -+ if 
(!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -+ ret = -ETIME; -+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n", -+ ioc->name, __FUNCTION__)); -+ if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) -+ goto out; -+ if (!timeleft) { -+ printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", -+ ioc->name, __FUNCTION__); -+ if (mpt_SoftResetHandler(ioc, CAN_SLEEP) != 0) -+ mpt_HardResetHandler(ioc, CAN_SLEEP); -+ mpt_free_msg_frame(ioc, mf); -+ } -+ goto out; -+ } -+ -+ ret = ioc->internal_cmds.completion_code; -+ -+ out: -+ CLEAR_MGMT_STATUS(ioc->internal_cmds.status) -+ mutex_unlock(&ioc->internal_cmds.mutex); -+ return ret; -+} -+ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /** - * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks. -@@ -3061,9 +3608,10 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTER - * - */ - static void --mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice) -+mptscsih_synchronize_cache(struct scsi_device *sdev, MPT_SCSI_HOST *hd, VirtDevice *vdevice) - { - INTERNAL_CMD iocmd; -+ MPT_ADAPTER *ioc = hd->ioc; - - /* Ignore hidden raid components, this is handled when the command - * is sent to the volume -@@ -3075,23 +3623,124 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST - !vdevice->configured_lun) - return; - -- /* Following parameters will not change -- * in this routine. 
-- */ -+ memset(&iocmd, 0, sizeof(INTERNAL_CMD)); - iocmd.cmd = SYNCHRONIZE_CACHE; -- iocmd.flags = 0; - iocmd.physDiskNum = -1; - iocmd.data = NULL; - iocmd.data_dma = -1; -- iocmd.size = 0; -- iocmd.rsvd = iocmd.rsvd2 = 0; - iocmd.channel = vdevice->vtarget->channel; - iocmd.id = vdevice->vtarget->id; - iocmd.lun = vdevice->lun; - -+ sdev_printk(KERN_INFO, sdev, MYIOC_s_FMT "SYNCHRONIZE_CACHE: fw_channel %d," -+ " fw_id %d\n", ioc->name, vdevice->vtarget->channel, vdevice->vtarget->id); - mptscsih_do_cmd(hd, &iocmd); - } - -+/* -+ * shost attributes -+ */ -+static ssize_t -+mptscsih_fault_show(struct device *dev, struct device_attribute *attr, -+ char *buf) -+{ -+ struct Scsi_Host *host = class_to_shost(dev); -+ MPT_SCSI_HOST *hd = shost_priv(host); -+ MPT_ADAPTER *ioc = hd->ioc; -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->is_fault); -+} -+static ssize_t -+mptscsih_fault_store(struct device *dev, struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct Scsi_Host *host = class_to_shost(dev); -+ MPT_SCSI_HOST *hd = shost_priv(host); -+ MPT_ADAPTER *ioc = hd->ioc; -+ int val = 0; -+ -+ if (sscanf(buf, "%d", &val) != 1) -+ return -EINVAL; -+ -+ ioc->is_fault = val; -+ return strlen(buf); -+ -+} -+ -+struct DIAG_BUFFER_START { -+ u32 Size; -+ u32 DiagVersion; -+ u8 BufferType; -+ u8 Reserved[3]; -+ u32 Reserved1; -+ u32 Reserved2; -+ u32 Reserved3; -+}; -+ -+static ssize_t -+mptscsih_ring_buffer_size_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct Scsi_Host *host = class_to_shost(dev); -+ MPT_SCSI_HOST *hd = shost_priv(host); -+ MPT_ADAPTER *ioc = hd->ioc; -+ u32 size = 0; -+ struct DIAG_BUFFER_START *request_data; -+ -+ ioc->ring_buffer_sz = 0; -+ if (!ioc->DiagBuffer[0]) -+ return 0; -+ -+ request_data = (struct DIAG_BUFFER_START *)ioc->DiagBuffer[0]; -+ if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || -+ le32_to_cpu(request_data->DiagVersion) == 0x01000000) && -+ 
le32_to_cpu(request_data->Reserved3) == 0x4742444c) { -+ size = le32_to_cpu(request_data->Size); -+ ioc->ring_buffer_sz = size; -+ } -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", size); -+} -+ -+static ssize_t -+mptscsih_ring_buffer_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct Scsi_Host *host = class_to_shost(dev); -+ MPT_SCSI_HOST *hd = shost_priv(host); -+ MPT_ADAPTER *ioc = hd->ioc; -+ void *request_data; -+ u32 size; -+ -+ if (!ioc->DiagBuffer[0]) -+ return 0; -+ -+ if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) -+ return 0; -+ -+ size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; -+ size = (size > PAGE_SIZE) ? PAGE_SIZE : size; -+ request_data = ioc->DiagBuffer[0] + ioc->ring_buffer_offset; -+ memcpy(buf, request_data, size); -+ return size; -+} -+ -+static ssize_t -+mptscsih_ring_buffer_store(struct device *dev, struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct Scsi_Host *host = class_to_shost(dev); -+ MPT_SCSI_HOST *hd = shost_priv(host); -+ MPT_ADAPTER *ioc = hd->ioc; -+ int val = 0; -+ -+ if (sscanf(buf, "%d", &val) != 1) -+ return -EINVAL; -+ -+ ioc->ring_buffer_offset = val; -+ return strlen(buf); -+} -+ - static ssize_t - mptscsih_version_fw_show(struct device *dev, struct device_attribute *attr, - char *buf) -@@ -3106,7 +3755,6 @@ mptscsih_version_fw_show(struct device * - (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, - ioc->facts.FWVersion.Word & 0x000000FF); - } --static DEVICE_ATTR(version_fw, S_IRUGO, mptscsih_version_fw_show, NULL); - - static ssize_t - mptscsih_version_bios_show(struct device *dev, struct device_attribute *attr, -@@ -3116,13 +3764,12 @@ mptscsih_version_bios_show(struct device - MPT_SCSI_HOST *hd = shost_priv(host); - MPT_ADAPTER *ioc = hd->ioc; - -- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", -+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", - (ioc->biosVersion & 0xFF000000) >> 24, - (ioc->biosVersion & 0x00FF0000) >> 16, - 
(ioc->biosVersion & 0x0000FF00) >> 8, - ioc->biosVersion & 0x000000FF); - } --static DEVICE_ATTR(version_bios, S_IRUGO, mptscsih_version_bios_show, NULL); - - static ssize_t - mptscsih_version_mpi_show(struct device *dev, struct device_attribute *attr, -@@ -3132,14 +3779,17 @@ mptscsih_version_mpi_show(struct device - MPT_SCSI_HOST *hd = shost_priv(host); - MPT_ADAPTER *ioc = hd->ioc; - -- return snprintf(buf, PAGE_SIZE, "%03x\n", ioc->facts.MsgVersion); -+ if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) -+ return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", -+ ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); -+ else -+ return snprintf(buf, PAGE_SIZE, "%03x\n", -+ ioc->facts.MsgVersion); - } --static DEVICE_ATTR(version_mpi, S_IRUGO, mptscsih_version_mpi_show, NULL); - - static ssize_t - mptscsih_version_product_show(struct device *dev, -- struct device_attribute *attr, --char *buf) -+ struct device_attribute *attr, char *buf) - { - struct Scsi_Host *host = class_to_shost(dev); - MPT_SCSI_HOST *hd = shost_priv(host); -@@ -3147,8 +3797,6 @@ char *buf) - - return snprintf(buf, PAGE_SIZE, "%s\n", ioc->prod_name); - } --static DEVICE_ATTR(version_product, S_IRUGO, -- mptscsih_version_product_show, NULL); - - static ssize_t - mptscsih_version_nvdata_persistent_show(struct device *dev, -@@ -3162,8 +3810,6 @@ mptscsih_version_nvdata_persistent_show( - return snprintf(buf, PAGE_SIZE, "%02xh\n", - ioc->nvdata_version_persistent); - } --static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, -- mptscsih_version_nvdata_persistent_show, NULL); - - static ssize_t - mptscsih_version_nvdata_default_show(struct device *dev, -@@ -3175,8 +3821,6 @@ mptscsih_version_nvdata_default_show(str - - return snprintf(buf, PAGE_SIZE, "%02xh\n",ioc->nvdata_version_default); - } --static DEVICE_ATTR(version_nvdata_default, S_IRUGO, -- mptscsih_version_nvdata_default_show, NULL); - - static ssize_t - mptscsih_board_name_show(struct device *dev, struct device_attribute *attr, -@@ -3188,11 
+3832,10 @@ mptscsih_board_name_show(struct device * - - return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_name); - } --static DEVICE_ATTR(board_name, S_IRUGO, mptscsih_board_name_show, NULL); - - static ssize_t --mptscsih_board_assembly_show(struct device *dev, -- struct device_attribute *attr, char *buf) -+mptscsih_board_assembly_show(struct device *dev, struct device_attribute *attr, -+ char *buf) - { - struct Scsi_Host *host = class_to_shost(dev); - MPT_SCSI_HOST *hd = shost_priv(host); -@@ -3200,8 +3843,6 @@ mptscsih_board_assembly_show(struct devi - - return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_assembly); - } --static DEVICE_ATTR(board_assembly, S_IRUGO, -- mptscsih_board_assembly_show, NULL); - - static ssize_t - mptscsih_board_tracer_show(struct device *dev, struct device_attribute *attr, -@@ -3213,8 +3854,6 @@ mptscsih_board_tracer_show(struct device - - return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_tracer); - } --static DEVICE_ATTR(board_tracer, S_IRUGO, -- mptscsih_board_tracer_show, NULL); - - static ssize_t - mptscsih_io_delay_show(struct device *dev, struct device_attribute *attr, -@@ -3226,8 +3865,6 @@ mptscsih_io_delay_show(struct device *de - - return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); - } --static DEVICE_ATTR(io_delay, S_IRUGO, -- mptscsih_io_delay_show, NULL); - - static ssize_t - mptscsih_device_delay_show(struct device *dev, struct device_attribute *attr, -@@ -3239,8 +3876,6 @@ mptscsih_device_delay_show(struct device - - return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); - } --static DEVICE_ATTR(device_delay, S_IRUGO, -- mptscsih_device_delay_show, NULL); - - static ssize_t - mptscsih_debug_level_show(struct device *dev, struct device_attribute *attr, -@@ -3252,6 +3887,7 @@ mptscsih_debug_level_show(struct device - - return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->debug_level); - } -+ - static ssize_t - mptscsih_debug_level_store(struct device *dev, struct device_attribute *attr, - const 
char *buf, size_t count) -@@ -3265,14 +3901,78 @@ mptscsih_debug_level_store(struct device - return -EINVAL; - - ioc->debug_level = val; -- printk(MYIOC_s_INFO_FMT "debug_level=%08xh\n", -- ioc->name, ioc->debug_level); -+ printk(MYIOC_s_INFO_FMT "debug_level=%08xh\n", ioc->name, -+ ioc->debug_level); -+ return strlen(buf); -+} -+ -+static ssize_t -+mptscsih_disable_hotplug_remove_show(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct Scsi_Host *host = class_to_shost(dev); -+ MPT_SCSI_HOST *hd = shost_priv(host); -+ MPT_ADAPTER *ioc = hd->ioc; -+ -+ return snprintf(buf, PAGE_SIZE, "%02xh\n", ioc->disable_hotplug_remove); -+} -+static ssize_t -+mptscsih_disable_hotplug_remove_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct Scsi_Host *host = class_to_shost(dev); -+ MPT_SCSI_HOST *hd = shost_priv(host); -+ MPT_ADAPTER *ioc = hd->ioc; -+ int val = 0; -+ -+ if (sscanf(buf, "%x", &val) != 1) -+ return -EINVAL; -+ -+ ioc->disable_hotplug_remove = val; -+ if (ioc->disable_hotplug_remove) -+ printk(MYIOC_s_INFO_FMT "disabling hotplug remove\n", -+ ioc->name); -+ else -+ printk(MYIOC_s_INFO_FMT "eanbling hotplug remove\n", ioc->name); - return strlen(buf); - } -+ -+static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, -+ mptscsih_fault_show, mptscsih_fault_store); -+static DEVICE_ATTR(ring_buffer_size, S_IRUGO, -+ mptscsih_ring_buffer_size_show, NULL); -+static DEVICE_ATTR(ring_buffer, S_IRUGO | S_IWUSR, -+ mptscsih_ring_buffer_show, mptscsih_ring_buffer_store); -+static DEVICE_ATTR(version_fw, S_IRUGO, mptscsih_version_fw_show, NULL); -+static DEVICE_ATTR(version_bios, S_IRUGO, -+ mptscsih_version_bios_show, NULL); -+static DEVICE_ATTR(version_mpi, S_IRUGO, mptscsih_version_mpi_show, NULL); -+static DEVICE_ATTR(version_product, S_IRUGO, -+ mptscsih_version_product_show, NULL); -+static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, -+ mptscsih_version_nvdata_persistent_show, NULL); -+static 
DEVICE_ATTR(version_nvdata_default, S_IRUGO, -+ mptscsih_version_nvdata_default_show, NULL); -+static DEVICE_ATTR(board_name, S_IRUGO, mptscsih_board_name_show, NULL); -+static DEVICE_ATTR(board_assembly, S_IRUGO, -+ mptscsih_board_assembly_show, NULL); -+static DEVICE_ATTR(board_tracer, S_IRUGO, -+ mptscsih_board_tracer_show, NULL); -+static DEVICE_ATTR(io_delay, S_IRUGO, -+ mptscsih_io_delay_show, NULL); -+static DEVICE_ATTR(device_delay, S_IRUGO, -+ mptscsih_device_delay_show, NULL); - static DEVICE_ATTR(debug_level, S_IRUGO | S_IWUSR, -- mptscsih_debug_level_show, mptscsih_debug_level_store); -+ mptscsih_debug_level_show, mptscsih_debug_level_store); -+static DEVICE_ATTR(disable_hotplug_remove, S_IRUGO | S_IWUSR, -+ mptscsih_disable_hotplug_remove_show, mptscsih_disable_hotplug_remove_store); - - struct device_attribute *mptscsih_host_attrs[] = { -+ &dev_attr_fault, -+ &dev_attr_ring_buffer_size, -+ &dev_attr_ring_buffer, - &dev_attr_version_fw, - &dev_attr_version_bios, - &dev_attr_version_mpi, -@@ -3285,6 +3985,7 @@ struct device_attribute *mptscsih_host_a - &dev_attr_io_delay, - &dev_attr_device_delay, - &dev_attr_debug_level, -+ &dev_attr_disable_hotplug_remove, - NULL, - }; - -@@ -3312,5 +4013,9 @@ EXPORT_SYMBOL(mptscsih_scandv_complete); - EXPORT_SYMBOL(mptscsih_event_process); - EXPORT_SYMBOL(mptscsih_ioc_reset); - EXPORT_SYMBOL(mptscsih_change_queue_depth); -- -+EXPORT_SYMBOL(mptscsih_IssueTaskMgmt); -+EXPORT_SYMBOL(mptscsih_do_cmd); -+EXPORT_SYMBOL(mptscsih_quiesce_raid); -+EXPORT_SYMBOL(mptscsih_get_scsi_lookup); -+EXPORT_SYMBOL(mptscsih_taskmgmt_response_code); - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ ---- a/drivers/message/fusion/mptscsih.h -+++ b/drivers/message/fusion/mptscsih.h -@@ -85,12 +85,14 @@ - #define MPTSCSIH_DOMAIN_VALIDATION 1 - #define MPTSCSIH_MAX_WIDTH 1 - #define MPTSCSIH_MIN_SYNC 0x08 -+#define MPTSCSIH_QAS 1 - #define MPTSCSIH_SAF_TE 0 - #define MPTSCSIH_PT_CLEAR 0 - -+#define 
TRANSPORT_LAYER_RETRIES 0xC2 - #endif - -- -+#define mpt_sg_next(sg) sg_next(sg) - typedef struct _internal_cmd { - char *data; /* data pointer */ - dma_addr_t data_dma; /* data dma address */ -@@ -114,9 +116,7 @@ extern int mptscsih_resume(struct pci_de - extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func); - extern const char * mptscsih_info(struct Scsi_Host *SChost); - extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)); --extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, -- u8 id, int lun, int ctx2abort, ulong timeout); --extern void mptscsih_slave_destroy(struct scsi_device *device); -+extern void mptscsih_slave_destroy(struct scsi_device *sdev); - extern int mptscsih_slave_configure(struct scsi_device *device); - extern int mptscsih_abort(struct scsi_cmnd * SCpnt); - extern int mptscsih_dev_reset(struct scsi_cmnd * SCpnt); -@@ -130,8 +130,11 @@ extern int mptscsih_event_process(MPT_AD - extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); - extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, - int reason); -+extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout); - extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); - extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); -+extern int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); - extern struct device_attribute *mptscsih_host_attrs[]; --extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); -+extern int mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id); -+extern struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); - extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code); ---- a/drivers/message/fusion/mptspi.c -+++ b/drivers/message/fusion/mptspi.c -@@ -43,7 
+43,7 @@ - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -- -+#include - #include - #include - #include -@@ -53,8 +53,10 @@ - #include /* for mdelay */ - #include /* needed for in_interrupt() proto */ - #include /* notifier code */ -+#include - #include - #include -+#include - - #include - #include -@@ -83,6 +85,10 @@ static int mpt_saf_te = MPTSCSIH_SAF_TE; - module_param(mpt_saf_te, int, 0); - MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)"); - -+static int mpt_qas = MPTSCSIH_QAS; -+module_param(mpt_qas, int, 1); -+MODULE_PARM_DESC(mpt_qas, " Quick Arbitration and Selection (QAS) enabled=1, disabled=0 (default=MPTSCSIH_QAS=1)"); -+ - static void mptspi_write_offset(struct scsi_target *, int); - static void mptspi_write_width(struct scsi_target *, int); - static int mptspi_write_spi_device_pg1(struct scsi_target *, -@@ -95,12 +101,12 @@ static u8 mptspiTaskCtx = MPT_MAX_PROTOC - static u8 mptspiInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */ - - /** -- * mptspi_setTargetNegoParms - Update the target negotiation parameters -+ * mptspi_setTargetNegoParms - Update the target negotiation parameters - * @hd: Pointer to a SCSI Host Structure - * @target: per target private data - * @sdev: SCSI device - * -- * Update the target negotiation parameters based on the the Inquiry -+ * Update the target negotiation parameters based on the the Inquiry - * data, adapter capabilities, and NVRAM settings. 
- **/ - static void -@@ -131,7 +137,7 @@ mptspi_setTargetNegoParms(MPT_SCSI_HOST - if (scsi_device_sync(sdev)) { - factor = pspi_data->minSyncFactor; - if (!scsi_device_dt(sdev)) -- factor = MPT_ULTRA2; -+ factor = MPT_ULTRA2; - else { - if (!scsi_device_ius(sdev) && - !scsi_device_qas(sdev)) -@@ -209,6 +215,10 @@ mptspi_setTargetNegoParms(MPT_SCSI_HOST - target->maxOffset = offset; - target->maxWidth = width; - -+ spi_min_period(scsi_target(sdev)) = factor; -+ spi_max_offset(scsi_target(sdev)) = offset; -+ spi_max_width(scsi_target(sdev)) = width; -+ - target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO; - - /* Disable unused features. -@@ -230,7 +240,7 @@ mptspi_setTargetNegoParms(MPT_SCSI_HOST - */ - - ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "Disabling QAS due to noQas=%02x on id=%d!\n", ioc->name, noQas, id)); -+ "Disabling QAS due to noQas=%02x on id=%d!\n", ioc->name, noQas, id)); - } - } - -@@ -262,7 +272,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, - */ - if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) { - dfailprintk(ioc, printk(MYIOC_s_WARN_FMT -- "writeIOCPage4 : no msg frames!\n",ioc->name)); -+ "writeIOCPage4 : no msg frames!\n", ioc->name)); - return -EAGAIN; - } - -@@ -304,7 +314,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, - - ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n", -- ioc->name, IOCPage4Ptr->MaxSEP, IOCPage4Ptr->ActiveSEP, id, channel)); -+ ioc->name, IOCPage4Ptr->MaxSEP, IOCPage4Ptr->ActiveSEP, id, channel)); - - mpt_put_msg_frame(ioc->DoneCtx, ioc, mf); - -@@ -371,7 +381,7 @@ mptspi_initTarget(MPT_SCSI_HOST *hd, Vir - * non-zero = true - * zero = false - * -- */ -+ **/ - static int - mptspi_is_raid(struct _MPT_SCSI_HOST *hd, u32 id) - { -@@ -399,12 +409,11 @@ static int mptspi_target_alloc(struct sc - struct Scsi_Host *shost = dev_to_shost(&starget->dev); - struct _MPT_SCSI_HOST *hd = shost_priv(shost); - VirtTarget *vtarget; -- MPT_ADAPTER *ioc; -+ MPT_ADAPTER *ioc = hd->ioc; - - if 
(hd == NULL) - return -ENODEV; - -- ioc = hd->ioc; - vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL); - if (!vtarget) - return -ENOMEM; -@@ -471,9 +480,12 @@ mptspi_target_destroy(struct scsi_target - static void - mptspi_print_write_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii) - { -- ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d Requested = 0x%08x" -+ if (!(hd->ioc->debug_level & MPT_DEBUG_DV)) -+ return; -+ -+ starget_printk(KERN_DEBUG, starget, MYIOC_s_FMT "Wrote = 0x%08x" - " ( %s factor = 0x%02x @ offset = 0x%02x %s%s%s%s%s%s%s%s)\n", -- hd->ioc->name, starget->id, ii, -+ hd->ioc->name, ii, - ii & MPI_SCSIDEVPAGE0_NP_WIDE ? "Wide ": "", - ((ii >> 8) & 0xFF), ((ii >> 16) & 0xFF), - ii & MPI_SCSIDEVPAGE0_NP_IU ? "IU ": "", -@@ -483,7 +495,7 @@ mptspi_print_write_nego(struct _MPT_SCSI - ii & MPI_SCSIDEVPAGE0_NP_WR_FLOW ? "WRFLOW ": "", - ii & MPI_SCSIDEVPAGE0_NP_RD_STRM ? "RDSTRM ": "", - ii & MPI_SCSIDEVPAGE0_NP_RTI ? "RTI ": "", -- ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? "PCOMP ": "")); -+ ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? "PCOMP ": ""); - } - - /** -@@ -496,9 +508,12 @@ mptspi_print_write_nego(struct _MPT_SCSI - static void - mptspi_print_read_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii) - { -- ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d Read = 0x%08x" -+ if (!(hd->ioc->debug_level & MPT_DEBUG_DV)) -+ return; -+ -+ starget_printk(KERN_DEBUG, starget, MYIOC_s_FMT "Read = 0x%08x" - " ( %s factor = 0x%02x @ offset = 0x%02x %s%s%s%s%s%s%s%s)\n", -- hd->ioc->name, starget->id, ii, -+ hd->ioc->name, ii, - ii & MPI_SCSIDEVPAGE0_NP_WIDE ? "Wide ": "", - ((ii >> 8) & 0xFF), ((ii >> 16) & 0xFF), - ii & MPI_SCSIDEVPAGE0_NP_IU ? "IU ": "", -@@ -508,7 +523,7 @@ mptspi_print_read_nego(struct _MPT_SCSI_ - ii & MPI_SCSIDEVPAGE0_NP_WR_FLOW ? "WRFLOW ": "", - ii & MPI_SCSIDEVPAGE0_NP_RD_STRM ? "RDSTRM ": "", - ii & MPI_SCSIDEVPAGE0_NP_RTI ? "RTI ": "", -- ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? 
"PCOMP ": "")); -+ ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? "PCOMP ": ""); - } - - static int mptspi_read_spi_device_pg0(struct scsi_target *starget, -@@ -557,9 +572,11 @@ static int mptspi_read_spi_device_pg0(st - cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - cfg.dir = 0; - cfg.pageAddr = starget->id; -+ cfg.timeout = 60; - - if (mpt_config(ioc, &cfg)) { -- starget_printk(KERN_ERR, starget, MYIOC_s_FMT "mpt_config failed\n", ioc->name); -+ starget_printk(KERN_ERR, starget, -+ MYIOC_s_FMT "mpt_config failed\n", ioc->name); - goto out_free; - } - err = 0; -@@ -614,76 +631,11 @@ static void mptspi_read_parameters(struc - spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0; - } - --int --mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id) --{ -- MPT_ADAPTER *ioc = hd->ioc; -- MpiRaidActionRequest_t *pReq; -- MPT_FRAME_HDR *mf; -- int ret; -- unsigned long timeleft; -- -- mutex_lock(&ioc->internal_cmds.mutex); -- -- /* Get and Populate a free Frame -- */ -- if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { -- dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT -- "%s: no msg frames!\n", ioc->name, __func__)); -- ret = -EAGAIN; -- goto out; -- } -- pReq = (MpiRaidActionRequest_t *)mf; -- if (quiesce) -- pReq->Action = MPI_RAID_ACTION_QUIESCE_PHYS_IO; -- else -- pReq->Action = MPI_RAID_ACTION_ENABLE_PHYS_IO; -- pReq->Reserved1 = 0; -- pReq->ChainOffset = 0; -- pReq->Function = MPI_FUNCTION_RAID_ACTION; -- pReq->VolumeID = id; -- pReq->VolumeBus = channel; -- pReq->PhysDiskNum = 0; -- pReq->MsgFlags = 0; -- pReq->Reserved2 = 0; -- pReq->ActionDataWord = 0; /* Reserved for this action */ -- -- ioc->add_sge((char *)&pReq->ActionDataSGE, -- MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); -- -- ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n", -- ioc->name, pReq->Action, channel, id)); -- -- INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status) -- mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); -- timeleft = 
wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ); -- if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -- ret = -ETIME; -- dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n", -- ioc->name, __func__)); -- if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) -- goto out; -- if (!timeleft) { -- printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", -- ioc->name, __func__); -- mpt_HardResetHandler(ioc, CAN_SLEEP); -- mpt_free_msg_frame(ioc, mf); -- } -- goto out; -- } -- -- ret = ioc->internal_cmds.completion_code; -- -- out: -- CLEAR_MGMT_STATUS(ioc->internal_cmds.status) -- mutex_unlock(&ioc->internal_cmds.mutex); -- return ret; --} -- - static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd, - struct scsi_device *sdev) - { - VirtTarget *vtarget = scsi_target(sdev)->hostdata; -+ struct scsi_target *starget = scsi_target(sdev); - MPT_ADAPTER *ioc = hd->ioc; - - /* no DV on RAID devices */ -@@ -691,11 +643,20 @@ static void mptspi_dv_device(struct _MPT - mptspi_is_raid(hd, sdev->id)) - return; - -+ if (ioc->debug_level & MPT_DEBUG_DV) -+ starget_printk(KERN_DEBUG, starget, MYIOC_s_FMT -+ "sdtr=%d, wdtr=%d, ppr=%d, min_period=0x%02x, " -+ "max_offset=0x%02x, max_width=%d, nego_flags=0x%02x, " -+ "tflags=0x%02x\n", ioc->name, sdev->sdtr, sdev->wdtr, -+ sdev->ppr, spi_min_period(starget), -+ spi_max_offset(starget), spi_max_width(starget), -+ vtarget->negoFlags, vtarget->tflags); -+ - /* If this is a piece of a RAID, then quiesce first */ - if (sdev->channel == 1 && - mptscsih_quiesce_raid(hd, 1, vtarget->channel, vtarget->id) < 0) { -- starget_printk(KERN_ERR, scsi_target(sdev), MYIOC_s_FMT -- "Integrated RAID quiesce failed\n", ioc->name); -+ starget_printk(KERN_ERR, scsi_target(sdev), -+ MYIOC_s_FMT "Integrated RAID quiesce failed\n", ioc->name); - return; - } - -@@ -705,8 +666,8 @@ static void mptspi_dv_device(struct _MPT - - if (sdev->channel == 1 && - mptscsih_quiesce_raid(hd, 0, vtarget->channel, vtarget->id) < 0) -- 
starget_printk(KERN_ERR, scsi_target(sdev), MYIOC_s_FMT -- "Integrated RAID resume failed\n", ioc->name); -+ starget_printk(KERN_ERR, scsi_target(sdev), -+ MYIOC_s_FMT "Integrated RAID resume failed\n", ioc->name); - - mptspi_read_parameters(sdev->sdev_target); - spi_display_xfer_agreement(sdev->sdev_target); -@@ -728,7 +689,7 @@ static int mptspi_slave_alloc(struct scs - vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL); - if (!vdevice) { - printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n", -- ioc->name, sizeof(VirtDevice)); -+ ioc->name, sizeof(VirtDevice)); - return -ENOMEM; - } - -@@ -750,21 +711,13 @@ static int mptspi_slave_configure(struct - { - struct _MPT_SCSI_HOST *hd = shost_priv(sdev->host); - VirtTarget *vtarget = scsi_target(sdev)->hostdata; -- int ret; -+ int ret; - - mptspi_initTarget(hd, vtarget, sdev); -- - ret = mptscsih_slave_configure(sdev); -- - if (ret) - return ret; - -- ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d min_period=0x%02x" -- " max_offset=0x%02x max_width=%d\n", hd->ioc->name, -- sdev->id, spi_min_period(scsi_target(sdev)), -- spi_max_offset(scsi_target(sdev)), -- spi_max_width(scsi_target(sdev)))); -- - if ((sdev->channel == 1 || - !(mptspi_is_raid(hd, sdev->id))) && - !spi_initial_dv(sdev->sdev_target)) -@@ -869,8 +822,8 @@ static int mptspi_write_spi_device_pg1(s - - pg1 = dma_alloc_coherent(&ioc->pcidev->dev, size, &pg1_dma, GFP_KERNEL); - if (pg1 == NULL) { -- starget_printk(KERN_ERR, starget, MYIOC_s_FMT -- "dma_alloc_coherent for parameters failed\n", ioc->name); -+ starget_printk(KERN_ERR, starget, -+ MYIOC_s_FMT "dma_alloc_coherent for parameters failed\n", ioc->name); - return -EINVAL; - } - -@@ -899,8 +852,8 @@ static int mptspi_write_spi_device_pg1(s - mptspi_print_write_nego(hd, starget, le32_to_cpu(pg1->RequestedParameters)); - - if (mpt_config(ioc, &cfg)) { -- starget_printk(KERN_ERR, starget, MYIOC_s_FMT -- "mpt_config failed\n", ioc->name); -+ starget_printk(KERN_ERR, starget, -+ MYIOC_s_FMT 
"mpt_config failed\n", ioc->name); - goto out_free; - } - err = 0; -@@ -975,14 +928,15 @@ static void mptspi_write_dt(struct scsi_ - if (spi_period(starget) == -1) - mptspi_read_parameters(starget); - -- if (!dt && spi_period(starget) < 10) -- spi_period(starget) = 10; -+ if (!dt) { -+ spi_qas(starget) = 0; -+ spi_iu(starget) = 0; -+ } - - spi_dt(starget) = dt; - - nego = mptspi_getRP(starget); - -- - pg1.RequestedParameters = cpu_to_le32(nego); - pg1.Reserved = 0; - pg1.Configuration = 0; -@@ -998,9 +952,6 @@ static void mptspi_write_iu(struct scsi_ - if (spi_period(starget) == -1) - mptspi_read_parameters(starget); - -- if (!iu && spi_period(starget) < 9) -- spi_period(starget) = 9; -- - spi_iu(starget) = iu; - - nego = mptspi_getRP(starget); -@@ -1042,9 +993,11 @@ static void mptspi_write_qas(struct scsi - struct _MPT_SCSI_HOST *hd = shost_priv(shost); - VirtTarget *vtarget = starget->hostdata; - u32 nego; -+ MPT_ADAPTER *ioc = hd->ioc; - -- if ((vtarget->negoFlags & MPT_TARGET_NO_NEGO_QAS) || -- hd->ioc->spi_data.noQas) -+ if (!mpt_qas || -+ (vtarget->negoFlags & MPT_TARGET_NO_NEGO_QAS) || -+ ioc->spi_data.noQas) - spi_qas(starget) = 0; - else - spi_qas(starget) = qas; -@@ -1065,8 +1018,8 @@ static void mptspi_write_width(struct sc - - if (!width) { - spi_dt(starget) = 0; -- if (spi_period(starget) < 10) -- spi_period(starget) = 10; -+ spi_qas(starget) = 0; -+ spi_iu(starget) = 0; - } - - spi_width(starget) = width; -@@ -1086,10 +1039,17 @@ struct work_queue_wrapper { - int disk; - }; - --static void mpt_work_wrapper(struct work_struct *work) -+static void -+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) -+mpt_work_wrapper(struct work_struct *work) - { - struct work_queue_wrapper *wqw = - container_of(work, struct work_queue_wrapper, work); -+#else -+mpt_work_wrapper(void *data) -+{ -+ struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; -+#endif - struct _MPT_SCSI_HOST *hd = wqw->hd; - MPT_ADAPTER *ioc = hd->ioc; - struct Scsi_Host *shost = 
ioc->sh; -@@ -1117,12 +1077,12 @@ static void mpt_work_wrapper(struct work - if(vtarget->id != disk) - continue; - -- starget_printk(KERN_INFO, vtarget->starget, MYIOC_s_FMT -- "Integrated RAID requests DV of new device\n", ioc->name); -+ starget_printk(KERN_INFO, vtarget->starget, -+ MYIOC_s_FMT "Integrated RAID requests DV of new device\n", ioc->name); - mptspi_dv_device(hd, sdev); - } -- shost_printk(KERN_INFO, shost, MYIOC_s_FMT -- "Integrated RAID detects new device %d\n", ioc->name, disk); -+ shost_printk(KERN_INFO, shost, -+ MYIOC_s_FMT "Integrated RAID detects new device %d\n", ioc->name, disk); - scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1); - } - -@@ -1133,12 +1093,16 @@ static void mpt_dv_raid(struct _MPT_SCSI - MPT_ADAPTER *ioc = hd->ioc; - - if (!wqw) { -- shost_printk(KERN_ERR, ioc->sh, MYIOC_s_FMT -- "Failed to act on RAID event for physical disk %d\n", -+ shost_printk(KERN_ERR, ioc->sh, -+ MYIOC_s_FMT "Failed to act on RAID event for physical disk %d\n", - ioc->name, disk); - return; - } -+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) - INIT_WORK(&wqw->work, mpt_work_wrapper); -+#else -+ INIT_WORK(&wqw->work, mpt_work_wrapper, wqw); -+#endif - wqw->hd = hd; - wqw->disk = disk; - -@@ -1151,6 +1115,9 @@ mptspi_event_process(MPT_ADAPTER *ioc, E - u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; - struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); - -+ if (ioc->bus_type != SPI) -+ return 0; -+ - if (hd && event == MPI_EVENT_INTEGRATED_RAID) { - int reason - = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16; -@@ -1225,14 +1192,20 @@ static struct pci_device_id mptspi_pci_t - MODULE_DEVICE_TABLE(pci, mptspi_pci_table); - - --/* -+/** - * renegotiate for a given target -- */ -+ **/ - static void -+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) - mptspi_dv_renegotiate_work(struct work_struct *work) - { - struct work_queue_wrapper *wqw = - container_of(work, struct work_queue_wrapper, work); -+#else -+mptspi_dv_renegotiate_work(void *data) 
-+{ -+ struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; -+#endif - struct _MPT_SCSI_HOST *hd = wqw->hd; - struct scsi_device *sdev; - struct scsi_target *starget; -@@ -1267,38 +1240,43 @@ mptspi_dv_renegotiate(struct _MPT_SCSI_H - if (!wqw) - return; - -+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) - INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work); -+#else -+ INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw); -+#endif - wqw->hd = hd; - - schedule_work(&wqw->work); - } - --/* -+/** - * spi module reset handler -- */ -+ **/ - static int - mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) - { -+ struct _MPT_SCSI_HOST *hd = NULL; - int rc; - - rc = mptscsih_ioc_reset(ioc, reset_phase); -+ if ((ioc->bus_type != SPI) || (!rc)) -+ goto out; - -- /* only try to do a renegotiation if we're properly set up -- * if we get an ioc fault on bringup, ioc->sh will be NULL */ -- if (reset_phase == MPT_IOC_POST_RESET && -- ioc->sh) { -- struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); -+ hd = shost_priv(ioc->sh); -+ if (!hd->ioc) -+ goto out; - -+ if (ioc->active && reset_phase == MPT_IOC_POST_RESET) - mptspi_dv_renegotiate(hd); -- } -- -+ out: - return rc; - } - - #ifdef CONFIG_PM --/* -+/** - * spi module resume handler -- */ -+ **/ - static int - mptspi_resume(struct pci_dev *pdev) - { -@@ -1315,13 +1293,13 @@ mptspi_resume(struct pci_dev *pdev) - - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ --/* -+/** - * mptspi_probe - Installs scsi devices per bus. - * @pdev: Pointer to pci_dev structure - * - * Returns 0 for success, non-zero for failure. 
- * -- */ -+ **/ - static int - mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id) - { -@@ -1446,6 +1424,7 @@ mptspi_probe(struct pci_dev *pdev, const - (ioc->req_sz - 64) / ioc->SGE_size; - } - -+ - if (numSGE < sh->sg_tablesize) { - /* Reset this value */ - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT -@@ -1454,11 +1433,11 @@ mptspi_probe(struct pci_dev *pdev, const - sh->sg_tablesize = numSGE; - } - -- spin_unlock_irqrestore(&ioc->FreeQlock, flags); -- - hd = shost_priv(sh); - hd->ioc = ioc; - -+ spin_unlock_irqrestore(&ioc->FreeQlock, flags); -+ - /* SCSI needs scsi_cmnd lookup table! - * (with size equal to req_depth*PtrSz!) - */ -@@ -1472,12 +1451,11 @@ mptspi_probe(struct pci_dev *pdev, const - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n", - ioc->name, ioc->ScsiLookup)); - -+ ioc->sdev_queue_depth = MPT_SCSI_CMD_PER_DEV_HIGH; - ioc->spi_data.Saf_Te = mpt_saf_te; - ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT -- "saf_te %x\n", -- ioc->name, -- mpt_saf_te)); -- ioc->spi_data.noQas = 0; -+ "saf_te %x\n", ioc->name, mpt_saf_te)); -+ ioc->spi_data.noQas = mpt_qas ? 0 : MPT_TARGET_NO_NEGO_QAS; - - hd->last_queue_full = 0; - hd->spi_pending = 0; -@@ -1528,7 +1506,7 @@ static struct pci_driver mptspi_driver = - * mptspi_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer. - * - * Returns 0 for success, non-zero for failure. 
-- */ -+ **/ - static int __init - mptspi_init(void) - { -@@ -1558,7 +1536,8 @@ mptspi_init(void) - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ - /** - * mptspi_exit - Unregisters MPT adapter(s) -- */ -+ * -+ **/ - static void __exit - mptspi_exit(void) - { -@@ -1566,7 +1545,6 @@ mptspi_exit(void) - - mpt_reset_deregister(mptspiDoneCtx); - mpt_event_deregister(mptspiDoneCtx); -- - mpt_deregister(mptspiInternalCtx); - mpt_deregister(mptspiTaskCtx); - mpt_deregister(mptspiDoneCtx); ---- /dev/null -+++ b/drivers/message/fusion/rejected_ioctls/diag_buffer.c -@@ -0,0 +1,671 @@ -+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+/* REGISTER DIAG BUFFER Routine. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -EBUSY if previous command timout and IOC reset is not complete. -+ * -ENODEV if no such device/adapter -+ * -ETIME if timer expires -+ * -ENOMEM if memory allocation error -+ */ -+static int -+mptctl_register_diag_buffer (unsigned long arg) -+{ -+ mpt_diag_register_t __user *uarg = (void __user *) arg; -+ mpt_diag_register_t karg; -+ MPT_ADAPTER *ioc; -+ int iocnum, rc, ii; -+ void * request_data; -+ dma_addr_t request_data_dma; -+ u32 request_data_sz; -+ MPT_FRAME_HDR *mf; -+ DiagBufferPostRequest_t *diag_buffer_post_request; -+ DiagBufferPostReply_t *diag_buffer_post_reply; -+ u32 tmp; -+ u8 buffer_type; -+ unsigned long timeleft; -+ -+ rc = 0; -+ if (copy_from_user(&karg, uarg, sizeof(mpt_diag_register_t))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to read in mpt_diag_register_t struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || -+ (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s enter.\n", ioc->name, 
-+ __FUNCTION__)); -+ buffer_type = karg.data.BufferType; -+ if (!(ioc->facts.IOCCapabilities & MPT_DIAG_CAPABILITY(buffer_type))) { -+ printk(MYIOC_s_DEBUG_FMT "%s: doesn't have Capability for " -+ "buffer_type=%x\n", ioc->name, __FUNCTION__, buffer_type); -+ return -ENODEV; -+ } -+ -+ if (ioc->DiagBuffer_Status[buffer_type] & -+ MPT_DIAG_BUFFER_IS_REGISTERED) { -+ printk(MYIOC_s_DEBUG_FMT "%s: already has a Registered " -+ "buffer for buffer_type=%x\n", ioc->name, __FUNCTION__, -+ buffer_type); -+ return -EFAULT; -+ } -+ -+ /* Get a free request frame and save the message context. -+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) -+ return -EAGAIN; -+ -+ request_data = ioc->DiagBuffer[buffer_type]; -+ request_data_sz = karg.data.RequestedBufferSize; -+ -+ if (request_data) { -+ request_data_dma = ioc->DiagBuffer_dma[buffer_type]; -+ if (request_data_sz != ioc->DiagBuffer_sz[buffer_type]) { -+ pci_free_consistent(ioc->pcidev, -+ ioc->DiagBuffer_sz[buffer_type], -+ request_data, request_data_dma); -+ request_data = NULL; -+ } -+ } -+ -+ if (request_data == NULL) { -+ ioc->DiagBuffer_sz[buffer_type] = 0; -+ ioc->DiagBuffer_dma[buffer_type] = 0; -+ ioc->DataSize[buffer_type] = 0; -+ request_data = pci_alloc_consistent( -+ ioc->pcidev, request_data_sz, &request_data_dma); -+ if (request_data == NULL) { -+ printk(MYIOC_s_DEBUG_FMT "%s: pci_alloc_consistent" -+ " FAILED, (request_sz=%d)\n", ioc->name, -+ __FUNCTION__, request_data_sz); -+ mpt_free_msg_frame(ioc, mf); -+ return -EAGAIN; -+ } -+ ioc->DiagBuffer[buffer_type] = request_data; -+ ioc->DiagBuffer_sz[buffer_type] = request_data_sz; -+ ioc->DiagBuffer_dma[buffer_type] = request_data_dma; -+ } -+ -+ ioc->DiagBuffer_Status[buffer_type] = 0; -+ diag_buffer_post_request = (DiagBufferPostRequest_t *)mf; -+ diag_buffer_post_request->Function = MPI_FUNCTION_DIAG_BUFFER_POST; -+ diag_buffer_post_request->ChainOffset = 0; -+ diag_buffer_post_request->BufferType = karg.data.BufferType; -+ 
diag_buffer_post_request->TraceLevel = ioc->TraceLevel[buffer_type] = -+ karg.data.TraceLevel; -+ diag_buffer_post_request->MsgFlags = 0; -+ diag_buffer_post_request->Reserved1 = 0; -+ diag_buffer_post_request->Reserved2 = 0; -+ diag_buffer_post_request->Reserved3 = 0; -+ diag_buffer_post_request->BufferAddress.High = 0; -+ if (buffer_type == MPI_DIAG_BUF_TYPE_EXTENDED) -+ ioc->ExtendedType[buffer_type] = karg.data.ExtendedType; -+ else -+ ioc->ExtendedType[buffer_type] = 0; -+ diag_buffer_post_request->ExtendedType = -+ cpu_to_le32(ioc->ExtendedType[buffer_type]); -+ ioc->UniqueId[buffer_type] = karg.data.UniqueId; -+ diag_buffer_post_request->BufferLength = cpu_to_le32(request_data_sz); -+ for (ii = 0; ii < 4; ii++) { -+ ioc->ProductSpecific[buffer_type][ii] = -+ karg.data.ProductSpecific[ii]; -+ diag_buffer_post_request->ProductSpecific[ii] = -+ cpu_to_le32(ioc->ProductSpecific[buffer_type][ii]); -+ } -+ -+ tmp = request_data_dma & 0xFFFFFFFF; -+ diag_buffer_post_request->BufferAddress.Low = cpu_to_le32(tmp); -+ if (ioc->sg_addr_size == sizeof(u64)) { -+ tmp = (u32)((u64)request_data_dma >> 32); -+ diag_buffer_post_request->BufferAddress.High = cpu_to_le32(tmp); -+ } -+ -+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, -+ diag_buffer_post_request->MsgContext); -+ INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) -+ mpt_put_msg_frame(mptctl_id, ioc, mf); -+ timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, -+ MPT_IOCTL_DEFAULT_TIMEOUT*HZ); -+ if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -+ rc = -ETIME; -+ printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, -+ __FUNCTION__); -+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { -+ mpt_free_msg_frame(ioc, mf); -+ goto out; -+ } -+ if (!timeleft) -+ mptctl_timeout_expired(ioc, mf); -+ goto out; -+ } -+ -+ /* process the completed Reply Message Frame */ -+ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) == 0) { -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: 
status=%x\n", -+ ioc->name, __FUNCTION__, ioc->ioctl_cmds.status)); -+ rc = -EFAULT; -+ goto out; -+ } -+ -+ diag_buffer_post_reply = (DiagBufferPostReply_t *)ioc->ioctl_cmds.reply; -+ if (le16_to_cpu(diag_buffer_post_reply->IOCStatus) == -+ MPI_IOCSTATUS_SUCCESS) { -+ if (diag_buffer_post_reply->MsgLength > 5) -+ ioc->DataSize[buffer_type] = -+ le32_to_cpu(diag_buffer_post_reply->TransferLength); -+ ioc->DiagBuffer_Status[buffer_type] |= -+ MPT_DIAG_BUFFER_IS_REGISTERED; -+ } else { -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: IOCStatus=%x " -+ "IOCLogInfo=%x\n", ioc->name, __FUNCTION__, -+ diag_buffer_post_reply->IOCStatus, -+ diag_buffer_post_reply->IOCLogInfo)); -+ rc = -EFAULT; -+ } -+ -+ out: -+ -+ CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) -+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); -+ if (rc) { -+ pci_free_consistent(ioc->pcidev, request_data_sz, -+ request_data, request_data_dma); -+ ioc->DiagBuffer[buffer_type] = NULL; -+ ioc->DiagBuffer_sz[buffer_type] = 0; -+ ioc->DiagBuffer_dma[buffer_type] = 0; -+ } -+ return rc; -+} -+ -+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+/* RELEASE DIAG BUFFER Routine. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -EBUSY if previous command timout and IOC reset is not complete. 
-+ * -ENODEV if no such device/adapter -+ * -ETIME if timer expires -+ * -ENOMEM if memory allocation error -+ */ -+static int -+mptctl_release_diag_buffer (unsigned long arg) -+{ -+ mpt_diag_release_t __user *uarg = (void __user *) arg; -+ mpt_diag_release_t karg; -+ MPT_ADAPTER *ioc; -+ void * request_data; -+ int iocnum, rc; -+ MPT_FRAME_HDR *mf; -+ DiagReleaseRequest_t *diag_release; -+ DiagReleaseReply_t *diag_release_reply; -+ u8 buffer_type; -+ unsigned long timeleft; -+ -+ rc = 0; -+ if (copy_from_user(&karg, uarg, sizeof(mpt_diag_release_t))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to read in mpt_diag_release_t struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || -+ (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s enter.\n", ioc->name, -+ __FUNCTION__)); -+ buffer_type = karg.data.UniqueId & 0x000000ff; -+ if (!(ioc->facts.IOCCapabilities & MPT_DIAG_CAPABILITY(buffer_type))) { -+ printk(MYIOC_s_DEBUG_FMT "%s: doesn't have Capability for " -+ "buffer_type=%x\n", ioc->name, __FUNCTION__, buffer_type); -+ return -ENODEV; -+ } -+ -+ if ((ioc->DiagBuffer_Status[buffer_type] & -+ MPT_DIAG_BUFFER_IS_REGISTERED) == 0 ) { -+ printk(MYIOC_s_DEBUG_FMT "%s: buffer_type=%x is not " -+ "registered\n", ioc->name, __FUNCTION__, buffer_type); -+ return -EFAULT; -+ } -+ -+ if (karg.data.UniqueId != ioc->UniqueId[buffer_type]) { -+ printk(MYIOC_s_DEBUG_FMT "%s: unique_id=%x is not registered\n", -+ ioc->name, __FUNCTION__, karg.data.UniqueId); -+ return -EFAULT; -+ } -+ -+ if (ioc->DiagBuffer_Status[buffer_type] & MPT_DIAG_BUFFER_IS_RELEASED) { -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: buffer_type=%x " -+ "is already released\n", ioc->name, __FUNCTION__, -+ buffer_type)); -+ return rc; -+ } -+ -+ request_data = 
ioc->DiagBuffer[buffer_type]; -+ -+ if (request_data == NULL) { -+ printk(MYIOC_s_DEBUG_FMT "%s: doesn't have buffer for " -+ "buffer_type=%x\n", ioc->name, __FUNCTION__, buffer_type); -+ return -ENODEV; -+ } -+ -+ /* Get a free request frame and save the message context. -+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) -+ return -EAGAIN; -+ -+ diag_release = (DiagReleaseRequest_t *)mf; -+ diag_release->Function = MPI_FUNCTION_DIAG_RELEASE; -+ diag_release->BufferType = buffer_type; -+ diag_release->ChainOffset = 0; -+ diag_release->Reserved1 = 0; -+ diag_release->Reserved2 = 0; -+ diag_release->Reserved3 = 0; -+ diag_release->MsgFlags = 0; -+ -+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, -+ diag_release->MsgContext); -+ INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) -+ mpt_put_msg_frame(mptctl_id, ioc, mf); -+ timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, -+ MPT_IOCTL_DEFAULT_TIMEOUT*HZ); -+ if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -+ rc = -ETIME; -+ printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, -+ __FUNCTION__); -+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { -+ mpt_free_msg_frame(ioc, mf); -+ goto out; -+ } -+ if (!timeleft) -+ mptctl_timeout_expired(ioc, mf); -+ goto out; -+ } -+ -+ /* process the completed Reply Message Frame */ -+ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) == 0) { -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: status=%x\n", -+ ioc->name, __FUNCTION__, ioc->ioctl_cmds.status)); -+ rc = -EFAULT; -+ goto out; -+ } -+ -+ diag_release_reply = (DiagReleaseReply_t *)ioc->ioctl_cmds.reply; -+ if (le16_to_cpu(diag_release_reply->IOCStatus) != -+ MPI_IOCSTATUS_SUCCESS) { -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: IOCStatus=%x " -+ "IOCLogInfo=%x\n", -+ ioc->name, __FUNCTION__, diag_release_reply->IOCStatus, -+ diag_release_reply->IOCLogInfo)); -+ rc = -EFAULT; -+ } else -+ ioc->DiagBuffer_Status[buffer_type] |= -+ MPT_DIAG_BUFFER_IS_RELEASED; -+ -+ 
out: -+ -+ CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) -+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); -+ return rc; -+} -+ -+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+/* UNREGISTER DIAG BUFFER Routine. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -EBUSY if previous command timout and IOC reset is not complete. -+ * -ENODEV if no such device/adapter -+ * -ETIME if timer expires -+ * -ENOMEM if memory allocation error -+ */ -+static int -+mptctl_unregister_diag_buffer (unsigned long arg) -+{ -+ mpt_diag_unregister_t __user *uarg = (void __user *) arg; -+ mpt_diag_unregister_t karg; -+ MPT_ADAPTER *ioc; -+ int iocnum; -+ void * request_data; -+ dma_addr_t request_data_dma; -+ u32 request_data_sz; -+ u8 buffer_type; -+ -+ if (copy_from_user(&karg, uarg, sizeof(mpt_diag_unregister_t))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to read in mpt_diag_unregister_t struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || -+ (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ return -ENODEV; -+ } -+ -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s enter.\n", ioc->name, -+ __FUNCTION__)); -+ buffer_type = karg.data.UniqueId & 0x000000ff; -+ if (!(ioc->facts.IOCCapabilities & MPT_DIAG_CAPABILITY(buffer_type))) { -+ printk(MYIOC_s_DEBUG_FMT "%s: doesn't have Capability for " -+ "buffer_type=%x\n", ioc->name, __FUNCTION__, buffer_type); -+ return -ENODEV; -+ } -+ -+ if ((ioc->DiagBuffer_Status[buffer_type] & -+ MPT_DIAG_BUFFER_IS_REGISTERED) == 0) { -+ printk(MYIOC_s_DEBUG_FMT "%s: buffer_type=%x is not " -+ "registered\n", ioc->name, __FUNCTION__, buffer_type); -+ return -EFAULT; -+ } -+ if ((ioc->DiagBuffer_Status[buffer_type] & -+ MPT_DIAG_BUFFER_IS_RELEASED) == 0) { -+ printk(MYIOC_s_DEBUG_FMT "%s: buffer_type=%x 
has not been " -+ "released\n", ioc->name, __FUNCTION__, buffer_type); -+ return -EFAULT; -+ } -+ -+ if (karg.data.UniqueId != ioc->UniqueId[buffer_type]) { -+ printk(MYIOC_s_DEBUG_FMT "%s: unique_id=%x is not registered\n", -+ ioc->name, __FUNCTION__, karg.data.UniqueId); -+ return -EFAULT; -+ } -+ -+ request_data = ioc->DiagBuffer[buffer_type]; -+ if (!request_data) { -+ printk(MYIOC_s_DEBUG_FMT "%s: doesn't have buffer for " -+ "buffer_type=%x\n", ioc->name, __FUNCTION__, buffer_type); -+ return -ENODEV; -+ } -+ -+ request_data_sz = ioc->DiagBuffer_sz[buffer_type]; -+ request_data_dma = ioc->DiagBuffer_dma[buffer_type]; -+ pci_free_consistent(ioc->pcidev, request_data_sz, -+ request_data, request_data_dma); -+ ioc->DiagBuffer[buffer_type] = NULL; -+ ioc->DiagBuffer_Status[buffer_type] = 0; -+ return 0; -+} -+ -+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+/* QUERY DIAG BUFFER Routine. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -EBUSY if previous command timout and IOC reset is not complete. 
-+ * -ENODEV if no such device/adapter -+ * -ETIME if timer expires -+ * -ENOMEM if memory allocation error -+ */ -+static int -+mptctl_query_diag_buffer (unsigned long arg) -+{ -+ mpt_diag_query_t __user *uarg = (void __user *)arg; -+ mpt_diag_query_t karg; -+ MPT_ADAPTER *ioc; -+ void * request_data; -+ int iocnum, ii, rc; -+ u8 buffer_type; -+ -+ rc = -EFAULT; -+ if (copy_from_user(&karg, uarg, sizeof(mpt_diag_query_t))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to read in mpt_diag_query_t struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ karg.data.Flags = 0; -+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || -+ (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); -+ goto out; -+ } -+ -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s enter.\n", ioc->name, -+ __FUNCTION__)); -+ buffer_type = karg.data.BufferType; -+ if (!(ioc->facts.IOCCapabilities & MPT_DIAG_CAPABILITY(buffer_type))) { -+ printk(MYIOC_s_DEBUG_FMT "%s: doesn't have Capability for " -+ "buffer_type=%x\n", ioc->name, __FUNCTION__, buffer_type); -+ goto out; -+ } -+ -+ if ((ioc->DiagBuffer_Status[buffer_type] & -+ MPT_DIAG_BUFFER_IS_REGISTERED) == 0) { -+ printk(MYIOC_s_DEBUG_FMT "%s: buffer_type=%x is not " -+ "registered\n", ioc->name, __FUNCTION__, buffer_type); -+ goto out; -+ } -+ -+ if (karg.data.UniqueId & 0xffffff00) { -+ if (karg.data.UniqueId != ioc->UniqueId[buffer_type]) { -+ printk(MYIOC_s_DEBUG_FMT "%s: unique_id=%x is not " -+ "registered\n", ioc->name, __FUNCTION__, -+ karg.data.UniqueId); -+ goto out; -+ } -+ } -+ -+ request_data = ioc->DiagBuffer[buffer_type]; -+ if (!request_data) { -+ printk(MYIOC_s_DEBUG_FMT "%s: doesn't have buffer for " -+ "buffer_type=%x\n", ioc->name, __FUNCTION__, buffer_type); -+ goto out; -+ } -+ -+ rc = 0; -+ if (buffer_type == MPI_DIAG_BUF_TYPE_EXTENDED) { -+ if (karg.data.ExtendedType != ioc->ExtendedType[buffer_type]) -+ goto 
out; -+ } else -+ karg.data.ExtendedType = 0; -+ -+ if (ioc->DiagBuffer_Status[buffer_type] & MPT_DIAG_BUFFER_IS_RELEASED) -+ karg.data.Flags = 3; -+ else -+ karg.data.Flags = 7; -+ karg.data.TraceLevel = ioc->TraceLevel[buffer_type]; -+ for (ii = 0; ii < 4; ii++) -+ karg.data.ProductSpecific[ii] = -+ ioc->ProductSpecific[buffer_type][ii]; -+ karg.data.DataSize = ioc->DiagBuffer_sz[buffer_type]; -+ karg.data.DriverAddedBufferSize = 0; -+ karg.data.UniqueId = ioc->UniqueId[buffer_type]; -+ -+ out: -+ if (copy_to_user(uarg, &karg, sizeof(mpt_diag_query_t))) { -+ printk(MYIOC_s_ERR_FMT "%s Unable to write mpt_diag_query_t " -+ "data @ %p\n", ioc->name, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ return rc; -+} -+ -+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -+/* READ DIAG BUFFER Routine. -+ * -+ * Outputs: None. -+ * Return: 0 if successful -+ * -EFAULT if data unavailable -+ * -EBUSY if previous command timout and IOC reset is not complete. -+ * -ENODEV if no such device/adapter -+ * -ETIME if timer expires -+ * -ENOMEM if memory allocation error -+ */ -+static int -+mptctl_read_diag_buffer (unsigned long arg) -+{ -+ mpt_diag_read_buffer_t __user *uarg = (void __user *) arg; -+ mpt_diag_read_buffer_t karg; -+ MPT_ADAPTER *ioc; -+ void *request_data, *diagData; -+ dma_addr_t request_data_dma; -+ DiagBufferPostRequest_t *diag_buffer_post_request; -+ DiagBufferPostReply_t *diag_buffer_post_reply; -+ MPT_FRAME_HDR *mf; -+ int iocnum, rc, ii; -+ u8 buffer_type; -+ u32 tmp; -+ unsigned long timeleft; -+ -+ rc = 0; -+ if (copy_from_user(&karg, uarg, sizeof(mpt_diag_read_buffer_t))) { -+ printk(KERN_ERR "%s@%d::%s - " -+ "Unable to read in mpt_diag_read_buffer_t struct @ %p\n", -+ __FILE__, __LINE__, __FUNCTION__, uarg); -+ return -EFAULT; -+ } -+ -+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || -+ (ioc == NULL)) { -+ printk(KERN_ERR "%s::%s() @%d - ioc%d not found!\n", -+ __FILE__, __FUNCTION__, __LINE__, iocnum); 
-+ return -ENODEV; -+ } -+ -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s enter.\n", ioc->name, -+ __FUNCTION__)); -+ buffer_type = karg.data.UniqueId & 0x000000ff; -+ if (!(ioc->facts.IOCCapabilities & MPT_DIAG_CAPABILITY(buffer_type))) { -+ printk(MYIOC_s_DEBUG_FMT "%s: doesn't have Capability " -+ "for buffer_type=%x\n", ioc->name, __FUNCTION__, -+ buffer_type); -+ return -EFAULT; -+ } -+ -+ if (karg.data.UniqueId != ioc->UniqueId[buffer_type]) { -+ printk(MYIOC_s_DEBUG_FMT "%s: unique_id=%x is not registered\n", -+ ioc->name, __FUNCTION__, karg.data.UniqueId); -+ return -EFAULT; -+ } -+ -+ request_data = ioc->DiagBuffer[buffer_type]; -+ if (!request_data) { -+ printk(MYIOC_s_DEBUG_FMT "%s: doesn't have buffer for " -+ "buffer_type=%x\n", ioc->name, __FUNCTION__, buffer_type); -+ return -EFAULT; -+ } -+ -+ diagData = (void *)(request_data + karg.data.StartingOffset); -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: diagData=%p " -+ "request_data=%p StartingOffset=%x\n", ioc->name, __FUNCTION__, -+ diagData, request_data, karg.data.StartingOffset)); -+ -+ if (copy_to_user((void __user *)&uarg->data.DiagnosticData[0], -+ diagData, karg.data.BytesToRead)) { -+ printk(MYIOC_s_ERR_FMT "%s: Unable to write " -+ "mpt_diag_read_buffer_t data @ %p\n", ioc->name, -+ __FUNCTION__, diagData); -+ return -EFAULT; -+ } -+ -+ if ((karg.data.Flags & MPI_FW_DIAG_FLAG_REREGISTER) == 0) -+ goto out; -+ -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Reregister " -+ "buffer_type=%x\n", ioc->name, __FUNCTION__, buffer_type)); -+ if ((ioc->DiagBuffer_Status[buffer_type] & -+ MPT_DIAG_BUFFER_IS_RELEASED) == 0) { -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: buffer_type=%x " -+ "is still registered\n", ioc->name, __FUNCTION__, -+ buffer_type)); -+ return rc; -+ } -+ /* Get a free request frame and save the message context. 
-+ */ -+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) -+ return -EAGAIN; -+ -+ diag_buffer_post_request = (DiagBufferPostRequest_t *)mf; -+ diag_buffer_post_request->Function = MPI_FUNCTION_DIAG_BUFFER_POST; -+ diag_buffer_post_request->ChainOffset = 0; -+ diag_buffer_post_request->BufferType = buffer_type; -+ diag_buffer_post_request->TraceLevel = -+ ioc->TraceLevel[buffer_type]; -+ diag_buffer_post_request->MsgFlags = 0; -+ diag_buffer_post_request->Reserved1 = 0; -+ diag_buffer_post_request->Reserved2 = 0; -+ diag_buffer_post_request->Reserved3 = 0; -+ diag_buffer_post_request->BufferAddress.High = 0; -+ if ( buffer_type == MPI_DIAG_BUF_TYPE_EXTENDED ) -+ diag_buffer_post_request->ExtendedType = -+ cpu_to_le32(ioc->ExtendedType[buffer_type]); -+ diag_buffer_post_request->BufferLength = -+ cpu_to_le32(ioc->DiagBuffer_sz[buffer_type]); -+ for (ii = 0; ii < 4; ii++) -+ diag_buffer_post_request->ProductSpecific[ii] = -+ cpu_to_le32(ioc->ProductSpecific[buffer_type][ii]); -+ request_data_dma = ioc->DiagBuffer_dma[buffer_type]; -+ tmp = request_data_dma & 0xFFFFFFFF; -+ diag_buffer_post_request->BufferAddress.Low = cpu_to_le32(tmp); -+ if (ioc->sg_addr_size == sizeof(u64)) { -+ tmp = (u32)((u64)request_data_dma >> 32); -+ diag_buffer_post_request->BufferAddress.High = cpu_to_le32(tmp); -+ } -+ -+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, -+ diag_buffer_post_request->MsgContext); -+ INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) -+ mpt_put_msg_frame(mptctl_id, ioc, mf); -+ timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, -+ MPT_IOCTL_DEFAULT_TIMEOUT*HZ); -+ if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { -+ rc = -ETIME; -+ printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, -+ __FUNCTION__); -+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { -+ mpt_free_msg_frame(ioc, mf); -+ goto out; -+ } -+ if (!timeleft) -+ mptctl_timeout_expired(ioc, mf); -+ goto out; -+ } -+ -+ /* process the completed Reply Message Frame 
*/ -+ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) == 0) { -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: status=%x\n", -+ ioc->name, __FUNCTION__, ioc->ioctl_cmds.status)); -+ rc = -EFAULT; -+ } -+ -+ diag_buffer_post_reply = (DiagBufferPostReply_t *)ioc->ioctl_cmds.reply; -+ if (le16_to_cpu(diag_buffer_post_reply->IOCStatus) == -+ MPI_IOCSTATUS_SUCCESS) { -+ if (diag_buffer_post_reply->MsgLength > 5) -+ ioc->DataSize[buffer_type] = -+ le32_to_cpu(diag_buffer_post_reply->TransferLength); -+ ioc->DiagBuffer_Status[buffer_type] |= -+ MPT_DIAG_BUFFER_IS_REGISTERED; -+ } else { -+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: IOCStatus=%x " -+ "IOCLogInfo=%x\n", ioc->name, __FUNCTION__, -+ diag_buffer_post_reply->IOCStatus, -+ diag_buffer_post_reply->IOCLogInfo)); -+ rc = -EFAULT; -+ } -+ -+ out: -+ CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) -+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); -+ return rc; -+} ---- /dev/null -+++ b/drivers/message/fusion/rejected_ioctls/diag_buffer.h -@@ -0,0 +1,101 @@ -+#define MPTDIAGREGISTER _IOWR(MPT_MAGIC_NUMBER,26,mpt_diag_register_t) -+#define MPTDIAGRELEASE _IOWR(MPT_MAGIC_NUMBER,27,mpt_diag_release_t) -+#define MPTDIAGUNREGISTER _IOWR(MPT_MAGIC_NUMBER,28,mpt_diag_unregister_t) -+#define MPTDIAGQUERY _IOWR(MPT_MAGIC_NUMBER,29,mpt_diag_query_t) -+#define MPTDIAGREADBUFFER _IOWR(MPT_MAGIC_NUMBER,30,mpt_diag_read_buffer_t) -+ -+#define MPI_FW_DIAG_IOCTL (0x80646961) -+#define MPI_FW_DIAG_TYPE_REGISTER (0x00000001) -+#define MPI_FW_DIAG_TYPE_UNREGISTER (0x00000002) -+#define MPI_FW_DIAG_TYPE_QUERY (0x00000003) -+#define MPI_FW_DIAG_TYPE_READ_BUFFER (0x00000004) -+#define MPI_FW_DIAG_TYPE_RELEASE (0x00000005) -+ -+#define MPI_FW_DIAG_INVALID_UID (0x00000000) -+#define FW_DIAGNOSTIC_BUFFER_COUNT (3) -+#define FW_DIAGNOSTIC_UID_NOT_FOUND (0xFF) -+ -+#define MPI_FW_DIAG_ERROR_SUCCESS (0x00000000) -+#define MPI_FW_DIAG_ERROR_FAILURE (0x00000001) -+#define MPI_FW_DIAG_ERROR_INVALID_PARAMETER (0x00000002) -+#define 
MPI_FW_DIAG_ERROR_POST_FAILED (0x00000010) -+#define MPI_FW_DIAG_ERROR_INVALID_UID (0x00000011) -+ -+#define MPI_FW_DIAG_ERROR_RELEASE_FAILED (0x00000012) -+#define MPI_FW_DIAG_ERROR_NO_BUFFER (0x00000013) -+#define MPI_FW_DIAG_ERROR_ALREADY_RELEASED (0x00000014) -+ -+#define MPT_DIAG_CAPABILITY(bufftype) (MPI_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER << bufftype) -+ -+#define MPT_DIAG_BUFFER_IS_REGISTERED 1 -+#define MPT_DIAG_BUFFER_IS_RELEASED 2 -+ -+typedef struct _MPI_FW_DIAG_REGISTER { -+ u8 TraceLevel; -+ u8 BufferType; -+ u16 Flags; -+ u32 ExtendedType; -+ u32 ProductSpecific[4]; -+ u32 RequestedBufferSize; -+ u32 UniqueId; -+} MPI_FW_DIAG_REGISTER, *PTR_MPI_FW_DIAG_REGISTER; -+ -+typedef struct _mpt_diag_register { -+ mpt_ioctl_header hdr; -+ MPI_FW_DIAG_REGISTER data; -+} mpt_diag_register_t; -+ -+typedef struct _MPI_FW_DIAG_UNREGISTER { -+ u32 UniqueId; -+} MPI_FW_DIAG_UNREGISTER, *PTR_MPI_FW_DIAG_UNREGISTER; -+ -+typedef struct _mpt_diag_unregister { -+ mpt_ioctl_header hdr; -+ MPI_FW_DIAG_UNREGISTER data; -+} mpt_diag_unregister_t; -+ -+#define MPI_FW_DIAG_FLAG_APP_OWNED (0x0001) -+#define MPI_FW_DIAG_FLAG_BUFFER_VALID (0x0002) -+#define MPI_FW_DIAG_FLAG_FW_BUFFER_ACCESS (0x0004) -+ -+typedef struct _MPI_FW_DIAG_QUERY { -+ u8 TraceLevel; -+ u8 BufferType; -+ u16 Flags; -+ u32 ExtendedType; -+ u32 ProductSpecific[4]; -+ u32 DataSize; -+ u32 DriverAddedBufferSize; -+ u32 UniqueId; -+} MPI_FW_DIAG_QUERY, *PTR_MPI_FW_DIAG_QUERY; -+ -+typedef struct _mpt_diag_query { -+ mpt_ioctl_header hdr; -+ MPI_FW_DIAG_QUERY data; -+} mpt_diag_query_t; -+ -+typedef struct _MPI_FW_DIAG_RELEASE { -+ u32 UniqueId; -+} MPI_FW_DIAG_RELEASE, *PTR_MPI_FW_DIAG_RELEASE; -+ -+typedef struct _mpt_diag_release { -+ mpt_ioctl_header hdr; -+ MPI_FW_DIAG_RELEASE data; -+} mpt_diag_release_t; -+ -+#define MPI_FW_DIAG_FLAG_REREGISTER (0x0001) -+ -+typedef struct _MPI_FW_DIAG_READ_BUFFER { -+ u8 Status; -+ u8 Reserved; -+ u16 Flags; -+ u32 StartingOffset; -+ u32 BytesToRead; -+ u32 UniqueId; 
-+ u32 DiagnosticData[1]; -+} MPI_FW_DIAG_READ_BUFFER, *PTR_MPI_FW_DIAG_READ_BUFFER; -+ -+typedef struct _mpt_diag_read_buffer { -+ mpt_ioctl_header hdr; -+ MPI_FW_DIAG_READ_BUFFER data; -+} mpt_diag_read_buffer_t; diff --git a/patches.drivers/ppc64-adb b/patches.drivers/ppc64-adb deleted file mode 100644 index fe87d31..0000000 --- a/patches.drivers/ppc64-adb +++ /dev/null @@ -1,53 +0,0 @@ -From: Olaf Hering -Subject: enable mouse button emulation also for G5 -Patch-mainline: never - -fix compile errors - - drivers/macintosh/Kconfig | 2 +- - drivers/macintosh/adb.c | 4 ++++ - drivers/macintosh/adbhid.c | 6 +++++- - 3 files changed, 10 insertions(+), 2 deletions(-) - ---- a/drivers/macintosh/Kconfig -+++ b/drivers/macintosh/Kconfig -@@ -13,7 +13,7 @@ if MACINTOSH_DRIVERS - - config ADB - bool "Apple Desktop Bus (ADB) support" -- depends on MAC || (PPC_PMAC && PPC32) -+ depends on MAC || PPC_PMAC - help - Apple Desktop Bus (ADB) support is for support of devices which - are connected to an ADB port. ADB devices tend to have 4 pins. 
---- a/drivers/macintosh/adb.c -+++ b/drivers/macintosh/adb.c -@@ -298,6 +298,10 @@ static int __init adb_init(void) - if (!machine_is(chrp) && !machine_is(powermac)) - return 0; - #endif -+#ifdef CONFIG_PPC64 -+ if (!machine_is(powermac)) -+ return 0; -+#endif - #ifdef CONFIG_MAC - if (!MACH_IS_MAC) - return 0; ---- a/drivers/macintosh/adbhid.c -+++ b/drivers/macintosh/adbhid.c -@@ -1264,10 +1264,14 @@ init_ms_a3(int id) - - static int __init adbhid_init(void) - { --#ifndef CONFIG_MAC -+#ifdef CONFIG_PPC32 - if (!machine_is(chrp) && !machine_is(powermac)) - return 0; - #endif -+#ifdef CONFIG_PPC64 -+ if (!machine_is(powermac)) -+ return 0; -+#endif - - led_request.complete = 1; - diff --git a/patches.drivers/qla4xxx-5.01.00-k9-5.01.00.00.11.01-k10.patch b/patches.drivers/qla4xxx-5.01.00-k9-5.01.00.00.11.01-k10.patch deleted file mode 100644 index 1af1c14..0000000 --- a/patches.drivers/qla4xxx-5.01.00-k9-5.01.00.00.11.01-k10.patch +++ /dev/null @@ -1,2809 +0,0 @@ -From: Ravi Anand -Subject: Update qla4xxx driver for SLES11 SP1 -References: bnc#556572,FATE#307128 -Patch-mainline: not yet - -Change log from v5.01.00-k9 to v5.01.00.00.11.01-k10: - -- Wait for device online in reset_lun - for 10s, for device to come online otherwise indicating - an error condition. - -- Updated IPv6 support - -- Link Down -> Mark all devices missing - This change will cut 20 seconds of failover times. - Previously, the driver took no action on a Link Down, - and waited for the I/O on a dead connection to timeout - in the firmware before marking the DDB missing. - -- Code Clean up - remove "marker_needed" - -- Updated firmware ready timeout algorithm to prevent long delays - and use jiffies to time out instead of counter. Also use - msleep_interruptible instead of msleep. - -- Added srb reference count support - Serialization between the error handler and recovery code. 
- -- Avoid relogin on device marked missing - causing mailbox command (0x63) failure - -- Check for command completion while device and host reset - Created variables to reference h, b, t, l, because - if the IOCTL scsi passthru command completes within - eh_device_reset, the cmd structure may no longer be valid. - Fix for ER67742: hang while sg_reset. - Also wait for hba online in device reset path - -- Do not retry login to CHAP auth failed targets - Per RFC 3720, Login Response Status Code 0x02 should not be retried. - Condensed connection error checking code to a single routine, and - added check for status class 0x02. - -- Added support for abort task management command - Handles SCSI aborts. - -- Add Async PDU support - Added support for Asynchronous PDU IOCB - -- handle DDB removal via DPC - mailbox command free device ddb (0x31) does not generate AEN, - so we need to process it separately where we defer the removal - of ddb to DPC. - -- Update data structure to use single ioctl module - update scsi_qla_host to match with qisioctl struct. - -- ioctl initialization - to interact with the application - -- Add support for ACB firmware features in the driver - to notify the firmware that the driver supports ifcb size - greater than 512B. - -- added active srb array implementation - for effective srb processing within the io path. - -- v5.01.00.00.11.01-k10 - Changed driver version for SLES11 SP1. 
- -Signed-off-by: Ravi Anand -Acked-by: Hannes Reinecke - ---- - drivers/scsi/qla4xxx/ql4_def.h | 120 +++++++- - drivers/scsi/qla4xxx/ql4_fw.h | 91 ++++-- - drivers/scsi/qla4xxx/ql4_glbl.h | 18 + - drivers/scsi/qla4xxx/ql4_init.c | 349 +++++++++++++++++------ - drivers/scsi/qla4xxx/ql4_inline.h | 71 ++++ - drivers/scsi/qla4xxx/ql4_iocb.c | 21 + - drivers/scsi/qla4xxx/ql4_isr.c | 53 +++ - drivers/scsi/qla4xxx/ql4_mbx.c | 426 ++++++++++++++++++++++------ - drivers/scsi/qla4xxx/ql4_os.c | 553 ++++++++++++++++++++++++++++++++++--- - drivers/scsi/qla4xxx/ql4_version.h | 2 - 10 files changed, 1432 insertions(+), 272 deletions(-) - ---- a/drivers/scsi/qla4xxx/ql4_def.h -+++ b/drivers/scsi/qla4xxx/ql4_def.h -@@ -90,16 +90,17 @@ - ***********************************/ - #define MAX_HBAS 16 - #define MAX_BUSES 1 --#define MAX_TARGETS (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES) -+#define MAX_TARGETS MAX_DEV_DB_ENTRIES - #define MAX_LUNS 0xffff - #define MAX_AEN_ENTRIES 256 /* should be > EXT_DEF_MAX_AEN_QUEUE */ --#define MAX_DDB_ENTRIES (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES) -+#define MAX_DDB_ENTRIES MAX_DEV_DB_ENTRIES - #define MAX_PDU_ENTRIES 32 - #define INVALID_ENTRY 0xFFFF - #define MAX_CMDS_TO_RISC 1024 - #define MAX_SRBS MAX_CMDS_TO_RISC - #define MBOX_AEN_REG_COUNT 5 - #define MAX_INIT_RETRIES 5 -+#define LEGACY_IFCB_SIZE 0x200 - - /* - * Buffer sizes -@@ -114,6 +115,7 @@ - */ - #define MAC_ADDR_LEN 6 /* in bytes */ - #define IP_ADDR_LEN 4 /* in bytes */ -+#define IPv6_ADDR_LEN 16 /* IPv6 address size */ - #define DRIVER_NAME "qla4xxx" - - #define MAX_LINKED_CMDS_PER_LUN 3 -@@ -146,6 +148,7 @@ - #define ISNS_DEREG_TOV 5 - - #define MAX_RESET_HA_RETRIES 2 -+#define DEVICE_ONLINE_TOV 10 - - /* - * SCSI Request Block structure (srb) that is placed -@@ -220,7 +223,7 @@ struct ddb_entry { - - uint16_t os_target_id; /* Target ID */ - uint16_t fw_ddb_index; /* DDB firmware index */ -- uint8_t reserved[2]; -+ uint8_t options; - uint32_t fw_ddb_device_state; /* F/W 
Device State -- see ql4_fw.h */ - - uint32_t CmdSn; -@@ -245,10 +248,13 @@ struct ddb_entry { - - uint16_t port; - uint32_t tpgt; -- uint8_t ip_addr[ISCSI_IPADDR_SIZE]; -+ uint8_t ip_addr[IP_ADDR_LEN]; - uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */ - uint8_t iscsi_alias[0x20]; - uint8_t isid[6]; -+ -+ struct in6_addr remote_ipv6_addr; -+ struct in6_addr link_local_ipv6_addr; - }; - - /* -@@ -260,6 +266,8 @@ struct ddb_entry { - * commands */ - #define DDB_STATE_MISSING 2 /* Device logged off, trying - * to re-login */ -+#define DDB_STATE_REMOVED 3 /* The fw ddb_entry is freed -+ * the session can be destroyed */ - - /* - * DDB flags. -@@ -269,16 +277,38 @@ struct ddb_entry { - * logged it out */ - #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ - #define DF_FO_MASKED 3 -+#define DF_REMOVE 4 /* FW DDB is destroyed */ - - - #include "ql4_fw.h" - #include "ql4_nvram.h" - -+/* shortcut to print ISID */ -+#define ISID(addr) \ -+ ((unsigned char *)&addr)[5], \ -+ ((unsigned char *)&addr)[4], \ -+ ((unsigned char *)&addr)[3], \ -+ ((unsigned char *)&addr)[2], \ -+ ((unsigned char *)&addr)[1], \ -+ ((unsigned char *)&addr)[0] -+#define ISID_FMT "0x%02x%02x%02x%02x%02x%02x" -+ - /* - * Linux Host Adapter structure - */ - struct scsi_qla_host { -+ struct klist_node node; -+ uint16_t instance; -+ uint16_t rsvd0; -+ -+ /* exported functions */ -+ int (*ql4cmd)(struct scsi_qla_host *ha, struct srb * srb); -+ int (*ql4mbx)(struct scsi_qla_host *ha, uint8_t inCount, -+ uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts); -+ - /* Linux adapter configuration data */ -+ struct Scsi_Host *host; /* pointer to host data */ -+ uint32_t tot_ddbs; - unsigned long flags; - - #define AF_ONLINE 0 /* 0x00000001 */ -@@ -290,6 +320,7 @@ struct scsi_qla_host { - #define AF_LINK_UP 8 /* 0x00000100 */ - #define AF_IRQ_ATTACHED 10 /* 0x00000400 */ - #define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ -+#define AF_OS_INDEX_VALID 12 /* 0x00001000 */ - - unsigned long 
dpc_flags; - -@@ -301,9 +332,9 @@ struct scsi_qla_host { - #define DPC_ISNS_RESTART 7 /* 0x00000080 */ - #define DPC_AEN 9 /* 0x00000200 */ - #define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */ -- -- struct Scsi_Host *host; /* pointer to host data */ -- uint32_t tot_ddbs; -+#define DPC_REMOVE_DEVICE 17 /* 0x00020000 */ -+#define DPC_LINK_CHANGED 18 /* 0x00040000 */ -+#define DPC_ASYNC_MSG_PDU 19 /* 0x00080000 */ - - uint16_t iocb_cnt; - -@@ -320,14 +351,14 @@ struct scsi_qla_host { - #define MIN_IOBASE_LEN 0x100 - - uint16_t req_q_count; -- uint8_t marker_needed; -- uint8_t rsvd1; -+ uint8_t rsvd1[2]; - - unsigned long host_no; - - /* NVRAM registers */ - struct eeprom_data *nvram; - spinlock_t hardware_lock ____cacheline_aligned; -+ spinlock_t list_lock; - uint32_t eeprom_cmd_data; - - /* Counters for general statistics */ -@@ -352,7 +383,6 @@ struct scsi_qla_host { - uint32_t firmware_version[2]; - uint32_t patch_number; - uint32_t build_number; -- uint32_t board_id; - - /* --- From Init_FW --- */ - /* init_cb_t *init_cb; */ -@@ -372,6 +402,7 @@ struct scsi_qla_host { - - /* --- From GetFwState --- */ - uint32_t firmware_state; -+ uint32_t board_id; - uint32_t addl_fw_state; - - /* Linux kernel thread */ -@@ -394,6 +425,10 @@ struct scsi_qla_host { - uint16_t free_srb_q_count; - uint16_t num_srbs_allocated; - -+ /* Active array */ -+ struct srb *active_srb_array[MAX_SRBS]; -+ uint16_t current_active_index; -+ - /* DMA Memory Block */ - void *queues; - dma_addr_t queues_dma; -@@ -422,12 +457,20 @@ struct scsi_qla_host { - uint16_t aen_out; - struct aen aen_q[MAX_AEN_ENTRIES]; - -- struct ql4_aen_log aen_log;/* tracks all aens */ -+ /* pdu variables */ -+ uint16_t pdu_count; /* Number of available aen_q entries */ -+ uint16_t pdu_in; /* Current indexes */ -+ uint16_t pdu_out; -+ uint16_t pdu_active; -+ struct pdu_entry *free_pdu_top; -+ struct pdu_entry *free_pdu_bottom; -+ struct pdu_entry pdu_queue[MAX_PDU_ENTRIES]; - - /* This mutex protects several threads to do 
mailbox commands - * concurrently. - */ - struct mutex mbox_sem; -+ wait_queue_head_t mailbox_wait_queue; - - /* temporary mailbox status registers */ - volatile uint8_t mbox_status_count; -@@ -439,10 +482,63 @@ struct scsi_qla_host { - /* Map ddb_list entry by FW ddb index */ - struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES]; - -+ struct ql4_aen_log aen_log;/* tracks all aens */ -+ void (*ql4getaenlog)(struct scsi_qla_host *ha, struct ql4_aen_log *aenl); -+ -+#define QL_INDICES_PER_ENTRY 32 -+#define QL_OSINDEX_ENTRIES (MAX_DDB_ENTRIES/QL_INDICES_PER_ENTRY) -+ volatile unsigned long os_map[QL_OSINDEX_ENTRIES]; -+ - /* Saved srb for status continuation entry processing */ - struct srb *status_srb; -+ -+ struct list_head async_iocb_list; -+ dma_addr_t gen_req_rsp_iocb_dma; -+ void *gen_req_rsp_iocb; -+ -+ /* IPv6 support info from InitFW */ -+ uint8_t acb_version; -+ uint8_t ipv4_addr_state; -+ uint16_t ipv4_options; -+ -+ uint32_t resvd2; -+ uint32_t ipv6_options; -+ uint32_t ipv6_addl_options; -+ uint8_t ipv6_link_local_state; -+ uint8_t ipv6_addr0_state; -+ uint8_t ipv6_addr1_state; -+ uint8_t ipv6_default_router_state; -+ struct in6_addr ipv6_link_local_addr; -+ struct in6_addr ipv6_addr0; -+ struct in6_addr ipv6_addr1; -+ struct in6_addr ipv6_default_router_addr; -+ -+ uint16_t ifcb_size; -+}; -+ -+static inline int is_ipv4_enabled(struct scsi_qla_host *ha) -+{ -+ return ((ha->ipv4_options & IPOPT_IPv4_PROTOCOL_ENABLE) != 0); -+} -+ -+static inline int is_ipv6_enabled(struct scsi_qla_host *ha) -+{ -+ return ((ha->ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0); -+} -+ -+/* -+ * structure to buffer Async PDUs -+ */ -+struct async_msg_pdu_iocb { -+ struct list_head list; -+ uint8_t iocb[0x40]; - }; - -+typedef struct _ASYNC_PDU_SENSE { -+ uint16_t sense_len; /* 00-01 */ -+ uint8_t sense_data[0]; -+} ASYNC_PDU_SENSE; -+ - static inline int is_qla4010(struct scsi_qla_host *ha) - { - return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010; -@@ -511,7 +607,7 
@@ static inline void __iomem* isp_port_err - &ha->reg->u2.isp4022.p0.port_err_status); - } - --static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha) -+static inline void __iomem *isp_gp_out(struct scsi_qla_host *ha) - { - return (is_qla4010(ha) ? - &ha->reg->u2.isp4010.gp_out : ---- a/drivers/scsi/qla4xxx/ql4_fw.h -+++ b/drivers/scsi/qla4xxx/ql4_fw.h -@@ -215,6 +215,7 @@ union external_hw_config_reg { - /* Mailbox command definitions */ - #define MBOX_CMD_ABOUT_FW 0x0009 - #define MBOX_CMD_PING 0x000B -+#define MBOX_CMD_ABORT_TASK 0x0015 - #define MBOX_CMD_LUN_RESET 0x0016 - #define MBOX_CMD_TARGET_WARM_RESET 0x0017 - #define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E -@@ -227,8 +228,8 @@ union external_hw_config_reg { - #define MBOX_CMD_READ_FLASH 0x0026 - #define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031 - #define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056 --#define LOGOUT_OPTION_CLOSE_SESSION 0x01 --#define LOGOUT_OPTION_RELOGIN 0x02 -+#define LOGOUT_OPTION_CLOSE_SESSION 0x02 -+#define LOGOUT_OPTION_RESET 0x04 - #define MBOX_CMD_EXECUTE_IOCB_A64 0x005A - #define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060 - #define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061 -@@ -258,13 +259,15 @@ union external_hw_config_reg { - /* Mailbox 1 */ - #define FW_STATE_READY 0x0000 - #define FW_STATE_CONFIG_WAIT 0x0001 --#define FW_STATE_WAIT_LOGIN 0x0002 -+#define FW_STATE_WAIT_AUTOCONNECT 0x0002 - #define FW_STATE_ERROR 0x0004 --#define FW_STATE_DHCP_IN_PROGRESS 0x0008 -+#define FW_STATE_CONFIGURING_IP 0x0008 - - /* Mailbox 3 */ - #define FW_ADDSTATE_OPTICAL_MEDIA 0x0001 --#define FW_ADDSTATE_DHCP_ENABLED 0x0002 -+#define FW_ADDSTATE_DHCPv4_ENABLED 0x0002 -+#define FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED 0x0004 -+#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008 - #define FW_ADDSTATE_LINK_UP 0x0010 - #define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020 - #define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B -@@ -320,6 +323,8 @@ union external_hw_config_reg { - /* Host Adapter Initialization Control Block (from host) 
*/ - struct addr_ctrl_blk { - uint8_t version; /* 00 */ -+#define IFCB_VER_MIN 0x01 -+#define IFCB_VER_MAX 0x02 - uint8_t control; /* 01 */ - - uint16_t fw_options; /* 02-03 */ -@@ -351,11 +356,15 @@ struct addr_ctrl_blk { - uint16_t iscsi_opts; /* 30-31 */ - uint16_t ipv4_tcp_opts; /* 32-33 */ - uint16_t ipv4_ip_opts; /* 34-35 */ -+#define IPOPT_IPv4_PROTOCOL_ENABLE 0x8000 - - uint16_t iscsi_max_pdu_size; /* 36-37 */ - uint8_t ipv4_tos; /* 38 */ - uint8_t ipv4_ttl; /* 39 */ - uint8_t acb_version; /* 3A */ -+#define ACB_NOT_SUPPORTED 0x00 -+#define ACB_SUPPORTED 0x02 /* Capable of ACB Version 2 Features */ -+ - uint8_t res2; /* 3B */ - uint16_t def_timeout; /* 3C-3D */ - uint16_t iscsi_fburst_len; /* 3E-3F */ -@@ -397,16 +406,34 @@ struct addr_ctrl_blk { - uint32_t cookie; /* 200-203 */ - uint16_t ipv6_port; /* 204-205 */ - uint16_t ipv6_opts; /* 206-207 */ -+#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000 -+ - uint16_t ipv6_addtl_opts; /* 208-209 */ -+#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB Only */ -+#define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR 0x0001 -+ - uint16_t ipv6_tcp_opts; /* 20A-20B */ - uint8_t ipv6_tcp_wsf; /* 20C */ - uint16_t ipv6_flow_lbl; /* 20D-20F */ -- uint8_t ipv6_gw_addr[16]; /* 210-21F */ -+ uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */ - uint16_t ipv6_vlan_tag; /* 220-221 */ - uint8_t ipv6_lnk_lcl_addr_state;/* 222 */ - uint8_t ipv6_addr0_state; /* 223 */ - uint8_t ipv6_addr1_state; /* 224 */ -- uint8_t ipv6_gw_state; /* 225 */ -+#define IP_ADDRSTATE_UNCONFIGURED 0 -+#define IP_ADDRSTATE_INVALID 1 -+#define IP_ADDRSTATE_ACQUIRING 2 -+#define IP_ADDRSTATE_TENTATIVE 3 -+#define IP_ADDRSTATE_DEPRICATED 4 -+#define IP_ADDRSTATE_PREFERRED 5 -+#define IP_ADDRSTATE_DISABLING 6 -+ -+ uint8_t ipv6_dflt_rtr_state; /* 225 */ -+#define IPV6_RTRSTATE_UNKNOWN 0 -+#define IPV6_RTRSTATE_MANUAL 1 -+#define IPV6_RTRSTATE_ADVERTISED 3 -+#define IPV6_RTRSTATE_STALE 4 -+ - uint8_t ipv6_traffic_class; /* 226 */ - uint8_t ipv6_hop_limit; 
/* 227 */ - uint8_t ipv6_if_id[8]; /* 228-22F */ -@@ -424,7 +451,7 @@ struct addr_ctrl_blk { - - struct init_fw_ctrl_blk { - struct addr_ctrl_blk pri; -- struct addr_ctrl_blk sec; -+/* struct addr_ctrl_blk sec;*/ - }; - - /*************************************************************************/ -@@ -433,6 +460,9 @@ struct dev_db_entry { - uint16_t options; /* 00-01 */ - #define DDB_OPT_DISC_SESSION 0x10 - #define DDB_OPT_TARGET 0x02 /* device is a target */ -+#define DDB_OPT_IPV6_DEVICE 0x100 -+#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */ -+#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */ - - uint16_t exec_throttle; /* 02-03 */ - uint16_t exec_count; /* 04-05 */ -@@ -468,7 +498,7 @@ struct dev_db_entry { - * pointer to a string so we - * don't have to reserve soooo - * much RAM */ -- uint8_t ipv6_addr[0x10];/* 1A0-1AF */ -+ uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */ - uint8_t res5[0x10]; /* 1B0-1BF */ - uint16_t ddb_link; /* 1C0-1C1 */ - uint16_t chap_tbl_idx; /* 1C2-1C3 */ -@@ -577,13 +607,14 @@ struct conn_event_log_entry { - /* IOCB header structure */ - struct qla4_header { - uint8_t entryType; --#define ET_STATUS 0x03 --#define ET_MARKER 0x04 --#define ET_CONT_T1 0x0A --#define ET_STATUS_CONTINUATION 0x10 --#define ET_CMND_T3 0x19 --#define ET_PASSTHRU0 0x3A --#define ET_PASSTHRU_STATUS 0x3C -+#define ET_STATUS 0x03 -+#define ET_MARKER 0x04 -+#define ET_CONT_T1 0x0A -+#define ET_STATUS_CONTINUATION 0x10 -+#define ET_CMND_T3 0x19 -+#define ET_ASYNC_PDU 0x37 -+#define ET_PASSTHRU0 0x3A -+#define ET_PASSTHRU_STATUS 0x3C - - uint8_t entryStatus; - uint8_t systemDefined; -@@ -692,6 +723,18 @@ struct qla4_marker_entry { - uint64_t reserved6; /* 38-3F */ - }; - -+/* Asynchronous PDU IOCB structure */ -+struct async_pdu_iocb { -+ struct qla4_header hdr; /* 00-02 */ -+ uint32_t async_pdu_handle; /* 03-06 */ -+ uint16_t target_id; /* 07-08 */ -+ uint16_t status; /* 09-0A */ -+#define ASYNC_PDU_IOCB_STS_OK 0x01 -+ -+ 
uint32_t rsrvd; /* 0B-0F */ -+ uint8_t iscsi_pdu_hdr[48]; /* 10-3F */ -+}; -+ - /* Status entry structure*/ - struct status_entry { - struct qla4_header hdr; /* 00-03 */ -@@ -734,6 +777,15 @@ struct status_entry { - - }; - -+struct pdu_entry { -+ uint8_t *Buff; -+ uint32_t BuffLen; -+ uint32_t SendBuffLen; -+ uint32_t RecvBuffLen; -+ struct pdu_entry *Next; -+ dma_addr_t DmaBuff; -+}; -+ - /* Status Continuation entry */ - struct status_cont_entry { - struct qla4_header hdr; /* 00-03 */ -@@ -745,11 +797,9 @@ struct passthru0 { - uint32_t handle; /* 04-07 */ - uint16_t target; /* 08-09 */ - uint16_t connectionID; /* 0A-0B */ --#define ISNS_DEFAULT_SERVER_CONN_ID ((uint16_t)0x8000) - - uint16_t controlFlags; /* 0C-0D */ --#define PT_FLAG_ETHERNET_FRAME 0x8000 --#define PT_FLAG_ISNS_PDU 0x8000 -+#define PT_FLAG_ISCSI_PDU 0x1000 - #define PT_FLAG_SEND_BUFFER 0x0200 - #define PT_FLAG_WAIT_4_RESPONSE 0x0100 - -@@ -759,7 +809,8 @@ struct passthru0 { - struct data_seg_a64 outDataSeg64; /* 10-1B */ - uint32_t res1; /* 1C-1F */ - struct data_seg_a64 inDataSeg64; /* 20-2B */ -- uint8_t res2[20]; /* 2C-3F */ -+ uint8_t res2[16]; /* 2C-3F */ -+ uint32_t async_pdu_handle; - }; - - struct passthru_status { ---- a/drivers/scsi/qla4xxx/ql4_glbl.h -+++ b/drivers/scsi/qla4xxx/ql4_glbl.h -@@ -20,17 +20,21 @@ int qla4xxx_soft_reset(struct scsi_qla_h - irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id); - - void qla4xxx_free_ddb_list(struct scsi_qla_host * ha); -+void qla4xxx_free_ddb(struct scsi_qla_host *, struct ddb_entry *); - void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen); - - int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha); - int qla4xxx_relogin_device(struct scsi_qla_host * ha, - struct ddb_entry * ddb_entry); -+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb); - int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, - int lun); - int qla4xxx_reset_target(struct scsi_qla_host * ha, - struct 
ddb_entry * ddb_entry); - int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr, - uint32_t offset, uint32_t len); -+int qla4xxx_issue_iocb(struct scsi_qla_host *ha, uint32_t comp_offset, -+ dma_addr_t phys_addr); - int qla4xxx_get_firmware_status(struct scsi_qla_host * ha); - int qla4xxx_get_firmware_state(struct scsi_qla_host * ha); - int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha); -@@ -58,6 +62,10 @@ void qla4xxx_get_crash_record(struct scs - struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha); - int qla4xxx_add_sess(struct ddb_entry *); - void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry); -+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha, -+ uint16_t fw_ddb_index, -+ uint16_t connection_id, -+ uint16_t option); - int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha); - int qla4xxx_get_fw_version(struct scsi_qla_host * ha); - void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha, -@@ -67,12 +75,18 @@ struct srb * qla4xxx_del_from_active_arr - uint32_t index); - void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb); - int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha); --int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha, -- uint32_t fw_ddb_index, uint32_t state); -+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, -+ uint32_t state, uint32_t conn_error); - void qla4xxx_dump_buffer(void *b, uint32_t size); - int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, - struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod); - -+void sp_put(struct scsi_qla_host *ha, struct srb *sp); -+int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err); -+int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, -+ uint8_t outCount, uint32_t *mbx_cmd, -+ uint32_t *mbx_sts); -+ - extern int ql4xextended_error_logging; - extern int ql4xdiscoverywait; - extern int ql4xdontresethba; ---- 
a/drivers/scsi/qla4xxx/ql4_init.c -+++ b/drivers/scsi/qla4xxx/ql4_init.c -@@ -51,7 +51,7 @@ static void ql4xxx_set_mac_number(struct - * This routine deallocates and unlinks the specified ddb_entry from the - * adapter's - **/ --static void qla4xxx_free_ddb(struct scsi_qla_host *ha, -+void qla4xxx_free_ddb(struct scsi_qla_host *ha, - struct ddb_entry *ddb_entry) - { - /* Remove device entry from list */ -@@ -95,6 +95,7 @@ void qla4xxx_free_ddb_list(struct scsi_q - **/ - int qla4xxx_init_rings(struct scsi_qla_host *ha) - { -+ uint16_t i; - unsigned long flags = 0; - - /* Initialize request queue. */ -@@ -123,6 +124,10 @@ int qla4xxx_init_rings(struct scsi_qla_h - writel(0, &ha->reg->rsp_q_out); - readl(&ha->reg->rsp_q_out); - -+ /* Initialize active array */ -+ for (i = 0; i < MAX_SRBS; i++) -+ ha->active_srb_array[i] = NULL; -+ - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - return QLA_SUCCESS; -@@ -189,6 +194,71 @@ static int qla4xxx_init_local_data(struc - return qla4xxx_get_firmware_status(ha); - } - -+static uint8_t -+qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha) -+{ -+ uint8_t ipv4_wait = 0; -+ uint8_t ipv6_wait = 0; -+ int8_t ip_address[IPv6_ADDR_LEN] = {0} ; -+ -+ /* If both IPv4 & IPv6 are enabled, possibly only one -+ * IP address may be acquired, so check to see if we -+ * need to wait for another */ -+ if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) { -+ if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) && -+ ((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) { -+ ipv4_wait = 1; -+ } -+ if (((ha->ipv6_addl_options & IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) && -+ ((ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) || -+ (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) || -+ (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING))) { -+ -+ ipv6_wait = 1; -+ -+ if ((ha->ipv6_link_local_state == IP_ADDRSTATE_PREFERRED) || -+ (ha->ipv6_addr0_state == IP_ADDRSTATE_PREFERRED) || -+ (ha->ipv6_addr1_state == 
IP_ADDRSTATE_PREFERRED)) { -+ DEBUG2(printk("scsi%ld: %s: " -+ "Preferred IP configured. Don't wait! \n", -+ ha->host_no, __func__)); -+ ipv6_wait = 0; -+ } -+ if (memcmp(&ha->ipv6_default_router_addr, ip_address, -+ IPv6_ADDR_LEN) == 0) { -+ DEBUG2(printk("scsi%ld: %s: " -+ "No Router configured. Don't wait! \n", -+ ha->host_no, __func__)); -+ ipv6_wait = 0; -+ } -+ if ((ha->ipv6_default_router_state == IPV6_RTRSTATE_MANUAL) && -+ (ha->ipv6_link_local_state == IP_ADDRSTATE_TENTATIVE) && -+ (memcmp(&ha->ipv6_link_local_addr, -+ &ha->ipv6_default_router_addr, 4) == 0)) { -+ DEBUG2(printk("scsi%ld: %s: LinkLocal Router & " -+ "IP configured. Don't wait! \n", -+ ha->host_no, __func__)); -+ ipv6_wait = 0; -+ } -+ } -+ if (ipv4_wait || ipv6_wait) { -+ DEBUG2(printk("scsi%ld: %s: Wait for additional IP(s) \"", -+ ha->host_no, __func__)); -+ if (ipv4_wait) -+ DEBUG2(printk("IPv4 ")); -+ if (ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) -+ DEBUG2(printk("IPv6LinkLocal ")); -+ if (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) -+ DEBUG2(printk("IPv6Addr0 ")); -+ if (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING) -+ DEBUG2(printk("IPv6Addr1 ")); -+ DEBUG2(printk("\"\n")); -+ } -+ } -+ -+ return (ipv4_wait|ipv6_wait); -+} -+ - static int qla4xxx_fw_ready(struct scsi_qla_host *ha) - { - uint32_t timeout_count; -@@ -226,38 +296,75 @@ static int qla4xxx_fw_ready(struct scsi_ - continue; - } - -+ if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) { -+ DEBUG2(printk("scsi%ld: %s: fwstate:" -+ "AUTOCONNECT in progress\n", -+ ha->host_no, __func__)); -+ } -+ -+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) { -+ DEBUG2(printk("scsi%ld: %s: fwstate: CONFIGURING IP\n", -+ ha->host_no, __func__)); -+ /* -+ * Check for link state after 15 secs and if link is still DOWN then, -+ * cable is unplugged. Ignore "DHCP in Progress/CONFIGURING IP" bit -+ * to check if firmware is in ready state or not after 15 secs. 
-+ * This is applicable for both 2.x & 3.x firmware -+ */ -+ if (timeout_count <= (ADAPTER_INIT_TOV - 15)) { -+ if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) { -+ DEBUG2(printk("scsi%ld: %s: LINK UP " -+ "(Cable plugged)\n", -+ ha->host_no, __func__)); -+ } -+ else if (ha->firmware_state & -+ (FW_STATE_CONFIGURING_IP | FW_STATE_READY)) { -+ DEBUG2(printk("scsi%ld: %s: LINK DOWN " -+ "(Cable unplugged)\n", -+ ha->host_no, __func__)); -+ ha->firmware_state = FW_STATE_READY; -+ } -+ } -+ } -+ - if (ha->firmware_state == FW_STATE_READY) { -- DEBUG2(dev_info(&ha->pdev->dev, "Firmware Ready..\n")); -- /* The firmware is ready to process SCSI commands. */ -- DEBUG2(dev_info(&ha->pdev->dev, -- "scsi%ld: %s: MEDIA TYPE - %s\n", -- ha->host_no, -- __func__, (ha->addl_fw_state & -- FW_ADDSTATE_OPTICAL_MEDIA) -- != 0 ? "OPTICAL" : "COPPER")); -- DEBUG2(dev_info(&ha->pdev->dev, -- "scsi%ld: %s: DHCP STATE Enabled " -- "%s\n", -- ha->host_no, __func__, -- (ha->addl_fw_state & -- FW_ADDSTATE_DHCP_ENABLED) != 0 ? -- "YES" : "NO")); -- DEBUG2(dev_info(&ha->pdev->dev, -- "scsi%ld: %s: LINK %s\n", -- ha->host_no, __func__, -- (ha->addl_fw_state & -- FW_ADDSTATE_LINK_UP) != 0 ? -- "UP" : "DOWN")); -- DEBUG2(dev_info(&ha->pdev->dev, -- "scsi%ld: %s: iSNS Service " -- "Started %s\n", -- ha->host_no, __func__, -- (ha->addl_fw_state & -- FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ? -- "YES" : "NO")); -+ /* If DHCP IP Addr is available, retrieve it now. */ -+ if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) -+ qla4xxx_get_dhcp_ip_address(ha); -+ -+ if (!qla4xxx_wait_for_ip_config(ha) || timeout_count == 1) { -+ DEBUG2(dev_info(&ha->pdev->dev, "Firmware Ready..\n")); -+ /* The firmware is ready to process SCSI commands. */ -+ DEBUG2(dev_info(&ha->pdev->dev, -+ "scsi%ld: %s: MEDIA TYPE - %s\n", -+ ha->host_no, -+ __func__, (ha->addl_fw_state & -+ FW_ADDSTATE_OPTICAL_MEDIA) -+ != 0 ? 
"OPTICAL" : "COPPER")); -+ DEBUG2(dev_info(&ha->pdev->dev, -+ "scsi%ld: %s: DHCPv4 STATE Enabled " -+ "%s\n", -+ ha->host_no, __func__, -+ (ha->addl_fw_state & -+ FW_ADDSTATE_DHCPv4_ENABLED) != 0 ? -+ "YES" : "NO")); -+ DEBUG2(dev_info(&ha->pdev->dev, -+ "scsi%ld: %s: LINK %s\n", -+ ha->host_no, __func__, -+ (ha->addl_fw_state & -+ FW_ADDSTATE_LINK_UP) != 0 ? -+ "UP" : "DOWN")); -+ DEBUG2(dev_info(&ha->pdev->dev, -+ "scsi%ld: %s: iSNS Service " -+ "Started %s\n", -+ ha->host_no, __func__, -+ (ha->addl_fw_state & -+ FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ? -+ "YES" : "NO")); - -- ready = 1; -- break; -+ ready = 1; -+ break; -+ } - } - DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - " - "seconds expired= %d\n", ha->host_no, __func__, -@@ -272,15 +379,19 @@ static int qla4xxx_fw_ready(struct scsi_ - msleep(1000); - } /* end of for */ - -- if (timeout_count == 0) -+ if (timeout_count <= 0) - DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n", - ha->host_no, __func__)); - -- if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) { -- DEBUG2(printk("scsi%ld: %s: FW is reporting its waiting to" -- " grab an IP address from DHCP server\n", -- ha->host_no, __func__)); -+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) { -+ DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting " -+ "it's waiting to configure an IP address\n", -+ ha->host_no, __func__)); - ready = 1; -+ } else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) { -+ DEBUG2(printk("scsi%ld: %s: FW initialized, but " -+ "auto-discovery still in process\n", -+ ha->host_no, __func__)); - } - - return ready; -@@ -343,11 +454,11 @@ static struct ddb_entry* qla4xxx_get_ddb - __func__, fw_ddb_index)); - list_for_each_entry(ddb_entry, &ha->ddb_list, list) { - if ((memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name, -- ISCSI_NAME_SIZE) == 0) && -- (ddb_entry->tpgt == -- le32_to_cpu(fw_ddb_entry->tgt_portal_grp)) && -- (memcmp(ddb_entry->isid, fw_ddb_entry->isid, -- sizeof(ddb_entry->isid)) == 
0)) { -+ ISCSI_NAME_SIZE) == 0) && -+ (ddb_entry->tpgt == -+ le32_to_cpu(fw_ddb_entry->tgt_portal_grp)) && -+ (memcmp(ddb_entry->isid, fw_ddb_entry->isid, -+ sizeof(ddb_entry->isid)) == 0)) { - found++; - break; - } -@@ -387,6 +498,7 @@ static int qla4xxx_update_ddb_entry(stru - struct dev_db_entry *fw_ddb_entry = NULL; - dma_addr_t fw_ddb_entry_dma; - int status = QLA_ERROR; -+ uint32_t conn_err; - - if (ddb_entry == NULL) { - DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no, -@@ -405,12 +517,13 @@ static int qla4xxx_update_ddb_entry(stru - goto exit_update_ddb; - } - -- if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry, -+ if ((qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry, - fw_ddb_entry_dma, NULL, NULL, -- &ddb_entry->fw_ddb_device_state, NULL, -+ &ddb_entry->fw_ddb_device_state, &conn_err, - &ddb_entry->tcp_source_port_num, -- &ddb_entry->connection_id) == -- QLA_ERROR) { -+ &ddb_entry->connection_id) == QLA_SUCCESS)) -+ status = QLA_SUCCESS; -+ else { - DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for " - "fw_ddb_index %d\n", ha->host_no, __func__, - fw_ddb_index)); -@@ -418,7 +531,6 @@ static int qla4xxx_update_ddb_entry(stru - goto exit_update_ddb; - } - -- status = QLA_SUCCESS; - ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid); - ddb_entry->task_mgmt_timeout = - le16_to_cpu(fw_ddb_entry->def_timeout); -@@ -442,9 +554,26 @@ static int qla4xxx_update_ddb_entry(stru - memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0], - min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr))); - -- DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n", -- ha->host_no, __func__, fw_ddb_index, -- ddb_entry->fw_ddb_device_state, status)); -+ if (ddb_entry->options & DDB_OPT_IPV6_DEVICE) { -+ memcpy(&ddb_entry->remote_ipv6_addr, -+ fw_ddb_entry->ip_addr, -+ min(sizeof(ddb_entry->remote_ipv6_addr), -+ sizeof(fw_ddb_entry->ip_addr))); -+ memcpy(&ddb_entry->link_local_ipv6_addr, -+ 
fw_ddb_entry->link_local_ipv6_addr, -+ min(sizeof(ddb_entry->link_local_ipv6_addr), -+ sizeof(fw_ddb_entry->link_local_ipv6_addr))); -+ } -+ -+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d " -+ "State %04x ConnErr %08x " -+ NIPQUAD_FMT ":%04d \"%s\"\n", -+ __func__, fw_ddb_index, -+ ddb_entry->os_target_id, -+ ddb_entry->fw_ddb_device_state, conn_err, -+ NIPQUAD(fw_ddb_entry->ip_addr), -+ le16_to_cpu(fw_ddb_entry->port), -+ fw_ddb_entry->iscsi_name)); - - exit_update_ddb: - if (fw_ddb_entry) -@@ -492,6 +621,39 @@ static struct ddb_entry * qla4xxx_alloc_ - } - - /** -+ * qla4_is_relogin_allowed - Are we allowed to login? -+ * @ha: Pointer to host adapter structure. -+ * @conn_err: Last connection error associated with the ddb -+ * -+ * This routine tests the given connection error to determine if -+ * we are allowed to login. -+ **/ -+int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err) -+{ -+ uint32_t err_code, login_rsp_sts_class; -+ int relogin = 1; -+ -+ err_code = ((conn_err & 0x00ff0000) >> 16); -+ login_rsp_sts_class = ((conn_err & 0x0000ff00) >> 8); -+ if (err_code == 0x1c || err_code == 0x06) { -+ DEBUG2(dev_info(&ha->pdev->dev, -+ ": conn_err=0x%08x, send target completed or access" -+ " denied failure\n", conn_err)); -+ relogin = 0; -+ } -+ if ((err_code == 0x08) && (login_rsp_sts_class == 0x02)) { -+ /* Login Response PDU returned an error. -+ Login Response Status in Error Code Detail -+ indicates login should not be retried.*/ -+ DEBUG2(dev_info(&ha->pdev->dev, -+ ": conn_err=0x%08x, do not retry relogin\n", conn_err)); -+ relogin = 0; -+ } -+ -+ return relogin; -+} -+ -+/** - * qla4xxx_configure_ddbs - builds driver ddb list - * @ha: Pointer to host adapter structure. 
- * -@@ -505,15 +667,25 @@ static int qla4xxx_build_ddb_list(struct - uint32_t fw_ddb_index = 0; - uint32_t next_fw_ddb_index = 0; - uint32_t ddb_state; -- uint32_t conn_err, err_code; -+ uint32_t conn_err; - struct ddb_entry *ddb_entry; -+ struct dev_db_entry *fw_ddb_entry = NULL; -+ dma_addr_t fw_ddb_entry_dma; - uint32_t new_tgt; -+ uint32_t ipv6_device; -+ -+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), -+ &fw_ddb_entry_dma, GFP_KERNEL); -+ if (fw_ddb_entry == NULL) { -+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DMA alloc failed\n", __func__)); -+ return QLA_ERROR; -+ } - - dev_info(&ha->pdev->dev, "Initializing DDBs ...\n"); - for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; - fw_ddb_index = next_fw_ddb_index) { - /* First, let's see if a device exists here */ -- if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0, NULL, -+ if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry, 0, NULL, - &next_fw_ddb_index, &ddb_state, - &conn_err, NULL, NULL) == - QLA_ERROR) { -@@ -533,13 +705,11 @@ static int qla4xxx_build_ddb_list(struct - /* Try and login to device */ - DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n", - ha->host_no, __func__, fw_ddb_index)); -- err_code = ((conn_err & 0x00ff0000) >> 16); -- if (err_code == 0x1c || err_code == 0x06) { -- DEBUG2(printk("scsi%ld: %s send target " -- "completed " -- "or access denied failure\n", -- ha->host_no, __func__)); -- } else { -+ ipv6_device = le16_to_cpu(fw_ddb_entry->options) & -+ DDB_OPT_IPV6_DEVICE; -+ if (qla4_is_relogin_allowed(ha, conn_err) && -+ ((!ipv6_device && *((uint32_t *)fw_ddb_entry->ip_addr)) -+ || ipv6_device)) { - qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0); - if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, - NULL, 0, NULL, &next_fw_ddb_index, -@@ -599,7 +769,6 @@ next_one: - struct qla4_relog_scan { - int halt_wait; - uint32_t conn_err; -- uint32_t err_code; - uint32_t fw_ddb_index; - uint32_t next_fw_ddb_index; - uint32_t fw_ddb_device_state; -@@ -609,18 +778,7 @@ 
static int qla4_test_rdy(struct scsi_qla - { - struct ddb_entry *ddb_entry; - -- /* -- * Don't want to do a relogin if connection -- * error is 0x1c. -- */ -- rs->err_code = ((rs->conn_err & 0x00ff0000) >> 16); -- if (rs->err_code == 0x1c || rs->err_code == 0x06) { -- DEBUG2(printk( -- "scsi%ld: %s send target" -- " completed or " -- "access denied failure\n", -- ha->host_no, __func__)); -- } else { -+ if (qla4_is_relogin_allowed(ha, rs->conn_err)) { - /* We either have a device that is in - * the process of relogging in or a - * device that is waiting to be -@@ -908,7 +1066,7 @@ static void qla4x00_pci_config(struct sc - static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha) - { - int status = QLA_ERROR; -- uint32_t max_wait_time; -+ unsigned long max_wait_time; - unsigned long flags; - uint32_t mbox_status; - -@@ -927,6 +1085,13 @@ static int qla4xxx_start_firmware_from_f - ha->host_no, __func__)); - - spin_lock_irqsave(&ha->hardware_lock, flags); -+ /* -+ * Firmware must be informed that the driver supports -+ * ACB firmware features while starting firmware. -+ * If the firmware also supports these features it will -+ * be indicated in the IFCB offset 0x3A (acb_version). -+ */ -+ writel(ACB_SUPPORTED, &ha->reg->mailbox[6]); - writel(jiffies, &ha->reg->mailbox[7]); - if (is_qla4022(ha) | is_qla4032(ha)) - writel(set_rmask(NVR_WRITE_ENABLE), -@@ -940,7 +1105,10 @@ static int qla4xxx_start_firmware_from_f - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - /* Wait for firmware to come UP. */ -- max_wait_time = FIRMWARE_UP_TOV * 4; -+ DEBUG2(printk("scsi%ld: %s: Wait up to %d seconds for " -+ "boot firmware to complete... \n", -+ ha->host_no, __func__, FIRMWARE_UP_TOV)); -+ max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ); - do { - uint32_t ctrl_status; - -@@ -955,12 +1123,11 @@ static int qla4xxx_start_firmware_from_f - break; - - DEBUG2(printk("scsi%ld: %s: Waiting for boot firmware to " -- "complete... 
ctrl_sts=0x%x, remaining=%d\n", -- ha->host_no, __func__, ctrl_status, -- max_wait_time)); -+ "complete... ctrl_sts=0x%x\n", -+ ha->host_no, __func__, ctrl_status)); - -- msleep(250); -- } while ((max_wait_time--)); -+ msleep_interruptible(250); -+ } while (!time_after_eq(jiffies, max_wait_time)); - - if (mbox_status == MBOX_STS_COMMAND_COMPLETE) { - DEBUG(printk("scsi%ld: %s: Firmware has started\n", -@@ -1141,6 +1308,7 @@ int qla4xxx_initialize_adapter(struct sc - int status = QLA_ERROR; - int8_t ip_address[IP_ADDR_LEN] = {0} ; - -+ clear_bit(AF_ONLINE, &ha->flags); - ha->eeprom_cmd_data = 0; - - qla4x00_pci_config(ha); -@@ -1166,7 +1334,7 @@ int qla4xxx_initialize_adapter(struct sc - * the ddb_list and wait for DHCP lease acquired aen to come in - * followed by 0x8014 aen" to trigger the tgt discovery process. - */ -- if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) -+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) - goto exit_init_online; - - /* Skip device discovery if ip and subnet is zero */ -@@ -1270,8 +1438,8 @@ static void qla4xxx_add_device_dynamical - * - * This routine processes a Decive Database Changed AEN Event. - **/ --int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, -- uint32_t fw_ddb_index, uint32_t state) -+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, -+ uint32_t state, uint32_t conn_err) - { - struct ddb_entry * ddb_entry; - uint32_t old_fw_ddb_device_state; -@@ -1318,19 +1486,24 @@ int qla4xxx_process_ddb_changed(struct s - * the device came back. - */ - } else { -- /* Device went away, try to relogin. 
*/ -- /* Mark device missing */ -- if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) -+ /* Device went away, mark device missing */ -+ if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { -+ DEBUG2(dev_info(&ha->pdev->dev, "%s mark missing " -+ "ddb_entry 0x%p sess 0x%p conn 0x%p\n", -+ __func__, ddb_entry, -+ ddb_entry->sess, ddb_entry->conn)); - qla4xxx_mark_device_missing(ha, ddb_entry); -+ } -+ - /* - * Relogin if device state changed to a not active state. -- * However, do not relogin if this aen is a result of an IOCTL -- * logout (DF_NO_RELOGIN) or if this is a discovered device. -+ * However, do not relogin if a RELOGIN is in process, or -+ * we are not allowed to relogin to this DDB. - */ - if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && - !test_bit(DF_RELOGIN, &ddb_entry->flags) && - !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) && -- !test_bit(DF_ISNS_DISCOVERED, &ddb_entry->flags)) { -+ qla4_is_relogin_allowed(ha, conn_err)) { - /* - * This triggers a relogin. After the relogin_timer - * expires, the relogin gets scheduled. We must wait a -@@ -1338,7 +1511,7 @@ int qla4xxx_process_ddb_changed(struct s - * with failed device_state or a logout response before - * we can issue another relogin. - */ -- /* Firmware padds this timeout: (time2wait +1). -+ /* Firmware pads this timeout: (time2wait +1). - * Driver retry to login should be longer than F/W. - * Otherwise F/W will fail - * set_ddb() mbx cmd with 0x4005 since it still ---- a/drivers/scsi/qla4xxx/ql4_inline.h -+++ b/drivers/scsi/qla4xxx/ql4_inline.h -@@ -35,6 +35,34 @@ qla4xxx_lookup_ddb_by_fw_index(struct sc - return ddb_entry; - } - -+/* -+ * The MBOX_CMD_CLEAR_DATABASE_ENTRY (0x31) mailbox command does not -+ * result in an AEN, so we need to process it seperately. 
-+ */ -+static inline void qla4xxx_check_for_clear_ddb(struct scsi_qla_host *ha, -+ uint32_t *mbox_cmd) -+{ -+ uint32_t fw_ddb_index; -+ struct ddb_entry *ddb_entry = NULL; -+ -+ if (mbox_cmd[0] == MBOX_CMD_CLEAR_DATABASE_ENTRY) { -+ -+ fw_ddb_index = mbox_cmd[1]; -+ -+ if (fw_ddb_index < MAX_DDB_ENTRIES) -+ ddb_entry = ha->fw_ddb_index_map[fw_ddb_index]; -+ -+ if (ddb_entry) { -+ dev_info(&ha->pdev->dev, "%s: ddb[%d] os[%d] freed\n", -+ __func__, ddb_entry->fw_ddb_index, -+ ddb_entry->os_target_id); -+ set_bit(DF_REMOVE, &ddb_entry->flags); -+ set_bit(DPC_REMOVE_DEVICE, &ha->dpc_flags); -+ queue_work(ha->dpc_thread, &ha->dpc_work); -+ } -+ } -+} -+ - static inline void - __qla4xxx_enable_intrs(struct scsi_qla_host *ha) - { -@@ -82,3 +110,46 @@ qla4xxx_disable_intrs(struct scsi_qla_ho - __qla4xxx_disable_intrs(ha); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - } -+ -+static inline void -+qla4xxx_remove_device(struct scsi_qla_host *ha) -+{ -+ struct ddb_entry *ddb_entry, *dtemp; -+ -+ if (test_and_clear_bit(DPC_REMOVE_DEVICE, &ha->dpc_flags)) { -+ list_for_each_entry_safe(ddb_entry, dtemp, -+ &ha->ddb_list, list) { -+ if (test_and_clear_bit(DF_REMOVE, &ddb_entry->flags)) { -+ dev_info(&ha->pdev->dev, -+ "%s: ddb[%d] os[%d] - removed\n", -+ __func__, ddb_entry->fw_ddb_index, -+ ddb_entry->os_target_id); -+ qla4xxx_free_ddb(ha, ddb_entry); -+ } -+ } -+ } -+} -+ -+static void -+ql4_get_aen_log(struct scsi_qla_host *ha, struct ql4_aen_log *aenl) -+{ -+ if (aenl) { -+ memcpy(aenl, &ha->aen_log, sizeof (ha->aen_log)); -+ ha->aen_log.count = 0; -+ } -+} -+ -+static inline int -+qla4xxx_ioctl_init(struct scsi_qla_host *ha) -+{ -+ ha->ql4mbx = qla4xxx_mailbox_command; -+ ha->ql4cmd = qla4xxx_send_command_to_isp; -+ ha->ql4getaenlog = ql4_get_aen_log; -+ return 0; -+} -+ -+static inline void -+qla4xxx_ioctl_exit(struct scsi_qla_host *ha) -+{ -+ return; -+} ---- a/drivers/scsi/qla4xxx/ql4_iocb.c -+++ b/drivers/scsi/qla4xxx/ql4_iocb.c -@@ -209,6 +209,7 @@ int 
qla4xxx_send_command_to_isp(struct s - int nseg; - uint16_t tot_dsds; - uint16_t req_cnt; -+ uint16_t i; - unsigned long flags; - uint32_t index; - char tag[2]; -@@ -221,7 +222,24 @@ int qla4xxx_send_command_to_isp(struct s - /* Acquire hardware specific lock */ - spin_lock_irqsave(&ha->hardware_lock, flags); - -- index = (uint32_t)cmd->request->tag; -+ //index = (uint32_t)cmd->request->tag; -+ index = ha->current_active_index; -+ for (i = 0; i < MAX_SRBS; i++) { -+ index++; -+ if (index == MAX_SRBS) -+ index = 1; -+ if (ha->active_srb_array[index] == 0) { -+ ha->current_active_index = index; -+ break; -+ } -+ } -+ -+ if (i >= MAX_SRBS) { -+ printk(KERN_INFO "scsi%ld: %s: NO more SRB entries used " -+ "iocbs=%d, \n reqs remaining=%d\n", ha->host_no, -+ __func__, ha->iocb_cnt, ha->req_q_count); -+ goto queuing_error; -+ } - - /* - * Check to see if adapter is online before placing request on -@@ -300,6 +318,7 @@ int qla4xxx_send_command_to_isp(struct s - wmb(); - - srb->cmd->host_scribble = (unsigned char *)srb; -+ ha->active_srb_array[index] = srb; - - /* update counters */ - srb->state = SRB_ACTIVE_STATE; ---- a/drivers/scsi/qla4xxx/ql4_isr.c -+++ b/drivers/scsi/qla4xxx/ql4_isr.c -@@ -9,6 +9,7 @@ - #include "ql4_glbl.h" - #include "ql4_dbg.h" - #include "ql4_inline.h" -+#include - - /** - * qla4xxx_copy_sense - copy sense data into cmd sense buffer -@@ -97,7 +98,7 @@ qla4xxx_status_cont_entry(struct scsi_ql - - /* Place command on done queue. 
*/ - if (srb->req_sense_len == 0) { -- qla4xxx_srb_compl(ha, srb); -+ sp_put(ha, srb); - ha->status_srb = NULL; - } - } -@@ -329,7 +330,7 @@ status_entry_exit: - /* complete the request, if not waiting for status_continuation pkt */ - srb->cc_stat = sts_entry->completionStatus; - if (ha->status_srb == NULL) -- qla4xxx_srb_compl(ha, srb); -+ sp_put(ha, srb); - } - - /** -@@ -344,6 +345,9 @@ static void qla4xxx_process_response_que - uint32_t count = 0; - struct srb *srb = NULL; - struct status_entry *sts_entry; -+ struct async_pdu_iocb *apdu; -+ struct iscsi_hdr *pdu_hdr; -+ struct async_msg_pdu_iocb *apdu_iocb; - - /* Process all responses from response queue */ - while ((ha->response_in = -@@ -371,6 +375,34 @@ static void qla4xxx_process_response_que - case ET_PASSTHRU_STATUS: - break; - -+ case ET_ASYNC_PDU: -+ apdu = (struct async_pdu_iocb *)sts_entry; -+ if (apdu->status != ASYNC_PDU_IOCB_STS_OK) -+ break; -+ -+ pdu_hdr = (struct iscsi_hdr *)apdu->iscsi_pdu_hdr; -+ if (pdu_hdr->hlength || pdu_hdr->dlength[0] || -+ pdu_hdr->dlength[1] || pdu_hdr->dlength[2]){ -+ apdu_iocb = kmalloc(sizeof(struct async_msg_pdu_iocb), -+ GFP_ATOMIC); -+ if (apdu_iocb) { -+ memcpy(apdu_iocb->iocb, apdu, -+ sizeof(struct async_pdu_iocb)); -+ list_add_tail(&apdu_iocb->list, -+ &ha->async_iocb_list); -+ DEBUG2(printk("scsi%ld:" -+ "%s: schedule async msg pdu\n", -+ ha->host_no, __func__)); -+ set_bit(DPC_ASYNC_MSG_PDU, -+ &ha->dpc_flags); -+ } else { -+ DEBUG2(printk("scsi%ld:" -+ "%s: unable to alloc ASYNC PDU\n", -+ ha->host_no, __func__)); -+ } -+ } -+ break; -+ - case ET_STATUS_CONTINUATION: - qla4xxx_status_cont_entry(ha, - (struct status_cont_entry *) sts_entry); -@@ -393,7 +425,7 @@ static void qla4xxx_process_response_que - /* ETRY normally by sending it back with - * DID_BUS_BUSY */ - srb->cmd->result = DID_BUS_BUSY << 16; -- qla4xxx_srb_compl(ha, srb); -+ sp_put(ha, srb); - break; - - case ET_CONTINUE: -@@ -498,15 +530,20 @@ static void qla4xxx_isr_decode_mailbox(s - break; 
- - case MBOX_ASTS_LINK_UP: -- DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n", -- ha->host_no, mbox_status)); - set_bit(AF_LINK_UP, &ha->flags); -+ if (test_bit(AF_INIT_DONE, &ha->flags)) -+ set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); -+ -+ DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n", -+ ha->host_no, mbox_status)); - break; - - case MBOX_ASTS_LINK_DOWN: -- DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n", -- ha->host_no, mbox_status)); - clear_bit(AF_LINK_UP, &ha->flags); -+ set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); -+ -+ DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n", -+ ha->host_no, mbox_status)); - break; - - case MBOX_ASTS_HEARTBEAT: -@@ -831,7 +868,7 @@ void qla4xxx_process_aen(struct scsi_qla - qla4xxx_reinitialize_ddb_list(ha); - } else if (mbox_sts[1] == 1) { /* Specific device. */ - qla4xxx_process_ddb_changed(ha, mbox_sts[2], -- mbox_sts[3]); -+ mbox_sts[3], mbox_sts[4]); - } - break; - } ---- a/drivers/scsi/qla4xxx/ql4_mbx.c -+++ b/drivers/scsi/qla4xxx/ql4_mbx.c -@@ -23,7 +23,7 @@ - * If outCount is 0, this routine completes successfully WITHOUT waiting - * for the mailbox command to complete. - **/ --static int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, -+int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, - uint8_t outCount, uint32_t *mbx_cmd, - uint32_t *mbx_sts) - { -@@ -164,6 +164,8 @@ static int qla4xxx_mailbox_command(struc - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - mbox_exit: -+ if (status == QLA_SUCCESS) -+ qla4xxx_check_for_clear_ddb(ha, mbx_cmd); - mutex_lock(&ha->mbox_sem); - clear_bit(AF_MBOX_COMMAND, &ha->flags); - mutex_unlock(&ha->mbox_sem); -@@ -173,107 +175,284 @@ mbox_exit: - } - - /** -+ * qla4xxx_issue_iocb - issue mailbox iocb command -+ * @ha: adapter state pointer. -+ * @buffer: buffer pointer. -+ * @phys_addr: physical address of buffer. -+ * @size: size of buffer. -+ * -+ * Issues iocbs via mailbox commands. -+ * TARGET_QUEUE_LOCK must be released. 
-+ * ADAPTER_STATE_LOCK must be released. -+ **/ -+int -+qla4xxx_issue_iocb(struct scsi_qla_host *ha, uint32_t comp_offset, -+ dma_addr_t phys_addr) -+{ -+ uint32_t mbox_cmd[MBOX_REG_COUNT]; -+ uint32_t mbox_sts[MBOX_REG_COUNT]; -+ int status; -+ -+ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); -+ memset(&mbox_sts, 0, sizeof(mbox_sts)); -+ -+ mbox_cmd[0] = MBOX_CMD_EXECUTE_IOCB_A64; -+ mbox_cmd[1] = comp_offset; -+ mbox_cmd[2] = LSDW(phys_addr); -+ mbox_cmd[3] = MSDW(phys_addr); -+ -+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); -+ return status; -+} -+ -+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha, -+ uint16_t fw_ddb_index, -+ uint16_t connection_id, -+ uint16_t option) -+{ -+ uint32_t mbox_cmd[MBOX_REG_COUNT]; -+ uint32_t mbox_sts[MBOX_REG_COUNT]; -+ -+ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); -+ memset(&mbox_sts, 0, sizeof(mbox_sts)); -+ -+ mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT; -+ mbox_cmd[1] = fw_ddb_index; -+ mbox_cmd[2] = connection_id; -+ mbox_cmd[3] = LOGOUT_OPTION_RESET; -+ -+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) != -+ QLA_SUCCESS) { -+ DEBUG2(printk("scsi%ld: %s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT " -+ "option %04x failed sts %04X %04X", -+ ha->host_no, __func__, -+ option, mbox_sts[0], mbox_sts[1])); -+ if (mbox_sts[0] == 0x4005) -+ DEBUG2(printk("%s reason %04X\n", __func__, -+ mbox_sts[1])); -+ } -+ return QLA_SUCCESS; -+} -+ -+uint8_t -+qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, uint32_t *mbox_sts, -+ dma_addr_t init_fw_cb_dma) -+{ -+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); -+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); -+ mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; -+ mbox_cmd[1] = 0; -+ mbox_cmd[2] = LSDW(init_fw_cb_dma); -+ mbox_cmd[3] = MSDW(init_fw_cb_dma); -+ if (ha->ifcb_size > LEGACY_IFCB_SIZE) { -+ mbox_cmd[4] = ha->ifcb_size; -+ mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN; -+ } -+ -+ if 
(qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) -+ != QLA_SUCCESS) { -+ DEBUG2(printk("scsi%ld: %s: " -+ "MBOX_CMD_INITIALIZE_FIRMWARE failed w/ status %04X\n", -+ ha->host_no, __func__, mbox_sts[0])); -+ return (QLA_ERROR); -+ } -+ return (QLA_SUCCESS); -+} -+ -+uint8_t -+qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, uint32_t *mbox_sts, -+ dma_addr_t init_fw_cb_dma) -+{ -+ int i; -+ -+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); -+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); -+ mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK; -+ mbox_cmd[2] = LSDW(init_fw_cb_dma); -+ mbox_cmd[3] = MSDW(init_fw_cb_dma); -+ if (init_fw_cb_dma != 0 && ha->ifcb_size > LEGACY_IFCB_SIZE) -+ mbox_cmd[4] = ha->ifcb_size; -+ -+ if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) -+ != QLA_SUCCESS) { -+ if (init_fw_cb_dma == 0 && -+ mbox_sts[0] == MBOX_STS_COMMAND_ERROR) -+ return (QLA_SUCCESS); -+ -+ DEBUG2(printk("scsi%ld: %s: " -+ "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK failed w/ status %04X\n", -+ ha->host_no, __func__, mbox_sts[0])); -+ -+ for (i = 0; i < MBOX_REG_COUNT; i++) { -+ DEBUG2(printk("mbox_cmd[%d] = %08x, " -+ "mbox_sts[%d] = %08x\n", -+ i, mbox_cmd[i], i, mbox_sts[i])); -+ } -+ -+ return (QLA_ERROR); -+ } -+ return (QLA_SUCCESS); -+} -+ -+void -+qla4xxx_update_local_ip(struct scsi_qla_host *ha, -+ struct addr_ctrl_blk *init_fw_cb) -+{ -+ /* Save IPv4 Address Info */ -+ memcpy(ha->ip_address, init_fw_cb->ipv4_addr, -+ min(sizeof(ha->ip_address), sizeof(init_fw_cb->ipv4_addr))); -+ memcpy(ha->subnet_mask, init_fw_cb->ipv4_subnet, -+ min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->ipv4_subnet))); -+ memcpy(ha->gateway, init_fw_cb->ipv4_gw_addr, -+ min(sizeof(ha->gateway), sizeof(init_fw_cb->ipv4_gw_addr))); -+ -+ if (is_ipv6_enabled(ha)) { -+ /* Save IPv6 Address */ -+ ha->ipv6_link_local_state = init_fw_cb->ipv6_lnk_lcl_addr_state; -+ ha->ipv6_addr0_state = init_fw_cb->ipv6_addr0_state; -+ ha->ipv6_addr1_state = 
init_fw_cb->ipv6_addr1_state; -+ ha->ipv6_default_router_state = init_fw_cb->ipv6_dflt_rtr_state; -+ ha->ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE; -+ ha->ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80; -+ -+ memcpy(&ha->ipv6_link_local_addr.in6_u.u6_addr8[8], init_fw_cb->ipv6_if_id, -+ min(sizeof(ha->ipv6_link_local_addr)/2, sizeof(init_fw_cb->ipv6_if_id))); -+ memcpy(&ha->ipv6_addr0, init_fw_cb->ipv6_addr0, -+ min(sizeof(ha->ipv6_addr0), sizeof(init_fw_cb->ipv6_addr0))); -+ memcpy(&ha->ipv6_addr1, init_fw_cb->ipv6_addr1, -+ min(sizeof(ha->ipv6_addr1), sizeof(init_fw_cb->ipv6_addr1))); -+ memcpy(&ha->ipv6_default_router_addr, init_fw_cb->ipv6_dflt_rtr_addr, -+ min(sizeof(ha->ipv6_default_router_addr), sizeof(init_fw_cb->ipv6_dflt_rtr_addr))); -+ } -+} -+ -+uint8_t -+qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, -+ uint32_t *mbox_cmd, -+ uint32_t *mbox_sts, -+ struct addr_ctrl_blk *init_fw_cb, -+ dma_addr_t init_fw_cb_dma) -+{ -+ if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma) -+ != QLA_SUCCESS) { -+ DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", -+ ha->host_no, __func__)); -+ return (QLA_ERROR); -+ } -+ -+ DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk))); -+ -+ /* Save some info in adapter structure. 
*/ -+ ha->acb_version = init_fw_cb->acb_version; -+ ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options); -+ ha->tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts); -+ ha->ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts); -+ ha->ipv4_addr_state = le16_to_cpu(init_fw_cb->ipv4_addr_state); -+ ha->heartbeat_interval = init_fw_cb->hb_interval; -+ memcpy(ha->name_string, init_fw_cb->iscsi_name, -+ min(sizeof(ha->name_string), -+ sizeof(init_fw_cb->iscsi_name))); -+ /*memcpy(ha->alias, init_fw_cb->Alias, -+ min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ -+ -+ /* Save Command Line Paramater info */ -+ ha->port_down_retry_count = le16_to_cpu(init_fw_cb->conn_ka_timeout); -+ ha->discovery_wait = ql4xdiscoverywait; -+ -+ if (ha->acb_version == ACB_SUPPORTED) { -+ ha->ipv6_options = init_fw_cb->ipv6_opts; -+ ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts; -+ } -+ qla4xxx_update_local_ip(ha, init_fw_cb); -+ -+ return (QLA_SUCCESS); -+} -+ -+/** - * qla4xxx_initialize_fw_cb - initializes firmware control block. - * @ha: Pointer to host adapter structure. - **/ - int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) - { -- struct init_fw_ctrl_blk *init_fw_cb; -+ struct addr_ctrl_blk *init_fw_cb; - dma_addr_t init_fw_cb_dma; - uint32_t mbox_cmd[MBOX_REG_COUNT]; - uint32_t mbox_sts[MBOX_REG_COUNT]; - int status = QLA_ERROR; - -+ /* Default to Legacy IFCB Size */ -+ ha->ifcb_size = LEGACY_IFCB_SIZE; -+ - init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, -- sizeof(struct init_fw_ctrl_blk), -+ sizeof(struct addr_ctrl_blk), - &init_fw_cb_dma, GFP_KERNEL); - if (init_fw_cb == NULL) { - DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", - ha->host_no, __func__)); - return 10; - } -- memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk)); -+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); - - /* Get Initialize Firmware Control Block. 
*/ - memset(&mbox_cmd, 0, sizeof(mbox_cmd)); - memset(&mbox_sts, 0, sizeof(mbox_sts)); - -- mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK; -- mbox_cmd[2] = LSDW(init_fw_cb_dma); -- mbox_cmd[3] = MSDW(init_fw_cb_dma); -- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk); -- -- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) != -- QLA_SUCCESS) { -+ /* -+ * Determine if larger IFCB is supported -+ */ -+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) -+ != QLA_SUCCESS) { - dma_free_coherent(&ha->pdev->dev, -- sizeof(struct init_fw_ctrl_blk), -+ sizeof(struct addr_ctrl_blk), - init_fw_cb, init_fw_cb_dma); -- return status; -+ goto exit_init_fw_cb; -+ } -+ -+ if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR && -+ mbox_sts[4] > LEGACY_IFCB_SIZE) { -+ /* Supports larger ifcb size */ -+ ha->ifcb_size = mbox_sts[4]; - } - - /* Initialize request and response queues. */ - qla4xxx_init_rings(ha); - - /* Fill in the request and response queue information. */ -- init_fw_cb->pri.rqq_consumer_idx = cpu_to_le16(ha->request_out); -- init_fw_cb->pri.compq_producer_idx = cpu_to_le16(ha->response_in); -- init_fw_cb->pri.rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH); -- init_fw_cb->pri.compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH); -- init_fw_cb->pri.rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma)); -- init_fw_cb->pri.rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma)); -- init_fw_cb->pri.compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma)); -- init_fw_cb->pri.compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma)); -- init_fw_cb->pri.shdwreg_addr_lo = -- cpu_to_le32(LSDW(ha->shadow_regs_dma)); -- init_fw_cb->pri.shdwreg_addr_hi = -- cpu_to_le32(MSDW(ha->shadow_regs_dma)); -+ init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out); -+ init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in); -+ init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH); -+ init_fw_cb->compq_len = 
__constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH); -+ init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma)); -+ init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma)); -+ init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma)); -+ init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma)); -+ init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma)); -+ init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma)); - - /* Set up required options. */ -- init_fw_cb->pri.fw_options |= -+ init_fw_cb->fw_options |= - __constant_cpu_to_le16(FWOPT_SESSION_MODE | - FWOPT_INITIATOR_MODE); -- init_fw_cb->pri.fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); -- -- /* Save some info in adapter structure. */ -- ha->firmware_options = le16_to_cpu(init_fw_cb->pri.fw_options); -- ha->tcp_options = le16_to_cpu(init_fw_cb->pri.ipv4_tcp_opts); -- ha->heartbeat_interval = init_fw_cb->pri.hb_interval; -- memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr, -- min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr))); -- memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet, -- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet))); -- memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr, -- min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr))); -- memcpy(ha->name_string, init_fw_cb->pri.iscsi_name, -- min(sizeof(ha->name_string), -- sizeof(init_fw_cb->pri.iscsi_name))); -- /*memcpy(ha->alias, init_fw_cb->Alias, -- min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ -- -- /* Save Command Line Paramater info */ -- ha->port_down_retry_count = le16_to_cpu(init_fw_cb->pri.conn_ka_timeout); -- ha->discovery_wait = ql4xdiscoverywait; -- -- /* Send Initialize Firmware Control Block. 
*/ -- memset(&mbox_cmd, 0, sizeof(mbox_cmd)); -- memset(&mbox_sts, 0, sizeof(mbox_sts)); -+ init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); - -- mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; -- mbox_cmd[1] = 0; -- mbox_cmd[2] = LSDW(init_fw_cb_dma); -- mbox_cmd[3] = MSDW(init_fw_cb_dma); -- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk); -+ if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) -+ != QLA_SUCCESS) { -+ DEBUG2(printk("scsi%ld: %s: Failed to set init_fw_ctrl_blk\n", -+ ha->host_no, __func__)); -+ goto exit_init_fw_cb; -+ } - -- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) == -- QLA_SUCCESS) -- status = QLA_SUCCESS; -- else { -- DEBUG2(printk("scsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE " -- "failed w/ status %04X\n", ha->host_no, __func__, -- mbox_sts[0])); -+ if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], -+ init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) { -+ DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n", -+ ha->host_no, __func__)); -+ goto exit_init_fw_cb; - } -- dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk), -- init_fw_cb, init_fw_cb_dma); -+ status = QLA_SUCCESS; -+ -+exit_init_fw_cb: -+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), -+ init_fw_cb, init_fw_cb_dma); - - return status; - } -@@ -284,13 +463,13 @@ int qla4xxx_initialize_fw_cb(struct scsi - **/ - int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha) - { -- struct init_fw_ctrl_blk *init_fw_cb; -+ struct addr_ctrl_blk *init_fw_cb; - dma_addr_t init_fw_cb_dma; - uint32_t mbox_cmd[MBOX_REG_COUNT]; - uint32_t mbox_sts[MBOX_REG_COUNT]; - - init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, -- sizeof(struct init_fw_ctrl_blk), -+ sizeof(struct addr_ctrl_blk), - &init_fw_cb_dma, GFP_KERNEL); - if (init_fw_cb == NULL) { - printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, -@@ -299,35 +478,21 @@ int qla4xxx_get_dhcp_ip_address(struct s - } - - /* Get 
Initialize Firmware Control Block. */ -- memset(&mbox_cmd, 0, sizeof(mbox_cmd)); -- memset(&mbox_sts, 0, sizeof(mbox_sts)); -- -- memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk)); -- mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK; -- mbox_cmd[2] = LSDW(init_fw_cb_dma); -- mbox_cmd[3] = MSDW(init_fw_cb_dma); -- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk); -- -- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) != -- QLA_SUCCESS) { -+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); -+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) -+ != QLA_SUCCESS) { - DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", - ha->host_no, __func__)); - dma_free_coherent(&ha->pdev->dev, -- sizeof(struct init_fw_ctrl_blk), -+ sizeof(struct addr_ctrl_blk), - init_fw_cb, init_fw_cb_dma); - return QLA_ERROR; - } - - /* Save IP Address. */ -- memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr, -- min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr))); -- memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet, -- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet))); -- memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr, -- min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr))); -- -- dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk), -- init_fw_cb, init_fw_cb_dma); -+ qla4xxx_update_local_ip(ha, init_fw_cb); -+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), -+ init_fw_cb, init_fw_cb_dma); - - return QLA_SUCCESS; - } -@@ -441,14 +606,14 @@ int qla4xxx_get_fwddb_entry(struct scsi_ - goto exit_get_fwddb; - } - if (fw_ddb_entry) { -- dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d " -- "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n", -- fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3], -- mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr[0], -- fw_ddb_entry->ip_addr[1], fw_ddb_entry->ip_addr[2], -- fw_ddb_entry->ip_addr[3], -- 
le16_to_cpu(fw_ddb_entry->port), -- fw_ddb_entry->iscsi_name); -+ dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d " -+ "Next %d State %04x ConnErr %08x " NIPQUAD_FMT -+ ":%04d \"%s\"\n", __func__, fw_ddb_index, -+ mbox_sts[0], mbox_sts[2], mbox_sts[3], -+ mbox_sts[4], mbox_sts[5], -+ NIPQUAD(fw_ddb_entry->ip_addr), -+ le16_to_cpu(fw_ddb_entry->port), -+ fw_ddb_entry->iscsi_name); - } - if (num_valid_ddb_entries) - *num_valid_ddb_entries = mbox_sts[2]; -@@ -664,6 +829,53 @@ exit_get_event_log: - } - - /** -+ * qla4xxx_abort_task - issues Abort Task -+ * @ha: Pointer to host adapter structure. -+ * @srb: Pointer to srb entry -+ * -+ * This routine performs a LUN RESET on the specified target/lun. -+ * The caller must ensure that the ddb_entry and lun_entry pointers -+ * are valid before calling this routine. -+ **/ -+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb) -+{ -+ uint32_t mbox_cmd[MBOX_REG_COUNT]; -+ uint32_t mbox_sts[MBOX_REG_COUNT]; -+ struct scsi_cmnd *cmd = srb->cmd; -+ int status = QLA_SUCCESS; -+ -+ DEBUG2(printk("scsi%ld:%d:%d:%d: abort task issued\n", ha->host_no, -+ cmd->device->channel, cmd->device->id, cmd->device->lun)); -+ -+ /* -+ * Send abort task command to ISP, so that the ISP will return -+ * request with ABORT status -+ */ -+ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); -+ memset(&mbox_sts, 0, sizeof(mbox_sts)); -+ -+ mbox_cmd[0] = MBOX_CMD_ABORT_TASK; -+ mbox_cmd[1] = srb->fw_ddb_index; -+ mbox_cmd[2] = (unsigned long)(unsigned char *)cmd->host_scribble; -+ mbox_cmd[5] = 0x01; /* Immediate Command Enable */ -+ -+ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]); -+ if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) { -+ status = QLA_ERROR; -+ -+ DEBUG2(printk("scsi%ld:%d:%d:%d: abort task FAILED: ", ha->host_no, -+ cmd->device->channel, cmd->device->id, cmd->device->lun)); -+ DEBUG2(printk("mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n", -+ mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], -+ 
mbox_sts[4])); -+ } -+ -+ return status; -+} -+ -+ -+ -+/** - * qla4xxx_reset_lun - issues LUN Reset - * @ha: Pointer to host adapter structure. - * @db_entry: Pointer to device database entry -@@ -679,11 +891,33 @@ int qla4xxx_reset_lun(struct scsi_qla_ho - uint32_t mbox_cmd[MBOX_REG_COUNT]; - uint32_t mbox_sts[MBOX_REG_COUNT]; - int status = QLA_SUCCESS; -+ unsigned long wait_online; - - DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no, - ddb_entry->os_target_id, lun)); - - /* -+ * If device is not online wait for 10 sec for device to come online. -+ * else return error -+ */ -+ if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { -+ wait_online = jiffies + (DEVICE_ONLINE_TOV * HZ); -+ while (time_before(jiffies, wait_online)) { -+ set_current_state(TASK_INTERRUPTIBLE); -+ schedule_timeout(HZ); -+ if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) -+ break; -+ } -+ -+ if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { -+ DEBUG2(printk("scsi%ld: %s: Unable to reset lun." 
-+ "Device is not online.\n", ha->host_no -+ , __func__)); -+ return QLA_ERROR; -+ } -+ } -+ -+ /* - * Send lun reset command to ISP, so that the ISP will return all - * outstanding requests with RESET status - */ ---- a/drivers/scsi/qla4xxx/ql4_os.c -+++ b/drivers/scsi/qla4xxx/ql4_os.c -@@ -8,6 +8,8 @@ - - #include - #include -+#include -+#include - - #include "ql4_def.h" - #include "ql4_version.h" -@@ -18,7 +20,18 @@ - /* - * Driver version - */ --static char qla4xxx_version_str[40]; -+char qla4xxx_version_str[40]; -+EXPORT_SYMBOL_GPL(qla4xxx_version_str); -+ -+/* -+ * List of host adapters -+ */ -+struct klist qla4xxx_hostlist; -+ -+struct klist *qla4xxx_hostlist_ptr = &qla4xxx_hostlist; -+EXPORT_SYMBOL_GPL(qla4xxx_hostlist_ptr); -+ -+static atomic_t qla4xxx_hba_count; - - /* - * SRB allocation cache -@@ -73,6 +86,7 @@ static enum blk_eh_timer_return qla4xxx_ - */ - static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, - void (*done) (struct scsi_cmnd *)); -+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); - static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); - static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); - static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); -@@ -87,6 +101,7 @@ static struct scsi_host_template qla4xxx - .proc_name = DRIVER_NAME, - .queuecommand = qla4xxx_queuecommand, - -+ .eh_abort_handler = qla4xxx_eh_abort, - .eh_device_reset_handler = qla4xxx_eh_device_reset, - .eh_target_reset_handler = qla4xxx_eh_target_reset, - .eh_host_reset_handler = qla4xxx_eh_host_reset, -@@ -95,6 +110,7 @@ static struct scsi_host_template qla4xxx - .slave_configure = qla4xxx_slave_configure, - .slave_alloc = qla4xxx_slave_alloc, - .slave_destroy = qla4xxx_slave_destroy, -+ .target_destroy = NULL, - - .scan_finished = iscsi_scan_finished, - .scan_start = qla4xxx_scan_start, -@@ -272,10 +288,8 @@ void qla4xxx_destroy_sess(struct ddb_ent - if (!ddb_entry->sess) - return; - -- if (ddb_entry->conn) { -- atomic_set(&ddb_entry->state, 
DDB_STATE_DEAD); -+ if (ddb_entry->conn) - iscsi_remove_session(ddb_entry->sess); -- } - iscsi_free_session(ddb_entry->sess); - } - -@@ -370,8 +384,19 @@ void qla4xxx_mark_device_missing(struct - ddb_entry->fw_ddb_index)); - iscsi_block_session(ddb_entry->sess); - iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED); -+ set_bit(DF_NO_RELOGIN, &ddb_entry->flags); - } - -+/*** -+ * qla4xxx_get_new_srb - Allocate memory for a local srb. -+ * @ha: Pointer to host adapter structure. -+ * @ddb_entry: Pointer to device database entry -+ * @cmd: Pointer to Linux's SCSI command structure -+ * @done: Pointer to Linux's SCSI mid-layer done function -+ * -+ * NOTE: Sets te ref_count for non-NULL srb to one, -+ * and initializes some fields. -+ **/ - static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, - struct ddb_entry *ddb_entry, - struct scsi_cmnd *cmd, -@@ -417,6 +442,33 @@ void qla4xxx_srb_compl(struct scsi_qla_h - } - - /** -+ * sp_put - Decrement reference count and call callback. -+ * @ha: Pointer to host adapter structure. -+ * @sp: Pointer to srb structure -+ **/ -+void sp_put(struct scsi_qla_host *ha, struct srb *sp) -+{ -+ if (atomic_read(&sp->ref_count) == 0) { -+ DEBUG2(printk("%s: SP->ref_count ZERO\n", __func__)); -+ DEBUG2(BUG()); -+ return; -+ } -+ if (!atomic_dec_and_test(&sp->ref_count)) { -+ return; -+ } -+ qla4xxx_srb_compl(ha, sp); -+} -+ -+/** -+ * sp_get - Increment reference count of the specified sp. -+ * @sp: Pointer to srb structure -+ **/ -+void sp_get(struct srb *sp) -+{ -+ atomic_inc(&sp->ref_count); -+} -+ -+/** - * qla4xxx_queuecommand - scsi layer issues scsi command to driver. 
- * @cmd: Pointer to Linux's SCSI command structure - * @done_fn: Function that the driver calls to notify the SCSI mid-layer -@@ -451,7 +503,7 @@ static int qla4xxx_queuecommand(struct s - } - - if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { -- if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) { -+ if ((atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) { - cmd->result = DID_NO_CONNECT << 16; - goto qc_fail_command; - } -@@ -498,10 +550,24 @@ qc_fail_command: - **/ - static void qla4xxx_mem_free(struct scsi_qla_host *ha) - { -+ struct list_head *ptr; -+ struct async_msg_pdu_iocb *apdu_iocb; -+ - if (ha->queues) - dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, - ha->queues_dma); - -+ if (ha->gen_req_rsp_iocb) -+ dma_free_coherent(&ha->pdev->dev, PAGE_SIZE, -+ ha->gen_req_rsp_iocb, ha->gen_req_rsp_iocb_dma); -+ -+ while (!list_empty(&ha->async_iocb_list)) { -+ ptr = ha->async_iocb_list.next; -+ apdu_iocb = list_entry(ptr, struct async_msg_pdu_iocb, list); -+ list_del_init(&apdu_iocb->list); -+ kfree(apdu_iocb); -+ } -+ - ha->queues_len = 0; - ha->queues = NULL; - ha->queues_dma = 0; -@@ -587,6 +653,15 @@ static int qla4xxx_mem_alloc(struct scsi - goto mem_alloc_error_exit; - } - -+ ha->gen_req_rsp_iocb = dma_alloc_coherent(&ha->pdev->dev, PAGE_SIZE, -+ &ha->gen_req_rsp_iocb_dma, GFP_KERNEL); -+ if (ha->gen_req_rsp_iocb == NULL) { -+ dev_warn(&ha->pdev->dev, -+ "Memory Allocation failed - gen_req_rsp_iocb.\n"); -+ -+ goto mem_alloc_error_exit; -+ } -+ - return QLA_SUCCESS; - - mem_alloc_error_exit: -@@ -605,6 +680,24 @@ static void qla4xxx_timer(struct scsi_ql - - /* Search for relogin's to time-out and port down retry. 
*/ - list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) { -+ /* First check to see if the device has exhausted the -+ * port down retry count */ -+ if (atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) { -+ if (atomic_read(&ddb_entry->port_down_timer) == 0) -+ continue; -+ -+ if (atomic_dec_and_test(&ddb_entry->port_down_timer)) { -+ DEBUG2(printk("scsi%ld: %s: index [%d] " -+ "port down retry count of (%d) secs " -+ "exhausted.\n", -+ ha->host_no, __func__, -+ ddb_entry->fw_ddb_index, -+ ha->port_down_retry_count);) -+ -+ atomic_set(&ddb_entry->state, DDB_STATE_DEAD); -+ start_dpc++; -+ } -+ } - /* Count down time between sending relogins */ - if (adapter_up(ha) && - !test_bit(DF_RELOGIN, &ddb_entry->flags) && -@@ -639,7 +732,8 @@ static void qla4xxx_timer(struct scsi_ql - if (atomic_read(&ddb_entry->state) != - DDB_STATE_ONLINE && - ddb_entry->fw_ddb_device_state == -- DDB_DS_SESSION_FAILED) { -+ DDB_DS_SESSION_FAILED && -+ !test_bit(DF_NO_RELOGIN, &ddb_entry->flags)) { - /* Reset retry relogin timer */ - atomic_inc(&ddb_entry->relogin_retry_count); - DEBUG2(printk("scsi%ld: index[%d] relogin" -@@ -684,6 +778,9 @@ static void qla4xxx_timer(struct scsi_ql - test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) || - test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || - test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || -+ test_bit(DPC_REMOVE_DEVICE, &ha->dpc_flags) || -+ test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || -+ test_bit(DPC_ASYNC_MSG_PDU, &ha->dpc_flags) || - test_bit(DPC_AEN, &ha->dpc_flags)) && - ha->dpc_thread) { - DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" -@@ -710,7 +807,6 @@ static int qla4xxx_cmd_wait(struct scsi_ - uint32_t index = 0; - int stat = QLA_SUCCESS; - unsigned long flags; -- struct scsi_cmnd *cmd; - int wait_cnt = WAIT_CMD_TOV; /* - * Initialized for 30 seconds as we - * expect all commands to retuned -@@ -720,9 +816,8 @@ static int qla4xxx_cmd_wait(struct scsi_ - while (wait_cnt) { - 
spin_lock_irqsave(&ha->hardware_lock, flags); - /* Find a command that hasn't completed. */ -- for (index = 0; index < ha->host->can_queue; index++) { -- cmd = scsi_host_find_tag(ha->host, index); -- if (cmd != NULL) -+ for (index = 1; index < MAX_SRBS; index++) { -+ if (ha->active_srb_array[index] != NULL) - break; - } - spin_unlock_irqrestore(&ha->hardware_lock, flags); -@@ -881,11 +976,11 @@ static void qla4xxx_flush_active_srbs(st - unsigned long flags; - - spin_lock_irqsave(&ha->hardware_lock, flags); -- for (i = 0; i < ha->host->can_queue; i++) { -- srb = qla4xxx_del_from_active_array(ha, i); -- if (srb != NULL) { -+ for (i = 1; i < MAX_SRBS; i++) { -+ if ((srb = ha->active_srb_array[i]) != NULL) { -+ qla4xxx_del_from_active_array(ha, i); - srb->cmd->result = DID_RESET << 16; -- qla4xxx_srb_compl(ha, srb); -+ sp_put(ha, srb); - } - } - spin_unlock_irqrestore(&ha->hardware_lock, flags); -@@ -1000,6 +1095,134 @@ static int qla4xxx_recover_adapter(struc - return status; - } - -+/* -+ * qla4xxx_async_iocbs - processes ASYNC PDU IOCBS, if they are greater in -+ * length than 48 bytes (i.e., more than just the iscsi header). Used for -+ * unsolicited pdus received from target. 
-+ */ -+static void qla4xxx_async_iocbs(struct scsi_qla_host *ha, -+ struct async_msg_pdu_iocb *amsg_pdu_iocb) -+{ -+ struct iscsi_hdr *hdr; -+ struct async_pdu_iocb *apdu; -+ uint32_t len; -+ void *buf_addr; -+ dma_addr_t buf_addr_dma; -+ uint32_t offset; -+ struct passthru0 *pthru0_iocb; -+ struct ddb_entry *ddb_entry = NULL; -+ ASYNC_PDU_SENSE *pdu_sense; -+ -+ uint8_t using_prealloc = 1; -+ uint8_t async_event_type; -+ -+ apdu = (struct async_pdu_iocb *)amsg_pdu_iocb->iocb; -+ hdr = (struct iscsi_hdr *)apdu->iscsi_pdu_hdr; -+ len = hdr->hlength + hdr->dlength[2] + -+ (hdr->dlength[1]<<8) + (hdr->dlength[0]<<16); -+ -+ offset = sizeof(struct passthru0) + sizeof(struct passthru_status); -+ if (len <= (PAGE_SIZE - offset)) { -+ buf_addr_dma = ha->gen_req_rsp_iocb_dma + offset; -+ buf_addr = (uint8_t *)ha->gen_req_rsp_iocb + offset; -+ } else { -+ using_prealloc = 0; -+ buf_addr = dma_alloc_coherent(&ha->pdev->dev, len, -+ &buf_addr_dma, GFP_KERNEL); -+ if (!buf_addr) { -+ dev_info(&ha->pdev->dev, -+ "%s: dma_alloc_coherent failed\n", __func__); -+ return; -+ } -+ } -+ /* Create the pass-thru0 iocb */ -+ pthru0_iocb = ha->gen_req_rsp_iocb; -+ memset(pthru0_iocb, 0, offset); -+ pthru0_iocb->hdr.entryType = ET_PASSTHRU0; -+ pthru0_iocb->hdr.entryCount = 1; -+ pthru0_iocb->target = cpu_to_le16(apdu->target_id); -+ pthru0_iocb->controlFlags = -+ cpu_to_le16(PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE); -+ pthru0_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT); -+ pthru0_iocb->inDataSeg64.base.addrHigh = -+ cpu_to_le32(MSDW(buf_addr_dma)); -+ pthru0_iocb->inDataSeg64.base.addrLow = -+ cpu_to_le32(LSDW(buf_addr_dma)); -+ pthru0_iocb->inDataSeg64.count = cpu_to_le32(len); -+ pthru0_iocb->async_pdu_handle = cpu_to_le32(apdu->async_pdu_handle); -+ -+ dev_info(&ha->pdev->dev, -+ "%s: qla4xxx_issue_iocb\n", __func__); -+ -+ if (qla4xxx_issue_iocb(ha, sizeof(struct passthru0), -+ ha->gen_req_rsp_iocb_dma) != QLA_SUCCESS) { -+ dev_info(&ha->pdev->dev, -+ "%s: qla4xxx_issue_iocb 
failed\n", __func__); -+ goto exit_async_pdu_iocb; -+ } -+ -+ async_event_type = ((struct iscsi_async *)hdr)->async_event; -+ pdu_sense = (ASYNC_PDU_SENSE *)buf_addr; -+ -+ switch (async_event_type) { -+ case ISCSI_ASYNC_MSG_SCSI_EVENT: -+ dev_info(&ha->pdev->dev, -+ "%s: async msg event 0x%x processed\n" -+ , __func__, async_event_type); -+ -+ qla4xxx_dump_buffer(buf_addr, len); -+ -+ if (pdu_sense->sense_data[12] == 0x3F) { -+ if (pdu_sense->sense_data[13] == 0x0E) { -+ /* reported luns data has changed */ -+ uint16_t fw_index = apdu->target_id; -+ -+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_index); -+ if (ddb_entry == NULL) { -+ dev_info(&ha->pdev->dev, -+ "%s: No DDB entry for index [%d]\n" -+ , __func__, fw_index); -+ goto exit_async_pdu_iocb; -+ } -+ if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) { -+ dev_info(&ha->pdev->dev, -+ "scsi%ld: %s: No Active Session for index [%d]\n", -+ ha->host_no, __func__, fw_index); -+ goto exit_async_pdu_iocb; -+ } -+ -+ /* report new lun to kernel */ -+ scsi_scan_target(&ddb_entry->sess->dev, 0, -+ ddb_entry->sess->target_id, -+ SCAN_WILD_CARD, 0); -+ } -+ } -+ -+ break; -+ case ISCSI_ASYNC_MSG_REQUEST_LOGOUT: -+ case ISCSI_ASYNC_MSG_DROPPING_CONNECTION: -+ case ISCSI_ASYNC_MSG_DROPPING_ALL_CONNECTIONS: -+ case ISCSI_ASYNC_MSG_PARAM_NEGOTIATION: -+ dev_info(&ha->pdev->dev, -+ "%s: async msg event 0x%x processed\n" -+ , __func__, async_event_type); -+ qla4xxx_conn_close_sess_logout(ha, apdu->target_id, 0, 0); -+ break; -+ default: -+ dev_info(&ha->pdev->dev, -+ "%s: async msg event 0x%x not processed\n", -+ __func__, async_event_type); -+ break; -+ }; -+ -+ exit_async_pdu_iocb: -+ if (!using_prealloc) -+ dma_free_coherent(&ha->pdev->dev, len, -+ buf_addr, buf_addr_dma); -+ -+ return; -+} -+ - /** - * qla4xxx_do_dpc - dpc routine - * @data: in our case pointer to adapter structure -@@ -1016,6 +1239,7 @@ static void qla4xxx_do_dpc(struct work_s - struct scsi_qla_host *ha = - container_of(work, struct 
scsi_qla_host, dpc_work); - struct ddb_entry *ddb_entry, *dtemp; -+ struct async_msg_pdu_iocb *apdu_iocb, *apdu_iocb_tmp; - int status = QLA_ERROR; - - DEBUG2(printk("scsi%ld: %s: DPC handler waking up." -@@ -1068,13 +1292,16 @@ static void qla4xxx_do_dpc(struct work_s - if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) - qla4xxx_get_dhcp_ip_address(ha); - -+ qla4xxx_remove_device(ha); -+ - /* ---- relogin device? --- */ - if (adapter_up(ha) && - test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { - list_for_each_entry_safe(ddb_entry, dtemp, - &ha->ddb_list, list) { -- if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && -- atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) -+ if ((test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) && -+ (!test_bit(DF_NO_RELOGIN, &ddb_entry->flags)) && -+ (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) - qla4xxx_relogin_device(ha, ddb_entry); - - /* -@@ -1091,6 +1318,29 @@ static void qla4xxx_do_dpc(struct work_s - } - } - } -+ -+ if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { -+ if (!test_bit(AF_LINK_UP, &ha->flags)) { -+ /* ---- link down? --- */ -+ list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) { -+ if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) -+ qla4xxx_mark_device_missing(ha, ddb_entry); -+ } -+ } -+ } -+ -+ /* Check for ASYNC PDU IOCBs */ -+ if (adapter_up(ha) && -+ test_bit(DPC_ASYNC_MSG_PDU, &ha->dpc_flags)) { -+ -+ list_for_each_entry_safe(apdu_iocb, apdu_iocb_tmp, -+ &ha->async_iocb_list, list) { -+ qla4xxx_async_iocbs(ha, apdu_iocb); -+ list_del_init(&apdu_iocb->list); -+ kfree(apdu_iocb); -+ } -+ clear_bit(DPC_ASYNC_MSG_PDU, &ha->dpc_flags); -+ } - } - - /** -@@ -1244,6 +1494,7 @@ static int __devinit qla4xxx_probe_adapt - /* Initialize lists and spinlocks. 
*/ - INIT_LIST_HEAD(&ha->ddb_list); - INIT_LIST_HEAD(&ha->free_srb_q); -+ INIT_LIST_HEAD(&ha->async_iocb_list); - - mutex_init(&ha->mbox_sem); - -@@ -1319,8 +1570,6 @@ static int __devinit qla4xxx_probe_adapt - /* Start timer thread. */ - qla4xxx_start_timer(ha, qla4xxx_timer, 1); - -- set_bit(AF_INIT_DONE, &ha->flags); -- - pci_set_drvdata(pdev, ha); - - ret = scsi_add_host(host, &pdev->dev); -@@ -1333,9 +1582,27 @@ static int __devinit qla4xxx_probe_adapt - qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), - ha->host_no, ha->firmware_version[0], ha->firmware_version[1], - ha->patch_number, ha->build_number); -+ - scsi_scan_host(host); -+ -+ /* Insert new entry into the list of adapters. */ -+ klist_add_tail(&ha->node, &qla4xxx_hostlist); -+ ha->instance = atomic_inc_return(&qla4xxx_hba_count) - 1; -+ -+ if (qla4xxx_ioctl_init(ha)) { -+ dev_info(&ha->pdev->dev, "ioctl init failed\n"); -+ goto remove_host; -+ } -+ -+ set_bit(AF_INIT_DONE, &ha->flags); -+ dev_info(&ha->pdev->dev, "%s: AF_INIT_DONE\n", __func__); -+ - return 0; - -+remove_host: -+ qla4xxx_free_ddb_list(ha); -+ scsi_remove_host(host); -+ - probe_failed: - qla4xxx_free_adapter(ha); - scsi_host_put(ha->host); -@@ -1361,11 +1628,16 @@ static void __devexit qla4xxx_remove_ada - while (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) - ssleep(1); - -+ klist_remove(&ha->node); -+ atomic_dec(&qla4xxx_hba_count); -+ - /* remove devs from iscsi_sessions to scsi_devices */ - qla4xxx_free_ddb_list(ha); - - scsi_remove_host(ha->host); - -+ qla4xxx_ioctl_exit(ha); -+ - qla4xxx_free_adapter(ha); - - scsi_host_put(ha->host); -@@ -1429,12 +1701,14 @@ static void qla4xxx_slave_destroy(struct - struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index) - { - struct srb *srb = NULL; -- struct scsi_cmnd *cmd; - -- if (!(cmd = scsi_host_find_tag(ha->host, index))) -+ /* validate handle and remove from active array */ -+ if (index >= MAX_SRBS) - return srb; - -- if (!(srb = (struct srb 
*)cmd->host_scribble)) -+ srb = ha->active_srb_array[index]; -+ ha->active_srb_array[index] = NULL; -+ if (!srb) - return srb; - - /* update counters */ -@@ -1451,16 +1725,20 @@ struct srb * qla4xxx_del_from_active_arr - * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware - * @ha: actual ha whose done queue will contain the comd returned by firmware. - * @cmd: Scsi Command to wait on. -+ * @got_ref: Additional reference retrieved by caller. - * - * This routine waits for the command to be returned by the Firmware - * for some max time. - **/ - static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha, -- struct scsi_cmnd *cmd) -+ struct scsi_cmnd *cmd, int got_ref) - { -+#define ABORT_POLLING_PERIOD 1000 -+#define ABORT_WAIT_ITER 1 -+ - int done = 0; - struct srb *rp; -- uint32_t max_wait_time = EH_WAIT_CMD_TOV; -+ unsigned long wait_iter = ABORT_WAIT_ITER; - - do { - /* Checking to see if its returned to OS */ -@@ -1470,8 +1748,13 @@ static int qla4xxx_eh_wait_on_command(st - break; - } - -- msleep(2000); -- } while (max_wait_time--); -+ if (got_ref && (atomic_read(&rp->ref_count) == 1)) { -+ done++; -+ break; -+ } -+ -+ msleep(ABORT_POLLING_PERIOD); -+ } while (!(--wait_iter)); - - return done; - } -@@ -1513,26 +1796,176 @@ static int qla4xxx_eh_wait_for_commands( - { - int cnt; - int status = 0; -+ struct srb *sp; - struct scsi_cmnd *cmd; -+ unsigned long flags; - - /* - * Waiting for all commands for the designated target or dev - * in the active array - */ -- for (cnt = 0; cnt < ha->host->can_queue; cnt++) { -- cmd = scsi_host_find_tag(ha->host, cnt); -- if (cmd && stgt == scsi_target(cmd->device) && -- (!sdev || sdev == cmd->device)) { -- if (!qla4xxx_eh_wait_on_command(ha, cmd)) { -- status++; -- break; -+ for (cnt = 1; cnt < MAX_SRBS; cnt++) { -+ spin_lock_irqsave(&ha->hardware_lock, flags); -+ sp = ha->active_srb_array[cnt]; -+ if (sp) { -+ cmd = sp->cmd; -+ spin_unlock_irqrestore(&ha->hardware_lock, flags); -+ if (cmd && stgt 
== scsi_target(cmd->device) && -+ (!sdev || sdev == cmd->device)) { -+ if (!qla4xxx_eh_wait_on_command(ha, cmd, 0)) { -+ status++; -+ break; -+ } - } -+ } else { -+ spin_unlock_irqrestore(&ha->hardware_lock, flags); - } - } - return status; - } - - /** -+ * qla4xxx_eh_abort - callback for abort task. -+ * @cmd: Pointer to Linux's SCSI command structure -+ * -+ * This routine is called by the Linux OS to abort the specified -+ * command. -+ **/ -+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) -+{ -+ struct scsi_qla_host *ha; -+ struct srb *srb = NULL; -+ struct ddb_entry *ddb_entry; -+ int ret = SUCCESS; -+ unsigned int channel; -+ unsigned int id; -+ unsigned int lun; -+ unsigned long serial; -+ unsigned long flags = 0; -+ int i = 0; -+ int got_ref = 0; -+ unsigned long wait_online; -+ -+ if (cmd == NULL) { -+ DEBUG2(printk("ABORT - **** SCSI mid-layer passing in NULL cmd\n")); -+ return SUCCESS; -+ } -+ -+ ha = to_qla_host(cmd->device->host); -+ ddb_entry = cmd->device->hostdata; -+ channel = cmd->device->channel; -+ id = cmd->device->id; -+ lun = cmd->device->lun; -+ serial = cmd->serial_number; -+ -+ if (!ddb_entry) { -+ DEBUG2(printk("scsi%ld: ABORT - NULL ddb entry.\n", ha->host_no)); -+ return FAILED; -+ } -+ -+ if (!cmd->SCp.ptr) { -+ DEBUG2(printk("scsi%ld: ABORT - cmd already completed.\n", -+ ha->host_no)); -+ return ret; -+ } -+ -+ -+ -+ srb = (struct srb *) cmd->SCp.ptr; -+ -+ dev_info(&ha->pdev->dev, "scsi%ld:%d:%d:%d: ABORT ISSUED " -+ "cmd=%p, pid=%ld, ref=%d\n", ha->host_no, channel, id, lun, -+ cmd, serial, atomic_read(&srb->ref_count)); -+ -+ if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { -+ DEBUG2(printk("scsi%ld:%d: %s: Unable to abort task. 
Adapter " -+ "DEAD.\n", ha->host_no, cmd->device->channel -+ , __func__)); -+ -+ return FAILED; -+ } -+ -+ /* Check active list for command */ -+ spin_lock_irqsave(&ha->hardware_lock, flags); -+ for (i = 1; i < MAX_SRBS; i++) { -+ srb = ha->active_srb_array[i]; -+ -+ if (srb == NULL) -+ continue; -+ -+ if (srb->cmd != cmd) -+ continue; -+ -+ DEBUG2(printk("scsi%ld:%d:%d:%d %s: aborting srb %p from RISC. " -+ "pid=%ld.\n", ha->host_no, channel, id, lun, -+ __func__, srb, serial)); -+ DEBUG3(qla4xxx_print_scsi_cmd(cmd)); -+ -+ /* Get a reference to the sp and drop the lock.*/ -+ sp_get(srb); -+ got_ref++; -+ -+ spin_unlock_irqrestore(&ha->hardware_lock, flags); -+ -+ /* -+ * If device is not online wait for 10 sec for device to come online, -+ * else return error and do not issue abort task. -+ */ -+ if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { -+ wait_online = jiffies + (DEVICE_ONLINE_TOV * HZ); -+ while (time_before(jiffies, wait_online)) { -+ set_current_state(TASK_INTERRUPTIBLE); -+ schedule_timeout(HZ); -+ if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) -+ break; -+ } -+ if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { -+ DEBUG2(printk("scsi%ld:%d: %s: Unable to abort task." 
-+ "Device is not online.\n", ha->host_no -+ , cmd->device->channel, __func__)); -+ -+ return FAILED; -+ } -+ } -+ -+ if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { -+ dev_info(&ha->pdev->dev, -+ "scsi%ld:%d:%d:%d: ABORT TASK - FAILED.\n", -+ ha->host_no, channel, id, lun); -+ } else { -+ dev_info(&ha->pdev->dev, -+ "scsi%ld:%d:%d:%d: ABORT TASK - mbx success.\n", -+ ha->host_no, channel, id, lun); -+ } -+ spin_lock_irqsave(&ha->hardware_lock, flags); -+ break; -+ } -+ spin_unlock_irqrestore(&ha->hardware_lock, flags); -+ -+ /* Wait for command to complete */ -+ spin_unlock_irqrestore(&ha->hardware_lock, flags); -+ if (qla4xxx_eh_wait_on_command(ha, cmd, got_ref)) { -+ dev_info(&ha->pdev->dev, -+ "scsi%ld:%d:%d:%d: ABORT SUCCEEDED - " -+ "cmd returned back to OS.\n", -+ ha->host_no, channel, id, lun); -+ ret = SUCCESS; -+ } -+ -+ if (got_ref) -+ sp_put(ha, srb); -+ -+ DEBUG2(printk("scsi%ld:%d:%d:%d: ABORT cmd=%p, pid=%ld, ref=%d, " -+ "ret=%x\n", ha->host_no, channel, id, lun, cmd, -+ serial, atomic_read(&srb->ref_count), ret)); -+ -+ return ret; -+} -+ -+ -+ -+ -+/** - * qla4xxx_eh_device_reset - callback for target reset. 
- * @cmd: Pointer to Linux's SCSI command structure - * -@@ -1541,16 +1974,34 @@ static int qla4xxx_eh_wait_for_commands( - **/ - static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) - { -- struct scsi_qla_host *ha = to_qla_host(cmd->device->host); -- struct ddb_entry *ddb_entry = cmd->device->hostdata; -+ struct scsi_qla_host *ha; -+ struct ddb_entry *ddb_entry; - int ret = FAILED, stat; -+ struct Scsi_Host *h; -+ unsigned int b, t, l; - -- if (!ddb_entry) -+ if (cmd == NULL) { -+ DEBUG2(printk("%s: **** SCSI mid-layer passing in NULL cmd" -+ "DEVICE RESET - cmd already completed.\n", -+ __func__)); -+ return SUCCESS; -+ } -+ -+ h = cmd->device->host; -+ b = cmd->device->channel; -+ t = cmd->device->id; -+ l = cmd->device->lun; -+ ha = to_qla_host(h); -+ ddb_entry = cmd->device->hostdata; -+ -+ if (!ddb_entry) { -+ DEBUG2(printk("scsi%ld: DEVICE RESET - NULL ddb entry.\n" -+ , ha->host_no)); - return ret; -+ } - -- dev_info(&ha->pdev->dev, -- "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no, -- cmd->device->channel, cmd->device->id, cmd->device->lun); -+ dev_info(&ha->pdev->dev, "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n" -+ , ha->host_no, b, t, l); - - DEBUG2(printk(KERN_INFO - "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," -@@ -1558,8 +2009,13 @@ static int qla4xxx_eh_device_reset(struc - cmd, jiffies, cmd->request->timeout / HZ, - ha->dpc_flags, cmd->result, cmd->allowed)); - -- /* FIXME: wait for hba to go online */ -- stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); -+ /* wait for hba to go online */ -+ if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { -+ dev_info(&ha->pdev->dev, "%s: DEVICE RESET." -+ "Adapter Offline.\n", __func__); -+ return FAILED; -+ } -+ stat = qla4xxx_reset_lun(ha, ddb_entry, l); - if (stat != QLA_SUCCESS) { - dev_info(&ha->pdev->dev, "DEVICE RESET FAILED. %d\n", stat); - goto eh_dev_reset_done; -@@ -1574,14 +2030,13 @@ static int qla4xxx_eh_device_reset(struc - } - - /* Send marker. 
*/ -- if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, -- MM_LUN_RESET) != QLA_SUCCESS) -+ if (qla4xxx_send_marker_iocb(ha, ddb_entry, l, MM_LUN_RESET) -+ != QLA_SUCCESS) - goto eh_dev_reset_done; - - dev_info(&ha->pdev->dev, - "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n", -- ha->host_no, cmd->device->channel, cmd->device->id, -- cmd->device->lun); -+ ha->host_no, b, t, l); - - ret = SUCCESS; - -@@ -1655,6 +2110,13 @@ static int qla4xxx_eh_host_reset(struct - int return_status = FAILED; - struct scsi_qla_host *ha; - -+ if (cmd == NULL) { -+ DEBUG2(printk("%s: **** SCSI mid-layer passing in NULL cmd" -+ "HOST RESET - cmd already completed.\n", -+ __func__)); -+ return SUCCESS; -+ } -+ - ha = (struct scsi_qla_host *) cmd->device->host->hostdata; - - dev_info(&ha->pdev->dev, -@@ -1717,6 +2179,9 @@ static int __init qla4xxx_module_init(vo - { - int ret; - -+ atomic_set(&qla4xxx_hba_count, 0); -+ klist_init(&qla4xxx_hostlist, NULL, NULL); -+ - /* Allocate cache for SRBs. */ - srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, - SLAB_HWCACHE_ALIGN, NULL); ---- a/drivers/scsi/qla4xxx/ql4_version.h -+++ b/drivers/scsi/qla4xxx/ql4_version.h -@@ -5,5 +5,5 @@ - * See LICENSE.qla4xxx for copyright and licensing details. 
- */ - --#define QLA4XXX_DRIVER_VERSION "5.01.00-k9" -+#define QLA4XXX_DRIVER_VERSION "5.01.00.00.11.01-k10" - diff --git a/patches.drivers/staging-samsung-laptop-add-support-for-lots-of-laptops.patch b/patches.drivers/staging-samsung-laptop-add-support-for-lots-of-laptops.patch deleted file mode 100644 index dd63298..0000000 --- a/patches.drivers/staging-samsung-laptop-add-support-for-lots-of-laptops.patch +++ /dev/null @@ -1,707 +0,0 @@ -From foo@baz Wed Feb 9 13:35:10 PST 2011 -Date: Wed, 09 Feb 2011 13:35:10 -0800 -To: Greg KH -From: Greg Kroah-Hartman -Subject: Staging: samsung-laptop: add support for lots of laptops -References: bnc#661682 -Patch-mainline: 2.6.39 - -This is a backport of the upstream version of the driver that added support for -all samsung laptop devices. - -Signed-off-by: Greg Kroah-Hartman - -diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c -index 701e8d5..51ec621 100644 ---- a/drivers/staging/samsung-laptop/samsung-laptop.c -+++ b/drivers/staging/samsung-laptop/samsung-laptop.c -@@ -1,5 +1,5 @@ - /* -- * Samsung N130 Laptop driver -+ * Samsung Laptop driver - * - * Copyright (C) 2009 Greg Kroah-Hartman (gregkh@suse.de) - * Copyright (C) 2009 Novell Inc. -@@ -33,51 +33,6 @@ - */ - #define MAX_BRIGHT 0x07 - --/* Brightness is 0 - 8, as described above. Value 0 is for the BIOS to use */ --#define GET_BRIGHTNESS 0x00 --#define SET_BRIGHTNESS 0x01 -- --/* first byte: -- * 0x00 - wireless is off -- * 0x01 - wireless is on -- * second byte: -- * 0x02 - 3G is off -- * 0x03 - 3G is on -- * TODO, verify 3G is correct, that doesn't seem right... 
-- */ --#define GET_WIRELESS_BUTTON 0x02 --#define SET_WIRELESS_BUTTON 0x03 -- --/* 0 is off, 1 is on */ --#define GET_BACKLIGHT 0x04 --#define SET_BACKLIGHT 0x05 -- --/* -- * 0x80 or 0x00 - no action -- * 0x81 - recovery key pressed -- */ --#define GET_RECOVERY_METHOD 0x06 --#define SET_RECOVERY_METHOD 0x07 -- --/* 0 is low, 1 is high */ --#define GET_PERFORMANCE_LEVEL 0x08 --#define SET_PERFORMANCE_LEVEL 0x09 -- --/* -- * Tell the BIOS that Linux is running on this machine. -- * 81 is on, 80 is off -- */ --#define SET_LINUX 0x0a -- -- --#define MAIN_FUNCTION 0x4c49 -- --#define SABI_HEADER_PORT 0x00 --#define SABI_HEADER_RE_MEM 0x02 --#define SABI_HEADER_IFACEFUNC 0x03 --#define SABI_HEADER_EN_MEM 0x04 --#define SABI_HEADER_DATA_OFFSET 0x05 --#define SABI_HEADER_DATA_SEGMENT 0x07 - - #define SABI_IFACE_MAIN 0x00 - #define SABI_IFACE_SUB 0x02 -@@ -89,6 +44,173 @@ struct sabi_retval { - u8 retval[20]; - }; - -+struct sabi_header_offsets { -+ u8 port; -+ u8 re_mem; -+ u8 iface_func; -+ u8 en_mem; -+ u8 data_offset; -+ u8 data_segment; -+}; -+ -+struct sabi_commands { -+ /* -+ * Brightness is 0 - 8, as described above. -+ * Value 0 is for the BIOS to use -+ */ -+ u8 get_brightness; -+ u8 set_brightness; -+ -+ /* -+ * first byte: -+ * 0x00 - wireless is off -+ * 0x01 - wireless is on -+ * second byte: -+ * 0x02 - 3G is off -+ * 0x03 - 3G is on -+ * TODO, verify 3G is correct, that doesn't seem right... -+ */ -+ u8 get_wireless_button; -+ u8 set_wireless_button; -+ -+ /* 0 is off, 1 is on */ -+ u8 get_backlight; -+ u8 set_backlight; -+ -+ /* -+ * 0x80 or 0x00 - no action -+ * 0x81 - recovery key pressed -+ */ -+ u8 get_recovery_mode; -+ u8 set_recovery_mode; -+ -+ /* -+ * on seclinux: 0 is low, 1 is high, -+ * on swsmi: 0 is normal, 1 is silent, 2 is turbo -+ */ -+ u8 get_performance_level; -+ u8 set_performance_level; -+ -+ /* -+ * Tell the BIOS that Linux is running on this machine. 
-+ * 81 is on, 80 is off -+ */ -+ u8 set_linux; -+}; -+ -+struct sabi_performance_level { -+ const char *name; -+ u8 value; -+}; -+ -+struct sabi_config { -+ const char *test_string; -+ u16 main_function; -+ struct sabi_header_offsets header_offsets; -+ struct sabi_commands commands; -+ struct sabi_performance_level performance_levels[4]; -+}; -+ -+static struct sabi_config sabi_configs[] = { -+ { -+ .test_string = "SECLINUX", -+ -+ .main_function = 0x4c59, -+ -+ .header_offsets = { -+ .port = 0x00, -+ .re_mem = 0x02, -+ .iface_func = 0x03, -+ .en_mem = 0x04, -+ .data_offset = 0x05, -+ .data_segment = 0x07, -+ }, -+ -+ .commands = { -+ .get_brightness = 0x00, -+ .set_brightness = 0x01, -+ -+ .get_wireless_button = 0x02, -+ .set_wireless_button = 0x03, -+ -+ .get_backlight = 0x04, -+ .set_backlight = 0x05, -+ -+ .get_recovery_mode = 0x06, -+ .set_recovery_mode = 0x07, -+ -+ .get_performance_level = 0x08, -+ .set_performance_level = 0x09, -+ -+ .set_linux = 0x0a, -+ }, -+ -+ .performance_levels = { -+ { -+ .name = "silent", -+ .value = 0, -+ }, -+ { -+ .name = "normal", -+ .value = 1, -+ }, -+ { }, -+ }, -+ }, -+ { -+ .test_string = "SwSmi@", -+ -+ .main_function = 0x5843, -+ -+ .header_offsets = { -+ .port = 0x00, -+ .re_mem = 0x04, -+ .iface_func = 0x02, -+ .en_mem = 0x03, -+ .data_offset = 0x05, -+ .data_segment = 0x07, -+ }, -+ -+ .commands = { -+ .get_brightness = 0x10, -+ .set_brightness = 0x11, -+ -+ .get_wireless_button = 0x12, -+ .set_wireless_button = 0x13, -+ -+ .get_backlight = 0x2d, -+ .set_backlight = 0x2e, -+ -+ .get_recovery_mode = 0xff, -+ .set_recovery_mode = 0xff, -+ -+ .get_performance_level = 0x31, -+ .set_performance_level = 0x32, -+ -+ .set_linux = 0xff, -+ }, -+ -+ .performance_levels = { -+ { -+ .name = "normal", -+ .value = 0, -+ }, -+ { -+ .name = "silent", -+ .value = 1, -+ }, -+ { -+ .name = "overclock", -+ .value = 2, -+ }, -+ { }, -+ }, -+ }, -+ { }, -+}; -+ -+static struct sabi_config *sabi_config; -+ - static void __iomem *sabi; - 
static void __iomem *sabi_iface; - static void __iomem *f0000_segment; -@@ -109,21 +231,21 @@ MODULE_PARM_DESC(debug, "Debug enabled or not"); - static int sabi_get_command(u8 command, struct sabi_retval *sretval) - { - int retval = 0; -- u16 port = readw(sabi + SABI_HEADER_PORT); -+ u16 port = readw(sabi + sabi_config->header_offsets.port); - - mutex_lock(&sabi_mutex); - - /* enable memory to be able to write to it */ -- outb(readb(sabi + SABI_HEADER_EN_MEM), port); -+ outb(readb(sabi + sabi_config->header_offsets.en_mem), port); - - /* write out the command */ -- writew(MAIN_FUNCTION, sabi_iface + SABI_IFACE_MAIN); -+ writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); - writew(command, sabi_iface + SABI_IFACE_SUB); - writeb(0, sabi_iface + SABI_IFACE_COMPLETE); -- outb(readb(sabi + SABI_HEADER_IFACEFUNC), port); -+ outb(readb(sabi + sabi_config->header_offsets.iface_func), port); - - /* write protect memory to make it safe */ -- outb(readb(sabi + SABI_HEADER_RE_MEM), port); -+ outb(readb(sabi + sabi_config->header_offsets.re_mem), port); - - /* see if the command actually succeeded */ - if (readb(sabi_iface + SABI_IFACE_COMPLETE) == 0xaa && -@@ -156,22 +278,22 @@ exit: - static int sabi_set_command(u8 command, u8 data) - { - int retval = 0; -- u16 port = readw(sabi + SABI_HEADER_PORT); -+ u16 port = readw(sabi + sabi_config->header_offsets.port); - - mutex_lock(&sabi_mutex); - - /* enable memory to be able to write to it */ -- outb(readb(sabi + SABI_HEADER_EN_MEM), port); -+ outb(readb(sabi + sabi_config->header_offsets.en_mem), port); - - /* write out the command */ -- writew(MAIN_FUNCTION, sabi_iface + SABI_IFACE_MAIN); -+ writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); - writew(command, sabi_iface + SABI_IFACE_SUB); - writeb(0, sabi_iface + SABI_IFACE_COMPLETE); - writeb(data, sabi_iface + SABI_IFACE_DATA); -- outb(readb(sabi + SABI_HEADER_IFACEFUNC), port); -+ outb(readb(sabi + sabi_config->header_offsets.iface_func), port); - 
- /* write protect memory to make it safe */ -- outb(readb(sabi + SABI_HEADER_RE_MEM), port); -+ outb(readb(sabi + sabi_config->header_offsets.re_mem), port); - - /* see if the command actually succeeded */ - if (readb(sabi_iface + SABI_IFACE_COMPLETE) == 0xaa && -@@ -194,21 +316,21 @@ static void test_backlight(void) - { - struct sabi_retval sretval; - -- sabi_get_command(GET_BACKLIGHT, &sretval); -+ sabi_get_command(sabi_config->commands.get_backlight, &sretval); - printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); - -- sabi_set_command(SET_BACKLIGHT, 0); -+ sabi_set_command(sabi_config->commands.set_backlight, 0); - printk(KERN_DEBUG "backlight should be off\n"); - -- sabi_get_command(GET_BACKLIGHT, &sretval); -+ sabi_get_command(sabi_config->commands.get_backlight, &sretval); - printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); - - msleep(1000); - -- sabi_set_command(SET_BACKLIGHT, 1); -+ sabi_set_command(sabi_config->commands.set_backlight, 1); - printk(KERN_DEBUG "backlight should be on\n"); - -- sabi_get_command(GET_BACKLIGHT, &sretval); -+ sabi_get_command(sabi_config->commands.get_backlight, &sretval); - printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); - } - -@@ -216,21 +338,21 @@ static void test_wireless(void) - { - struct sabi_retval sretval; - -- sabi_get_command(GET_WIRELESS_BUTTON, &sretval); -+ sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); - printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); - -- sabi_set_command(SET_WIRELESS_BUTTON, 0); -+ sabi_set_command(sabi_config->commands.set_wireless_button, 0); - printk(KERN_DEBUG "wireless led should be off\n"); - -- sabi_get_command(GET_WIRELESS_BUTTON, &sretval); -+ sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); - printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); - - msleep(1000); - -- sabi_set_command(SET_WIRELESS_BUTTON, 1); -+ sabi_set_command(sabi_config->commands.set_wireless_button, 1); 
- printk(KERN_DEBUG "wireless led should be on\n"); - -- sabi_get_command(GET_WIRELESS_BUTTON, &sretval); -+ sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); - printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); - } - -@@ -240,7 +362,8 @@ static u8 read_brightness(void) - int user_brightness = 0; - int retval; - -- retval = sabi_get_command(GET_BRIGHTNESS, &sretval); -+ retval = sabi_get_command(sabi_config->commands.get_brightness, -+ &sretval); - if (!retval) - user_brightness = sretval.retval[0]; - if (user_brightness != 0) -@@ -250,7 +373,8 @@ static u8 read_brightness(void) - - static void set_brightness(u8 user_brightness) - { -- sabi_set_command(SET_BRIGHTNESS, user_brightness + 1); -+ sabi_set_command(sabi_config->commands.set_brightness, -+ user_brightness + 1); - } - - static int get_brightness(struct backlight_device *bd) -@@ -263,9 +387,9 @@ static int update_status(struct backlight_device *bd) - set_brightness(bd->props.brightness); - - if (bd->props.power == FB_BLANK_UNBLANK) -- sabi_set_command(SET_BACKLIGHT, 1); -+ sabi_set_command(sabi_config->commands.set_backlight, 1); - else -- sabi_set_command(SET_BACKLIGHT, 0); -+ sabi_set_command(sabi_config->commands.set_backlight, 0); - return 0; - } - -@@ -282,9 +406,9 @@ static int rfkill_set(void *data, bool blocked) - * blocked == true is off - */ - if (blocked) -- sabi_set_command(SET_WIRELESS_BUTTON, 0); -+ sabi_set_command(sabi_config->commands.set_wireless_button, 0); - else -- sabi_set_command(SET_WIRELESS_BUTTON, 1); -+ sabi_set_command(sabi_config->commands.set_wireless_button, 1); - - return 0; - } -@@ -317,47 +441,49 @@ static void destroy_wireless(void) - rfkill_destroy(rfk); - } - --static ssize_t get_silent_state(struct device *dev, -- struct device_attribute *attr, char *buf) -+static ssize_t get_performance_level(struct device *dev, -+ struct device_attribute *attr, char *buf) - { - struct sabi_retval sretval; - int retval; -+ int i; - - /* Read the state 
*/ -- retval = sabi_get_command(GET_PERFORMANCE_LEVEL, &sretval); -+ retval = sabi_get_command(sabi_config->commands.get_performance_level, -+ &sretval); - if (retval) - return retval; - - /* The logic is backwards, yeah, lots of fun... */ -- if (sretval.retval[0] == 0) -- retval = 1; -- else -- retval = 0; -- return sprintf(buf, "%d\n", retval); -+ for (i = 0; sabi_config->performance_levels[i].name; ++i) { -+ if (sretval.retval[0] == sabi_config->performance_levels[i].value) -+ return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name); -+ } -+ return sprintf(buf, "%s\n", "unknown"); - } - --static ssize_t set_silent_state(struct device *dev, -+static ssize_t set_performance_level(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) - { -- char value; -- - if (count >= 1) { -- value = buf[0]; -- if ((value == '0') || (value == 'n') || (value == 'N')) { -- /* Turn speed up */ -- sabi_set_command(SET_PERFORMANCE_LEVEL, 0x01); -- } else if ((value == '1') || (value == 'y') || (value == 'Y')) { -- /* Turn speed down */ -- sabi_set_command(SET_PERFORMANCE_LEVEL, 0x00); -- } else { -- return -EINVAL; -+ int i; -+ for (i = 0; sabi_config->performance_levels[i].name; ++i) { -+ struct sabi_performance_level *level = -+ &sabi_config->performance_levels[i]; -+ if (!strncasecmp(level->name, buf, strlen(level->name))) { -+ sabi_set_command(sabi_config->commands.set_performance_level, -+ level->value); -+ break; -+ } - } -+ if (!sabi_config->performance_levels[i].name) -+ return -EINVAL; - } - return count; - } --static DEVICE_ATTR(silent, S_IWUSR | S_IRUGO, -- get_silent_state, set_silent_state); -+static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO, -+ get_performance_level, set_performance_level); - - - static int __init dmi_check_cb(const struct dmi_system_id *id) -@@ -388,18 +514,113 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { - }, - .callback = dmi_check_cb, - }, -+ { -+ .ident = "X125", -+ .matches = { 
-+ DMI_MATCH(DMI_SYS_VENDOR, -+ "SAMSUNG ELECTRONICS CO., LTD."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "X125"), -+ DMI_MATCH(DMI_BOARD_NAME, "X125"), -+ }, -+ .callback = dmi_check_cb, -+ }, -+ { -+ .ident = "NC10", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, -+ "SAMSUNG ELECTRONICS CO., LTD."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), -+ DMI_MATCH(DMI_BOARD_NAME, "NC10"), -+ }, -+ .callback = dmi_check_cb, -+ }, -+ { -+ .ident = "NP-Q45", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, -+ "SAMSUNG ELECTRONICS CO., LTD."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"), -+ DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"), -+ }, -+ .callback = dmi_check_cb, -+ }, -+ { -+ .ident = "X360", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, -+ "SAMSUNG ELECTRONICS CO., LTD."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "X360"), -+ DMI_MATCH(DMI_BOARD_NAME, "X360"), -+ }, -+ .callback = dmi_check_cb, -+ }, -+ { -+ .ident = "R518", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, -+ "SAMSUNG ELECTRONICS CO., LTD."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "R518"), -+ DMI_MATCH(DMI_BOARD_NAME, "R518"), -+ }, -+ .callback = dmi_check_cb, -+ }, -+ { -+ .ident = "N150/N210/N220", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, -+ "SAMSUNG ELECTRONICS CO., LTD."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), -+ DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), -+ }, -+ .callback = dmi_check_cb, -+ }, -+ { -+ .ident = "R530/R730", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"), -+ DMI_MATCH(DMI_BOARD_NAME, "R530/R730"), -+ }, -+ .callback = dmi_check_cb, -+ }, -+ { -+ .ident = "NF110/NF210/NF310", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"), -+ DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"), -+ }, -+ .callback = dmi_check_cb, -+ }, - { }, - }; - MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); - -+static int find_signature(void __iomem *memcheck, const char *testStr) -+{ -+ int 
i = 0; -+ int loca; -+ -+ for (loca = 0; loca < 0xffff; loca++) { -+ char temp = readb(memcheck + loca); -+ -+ if (temp == testStr[i]) { -+ if (i == strlen(testStr)-1) -+ break; -+ ++i; -+ } else { -+ i = 0; -+ } -+ } -+ return loca; -+} -+ - static int __init samsung_init(void) - { - struct backlight_properties props; - struct sabi_retval sretval; -- const char *testStr = "SECLINUX"; -- void __iomem *memcheck; - unsigned int ifaceP; -- int pStr; -+ int i; - int loca; - int retval; - -@@ -414,50 +635,44 @@ static int __init samsung_init(void) - return -EINVAL; - } - -- /* Try to find the signature "SECLINUX" in memory to find the header */ -- pStr = 0; -- memcheck = f0000_segment; -- for (loca = 0; loca < 0xffff; loca++) { -- char temp = readb(memcheck + loca); -- -- if (temp == testStr[pStr]) { -- if (pStr == strlen(testStr)-1) -- break; -- ++pStr; -- } else { -- pStr = 0; -- } -+ /* Try to find one of the signatures in memory to find the header */ -+ for (i = 0; sabi_configs[i].test_string != 0; ++i) { -+ sabi_config = &sabi_configs[i]; -+ loca = find_signature(f0000_segment, sabi_config->test_string); -+ if (loca != 0xffff) -+ break; - } -+ - if (loca == 0xffff) { - printk(KERN_ERR "This computer does not support SABI\n"); - goto error_no_signature; -- } -+ } - - /* point to the SMI port Number */ - loca += 1; -- sabi = (memcheck + loca); -+ sabi = (f0000_segment + loca); - - if (debug) { - printk(KERN_DEBUG "This computer supports SABI==%x\n", - loca + 0xf0000 - 6); - printk(KERN_DEBUG "SABI header:\n"); - printk(KERN_DEBUG " SMI Port Number = 0x%04x\n", -- readw(sabi + SABI_HEADER_PORT)); -+ readw(sabi + sabi_config->header_offsets.port)); - printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n", -- readb(sabi + SABI_HEADER_IFACEFUNC)); -+ readb(sabi + sabi_config->header_offsets.iface_func)); - printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n", -- readb(sabi + SABI_HEADER_EN_MEM)); -+ readb(sabi + sabi_config->header_offsets.en_mem)); - 
printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n", -- readb(sabi + SABI_HEADER_RE_MEM)); -+ readb(sabi + sabi_config->header_offsets.re_mem)); - printk(KERN_DEBUG " SABI data offset = 0x%04x\n", -- readw(sabi + SABI_HEADER_DATA_OFFSET)); -+ readw(sabi + sabi_config->header_offsets.data_offset)); - printk(KERN_DEBUG " SABI data segment = 0x%04x\n", -- readw(sabi + SABI_HEADER_DATA_SEGMENT)); -+ readw(sabi + sabi_config->header_offsets.data_segment)); - } - - /* Get a pointer to the SABI Interface */ -- ifaceP = (readw(sabi + SABI_HEADER_DATA_SEGMENT) & 0x0ffff) << 4; -- ifaceP += readw(sabi + SABI_HEADER_DATA_OFFSET) & 0x0ffff; -+ ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4; -+ ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff; - sabi_iface = ioremap(ifaceP, 16); - if (!sabi_iface) { - printk(KERN_ERR "Can't remap %x\n", ifaceP); -@@ -470,15 +685,19 @@ static int __init samsung_init(void) - test_backlight(); - test_wireless(); - -- retval = sabi_get_command(GET_BRIGHTNESS, &sretval); -+ retval = sabi_get_command(sabi_config->commands.get_brightness, -+ &sretval); - printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]); - } - - /* Turn on "Linux" mode in the BIOS */ -- retval = sabi_set_command(SET_LINUX, 0x81); -- if (retval) { -- printk(KERN_ERR KBUILD_MODNAME ": Linux mode was not set!\n"); -- goto error_no_platform; -+ if (sabi_config->commands.set_linux != 0xff) { -+ retval = sabi_set_command(sabi_config->commands.set_linux, -+ 0x81); -+ if (retval) { -+ printk(KERN_ERR KBUILD_MODNAME ": Linux mode was not set!\n"); -+ goto error_no_platform; -+ } - } - - /* knock up a platform device to hang stuff off of */ -@@ -503,7 +722,7 @@ static int __init samsung_init(void) - if (retval) - goto error_no_rfk; - -- retval = device_create_file(&sdev->dev, &dev_attr_silent); -+ retval = device_create_file(&sdev->dev, &dev_attr_performance_level); - if (retval) - goto error_file_create; - -@@ -530,9 
+749,10 @@ error_no_signature: - static void __exit samsung_exit(void) - { - /* Turn off "Linux" mode in the BIOS */ -- sabi_set_command(SET_LINUX, 0x80); -+ if (sabi_config->commands.set_linux != 0xff) -+ sabi_set_command(sabi_config->commands.set_linux, 0x80); - -- device_remove_file(&sdev->dev, &dev_attr_silent); -+ device_remove_file(&sdev->dev, &dev_attr_performance_level); - backlight_device_unregister(backlight_device); - destroy_wireless(); - iounmap(sabi_iface); diff --git a/patches.drivers/tg3-5785-and-57780-asic-revs-not-working.patch b/patches.drivers/tg3-5785-and-57780-asic-revs-not-working.patch deleted file mode 100644 index e356768..0000000 --- a/patches.drivers/tg3-5785-and-57780-asic-revs-not-working.patch +++ /dev/null @@ -1,193 +0,0 @@ -From: Matt Carlson -Subject: tg3: 5785 and 57780 asic revs not working -References: bnc#580780 -Patch-mainline: Never - -There is a known problem with phylib that causes a lot of problems. -Phylib does not load phy modules as it detects devices on the MDIO bus. -If the phylib module gets loaded as a dependancy of tg3, there will be -no opportunity to load the needed broadcom.ko module before tg3 requests -phylib to probe the MDIO bus. The result will be that tg3 will fail to -attach to 5785 and 57780 devices. - -There are several known solutions to this problem. (None of these -should go upstream. The upstream fix should be to get phylib to load -modules for devices it encounters.) Only one of them need be applied. - -1) Statically link in the broadcom.ko module into the kernel. 
- -2) Add the following to /etc/modprobe.d/local.conf or its equivalent: - -install tg3 /sbin/modprobe broadcom; /sbin/modprobe --ignore-install tg3 - -3) Apply the following patch: - -Signed-off-by: Brandon Philips - ---- - drivers/net/tg3.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ - drivers/net/tg3.h | 9 +++++ - 2 files changed, 92 insertions(+) - ---- a/drivers/net/tg3.c -+++ b/drivers/net/tg3.c -@@ -1998,6 +1998,58 @@ static int tg3_phy_reset(struct tg3 *tp) - tg3_phy_toggle_apd(tp, false); - - out: -+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50610 || -+ (tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50610M) { -+ u32 reg; -+ -+ /* Enable SM_DSP clock and tx 6dB coding. */ -+ reg = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | -+ MII_TG3_AUXCTL_ACTL_SMDSP_ENA | -+ MII_TG3_AUXCTL_ACTL_TX_6DB; -+ tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); -+ -+ reg = MII_TG3_DSP_EXP8_REJ2MHz; -+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, reg); -+ -+ /* Apply workaround to A0 revision parts only. */ -+ if (tp->phy_id == TG3_PHY_ID_BCM50610 || -+ tp->phy_id == TG3_PHY_ID_BCM50610M) { -+ tg3_phydsp_write(tp, 0x001F, 0x0300); -+ tg3_phydsp_write(tp, 0x601F, 0x0002); -+ tg3_phydsp_write(tp, 0x0F75, 0x003C); -+ tg3_phydsp_write(tp, 0x0F96, 0x0010); -+ tg3_phydsp_write(tp, 0x0F97, 0x0C0C); -+ } -+ -+ /* Turn off SM_DSP clock. */ -+ reg = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | -+ MII_TG3_AUXCTL_ACTL_TX_6DB; -+ tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); -+ -+ /* Clear all mode configuration bits. */ -+ reg = MII_TG3_MISC_SHDW_WREN | -+ MII_TG3_MISC_SHDW_RGMII_SEL; -+ tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); -+ } -+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM57780) { -+ u32 reg; -+ -+ /* Enable SM_DSP clock and tx 6dB coding. 
*/ -+ reg = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | -+ MII_TG3_AUXCTL_ACTL_SMDSP_ENA | -+ MII_TG3_AUXCTL_ACTL_TX_6DB; -+ tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); -+ -+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_EXP75); -+ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, ®); -+ reg |= MII_TG3_DSP_EXP75_SUP_CM_OSC; -+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, reg); -+ -+ /* Turn off SM_DSP clock. */ -+ reg = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | -+ MII_TG3_AUXCTL_ACTL_TX_6DB; -+ tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); -+ } - if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); - tg3_phydsp_write(tp, 0x201f, 0x2aaa); -@@ -2054,6 +2106,22 @@ out: - /* adjust output voltage */ - tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); - } -+ else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { -+ u32 brcmtest; -+ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &brcmtest) && -+ !tg3_writephy(tp, MII_TG3_FET_TEST, -+ brcmtest | MII_TG3_FET_SHADOW_EN)) { -+ u32 val, reg = MII_TG3_FET_SHDW_AUXMODE4; -+ -+ if (!tg3_readphy(tp, reg, &val)) { -+ val &= ~MII_TG3_FET_SHDW_AM4_LED_MASK; -+ val |= MII_TG3_FET_SHDW_AM4_LED_MODE1; -+ tg3_writephy(tp, reg, val); -+ } -+ -+ tg3_writephy(tp, MII_TG3_FET_TEST, brcmtest); -+ } -+ } - - tg3_phy_toggle_automdix(tp, 1); - tg3_phy_set_wirespeed(tp); -@@ -3288,6 +3356,15 @@ relink: - - tg3_phy_eee_adjust(tp, current_link_up); - -+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { -+ if (tp->link_config.active_speed == SPEED_10) -+ tw32(MAC_MI_STAT, -+ MAC_MI_STAT_10MBPS_MODE | -+ MAC_MI_STAT_LNKSTAT_ATTN_ENAB); -+ else -+ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); -+ } -+ - if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { - /* Polled via timer. 
*/ - tw32_f(MAC_EVENT, 0); -@@ -13411,9 +13488,11 @@ static int __devinit tg3_get_invariants( - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) - tp->coalesce_mode |= HOSTCC_MODE_32BYTE; - -+#if 0 - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) - tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; -+#endif - - err = tg3_mdio_init(tp); - if (err) -@@ -14203,6 +14282,10 @@ static char * __devinit tg3_phy_string(s - case TG3_PHY_ID_BCM5718S: return "5718S"; - case TG3_PHY_ID_BCM57765: return "57765"; - case TG3_PHY_ID_BCM5719C: return "5719C"; -+ case TG3_PHY_ID_BCM50610: return "50610"; -+ case TG3_PHY_ID_BCM50610M: return "50610M"; -+ case TG3_PHY_ID_BCMAC131: return "AC131"; -+ case TG3_PHY_ID_BCM57780: return "57780"; - case TG3_PHY_ID_BCM8002: return "8002/serdes"; - case 0: return "serdes"; - default: return "unknown"; ---- a/drivers/net/tg3.h -+++ b/drivers/net/tg3.h -@@ -2072,6 +2072,7 @@ - #define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 - #define MII_TG3_DSP_EXP8_AEDW 0x0200 - #define MII_TG3_DSP_EXP75 0x0f75 -+#define MII_TG3_DSP_EXP75_SUP_CM_OSC 0x0001 - #define MII_TG3_DSP_EXP96 0x0f96 - #define MII_TG3_DSP_EXP97 0x0f97 - -@@ -2127,6 +2128,8 @@ - #define MII_TG3_MISC_SHDW_SCR5_LPED 0x0010 - #define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400 - -+#define MII_TG3_MISC_SHDW_RGMII_SEL 0x2c00 -+ - #define MII_TG3_TEST1 0x1e - #define MII_TG3_TEST1_TRIM_EN 0x0010 - #define MII_TG3_TEST1_CRC_EN 0x8000 -@@ -2144,6 +2147,8 @@ - #define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000 - - #define MII_TG3_FET_SHDW_AUXMODE4 0x1a -+#define MII_TG3_FET_SHDW_AM4_LED_MODE1 0x0001 -+#define MII_TG3_FET_SHDW_AM4_LED_MASK 0x0003 - #define MII_TG3_FET_SHDW_AUXMODE4_SBPD 0x0008 - - #define MII_TG3_FET_SHDW_AUXSTAT2 0x1b -@@ -2922,6 +2927,10 @@ struct tg3 { - #define TG3_PHY_ID_BCM5719C 0x5c0d8a20 - #define TG3_PHY_ID_BCM5906 0xdc00ac40 - #define TG3_PHY_ID_BCM8002 0x60010140 -+#define TG3_PHY_ID_BCM50610 0xbc050d60 -+#define TG3_PHY_ID_BCM50610M 
0xbc050d70 -+#define TG3_PHY_ID_BCMAC131 0xbc050c70 -+#define TG3_PHY_ID_BCM57780 0x5c0d8990 - #define TG3_PHY_ID_INVALID 0xffffffff - - #define PHY_ID_RTL8211C 0x001cc910 diff --git a/patches.drivers/tg3-entropy-source.patch b/patches.drivers/tg3-entropy-source.patch deleted file mode 100644 index 9295f2b..0000000 --- a/patches.drivers/tg3-entropy-source.patch +++ /dev/null @@ -1,61 +0,0 @@ -From: Brandon Philips -Subject: [PATCH] tg3: entropy source -Patch-mainline: never -References: FATE#307517 - -Signed-off-by: Brandon Philips - ---- - drivers/net/tg3.c | 13 +++++++++---- - 1 file changed, 9 insertions(+), 4 deletions(-) - ---- a/drivers/net/tg3.c -+++ b/drivers/net/tg3.c -@@ -15,7 +15,6 @@ - * notice is accompanying it. - */ - -- - #include - #include - #include -@@ -67,6 +66,10 @@ - - #include "tg3.h" - -+static int entropy = 0; -+module_param(entropy, int, 0); -+MODULE_PARM_DESC(entropy, "Allow tg3 to populate the /dev/random entropy pool"); -+ - #define DRV_MODULE_NAME "tg3" - #define TG3_MAJ_NUM 3 - #define TG3_MIN_NUM 116 -@@ -8590,10 +8593,13 @@ restart_timer: - static int tg3_request_irq(struct tg3 *tp, int irq_num) - { - irq_handler_t fn; -- unsigned long flags; -+ unsigned long flags = 0; - char *name; - struct tg3_napi *tnapi = &tp->napi[irq_num]; - -+ if (entropy) -+ flags = IRQF_SAMPLE_RANDOM; -+ - if (tp->irq_cnt == 1) - name = tp->dev->name; - else { -@@ -8606,12 +8612,11 @@ static int tg3_request_irq(struct tg3 *t - fn = tg3_msi; - if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) - fn = tg3_msi_1shot; -- flags = IRQF_SAMPLE_RANDOM; - } else { - fn = tg3_interrupt; - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) - fn = tg3_interrupt_tagged; -- flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; -+ flags |= IRQF_SHARED; - } - - return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); diff --git a/patches.fixes/acpi_ec_sys_access_user_space_with_get_user.patch b/patches.fixes/acpi_ec_sys_access_user_space_with_get_user.patch deleted file mode 100644 index 
8c6b04d..0000000 --- a/patches.fixes/acpi_ec_sys_access_user_space_with_get_user.patch +++ /dev/null @@ -1,78 +0,0 @@ -From: Vasiliy Kulikov -Subject: acpi: ec_sys: access user space with get_user()/put_user() -Patch-Mainline: hopefully still 2.6.36 -References: none - -User space pointer may not be dereferenced. Use get_user()/put_user() -instead and check their return codes. - -Signed-off-by: Vasiliy Kulikov -Signed-off-by: Thomas Renninger ---- - Compile tested. - - drivers/acpi/ec_sys.c | 18 ++++++++++++++---- - 1 files changed, 14 insertions(+), 4 deletions(-) - -diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c -index 0e869b3..cc007d8 100644 ---- a/drivers/acpi/ec_sys.c -+++ b/drivers/acpi/ec_sys.c -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include "internal.h" - - MODULE_AUTHOR("Thomas Renninger "); -@@ -43,7 +44,6 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf, - * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private; - */ - unsigned int size = EC_SPACE_SIZE; -- u8 *data = (u8 *) buf; - loff_t init_off = *off; - int err = 0; - -@@ -56,9 +56,15 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf, - size = count; - - while (size) { -- err = ec_read(*off, &data[*off - init_off]); -+ u8 byte_read; -+ err = ec_read(*off, &byte_read); - if (err) - return err; -+ if (put_user(byte_read, buf + *off - init_off)) { -+ if (*off - init_off) -+ return *off - init_off; /* partial read */ -+ return -EFAULT; -+ } - *off += 1; - size--; - } -@@ -74,7 +80,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf, - - unsigned int size = count; - loff_t init_off = *off; -- u8 *data = (u8 *) buf; - int err = 0; - - if (*off >= EC_SPACE_SIZE) -@@ -85,7 +90,12 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf, - } - - while (size) { -- u8 byte_write = data[*off - init_off]; -+ u8 byte_write; -+ if (get_user(byte_write, buf + *off - init_off)) { -+ if (*off - 
init_off) -+ return *off - init_off; /* partial write */ -+ return -EFAULT; -+ } - err = ec_write(*off, byte_write); - if (err) - return err; --- -1.7.0.4 - diff --git a/patches.fixes/aggressive-zone-reclaim.patch b/patches.fixes/aggressive-zone-reclaim.patch deleted file mode 100644 index c0d1c8c..0000000 --- a/patches.fixes/aggressive-zone-reclaim.patch +++ /dev/null @@ -1,67 +0,0 @@ -From: Nick Piggin -Subject: be more aggressive with zone reclaims -References: bnc#476525 -Patch-mainline: no - -The zone reclaim design is not very good for parallel allocations. -The primary problem is that only one thread is allowed to perform -zone-reclaim at a time. If another thread needs memory from that -zone/node, then its zone-reclaim will fail and it will be forced -to fall back to allocating from another zone. - -Additionally, the default zone reclaim priority is insufficient -for massively parallel allocations. Lower ZONE_RECLAIM_PRIORITY -to fix it. This can result in higher latency spikes, but similar -kind of page allocation latency can often be encountered as -normal part of page reclaim when pagecache fills memory. - -Signed-off-by: Petr Tesarik - ---- - mm/vmscan.c | 13 ++++--------- - 1 file changed, 4 insertions(+), 9 deletions(-) - ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -2515,7 +2515,7 @@ int zone_reclaim_mode __read_mostly; - * of a node considered for each zone_reclaim. 4 scans 1/16th of - * a zone. - */ --#define ZONE_RECLAIM_PRIORITY 4 -+#define ZONE_RECLAIM_PRIORITY 0 - - /* - * Percentage of pages in a zone that must be unmapped for zone_reclaim to -@@ -2620,6 +2620,8 @@ static int __zone_reclaim(struct zone *z - - slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE); - if (slab_reclaimable > zone->min_slab_pages) { -+ unsigned long lru_pages = zone_reclaimable_pages(zone); -+ - /* - * shrink_slab() does not currently allow us to determine how - * many pages were freed in this zone. 
So we take the current -@@ -2630,10 +2632,7 @@ static int __zone_reclaim(struct zone *z - * Note that shrink_slab will free memory on all zones and may - * take a long time. - */ -- while (shrink_slab(sc.nr_scanned, gfp_mask, order) && -- zone_page_state(zone, NR_SLAB_RECLAIMABLE) > -- slab_reclaimable - nr_pages) -- ; -+ shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); - - /* - * Update nr_reclaimed by the number of slab pages we -@@ -2687,11 +2686,7 @@ int zone_reclaim(struct zone *zone, gfp_ - if (node_state(node_id, N_CPU) && node_id != numa_node_id()) - return ZONE_RECLAIM_NOSCAN; - -- if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) -- return ZONE_RECLAIM_NOSCAN; -- - ret = __zone_reclaim(zone, gfp_mask, order); -- zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); - - if (!ret) - count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); diff --git a/patches.fixes/bonding-Incorrect-TX-queue-offset.patch b/patches.fixes/bonding-Incorrect-TX-queue-offset.patch deleted file mode 100644 index b0c0980..0000000 --- a/patches.fixes/bonding-Incorrect-TX-queue-offset.patch +++ /dev/null @@ -1,61 +0,0 @@ -From fd0e435b0fe85622f167b84432552885a4856ac8 Mon Sep 17 00:00:00 2001 -From: Phil Oester -Date: Mon, 14 Mar 2011 06:22:04 +0000 -Subject: [PATCH] bonding: Incorrect TX queue offset -Git-commit: fd0e435b0fe85622f167b84432552885a4856ac8 -Patch-mainline: v2.6.39-rc1~468^2~15 -Reference: bnc#687116, CVE-2011-1581 - -When packets come in from a device with >= 16 receive queues -headed out a bonding interface, syslog gets filled with this: - - kernel: bond0 selects TX queue 16, but real number of TX queues is 16 - -because queue_mapping is offset by 1. Adjust return value -to account for the offset. - -This is a revision of my earlier patch (which did not use the -skb_rx_queue_* helpers - thanks to Ben for the suggestion). -Andy submitted a similar patch which emits a pr_warning on -invalid queue selection, but I believe the log spew is -not useful. 
We can revisit that question in the future, -but in the interim I believe fixing the core problem is -worthwhile. - -Signed-off-by: Phil Oester -Signed-off-by: Andy Gospodarek -Signed-off-by: David S. Miller -Signed-off-by: Brandon Philips - ---- - drivers/net/bonding/bond_main.c | 11 +++++++++-- - 1 files changed, 9 insertions(+), 2 deletions(-) - -diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c -index 3ad4f50..a93d941 100644 ---- a/drivers/net/bonding/bond_main.c -+++ b/drivers/net/bonding/bond_main.c -@@ -4341,11 +4341,18 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) - { - /* - * This helper function exists to help dev_pick_tx get the correct -- * destination queue. Using a helper function skips the a call to -+ * destination queue. Using a helper function skips a call to - * skb_tx_hash and will put the skbs in the queue we expect on their - * way down to the bonding driver. - */ -- return skb->queue_mapping; -+ u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; -+ -+ if (unlikely(txq >= dev->real_num_tx_queues)) { -+ do -+ txq -= dev->real_num_tx_queues; -+ while (txq >= dev->real_num_tx_queues); -+ } -+ return txq; - } - - static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) --- -1.7.3.4 - diff --git a/patches.fixes/bridge-module-get-put.patch b/patches.fixes/bridge-module-get-put.patch deleted file mode 100644 index 53d42a8..0000000 --- a/patches.fixes/bridge-module-get-put.patch +++ /dev/null @@ -1,45 +0,0 @@ -From: jbeulich@novell.com -Subject: Module use count must be updated as bridges are created/destroyed -Patch-mainline: unknown -References: 267651 - -Otherwise 'modprobe -r' on a module having a dependency on bridge will -implicitly unload bridge, bringing down all connectivity that was using -bridges. 
- ---- - net/bridge/br_if.c | 9 +++++++++ - 1 file changed, 9 insertions(+) - ---- a/net/bridge/br_if.c -+++ b/net/bridge/br_if.c -@@ -291,6 +291,11 @@ int br_add_bridge(struct net *net, const - if (!dev) - return -ENOMEM; - -+ if (!try_module_get(THIS_MODULE)) { -+ free_netdev(dev); -+ return -ENOENT; -+ } -+ - rtnl_lock(); - if (strchr(dev->name, '%')) { - ret = dev_alloc_name(dev, dev->name); -@@ -309,6 +314,8 @@ int br_add_bridge(struct net *net, const - unregister_netdevice(dev); - out: - rtnl_unlock(); -+ if (ret) -+ module_put(THIS_MODULE); - return ret; - - out_free: -@@ -340,6 +347,8 @@ int br_del_bridge(struct net *net, const - del_br(netdev_priv(dev), NULL); - - rtnl_unlock(); -+ if (ret == 0) -+ module_put(THIS_MODULE); - return ret; - } - diff --git a/patches.fixes/cdc-phonet-handle-empty-phonet-header.patch b/patches.fixes/cdc-phonet-handle-empty-phonet-header.patch deleted file mode 100644 index cebf493..0000000 --- a/patches.fixes/cdc-phonet-handle-empty-phonet-header.patch +++ /dev/null @@ -1,74 +0,0 @@ -From 468c3f924f043cad7a04f4f4d5224a2c9bc886c1 Mon Sep 17 00:00:00 2001 -From: Jiri Slaby -Date: Sun, 13 Mar 2011 06:54:31 +0000 -Subject: NET: cdc-phonet, handle empty phonet header -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit -Git-commit: 468c3f924f043cad7a04f4f4d5224a2c9bc886c1 -Patch-mainline: yes -References: bnc#673992 - -Currently, for N 5800 XM I get: -cdc_phonet: probe of 1-6:1.10 failed with error -22 - -It's because phonet_header is empty. Extra altsetting looks like -there: -E 05 24 00 01 10 03 24 ab 05 24 06 0a 0b 04 24 fd .$....$..$....$. -E 00 . - -I don't see the header used anywhere so just check if the phonet -descriptor is there, not the structure itself. - -Signed-off-by: Jiri Slaby -Cc: Rémi Denis-Courmont -Cc: David S. Miller -Acked-by: Rémi Denis-Courmont -Signed-off-by: David S. 
Miller ---- - drivers/net/usb/cdc-phonet.c | 9 +++------ - 1 files changed, 3 insertions(+), 6 deletions(-) - -diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c -index 4cf4e36..f967913 100644 ---- a/drivers/net/usb/cdc-phonet.c -+++ b/drivers/net/usb/cdc-phonet.c -@@ -328,13 +328,13 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) - { - static const char ifname[] = "usbpn%d"; - const struct usb_cdc_union_desc *union_header = NULL; -- const struct usb_cdc_header_desc *phonet_header = NULL; - const struct usb_host_interface *data_desc; - struct usb_interface *data_intf; - struct usb_device *usbdev = interface_to_usbdev(intf); - struct net_device *dev; - struct usbpn_dev *pnd; - u8 *data; -+ int phonet = 0; - int len, err; - - data = intf->altsetting->extra; -@@ -355,10 +355,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) - (struct usb_cdc_union_desc *)data; - break; - case 0xAB: -- if (phonet_header || dlen < 5) -- break; -- phonet_header = -- (struct usb_cdc_header_desc *)data; -+ phonet = 1; - break; - } - } -@@ -366,7 +363,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) - len -= dlen; - } - -- if (!union_header || !phonet_header) -+ if (!union_header || !phonet) - return -EINVAL; - - data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0); --- -1.7.4.1 - diff --git a/patches.fixes/cpufreq_ondemand_performance_optimise_default_settings.patch b/patches.fixes/cpufreq_ondemand_performance_optimise_default_settings.patch deleted file mode 100644 index d30e884..0000000 --- a/patches.fixes/cpufreq_ondemand_performance_optimise_default_settings.patch +++ /dev/null @@ -1,65 +0,0 @@ -From: Thomas Renninger -Subject: CPUFREQ: ondemand: Limit default sampling rate to 300ms max. -References: bnc#464461 -Patch-Mainline: never, SLE11 only - -Modified for SP1 by Jiri Bohac - -HW cpufreq drivers (e.g. 
all non-acpi AMD) may report too high latency values. -The default sampling rate (how often the ondemand/conservative governor -checks for frequency adjustments) may therefore be much too high, -resulting in performance loss. - -Restrict default sampling rate to 300ms. 333ms sampling rate is field -tested with userspace governors, 300ms should be a fine maximum default -value for the ondemand kernel governor for all HW out there. - -Set default up_threshold to 40 on multi core systems. -This should avoid effects where two CPU intensive threads are waiting on -each other on separate cores. On a single core machine these would all be -processed on one core resulting in higher utilization of the one core. - ---- - drivers/cpufreq/cpufreq_ondemand.c | 24 ++++++++++++++++++++++++ - 1 file changed, 24 insertions(+) - ---- a/drivers/cpufreq/cpufreq_ondemand.c -+++ b/drivers/cpufreq/cpufreq_ondemand.c -@@ -35,6 +35,7 @@ - #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) - #define MIN_FREQUENCY_UP_THRESHOLD (11) - #define MAX_FREQUENCY_UP_THRESHOLD (100) -+#define MAX_DEFAULT_SAMPLING_RATE (300 * 1000U) - - /* - * The polling frequency of this governor depends on the capability of -@@ -736,6 +737,29 @@ static int cpufreq_governor_dbs(struct c - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); -+ /* -+ * Cut def_sampling rate to 300ms if it was above, -+ * still consider to not set it above latency -+ * transition * 100 -+ */ -+ if (dbs_tuners_ins.sampling_rate > MAX_DEFAULT_SAMPLING_RATE) { -+ dbs_tuners_ins.sampling_rate = -+ max(min_sampling_rate, MAX_DEFAULT_SAMPLING_RATE); -+ printk(KERN_INFO "CPUFREQ: ondemand sampling " -+ "rate set to %d ms\n", -+ dbs_tuners_ins.sampling_rate / 1000); -+ } -+ /* -+ * Be conservative in respect to performance. -+ * If an application calculates using two threads -+ * depending on each other, they will be run on several -+ * CPU cores resulting on 50% load on both. 
-+ * SLED might still want to prefer 80% up_threshold -+ * by default, but we cannot differ that here. -+ */ -+ if (num_online_cpus() > 1) -+ dbs_tuners_ins.up_threshold = -+ DEF_FREQUENCY_UP_THRESHOLD / 2; - dbs_tuners_ins.io_is_busy = should_io_be_busy(); - } - mutex_unlock(&dbs_mutex); diff --git a/patches.fixes/dm-mpath-reattach-dh b/patches.fixes/dm-mpath-reattach-dh deleted file mode 100644 index 11a98a3..0000000 --- a/patches.fixes/dm-mpath-reattach-dh +++ /dev/null @@ -1,29 +0,0 @@ -From: Hannes Reinecke -Subject: Reattach device handler for multipath devices -References: bnc#435688 -Patch-mainline: not yet - -The multipath daemon might have specified a different device_handler -than the one a device is attached to by default. -So we should try to re-attach with the user-specified device_handler -and only return an error if that fails. -And we should _not_ detach existing hardware handlers. This will -set the path to failed during failover. - -Signed-off-by: Hannes Reinecke list); -- if (m->hw_handler_name) -- scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); - dm_put_device(ti, pgpath->path.dev); - free_pgpath(pgpath); - } diff --git a/patches.fixes/dm-release-map_lock-before-set_disk_ro b/patches.fixes/dm-release-map_lock-before-set_disk_ro deleted file mode 100644 index 9dca021..0000000 --- a/patches.fixes/dm-release-map_lock-before-set_disk_ro +++ /dev/null @@ -1,37 +0,0 @@ -From: Nikanth Karthikesan -Subject: Release md->map_lock before set_disk_ro -Patch-mainline: No -References: bnc#556899 bnc#479784 - -Signed-off-by: Nikanth Karthikesan - -Calling set_disk_ro() with irqs disabled triggers a warning. - -set_disk_ro() can be called outside the -write_lock_irqsave(&md->map_lock)? 
And to get the -dm_table_get_mode(md->map), we just need to hold a reference -with dm_get_table() and dm_table_put() - ---- - drivers/md/dm.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/drivers/md/dm.c -+++ b/drivers/md/dm.c -@@ -2174,12 +2174,15 @@ static struct dm_table *__bind(struct ma - old_map = md->map; - md->map = t; - dm_table_set_restrictions(t, q, limits); -+ write_unlock_irqrestore(&md->map_lock, flags); -+ -+ dm_table_get(md->map); - if (!(dm_table_get_mode(t) & FMODE_WRITE)) { - set_disk_ro(md->disk, 1); - } else { - set_disk_ro(md->disk, 0); - } -- write_unlock_irqrestore(&md->map_lock, flags); -+ dm_table_put(md->map); - - return old_map; - } diff --git a/patches.fixes/dm-table-switch-to-readonly b/patches.fixes/dm-table-switch-to-readonly deleted file mode 100644 index 4aa14bd..0000000 --- a/patches.fixes/dm-table-switch-to-readonly +++ /dev/null @@ -1,90 +0,0 @@ -From: Hannes Reinecke -Subject: dm multipath devices are not getting created for readonly devices -References: bnc#382705 -Patch-mainline: not yet - -Currently we cannot create device-mapper tables for multipath devices -whenever they are read-only. -This patch modifies the device-mapper to set the 'READ-ONLY' flag -automatically whenever a read-only is added to the table. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/md/dm-table.c | 10 +++++++++- - drivers/md/dm.c | 18 ++++++++++++++++-- - 2 files changed, 25 insertions(+), 3 deletions(-) - ---- a/drivers/md/dm-table.c -+++ b/drivers/md/dm-table.c -@@ -465,11 +465,19 @@ static int __table_get_device(struct dm_ - dd->dm_dev.mode = mode; - dd->dm_dev.bdev = NULL; - -- if ((r = open_dev(dd, dev, t->md))) { -+ r = open_dev(dd, dev, t->md); -+ if (r == -EROFS) { -+ dd->dm_dev.mode &= ~FMODE_WRITE; -+ r = open_dev(dd, dev, t->md); -+ } -+ if (r) { - kfree(dd); - return r; - } - -+ if (dd->dm_dev.mode != mode) -+ t->mode = dd->dm_dev.mode; -+ - format_dev_t(dd->dm_dev.name, dev); - - atomic_set(&dd->count, 0); ---- a/drivers/md/dm.c -+++ b/drivers/md/dm.c -@@ -343,16 +343,25 @@ int dm_deleting_md(struct mapped_device - static int dm_blk_open(struct block_device *bdev, fmode_t mode) - { - struct mapped_device *md; -+ int retval = 0; - - spin_lock(&_minor_lock); - - md = bdev->bd_disk->private_data; -- if (!md) -+ if (!md) { -+ retval = -ENXIO; - goto out; -+ } - - if (test_bit(DMF_FREEING, &md->flags) || - dm_deleting_md(md)) { - md = NULL; -+ retval = -ENXIO; -+ goto out; -+ } -+ if (get_disk_ro(md->disk) && (mode & FMODE_WRITE)) { -+ md = NULL; -+ retval = -EROFS; - goto out; - } - -@@ -364,7 +373,7 @@ out: - out: - spin_unlock(&_minor_lock); - -- return md ? 
0 : -ENXIO; -+ return retval; - } - - static int dm_blk_close(struct gendisk *disk, fmode_t mode) -@@ -2165,6 +2174,11 @@ static struct dm_table *__bind(struct ma - old_map = md->map; - md->map = t; - dm_table_set_restrictions(t, q, limits); -+ if (!(dm_table_get_mode(t) & FMODE_WRITE)) { -+ set_disk_ro(md->disk, 1); -+ } else { -+ set_disk_ro(md->disk, 0); -+ } - write_unlock_irqrestore(&md->map_lock, flags); - - return old_map; diff --git a/patches.fixes/fix-nf_conntrack_slp b/patches.fixes/fix-nf_conntrack_slp deleted file mode 100644 index a548ac8..0000000 --- a/patches.fixes/fix-nf_conntrack_slp +++ /dev/null @@ -1,63 +0,0 @@ -From: Ludwig Nussel -Subject: make nf_conntrack_slp actually work -References: bnc#470963 -Patch-mainline: not yet, depends on patches.suse/netfilter-ip_conntrack_slp.patch - -Acked-by: Jeff Mahoney ---- - - net/netfilter/nf_conntrack_slp.c | 18 +++++++++++------- - 1 file changed, 11 insertions(+), 7 deletions(-) - ---- a/net/netfilter/nf_conntrack_slp.c -+++ b/net/netfilter/nf_conntrack_slp.c -@@ -47,15 +47,15 @@ static int help(struct sk_buff *skb, uns - struct nf_conn *ct, enum ip_conntrack_info ctinfo) - { - struct nf_conntrack_expect *exp; -- struct iphdr *iph = ip_hdr(skb); - struct rtable *rt = skb_rtable(skb); - struct in_device *in_dev; - __be32 mask = 0; -+ __be32 src = 0; - - /* we're only interested in locally generated packets */ - if (skb->sk == NULL) - goto out; -- if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) -+ if (rt == NULL || !(rt->rt_flags & (RTCF_MULTICAST|RTCF_BROADCAST))) - goto out; - if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) - goto out; -@@ -64,15 +64,18 @@ static int help(struct sk_buff *skb, uns - in_dev = __in_dev_get_rcu(rt->dst.dev); - if (in_dev != NULL) { - for_primary_ifa(in_dev) { -- if (ifa->ifa_broadcast == iph->daddr) { -- mask = ifa->ifa_mask; -- break; -- } -+ /* this is a hack as slp uses multicast we can't match -+ * the destination address to some broadcast address. 
So -+ * just take the first one. Better would be to install -+ * expectations for all addresses */ -+ mask = ifa->ifa_mask; -+ src = ifa->ifa_broadcast; -+ break; - } endfor_ifa(in_dev); - } - rcu_read_unlock(); - -- if (mask == 0) -+ if (mask == 0 || src == 0) - goto out; - - exp = nf_ct_expect_alloc(ct); -@@ -80,6 +83,7 @@ static int help(struct sk_buff *skb, uns - goto out; - - exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; -+ exp->tuple.src.u3.ip = src; - exp->tuple.src.u.udp.port = htons(SLP_PORT); - - exp->mask.src.u3.ip = mask; diff --git a/patches.fixes/flexcop-fix-registering-braindead-stupid-names b/patches.fixes/flexcop-fix-registering-braindead-stupid-names deleted file mode 100644 index b46e826..0000000 --- a/patches.fixes/flexcop-fix-registering-braindead-stupid-names +++ /dev/null @@ -1,43 +0,0 @@ -From: Kyle McMartin -Subject: flexcop: fix registering braindead stupid names -References: brc#575873 bnc#661429 -Patch-mainline: Unsubmitted by author - - This patch fixes an issue where the flexcop driver passes DRIVER_NAME to - request_irq, which ultimately sets up proc files. The invalid name - contains slashes so the proc file creation fails and we get a WARN_ON. 
- -Acked-by: Jeff Mahoney ---- - - drivers/media/dvb/b2c2/flexcop-pci.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - ---- a/drivers/media/dvb/b2c2/flexcop-pci.c -+++ b/drivers/media/dvb/b2c2/flexcop-pci.c -@@ -39,6 +39,7 @@ MODULE_PARM_DESC(debug, - - #define DRIVER_VERSION "0.1" - #define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver" -+#define FLEXCOP_MODULE_NAME "b2c2-flexcop" - #define DRIVER_AUTHOR "Patrick Boettcher " - - struct flexcop_pci { -@@ -299,7 +300,7 @@ static int flexcop_pci_init(struct flexc - return ret; - pci_set_master(fc_pci->pdev); - -- if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0) -+ if ((ret = pci_request_regions(fc_pci->pdev, FLEXCOP_MODULE_NAME)) != 0) - goto err_pci_disable_device; - - fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800); -@@ -313,7 +314,7 @@ static int flexcop_pci_init(struct flexc - pci_set_drvdata(fc_pci->pdev, fc_pci); - spin_lock_init(&fc_pci->irq_lock); - if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr, -- IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0) -+ IRQF_SHARED, FLEXCOP_MODULE_NAME, fc_pci)) != 0) - goto err_pci_iounmap; - - fc_pci->init_state |= FC_PCI_INIT; diff --git a/patches.fixes/fs-partitions-efi-c-corrupted-guid-partition-tables-can-cause-kernel-oops b/patches.fixes/fs-partitions-efi-c-corrupted-guid-partition-tables-can-cause-kernel-oops deleted file mode 100644 index b692fe3..0000000 --- a/patches.fixes/fs-partitions-efi-c-corrupted-guid-partition-tables-can-cause-kernel-oops +++ /dev/null @@ -1,54 +0,0 @@ -From: Timo Warns -Subject: fs/partitions/efi.c: corrupted GUID partition tables can cause kernel oops -References: bnc#687113 CVE-2011-1577 -Patch-mainline: Probably 2.6.39; In -mm already - -The kernel automatically evaluates partition tables of storage devices. -The code for evaluating GUID partitions (in fs/partitions/efi.c) contains -a bug that causes a kernel oops on certain corrupted GUID partition -tables. 
- -This bug has security impacts, because it allows, for example, to -prepare a storage device that crashes a kernel subsystem upon connecting -the device (e.g., a "USB Stick of (Partial) Death"). - - crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size)); - -computes a CRC32 checksum over gpt covering (*gpt)->header_size bytes. -There is no validation of (*gpt)->header_size before the efi_crc32 call. - -A corrupted partition table may have large values for (*gpt)->header_size. - In this case, the CRC32 computation access memory beyond the memory -allocated for gpt, which may cause a kernel heap overflow. - -Validate value of GUID partition table header size. - -Signed-off-by: Timo Warns -Cc: Matt Domsch -Cc: Eugene Teo -Cc: Dave Jones -Signed-off-by: Andrew Morton -Acked-by: Jeff Mahoney ---- - - fs/partitions/efi.c | 9 +++++++++ - 1 file changed, 9 insertions(+) - ---- a/fs/partitions/efi.c -+++ b/fs/partitions/efi.c -@@ -310,6 +310,15 @@ static int is_gpt_valid(struct parsed_pa - goto fail; - } - -+ /* Check the GUID Partition Table header size */ -+ if (le32_to_cpu((*gpt)->header_size) > -+ bdev_logical_block_size(state->bdev)) { -+ pr_debug("GUID Partition Table Header size is wrong: %u > %u\n", -+ le32_to_cpu((*gpt)->header_size), -+ bdev_logical_block_size(state->bdev)); -+ goto fail; -+ } -+ - /* Check the GUID Partition Table CRC */ - origcrc = le32_to_cpu((*gpt)->header_crc32); - (*gpt)->header_crc32 = 0; diff --git a/patches.fixes/grab-swap-token-oops b/patches.fixes/grab-swap-token-oops deleted file mode 100644 index 45031a2..0000000 --- a/patches.fixes/grab-swap-token-oops +++ /dev/null @@ -1,30 +0,0 @@ -From: Dean Roe -Subject: Prevent NULL pointer deref in grab_swap_token -References: 159260 -Patch-mainline: not yet - -grab_swap_token() assumes that the current process has an mm struct, -which is not true for kernel threads invoking get_user_pages(). 
Since -this should be extremely rare, just return from grab_swap_token() -without doing anything. - -Signed-off-by: Dean Roe -Acked-by: mason@suse.de -Acked-by: okir@suse.de - - - mm/thrash.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/mm/thrash.c -+++ b/mm/thrash.c -@@ -31,6 +31,9 @@ void grab_swap_token(struct mm_struct *m - int current_interval; - - global_faults++; -+ if (mm == NULL) -+ return; -+ - - current_interval = global_faults - mm->faultstamp; - diff --git a/patches.fixes/hfs-avoid-crash-in-hfs_bnode_create b/patches.fixes/hfs-avoid-crash-in-hfs_bnode_create deleted file mode 100644 index 46da711..0000000 --- a/patches.fixes/hfs-avoid-crash-in-hfs_bnode_create +++ /dev/null @@ -1,30 +0,0 @@ -From: Jeff Mahoney -Subject: hfs: avoid crash in hfs_bnode_create -Patch-mainline: not yet -References: bnc#552250 - - Commit 634725a92938b0f282b17cec0b007dca77adebd2 removed the BUG_ON - in hfs_bnode_create in hfsplus. This patch removes it from the hfs - version and avoids an fsfuzzer crash. 
- -Signed-off-by: Jeff Mahoney -Acked-by: Jeff Mahoney ---- - fs/hfs/bnode.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - ---- a/fs/hfs/bnode.c -+++ b/fs/hfs/bnode.c -@@ -413,7 +413,11 @@ struct hfs_bnode *hfs_bnode_create(struc - spin_lock(&tree->hash_lock); - node = hfs_bnode_findhash(tree, num); - spin_unlock(&tree->hash_lock); -- BUG_ON(node); -+ if (node) { -+ printk(KERN_CRIT "new node %u already hashed?\n", num); -+ WARN_ON(1); -+ return node; -+ } - node = __hfs_bnode_create(tree, num); - if (!node) - return ERR_PTR(-ENOMEM); diff --git a/patches.fixes/hid-add-support-for-Skycable-0x3f07-wireless-present.patch b/patches.fixes/hid-add-support-for-Skycable-0x3f07-wireless-present.patch deleted file mode 100644 index c376108..0000000 --- a/patches.fixes/hid-add-support-for-Skycable-0x3f07-wireless-present.patch +++ /dev/null @@ -1,102 +0,0 @@ -From: Jiri Kosina -Subject: [PATCH] HID: add support for Skycable 0x3f07 wireless presenter -References: bnc#681297 -Patch-mainline: not yet, queued in subsystem tree - -This device contains the very same bug in report descriptor as the -Ortek ones do (i.e. LogicalMinimum == 1, which is wrong for the key -array). - -As we have more reports for the Ortek devices, we are keeping the driver -name for now. Apparently there is a chip producer which sells chip with -this buggy descriptor to multiple vendors. Thus if such reports start -to come at highger frequency, we'll either have to rename the driver -accordingly, or come up with more generic workaround. - -Signed-off-by: Jiri Kosina ---- - drivers/hid/Kconfig | 9 +++++++-- - drivers/hid/hid-core.c | 1 + - drivers/hid/hid-ids.h | 3 +++ - drivers/hid/hid-ortek.c | 15 +++++++++++---- - 4 files changed, 22 insertions(+), 6 deletions(-) - ---- a/drivers/hid/Kconfig -+++ b/drivers/hid/Kconfig -@@ -325,10 +325,15 @@ config HID_NTRIG - Support for N-Trig touch screen. 
- - config HID_ORTEK -- tristate "Ortek PKB-1700/WKB-2000 wireless keyboard and mouse trackpad" -+ tristate "Ortek WKB-2000/Skycable wireless keyboard and mouse trackpad" - depends on USB_HID - ---help--- -- Support for Ortek PKB-1700/WKB-2000 wireless keyboard + mouse trackpad. -+ There are certain devices which have LogicalMaximum wrong in the keyboard -+ usage page of their report descriptor. The most prevailing ones so far -+ are manufactured by Ortek, thus the name of the driver. Currently -+ supported devices by this driver are -+ - Ortek WKB-2000 -+ - Skycable wireless presenter - - config HID_PANTHERLORD - tristate "Pantherlord/GreenAsia game controller" ---- a/drivers/hid/hid-core.c -+++ b/drivers/hid/hid-core.c -@@ -1411,6 +1411,7 @@ static const struct hid_device_id hid_ha - { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) }, - { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, - { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, - { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, - { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, ---- a/drivers/hid/hid-ids.h -+++ b/drivers/hid/hid-ids.h -@@ -514,6 +514,9 @@ - #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 - #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 - -+#define USB_VENDOR_ID_SKYCABLE 0x1223 -+#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 -+ - #define USB_VENDOR_ID_SONY 0x054c - #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b - #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268 ---- a/drivers/hid/hid-ortek.c -+++ b/drivers/hid/hid-ortek.c -@@ -1,9 +1,12 @@ --/* -- * HID driver for Ortek PKB-1700/WKB-2000 (wireless keyboard + mouse trackpad). 
-- * Fixes LogicalMaximum error in USB report description, see -- * http://bugzilla.kernel.org/show_bug.cgi?id=14787 -+/* HID driver for various devices which are apparently based on the same chipset -+ * from certain vendor which produces chips that contain wrong LogicalMaximum -+ * value in their HID report descriptor. Currently supported devices are: -+ * -+ * Ortek WKB-2000 -+ * Skycable wireless presenter - * - * Copyright (c) 2010 Johnathon Harris -+ * Copyright (c) 2011 Jiri Kosina - */ - - /* -@@ -25,6 +28,9 @@ static __u8 *ortek_report_fixup(struct h - if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { - hid_info(hdev, "Fixing up Ortek WKB-2000 report descriptor\n"); - rdesc[55] = 0x92; -+ } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { -+ hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n"); -+ rdesc[53] = 0x65; - } - return rdesc; - } -@@ -32,6 +38,7 @@ static __u8 *ortek_report_fixup(struct h - static const struct hid_device_id ortek_devices[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, - { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, - { } - }; - MODULE_DEVICE_TABLE(hid, ortek_devices); diff --git a/patches.fixes/ia64-configure-HAVE_UNSTABLE_SCHED_CLOCK-for-SGI_SN.patch b/patches.fixes/ia64-configure-HAVE_UNSTABLE_SCHED_CLOCK-for-SGI_SN.patch deleted file mode 100644 index e53df70..0000000 --- a/patches.fixes/ia64-configure-HAVE_UNSTABLE_SCHED_CLOCK-for-SGI_SN.patch +++ /dev/null @@ -1,44 +0,0 @@ -Date: Tue, 6 Jan 2009 10:27:41 -0600 -From: Dimitri Sivanich -To: linux-ia64@vger.kernel.org, Tony Luck , - Greg KH -Cc: linux-kernel@vger.kernel.org, - Peter Zijlstra , - Gregory Haskins , Nick Piggin , - Tony Luck , Robin Holt -Subject: configure HAVE_UNSTABLE_SCHED_CLOCK for SGI_SN systems -Patch-mainline: not yet - -Turn on CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 
for SGI_SN. - -SGI Altix has unsynchronized itc clocks. This results in rq->clock -occasionally being set to a time in the past by a remote cpu. - -Note that it is possible that this problem may exist for other ia64 -machines as well, based on the following comment for sched_clock() in -arch/ia64/kernel/head.S: - - * Return a CPU-local timestamp in nano-seconds. This timestamp is - * NOT synchronized across CPUs its return value must never be - * compared against the values returned on another CPU. The usage in - * kernel/sched.c ensures that. - - -Signed-off-by: Dimitri Sivanich -Signed-off-by: Gregory Haskins - ---- - - arch/ia64/Kconfig | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/ia64/Kconfig -+++ b/arch/ia64/Kconfig -@@ -548,6 +548,7 @@ config IA64_MC_ERR_INJECT - - config SGI_SN - def_bool y if (IA64_SGI_SN2 || IA64_GENERIC) -+ select HAVE_UNSTABLE_SCHED_CLOCK - - config IA64_ESI - bool "ESI (Extensible SAL Interface) support" diff --git a/patches.fixes/ia64-sparse-fixes.diff b/patches.fixes/ia64-sparse-fixes.diff deleted file mode 100644 index 7a7f7a3..0000000 --- a/patches.fixes/ia64-sparse-fixes.diff +++ /dev/null @@ -1,53 +0,0 @@ -From: Jan Blunck -Subject: ia64-kvm: fix sparse warnings -Patch-mainline: not yet - -This patch fixes some sparse warning about dubious one-bit signed bitfield. 
- -Signed-off-by: Jan Blunck ---- - arch/ia64/kvm/vti.h | 26 +++++++++++++------------- - 1 file changed, 13 insertions(+), 13 deletions(-) - ---- a/arch/ia64/kvm/vti.h -+++ b/arch/ia64/kvm/vti.h -@@ -83,13 +83,13 @@ - union vac { - unsigned long value; - struct { -- int a_int:1; -- int a_from_int_cr:1; -- int a_to_int_cr:1; -- int a_from_psr:1; -- int a_from_cpuid:1; -- int a_cover:1; -- int a_bsw:1; -+ unsigned int a_int:1; -+ unsigned int a_from_int_cr:1; -+ unsigned int a_to_int_cr:1; -+ unsigned int a_from_psr:1; -+ unsigned int a_from_cpuid:1; -+ unsigned int a_cover:1; -+ unsigned int a_bsw:1; - long reserved:57; - }; - }; -@@ -97,12 +97,12 @@ union vac { - union vdc { - unsigned long value; - struct { -- int d_vmsw:1; -- int d_extint:1; -- int d_ibr_dbr:1; -- int d_pmc:1; -- int d_to_pmd:1; -- int d_itm:1; -+ unsigned int d_vmsw:1; -+ unsigned int d_extint:1; -+ unsigned int d_ibr_dbr:1; -+ unsigned int d_pmc:1; -+ unsigned int d_to_pmd:1; -+ unsigned int d_itm:1; - long reserved:58; - }; - }; diff --git a/patches.fixes/input-add-acer-aspire-5710-to-nomux.patch b/patches.fixes/input-add-acer-aspire-5710-to-nomux.patch deleted file mode 100644 index 4baaa65..0000000 --- a/patches.fixes/input-add-acer-aspire-5710-to-nomux.patch +++ /dev/null @@ -1,30 +0,0 @@ -From: Jiri Kosina -Subject: Input: Add Acer Aspire 5710 to nomux blacklist -References: bnc#404881 -Patch-mainline: submitted - -Acer Aspire needs to be added to nomux blacklist, otherwise the touchpad -misbehaves. 
- -Signed-off-by: Jiri Kosina - ---- - drivers/input/serio/i8042-x86ia64io.h | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/drivers/input/serio/i8042-x86ia64io.h -+++ b/drivers/input/serio/i8042-x86ia64io.h -@@ -371,6 +371,13 @@ static const struct dmi_system_id __init - }, - }, - { -+ /* Acer Aspire 5710 */ -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"), -+ }, -+ }, -+ { - /* Gericom Bellagio */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), diff --git a/patches.fixes/iwlwifi-fix-tx-power-configuration-on-3945-and-4965-devices b/patches.fixes/iwlwifi-fix-tx-power-configuration-on-3945-and-4965-devices deleted file mode 100644 index 36ad6e2..0000000 --- a/patches.fixes/iwlwifi-fix-tx-power-configuration-on-3945-and-4965-devices +++ /dev/null @@ -1,60 +0,0 @@ -From: Reinette Chatre -Date: Tue, 29 Jun 2010 14:24:51 -0700 -Subject: [PATCH] iwlwifi: fix TX power configuration on 3945 and 4965 devices -Patch-mainline: Probably 2.6.35 -References: bnc#619440 bnc#610421 - -When receiving a TX power change request from mac80211 the functions -servicing these requests for 3945 and 4965 uses information in the active -RXON. In iwl_mac_config the staging RXON is prepared based on various -directions from mac80211 and only at the end is the staging RXON committed -to the device and the active RXON updated. - -By servicing the TX power change request while servicing the other requests -that modify the staging RXON we loose the new information provided by mac80211. - -Fix this by servicing the TX power change request after the RXON has been committed -to the device and active RXON thus contains accurate information. - - -21 Jan 2011 jeffm: -This patch may have been superced by 2295c66b68ae160dde2e6e2dc4f3061105153bfc -but needs review. 
- -Signed-off-by: Reinette Chatre -Acked-by: Jeff Mahoney ---- - drivers/net/wireless/iwlwifi/iwl-core.c | 15 ++++++++------- - 1 file changed, 8 insertions(+), 7 deletions(-) - ---- a/drivers/net/wireless/iwlwifi/iwl-core.c -+++ b/drivers/net/wireless/iwlwifi/iwl-core.c -@@ -2160,13 +2160,6 @@ int iwl_mac_config(struct ieee80211_hw * - IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); - } - -- if (changed & IEEE80211_CONF_CHANGE_POWER) { -- IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", -- priv->tx_power_user_lmt, conf->power_level); -- -- iwl_set_tx_power(priv, conf->power_level, false); -- } -- - if (!iwl_is_ready(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); - goto out; -@@ -2181,6 +2174,14 @@ int iwl_mac_config(struct ieee80211_hw * - "Not re-sending same RXON configuration.\n"); - } - -+ if (changed & IEEE80211_CONF_CHANGE_POWER) { -+ IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", -+ priv->tx_power_user_lmt, conf->power_level); -+ -+ iwl_set_tx_power(priv, conf->power_level, false); -+ } -+ -+ - out: - IWL_DEBUG_MAC80211(priv, "leave\n"); - mutex_unlock(&priv->mutex); diff --git a/patches.fixes/kvm-ioapic.patch b/patches.fixes/kvm-ioapic.patch deleted file mode 100644 index 6993ddb..0000000 --- a/patches.fixes/kvm-ioapic.patch +++ /dev/null @@ -1,21 +0,0 @@ -From: agraf@suse.de -Subject: Ignore apic polarity -Patch-mainline: unknown -References: bnc#556564 - ---- - virt/kvm/ioapic.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/virt/kvm/ioapic.c -+++ b/virt/kvm/ioapic.c -@@ -202,7 +202,8 @@ int kvm_ioapic_set_irq(struct kvm_ioapic - old_irr = ioapic->irr; - if (irq >= 0 && irq < IOAPIC_NUM_PINS) { - entry = ioapic->redirtbl[irq]; -- level ^= entry.fields.polarity; -+// polarity is always active high in qemu -+// level ^= entry.fields.polarity; - if (!level) - ioapic->irr &= ~mask; - else { diff --git a/patches.fixes/kvm-macos.patch b/patches.fixes/kvm-macos.patch deleted file mode 100644 index 
d6534af..0000000 --- a/patches.fixes/kvm-macos.patch +++ /dev/null @@ -1,73 +0,0 @@ -From: agraf@suse.de -Subject: Implement some missing intercepts so osx doesn't blow up -Patch-mainline: unknown -References: bnc#556564 - ---- - arch/x86/kvm/svm.c | 20 ++++++++++++++++++-- - arch/x86/kvm/x86.c | 4 +++- - 2 files changed, 21 insertions(+), 3 deletions(-) - ---- a/arch/x86/kvm/svm.c -+++ b/arch/x86/kvm/svm.c -@@ -2303,6 +2303,22 @@ static int skinit_interception(struct vc - return 1; - } - -+static int monitor_interception(struct vcpu_svm *svm) -+{ -+ svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; -+ skip_emulated_instruction(&svm->vcpu); -+ -+ return 1; -+} -+ -+static int mwait_interception(struct vcpu_svm *svm) -+{ -+ svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; -+ skip_emulated_instruction(&svm->vcpu); -+ -+ return kvm_emulate_halt(&svm->vcpu); -+} -+ - static int invalid_op_interception(struct vcpu_svm *svm) - { - kvm_queue_exception(&svm->vcpu, UD_VECTOR); -@@ -2722,8 +2738,8 @@ static int (*svm_exit_handlers[])(struct - [SVM_EXIT_CLGI] = clgi_interception, - [SVM_EXIT_SKINIT] = skinit_interception, - [SVM_EXIT_WBINVD] = emulate_on_interception, -- [SVM_EXIT_MONITOR] = invalid_op_interception, -- [SVM_EXIT_MWAIT] = invalid_op_interception, -+ [SVM_EXIT_MONITOR] = monitor_interception, -+ [SVM_EXIT_MWAIT] = mwait_interception, - [SVM_EXIT_XSETBV] = xsetbv_interception, - [SVM_EXIT_NPF] = pf_interception, - }; ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -1254,6 +1254,7 @@ int kvm_set_msr_common(struct kvm_vcpu * - case MSR_VM_HSAVE_PA: - case MSR_AMD64_PATCH_LOADER: - break; -+ case 0xe2: - case 0x200 ... 
0x2ff: - return set_msr_mtrr(vcpu, msr, data); - case MSR_IA32_APICBASE: -@@ -1512,6 +1513,7 @@ int kvm_get_msr_common(struct kvm_vcpu * - case MSR_K8_INT_PENDING_MSG: - case MSR_AMD64_NB_CFG: - case MSR_FAM10H_MMIO_CONF_BASE: -+ case 0xe2: - data = 0; - break; - case MSR_MTRRcap: -@@ -1985,7 +1987,7 @@ static void do_cpuid_ent(struct kvm_cpui - 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW); - /* cpuid 1.ecx */ - const u32 kvm_supported_word4_x86_features = -- F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ | -+ F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64 */ | F(MWAIT) | - 0 /* DS-CPL, VMX, SMX, EST */ | - 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | - 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ | diff --git a/patches.fixes/media-video-sn9c102-world-wirtable-sysfs-files b/patches.fixes/media-video-sn9c102-world-wirtable-sysfs-files deleted file mode 100644 index 45c1a75..0000000 --- a/patches.fixes/media-video-sn9c102-world-wirtable-sysfs-files +++ /dev/null @@ -1,37 +0,0 @@ -From: Vasiliy Kulikov -Date: Fri, 4 Feb 2011 09:23:33 -0300 -Subject: [media] video: sn9c102: world-wirtable sysfs files -Patch-mainline: v2.6.39-rc2 -Git-commit: 14ddc3188d50855ae2a419a6aced995e2834e5d4 -Introduced-by: v2.6.8-rc2 -References: bnc#673934 - -Don't allow everybody to change video settings. 
- -Signed-off-by: Vasiliy Kulikov -Acked-by: Mauro Carvalho Chehab -Acked-by: Luca Risolia -Signed-off-by: Mauro Carvalho Chehab -Acked-by: Jeff Mahoney ---- - drivers/media/video/sn9c102/sn9c102_core.c | 6 +++--- - 1 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c -index 84984f6..ce56a1c 100644 ---- a/drivers/media/video/sn9c102/sn9c102_core.c -+++ b/drivers/media/video/sn9c102/sn9c102_core.c -@@ -1430,9 +1430,9 @@ static DEVICE_ATTR(i2c_reg, S_IRUGO | S_IWUSR, - sn9c102_show_i2c_reg, sn9c102_store_i2c_reg); - static DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR, - sn9c102_show_i2c_val, sn9c102_store_i2c_val); --static DEVICE_ATTR(green, S_IWUGO, NULL, sn9c102_store_green); --static DEVICE_ATTR(blue, S_IWUGO, NULL, sn9c102_store_blue); --static DEVICE_ATTR(red, S_IWUGO, NULL, sn9c102_store_red); -+static DEVICE_ATTR(green, S_IWUSR, NULL, sn9c102_store_green); -+static DEVICE_ATTR(blue, S_IWUSR, NULL, sn9c102_store_blue); -+static DEVICE_ATTR(red, S_IWUSR, NULL, sn9c102_store_red); - static DEVICE_ATTR(frame_header, S_IRUGO, sn9c102_show_frame_header, NULL); - - - diff --git a/patches.fixes/misdn-add-support-for-group-membership-check b/patches.fixes/misdn-add-support-for-group-membership-check deleted file mode 100644 index 29022c1..0000000 --- a/patches.fixes/misdn-add-support-for-group-membership-check +++ /dev/null @@ -1,69 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH] mISDN: Add support for group membership check -References: bnc#564423 -Patch-mainline: Unsubmitted, needs upstream consensus - - This patch adds a module parameter to allow a group access to the - mISDN devices. Otherwise, unpriviledged users on systems with ISDN hardware - have the ability to dial out, potentially causing expensive bills. 
- -Based on a different implementation by Patrick Koppen - -Acked-by: Jeff Mahoney ---- - - drivers/isdn/mISDN/core.c | 3 +++ - drivers/isdn/mISDN/core.h | 1 + - drivers/isdn/mISDN/socket.c | 8 ++++++++ - 3 files changed, 12 insertions(+) - ---- a/drivers/isdn/mISDN/core.c -+++ b/drivers/isdn/mISDN/core.c -@@ -21,10 +21,13 @@ - #include "core.h" - - static u_int debug; -+u_int misdn_permitted_gid; - - MODULE_AUTHOR("Karsten Keil"); - MODULE_LICENSE("GPL"); - module_param(debug, uint, S_IRUGO | S_IWUSR); -+module_param_named(gid, misdn_permitted_gid, uint, 0); -+MODULE_PARM_DESC(gid, "Unix group for accessing misdn socket (default 0)"); - - static u64 device_ids; - #define MAX_DEVICE_ID 63 ---- a/drivers/isdn/mISDN/core.h -+++ b/drivers/isdn/mISDN/core.h -@@ -17,6 +17,7 @@ - - extern struct mISDNdevice *get_mdevice(u_int); - extern int get_mdevice_count(void); -+extern u_int misdn_permitted_gid; - - /* stack status flag */ - #define mISDN_STACK_ACTION_MASK 0x0000ffff ---- a/drivers/isdn/mISDN/socket.c -+++ b/drivers/isdn/mISDN/socket.c -@@ -608,6 +608,10 @@ data_sock_create(struct net *net, struct - { - struct sock *sk; - -+ if(!capable(CAP_SYS_ADMIN) && (misdn_permitted_gid != current_gid()) -+ && (!in_group_p(misdn_permitted_gid))) -+ return -EPERM; -+ - if (sock->type != SOCK_DGRAM) - return -ESOCKTNOSUPPORT; - -@@ -690,6 +694,10 @@ base_sock_ioctl(struct socket *sock, uns - case IMSETDEVNAME: - { - struct mISDN_devrename dn; -+ if(!capable(CAP_SYS_ADMIN) -+ && (misdn_permitted_gid != current_gid()) -+ && (!in_group_p(misdn_permitted_gid))) -+ return -EPERM; - if (copy_from_user(&dn, (void __user *)arg, - sizeof(dn))) { - err = -EFAULT; diff --git a/patches.fixes/netfilter-implement-rfc-1123-for-ftp-conntrack b/patches.fixes/netfilter-implement-rfc-1123-for-ftp-conntrack deleted file mode 100644 index c3a3776..0000000 --- a/patches.fixes/netfilter-implement-rfc-1123-for-ftp-conntrack +++ /dev/null @@ -1,104 +0,0 @@ -From: Jeff Mahoney -Subject: netfilter: 
Implement RFC 1123 for FTP conntrack -References: bnc#466279 -Patch-mainline: Submitted via http://bugzilla.netfilter.org/show_bug.cgi?id=574 23 Jan 2011 - - The FTP conntrack code currently only accepts the following format for - the 227 response for PASV: - 227 Entering Passive Mode (148,100,81,40,31,161). - - It doesn't accept the following format from an obscure server: - 227 Data transfer will passively listen to 67,218,99,134,50,144 - - From RFC 1123: - The format of the 227 reply to a PASV command is not - well standardized. In particular, an FTP client cannot - assume that the parentheses shown on page 40 of RFC-959 - will be present (and in fact, Figure 3 on page 43 omits - them). Therefore, a User-FTP program that interprets - the PASV reply must scan the reply for the first digit - of the host and port numbers. - - This patch adds support for the RFC 1123 clarification by: - - Allowing a search filter to specify NUL as the terminator so that - try_number will return successfully if the array of numbers has been - filled when an unexpected character is encountered. - - Using space as the separator for the 227 reply and then scanning for - the first digit of the number sequence. The number sequence is parsed - out using the existing try_rfc959 but with a NUL terminator. 
- - Tracked in: https://bugzilla.novell.com/show_bug.cgi?id=466279 - -Reported-by: Mark Post -Signed-off-by: Jeff Mahoney ---- - net/netfilter/nf_conntrack_ftp.c | 36 +++++++++++++++++++++++++++++++----- - 1 file changed, 31 insertions(+), 5 deletions(-) - ---- a/net/netfilter/nf_conntrack_ftp.c -+++ b/net/netfilter/nf_conntrack_ftp.c -@@ -53,6 +53,7 @@ unsigned int (*nf_nat_ftp_hook)(struct s - EXPORT_SYMBOL_GPL(nf_nat_ftp_hook); - - static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char); -+static int try_rfc1123(const char *, size_t, struct nf_conntrack_man *, char); - static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char); - static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *, - char); -@@ -87,10 +88,10 @@ static struct ftp_search { - { - .pattern = "227 ", - .plen = sizeof("227 ") - 1, -- .skip = '(', -- .term = ')', -+ .skip = ' ', -+ .term = '\0', - .ftptype = NF_CT_FTP_PASV, -- .getnum = try_rfc959, -+ .getnum = try_rfc1123, - }, - { - .pattern = "229 ", -@@ -129,8 +130,9 @@ static int try_number(const char *data, - i++; - else { - /* Unexpected character; true if it's the -- terminator and we're finished. */ -- if (*data == term && i == array_size - 1) -+ terminator (or we don't care about one) -+ and we're finished. */ -+ if ((*data == term || !term) && i == array_size - 1) - return len; - - pr_debug("Char %u (got %u nums) `%u' unexpected\n", -@@ -160,6 +162,30 @@ static int try_rfc959(const char *data, - return length; - } - -+/* -+ * From RFC 1123: -+ * The format of the 227 reply to a PASV command is not -+ * well standardized. In particular, an FTP client cannot -+ * assume that the parentheses shown on page 40 of RFC-959 -+ * will be present (and in fact, Figure 3 on page 43 omits -+ * them). Therefore, a User-FTP program that interprets -+ * the PASV reply must scan the reply for the first digit -+ * of the host and port numbers. 
-+ */ -+static int try_rfc1123(const char *data, size_t dlen, -+ struct nf_conntrack_man *cmd, char term) -+{ -+ int i; -+ for (i = 0; i < dlen; i++) -+ if (isdigit(data[i])) -+ break; -+ -+ if (i == dlen) -+ return 0; -+ -+ return try_rfc959(data + i, dlen - i, cmd, 0); -+} -+ - /* Grab port: number up to delimiter */ - static int get_port(const char *data, int start, size_t dlen, char delim, - __be16 *port) diff --git a/patches.fixes/nfs-acl-caching.diff b/patches.fixes/nfs-acl-caching.diff deleted file mode 100644 index 3121700..0000000 --- a/patches.fixes/nfs-acl-caching.diff +++ /dev/null @@ -1,46 +0,0 @@ -From: Andreas Gruenbacher -Subject: "No acl" entry put in client-side acl cache instead of "not cached" -References: 171059 - -When the acl of a file is not cached and only the default acl of that -file is requested, a NULL "no acl" entry is put in the client-side acl -cache of nfs instead of ERR_PTR(-EAGAIN) "not cached". - -Signed-off-by: Andreas Gruenbacher - -Index: linux-2.6.16/fs/nfs/nfs3acl.c -=================================================================== ---- linux-2.6.16.orig/fs/nfs/nfs3acl.c -+++ linux-2.6.16/fs/nfs/nfs3acl.c -@@ -172,8 +172,10 @@ static void nfs3_cache_acls(struct inode - inode->i_ino, acl, dfacl); - spin_lock(&inode->i_lock); - __nfs3_forget_cached_acls(NFS_I(inode)); -- nfsi->acl_access = posix_acl_dup(acl); -- nfsi->acl_default = posix_acl_dup(dfacl); -+ if (!IS_ERR(acl)) -+ nfsi->acl_access = posix_acl_dup(acl); -+ if (!IS_ERR(dfacl)) -+ nfsi->acl_default = posix_acl_dup(dfacl); - spin_unlock(&inode->i_lock); - } - -@@ -250,7 +252,9 @@ struct posix_acl *nfs3_proc_getacl(struc - res.acl_access = NULL; - } - } -- nfs3_cache_acls(inode, res.acl_access, res.acl_default); -+ nfs3_cache_acls(inode, -+ (res.mask & NFS_ACL) ? res.acl_access : ERR_PTR(-EINVAL), -+ (res.mask & NFS_DFACL) ? 
res.acl_default : ERR_PTR(-EINVAL)); - - switch(type) { - case ACL_TYPE_ACCESS: -@@ -321,6 +325,7 @@ static int nfs3_proc_setacls(struct inod - switch (status) { - case 0: - status = nfs_refresh_inode(inode, &fattr); -+ nfs3_cache_acls(inode, acl, dfacl); - break; - case -EPFNOSUPPORT: - case -EPROTONOSUPPORT: diff --git a/patches.fixes/nfs-adaptive-readdir-plus b/patches.fixes/nfs-adaptive-readdir-plus deleted file mode 100644 index cc8f410..0000000 --- a/patches.fixes/nfs-adaptive-readdir-plus +++ /dev/null @@ -1,80 +0,0 @@ -From: NeilBrown -Subject: Make selection of 'readdir-plus' adapt to usage patterns. -Patch-mainline: not yet -References: bnc#678123 - -While the use of READDIRPLUS is significantly more efficient than -READDIR followed by many GETATTR calls, it is still less efficient -than just READDIR if the attributes are not required. - -We can get a hint as to whether the application requires attr information -by looking at whether any ->getattr calls are made between -->readdir calls. -If there are any, then getting the attributes seems to be worth while. - -This patch tracks whether there have been recent getattr calls on -children of a directory and uses that information to selectively -disable READDIRPLUS on that directory. - -The first 'readdir' call is always served using READDIRPLUS. -Subsequent calls only use READDIRPLUS if there was a getattr on a child -in the mean time. - -The locking of ->d_parent access needs to be reviewed. -As the bit is simply a hint, it isn't critical that it is set -on the "correct" parent if a rename is happening, but it is -critical that the 'set' doesn't set a bit in something that -isn't even an inode any more. 
- -Acked-by: NeilBrown -Signed-off-by: Neil Brown - ---- - fs/nfs/dir.c | 3 +++ - fs/nfs/inode.c | 9 +++++++++ - include/linux/nfs_fs.h | 4 ++++ - 3 files changed, 16 insertions(+) - ---- linux-2.6.37-openSUSE-11.4.orig/fs/nfs/dir.c -+++ linux-2.6.37-openSUSE-11.4/fs/nfs/dir.c -@@ -802,6 +802,9 @@ static int nfs_readdir(struct file *filp - desc->dir_cookie = &nfs_file_open_context(filp)->dir_cookie; - desc->decode = NFS_PROTO(inode)->decode_dirent; - desc->plus = NFS_USE_READDIRPLUS(inode); -+ if (filp->f_pos > 0 && !test_bit(NFS_INO_SEEN_GETATTR, &NFS_I(inode)->flags)) -+ desc->plus = 0; -+ clear_bit(NFS_INO_SEEN_GETATTR, &NFS_I(inode)->flags); - - nfs_block_sillyrename(dentry); - res = nfs_revalidate_mapping(inode, filp->f_mapping); ---- linux-2.6.37-openSUSE-11.4.orig/fs/nfs/inode.c -+++ linux-2.6.37-openSUSE-11.4/fs/nfs/inode.c -@@ -500,6 +500,15 @@ int nfs_getattr(struct vfsmount *mnt, st - struct inode *inode = dentry->d_inode; - int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; - int err; -+ struct dentry *p; -+ struct inode *pi; -+ -+ rcu_read_lock(); -+ p = dentry->d_parent; -+ pi = rcu_dereference(p)->d_inode; -+ if (pi && !test_bit(NFS_INO_SEEN_GETATTR, &NFS_I(pi)->flags)) -+ set_bit(NFS_INO_SEEN_GETATTR, &NFS_I(pi)->flags); -+ rcu_read_unlock(); - - /* Flush out writes to the server in order to update c/mtime. 
*/ - if (S_ISREG(inode->i_mode)) { ---- linux-2.6.37-openSUSE-11.4.orig/include/linux/nfs_fs.h -+++ linux-2.6.37-openSUSE-11.4/include/linux/nfs_fs.h -@@ -220,6 +220,10 @@ struct nfs_inode { - #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ - #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ - #define NFS_INO_COMMIT (7) /* inode is committing unstable writes */ -+#define NFS_INO_SEEN_GETATTR (8) /* flag to track if app is calling -+ * getattr in a directory during -+ * readdir -+ */ - - static inline struct nfs_inode *NFS_I(const struct inode *inode) - { diff --git a/patches.fixes/nfs-slot-table-alloc b/patches.fixes/nfs-slot-table-alloc deleted file mode 100644 index 14d3004..0000000 --- a/patches.fixes/nfs-slot-table-alloc +++ /dev/null @@ -1,31 +0,0 @@ -From: Michal Hocko -Subject: Don't fail allocations for the slot table when mounting an NFS filesystem -Patch-mainline: no -References: bnc#519820 - -When the *_slot_table_entries exceeds 111, the slot_table_size -exceeds 32K and an order-4 allocation is forced. This does not -retry nearly as much as order-3 so failure is more likely. -But mount and autofs in particular doesn't cope well with failure. -So force __GFP_REPEAT - the assumption is that people will only -set the slot_table_size sysctl large on a machine with plenty -of memory, so this should not block indefinitely. 
- -Acked-by: Neil Brown -Signed-off-by: Neil Brown - ---- - net/sunrpc/xprtsock.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/net/sunrpc/xprt.c -+++ b/net/sunrpc/xprt.c -@@ -2266,7 +2266,7 @@ static struct rpc_xprt *xs_setup_xprt(st - kref_init(&xprt->kref); - - xprt->max_reqs = max_req; -- xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); -+ xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL | __GFP_REPEAT); - if (xprt->slot == NULL) - goto out_free; - diff --git a/patches.fixes/nfsd-06-sunrpc-cache-retry-cache-lookups-that-return-ETIMEDO.patch b/patches.fixes/nfsd-06-sunrpc-cache-retry-cache-lookups-that-return-ETIMEDO.patch deleted file mode 100644 index a1442a4..0000000 --- a/patches.fixes/nfsd-06-sunrpc-cache-retry-cache-lookups-that-return-ETIMEDO.patch +++ /dev/null @@ -1,127 +0,0 @@ -Patch-mainline: submitted 04aug2009 -References: bnc#498708 -From: NeilBrown -Date: Tue, 4 Aug 2009 15:06:38 +1000 -Subject: [PATCH 08/12] sunrpc/cache: retry cache lookups that return -ETIMEDOUT - -If cache_check returns -ETIMEDOUT, then the cache item is not -up-to-date, but there is no pending upcall. -This could mean the data is not available, or it could mean that the -good data has been stored in a new cache item. - -So re-do the lookup and if that returns a new item, proceed using that -item. 
- -Signed-off-by: NeilBrown - ---- - fs/nfsd/export.c | 18 ++++++++++++++++++ - net/sunrpc/svcauth_unix.c | 23 ++++++++++++++++++++--- - 2 files changed, 38 insertions(+), 3 deletions(-) - ---- a/fs/nfsd/export.c -+++ b/fs/nfsd/export.c -@@ -794,9 +794,18 @@ exp_find_key(svc_client *clp, int fsid_t - memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); - - ek = svc_expkey_lookup(&key); -+ again: - if (ek == NULL) - return ERR_PTR(-ENOMEM); - err = cache_check(&svc_expkey_cache, &ek->h, reqp); -+ if (err == -ETIMEDOUT) { -+ struct svc_expkey *prev_ek = ek; -+ ek = svc_expkey_lookup(&key); -+ if (ek != prev_ek) -+ goto again; -+ if (ek) -+ cache_put(&ek->h, &svc_expkey_cache); -+ } - if (err) - return ERR_PTR(err); - return ek; -@@ -866,9 +875,18 @@ static svc_export *exp_get_by_name(svc_c - key.ex_path = *path; - - exp = svc_export_lookup(&key); -+ retry: - if (exp == NULL) - return ERR_PTR(-ENOMEM); - err = cache_check(&svc_export_cache, &exp->h, reqp); -+ if (err == -ETIMEDOUT) { -+ struct svc_export *prev_exp = exp; -+ exp = svc_export_lookup(&key); -+ if (exp != prev_exp) -+ goto retry; -+ if (exp) -+ cache_put(&exp->h, &svc_export_cache); -+ } - if (err) - return ERR_PTR(err); - return exp; ---- a/net/sunrpc/svcauth_unix.c -+++ b/net/sunrpc/svcauth_unix.c -@@ -663,13 +663,14 @@ static struct unix_gid *unix_gid_lookup( - - static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp) - { -- struct unix_gid *ug; -+ struct unix_gid *ug, *prevug; - struct group_info *gi; - int ret; - - ug = unix_gid_lookup(uid); - if (!ug) - return ERR_PTR(-EAGAIN); -+retry: - ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle); - switch (ret) { - case -ENOENT: -@@ -678,6 +679,13 @@ static struct group_info *unix_gid_find( - gi = get_group_info(ug->gi); - cache_put(&ug->h, &unix_gid_cache); - return gi; -+ case -ETIMEDOUT: -+ prevug = ug; -+ ug = unix_gid_lookup(uid); -+ if (ug != prevug) -+ goto retry; -+ if (ug) -+ cache_put(&ug->h, &unix_gid_cache); - default: 
- return ERR_PTR(-EAGAIN); - } -@@ -688,7 +696,7 @@ svcauth_unix_set_client(struct svc_rqst - { - struct sockaddr_in *sin; - struct sockaddr_in6 *sin6, sin6_storage; -- struct ip_map *ipm; -+ struct ip_map *ipm, *prev_ipm; - struct group_info *gi; - struct svc_cred *cred = &rqstp->rq_cred; - -@@ -714,14 +722,23 @@ svcauth_unix_set_client(struct svc_rqst - ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class, - &sin6->sin6_addr); - -+ retry: - if (ipm == NULL) - return SVC_DENIED; - - switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) { - default: - BUG(); -- case -EAGAIN: - case -ETIMEDOUT: -+ prev_ipm = ipm; -+ ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class, -+ &sin6->sin6_addr); -+ if (ipm != prev_ipm) -+ goto retry; -+ if (ipm) -+ cache_put(&ipm->h, &ip_map_cache); -+ -+ case -EAGAIN: - return SVC_DROP; - case -ENOENT: - return SVC_DENIED; diff --git a/patches.fixes/novfs-LFS-initialization b/patches.fixes/novfs-LFS-initialization deleted file mode 100644 index 2945305..0000000 --- a/patches.fixes/novfs-LFS-initialization +++ /dev/null @@ -1,25 +0,0 @@ -From: Sankar P -Subject: fs: novfs: Initialize super-block with standard macros -Patch-mainline: no - -Initialize the super block's maxbytes with MAX_LFS_FILESIZE macro. 
- -Signed-off-by: Sankar P -Signed-off-by: Samrat Kannikar -Acked-by: Jan Kara - ---- - fs/novfs/inode.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -3825,7 +3825,7 @@ int novfs_fill_super(struct super_block - - SB->s_blocksize = PAGE_CACHE_SIZE; - SB->s_blocksize_bits = PAGE_CACHE_SHIFT; -- SB->s_maxbytes = 0xFFFFFFFFFFFFFFFFULL; /* Max file size */ -+ SB->s_maxbytes = MAX_LFS_FILESIZE; /* Max file size */ - SB->s_op = &novfs_ops; - SB->s_flags |= (MS_NODIRATIME | MS_NODEV | MS_POSIXACL); - SB->s_magic = NOVFS_MAGIC; diff --git a/patches.fixes/novfs-bdi-init.diff b/patches.fixes/novfs-bdi-init.diff deleted file mode 100644 index 2c3e274..0000000 --- a/patches.fixes/novfs-bdi-init.diff +++ /dev/null @@ -1,54 +0,0 @@ -From: Sankar P -Subject: novfs: backing device info initialization -References: bnc#623472 -Patch-mainline: no - -The patch initializes and destroys the backing device info struct -for the novfs properly. Fixes an OOPS as well. 
- -Acked-by: Jan Kara -Acked-by: Sankar P -Signed-off-by: Anders Johansson - ---- - fs/novfs/inode.c | 15 +++++++++++++++ - 1 file changed, 15 insertions(+) - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -3977,6 +3977,17 @@ int __init init_novfs(void) - inHAX = 0; - inHAXTime = get_nanosecond_time(); - -+ retCode = bdi_init(&novfs_backing_dev_info); -+ -+ if(!retCode) -+ retCode = bdi_register(&novfs_backing_dev_info, NULL, "novfs-map"); -+ if (retCode) { -+ bdi_destroy(&novfs_backing_dev_info); -+ goto bdi_fail; -+ } -+ -+ -+ - retCode = novfs_proc_init(); - - novfs_profile_init(); -@@ -3992,6 +4003,8 @@ int __init init_novfs(void) - novfs_scope_exit(); - } - } -+ -+bdi_fail: - return (retCode); - } - -@@ -4007,6 +4020,8 @@ void __exit exit_novfs(void) - kfree(novfs_current_mnt); - novfs_current_mnt = NULL; - } -+ -+ bdi_destroy(&novfs_backing_dev_info); - } - - int novfs_lock_inode_cache(struct inode *i) diff --git a/patches.fixes/novfs-dentry-cache-limit.patch b/patches.fixes/novfs-dentry-cache-limit.patch deleted file mode 100644 index d398dfa..0000000 --- a/patches.fixes/novfs-dentry-cache-limit.patch +++ /dev/null @@ -1,46 +0,0 @@ -From: Samrat Kannikar -Subject: novfs: Remove dcache count restricting code -References: bnc#576026 -Patch-mainline: no - -Attached patch removes the code which restricts the -number of dir_cache entries, that are maintained by novfs - -Signed-off-by: Samrat Kannikar -Acked-by: Jan Kara - ---- - fs/novfs/inode.c | 15 --------------- - 1 file changed, 15 deletions(-) - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -4345,8 +4345,6 @@ int novfs_add_inode_entry(struct inode * - struct inode_data *id; - struct novfs_dir_cache *new; - int retVal = -ENOMEM; -- struct novfs_dir_cache *todel; -- struct list_head *todeltmp; - - //SClark - DbgPrint("i: %p", i); -@@ -4382,19 +4380,6 @@ int novfs_add_inode_entry(struct inode * - memcpy(new->name, name->name, name->len); - new->name[new->nameLen] = '\0'; - list_add(&new->list, 
&id->DirCache); -- -- if (id->cntDC > 20) { -- todeltmp = id->DirCache.prev; -- todel = list_entry(todeltmp, struct novfs_dir_cache, list); -- -- list_del(&todel->list); -- -- kfree(todel); -- -- DCCount--; -- id->cntDC--; -- } -- - } - } - return (retVal); diff --git a/patches.fixes/novfs-err_ptr-fix.diff b/patches.fixes/novfs-err_ptr-fix.diff deleted file mode 100644 index 8bdc17b..0000000 --- a/patches.fixes/novfs-err_ptr-fix.diff +++ /dev/null @@ -1,34 +0,0 @@ -From: Anders Johansson -Subject: Oops in novfs:unlink_local -References: bnc#569071 -Patch-mainline: no - -Signed-off-by: Anders Johansson -Acked-by: Sankar P ---- - fs/novfs/daemon.c | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - ---- a/fs/novfs/daemon.c -+++ b/fs/novfs/daemon.c -@@ -2046,14 +2046,14 @@ static long local_unlink(const char *pat - } - dentry = lookup_one_len(name, nd.path.dentry, strlen(name)); - error = PTR_ERR(dentry); -- DbgPrint("dentry %p", dentry); -- if (!(dentry->d_inode->i_mode & S_IFLNK)) { -- DbgPrint("%s not a link", name); -- error=-ENOENT; -- goto exit1; -- } - - if (!IS_ERR(dentry)) { -+ DbgPrint("dentry %p", dentry); -+ if (!(dentry->d_inode->i_mode & S_IFLNK)) { -+ DbgPrint("%s not a link", name); -+ error=-ENOENT; -+ goto exit1; -+ } - /* Why not before? Because we want correct error value */ - if (nd.last.name[nd.last.len]) - goto slashes; diff --git a/patches.fixes/novfs-fix-inode-uid b/patches.fixes/novfs-fix-inode-uid deleted file mode 100644 index 440cd22..0000000 --- a/patches.fixes/novfs-fix-inode-uid +++ /dev/null @@ -1,34 +0,0 @@ -From: Sankar P -Subject: novfs: Get proper UID when looking up inode -Patch-mainline: no -References: bnc#486997 - -novfs is incorrectly assigning the rights in the /var/opt/novell/nclmnt -directory. This causes nwlogin mappings to fail. - -Patch below is ported for SLED 11 SP1 and it is already submitted for -OES 2 service packs. 
- -Ported-by: Sankar P -Signed-off-by: Pary D -Acked-by: Jan Kara - ---- - fs/novfs/inode.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -2760,7 +2760,11 @@ struct dentry *novfs_i_lookup(struct ino - } - - if (!inode && ino) { -- uid = novfs_scope_get_uid(id->Scope); -+ if (id && id->Scope) { -+ uid = novfs_scope_get_uid(id->Scope); -+ } else { -+ uid = novfs_scope_get_uid(novfs_get_scope(dentry)); -+ } - if (novfs_lock_inode_cache(dir)) { - inode = novfs_get_inode (dentry->d_sb, info->mode, 0, uid, ino, &name); - if (inode) { diff --git a/patches.fixes/novfs-fix-oops-in-scope-finding b/patches.fixes/novfs-fix-oops-in-scope-finding deleted file mode 100644 index 2f69bbc..0000000 --- a/patches.fixes/novfs-fix-oops-in-scope-finding +++ /dev/null @@ -1,31 +0,0 @@ -From: Sankar P -Subject: novfs: fix an oops in novfs scope-finding code -References: bnc#588579 -Patch-mainline: no - -This patch removes an attempt to dereference a NULL pointer, -on failed memory allocation. The addscope boolean takes care -of the ScopeUserName field's validity. - -Signed-off-by: Sankar P -Acked-by: Jan Kara ---- - fs/novfs/scope.c | 7 +++---- - 1 file changed, 3 insertions(+), 4 deletions(-) - ---- a/fs/novfs/scope.c -+++ b/fs/novfs/scope.c -@@ -170,10 +170,9 @@ static struct novfs_scope_list *Scope_Fi - kfree(scope); - scope = NULL; - } -- } -- -- if (addscope) { -- novfs_add_to_root(scope->ScopeUserName); -+ -+ if (scope && addscope) -+ novfs_add_to_root(scope->ScopeUserName); - } - } - diff --git a/patches.fixes/novfs-fragment-size-fix.patch b/patches.fixes/novfs-fragment-size-fix.patch deleted file mode 100644 index e17abcb..0000000 --- a/patches.fixes/novfs-fragment-size-fix.patch +++ /dev/null @@ -1,26 +0,0 @@ -From: Sankar P -Subject: novfs: NCP Fragments can be upto 64k in size. -References: bnc#625965 -Patch-mainline: No - -Increase the Maximum fragment size declaration, so as to -avoid wrong boundary checks. 
- -Signed-off-by: Sankar P - -diff --git a/fs/novfs/nwcapi.h b/fs/novfs/nwcapi.h -index 4b6fb99..8cd842a 100644 ---- a/fs/novfs/nwcapi.h -+++ b/fs/novfs/nwcapi.h -@@ -301,7 +301,7 @@ N_EXTERN_LIBRARY(NWRCODE) - #define MIN_NUM_REPLIES 1 - #define MAX_NUM_REQUESTS 4096 - #define MIN_NUM_REQUESTS 1 --#define MAX_FRAG_SIZE 4096 -+#define MAX_FRAG_SIZE 65536 - #define MIN_FRAG_SIZE 1 - #define MAX_INFO_LEN 4096 - #define MAX_DOMAIN_LEN MAX_NETWORK_NAME_LENGTH --- -1.7.3.2 - diff --git a/patches.fixes/novfs-incorrect-filesize-fix b/patches.fixes/novfs-incorrect-filesize-fix deleted file mode 100644 index b548268..0000000 --- a/patches.fixes/novfs-incorrect-filesize-fix +++ /dev/null @@ -1,39 +0,0 @@ -From: Sankar P -Subject: novfs: novfs reports incorrect file size -Patch-mainline: no -References: bnc#426536 - -While updating the inode, make sure that the s_blocks member -is updated to the number of 512 blocks units. - -This fixes the issue of novfs reporting invalid file sizes. - -Signed-off-by: Sankar P -Acked-by: Jan Kara - ---- - fs/novfs/inode.c | 13 +++++++++++-- - 1 file changed, 11 insertions(+), 2 deletions(-) - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -2671,8 +2671,17 @@ void update_inode(struct inode *Inode, s - Inode->i_mtime = Info->mtime; - - if (Inode->i_size && Inode->i_sb->s_blocksize) { -- Inode->i_blocks = -- (unsigned long) (Info->size >> (loff_t) Inode->i_blkbits); -+ -+ /* -+ * Filling number of blocks as in NSS filesystem. -+ * The s_blocksize is initialized to PAGE_CACHE_SIZE in -+ * the super block initialization. 
-+ * -+ * Update i_blocks to have the number of 512 blocks -+ */ -+ Inode->i_blocks = (((loff_t)Info->size) + Inode->i_sb->s_blocksize - 1) -+ >> (loff_t)Inode->i_blkbits; -+ Inode->i_blocks = Inode->i_blocks << (PAGE_CACHE_SHIFT - 9); - Inode->i_bytes = Info->size & (Inode->i_sb->s_blocksize - 1); - - DbgPrint("i_sb->s_blocksize=%d", Inode->i_sb->s_blocksize); diff --git a/patches.fixes/novfs-lindent b/patches.fixes/novfs-lindent deleted file mode 100644 index d72aa4d..0000000 --- a/patches.fixes/novfs-lindent +++ /dev/null @@ -1,8008 +0,0 @@ -From: Sankar P -Subject: [PATCH] novfs: Lindent novfs sources. -Patch-mainline: never -References: none - -The sources of novfs had numerous coding-style issues. So I ran Lindent on the -sources with just one modification. - -I set the character limit to 132 instead of 80, as it is more readable and -suggested by Linus in a mailing-list. - -Signed-off-by: Sankar P -Acked-by: Jan Kara - ---- - fs/novfs/commands.h | 7 - fs/novfs/daemon.c | 559 +++++++-------------- - fs/novfs/file.c | 478 ++++++------------ - fs/novfs/inode.c | 1355 ++++++++++++++++------------------------------------ - fs/novfs/nwcapi.c | 601 +++++++---------------- - fs/novfs/nwcapi.h | 16 - fs/novfs/nwerror.h | 3 - fs/novfs/proc.c | 18 - fs/novfs/profile.c | 158 +----- - fs/novfs/scope.c | 89 +-- - fs/novfs/vfs.h | 258 +++------ - 11 files changed, 1148 insertions(+), 2394 deletions(-) - ---- a/fs/novfs/commands.h -+++ b/fs/novfs/commands.h -@@ -159,7 +159,6 @@ struct novfs_command_reply_header { - - }; - -- - struct novfs_delete_file_request { - struct novfs_command_request_header Command; - unsigned int isDirectory; -@@ -681,7 +680,6 @@ struct nwd_server_version { - unsigned short int uRevision; - }; - -- - #define MAX_ADDRESS_LENGTH 32 - - struct tagNwdTranAddrEx { -@@ -933,8 +931,7 @@ struct novfs_set_file_lock_reply { - - }; - -- --struct novfs_scope_list{ -+struct novfs_scope_list { - struct list_head ScopeList; - struct novfs_schandle ScopeId; - 
struct novfs_schandle SessionId; -@@ -952,4 +949,4 @@ struct novfs_scope_list{ - - #pragma pack(pop) - --#endif /* __NOVFS_COMMANDS_H */ -+#endif /* __NOVFS_COMMANDS_H */ ---- a/fs/novfs/daemon.c -+++ b/fs/novfs/daemon.c -@@ -89,17 +89,16 @@ struct drive_map { - char name[1]; - }; - --static void Queue_get(struct daemon_cmd * Que); --static void Queue_put(struct daemon_cmd * Que); -+static void Queue_get(struct daemon_cmd *Que); -+static void Queue_put(struct daemon_cmd *Que); - static void RemoveDriveMaps(void); --static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle); --static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle); -+static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle); -+static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle); - static int set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session); - static int unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session); - static int NwdGetMountPath(struct novfs_xplat *pdata); - static long local_unlink(const char *pathname); - -- - /*===[ Global variables ]=================================================*/ - static struct daemon_queue Daemon_Queue; - -@@ -130,7 +129,7 @@ void novfs_daemon_queue_exit(void) - /*++======================================================================*/ - static void novfs_daemon_timer(unsigned long data) - { -- struct daemon_cmd *que = (struct daemon_cmd *) data; -+ struct daemon_cmd *que = (struct daemon_cmd *)data; - - if (QUEUE_ACKED != que->status) { - que->status = QUEUE_TIMEOUT; -@@ -140,10 +139,7 @@ static void novfs_daemon_timer(unsigned - - /*++======================================================================*/ - int Queue_Daemon_Command(void *request, -- unsigned long reqlen, -- void *data, -- int dlen, -- void **reply, unsigned long * replen, int interruptible) -+ unsigned long 
reqlen, void *data, int dlen, void **reply, unsigned long *replen, int interruptible) - { - struct daemon_cmd *que; - int retCode = 0; -@@ -167,15 +163,14 @@ int Queue_Daemon_Command(void *request, - - que->sequence = atomic_inc_return(&Sequence); - -- ((struct novfs_command_request_header *) request)->SequenceNumber = -- que->sequence; -+ ((struct novfs_command_request_header *)request)->SequenceNumber = que->sequence; - - /* - * Setup and start que timer - */ - init_timer(&que->timer); - que->timer.expires = jiffies + (HZ * Daemon_Command_Timeout); -- que->timer.data = (unsigned long) que; -+ que->timer.data = (unsigned long)que; - que->timer.function = novfs_daemon_timer; - add_timer(&que->timer); - -@@ -222,12 +217,9 @@ int Queue_Daemon_Command(void *request, - - if (QUEUE_ACKED == que->status) { - que->status = QUEUE_WAITING; -- mod_timer(&que->timer, -- jiffies + -- (HZ * 2 * Daemon_Command_Timeout)); -+ mod_timer(&que->timer, jiffies + (HZ * 2 * Daemon_Command_Timeout)); - if (interruptible) { -- retCode = -- down_interruptible(&que->semaphore); -+ retCode = down_interruptible(&que->semaphore); - } else { - down(&que->semaphore); - } -@@ -273,13 +265,13 @@ int Queue_Daemon_Command(void *request, - return (retCode); - } - --static void Queue_get(struct daemon_cmd * Que) -+static void Queue_get(struct daemon_cmd *Que) - { - DbgPrint("que=0x%p %d", Que, atomic_read(&Que->reference)); - atomic_inc(&Que->reference); - } - --static void Queue_put(struct daemon_cmd * Que) -+static void Queue_put(struct daemon_cmd *Que) - { - - DbgPrint("que=0x%p %d", Que, atomic_read(&Que->reference)); -@@ -308,14 +300,14 @@ struct daemon_cmd *get_next_queue(int Se - DbgPrint("que=0x%p", Daemon_Queue.list.next); - - spin_lock(&Daemon_Queue.lock); -- que = (struct daemon_cmd *) Daemon_Queue.list.next; -+ que = (struct daemon_cmd *)Daemon_Queue.list.next; - -- while (que && (que != (struct daemon_cmd *) & Daemon_Queue.list.next) -+ while (que && (que != (struct daemon_cmd 
*)&Daemon_Queue.list.next) - && (que->status != QUEUE_SENDING)) { -- que = (struct daemon_cmd *) que->list.next; -+ que = (struct daemon_cmd *)que->list.next; - } - -- if ((NULL == que) || (que == (struct daemon_cmd *) & Daemon_Queue.list) -+ if ((NULL == que) || (que == (struct daemon_cmd *)&Daemon_Queue.list) - || (que->status != QUEUE_SENDING)) { - que = NULL; - } else if (Set_Queue_Waiting) { -@@ -339,15 +331,15 @@ static struct daemon_cmd *find_queue(uns - DbgPrint("0x%x", sequence); - - spin_lock(&Daemon_Queue.lock); -- que = (struct daemon_cmd *) Daemon_Queue.list.next; -+ que = (struct daemon_cmd *)Daemon_Queue.list.next; - -- while (que && (que != (struct daemon_cmd *) & Daemon_Queue.list.next) -+ while (que && (que != (struct daemon_cmd *)&Daemon_Queue.list.next) - && (que->sequence != sequence)) { -- que = (struct daemon_cmd *) que->list.next; -+ que = (struct daemon_cmd *)que->list.next; - } - - if ((NULL == que) -- || (que == (struct daemon_cmd *) & Daemon_Queue.list.next) -+ || (que == (struct daemon_cmd *)&Daemon_Queue.list.next) - || (que->sequence != sequence)) { - que = NULL; - } -@@ -364,8 +356,7 @@ static struct daemon_cmd *find_queue(uns - - int novfs_daemon_open_control(struct inode *Inode, struct file *File) - { -- DbgPrint("pid=%d Count=%d", current->pid, -- atomic_read(&Daemon_Open_Count)); -+ DbgPrint("pid=%d Count=%d", current->pid, atomic_read(&Daemon_Open_Count)); - atomic_inc(&Daemon_Open_Count); - - return (0); -@@ -375,8 +366,7 @@ int novfs_daemon_close_control(struct in - { - struct daemon_cmd *que; - -- DbgPrint("pid=%d Count=%d", current->pid, -- atomic_read(&Daemon_Open_Count)); -+ DbgPrint("pid=%d Count=%d", current->pid, atomic_read(&Daemon_Open_Count)); - - if (atomic_dec_and_test(&Daemon_Open_Count)) { - /* -@@ -384,15 +374,14 @@ int novfs_daemon_close_control(struct in - */ - - spin_lock(&Daemon_Queue.lock); -- que = (struct daemon_cmd *) Daemon_Queue.list.next; -+ que = (struct daemon_cmd *)Daemon_Queue.list.next; - -- 
while (que -- && (que != (struct daemon_cmd *) & Daemon_Queue.list.next) -+ while (que && (que != (struct daemon_cmd *)&Daemon_Queue.list.next) - && (que->status != QUEUE_DONE)) { - que->status = QUEUE_TIMEOUT; - up(&que->semaphore); - -- que = (struct daemon_cmd *) que->list.next; -+ que = (struct daemon_cmd *)que->list.next; - } - spin_unlock(&Daemon_Queue.lock); - -@@ -441,44 +430,29 @@ ssize_t novfs_daemon_cmd_send(struct fil - if (DLREAD == dlist->rwflag) { - bcnt = dlist->len; - DbgPrint("page=0x%p " -- "offset=0x%p len=%d", -- i, dlist->page, -- dlist->offset, dlist->len); -+ "offset=0x%p len=%d", i, dlist->page, dlist->offset, dlist->len); - if ((bcnt + retValue) <= len) { - void *km_adr = NULL; - - if (dlist->page) { -- km_adr = -- kmap(dlist-> -- page); -+ km_adr = kmap(dlist->page); - vadr = km_adr; -- vadr += -- (unsigned long) -- dlist-> -- offset; -+ vadr += (unsigned long) -+ dlist->offset; - } else { -- vadr = -- dlist-> -- offset; -+ vadr = dlist->offset; - } - -- ccnt = -- copy_to_user(buf, -- vadr, -- bcnt); -+ ccnt = copy_to_user(buf, vadr, bcnt); - -- DbgPrint("Copy %d from 0x%p to 0x%p.", -- bcnt, vadr, buf); -+ DbgPrint("Copy %d from 0x%p to 0x%p.", bcnt, vadr, buf); - if (bcnt > 0x80) -- novfs_dump(0x80, -- vadr); -+ novfs_dump(0x80, vadr); - else -- novfs_dump(bcnt, -- vadr); -+ novfs_dump(bcnt, vadr); - - if (km_adr) { -- kunmap(dlist-> -- page); -+ kunmap(dlist->page); - } - - retValue += bcnt; -@@ -497,10 +471,8 @@ ssize_t novfs_daemon_cmd_send(struct fil - retValue = -EAGAIN; - break; - } else { -- if ((error = -- down_interruptible(&Daemon_Queue.semaphore))) { -- DbgPrint("after down_interruptible error...%d", -- error); -+ if ((error = down_interruptible(&Daemon_Queue.semaphore))) { -+ DbgPrint("after down_interruptible error...%d", error); - retValue = -EINTR; - break; - } -@@ -515,7 +487,7 @@ ssize_t novfs_daemon_cmd_send(struct fil - return (retValue); - } - --ssize_t novfs_daemon_recv_reply(struct file *file, const char *buf, 
size_t nbytes, loff_t * ppos) -+ssize_t novfs_daemon_recv_reply(struct file * file, const char *buf, size_t nbytes, loff_t * ppos) - { - struct daemon_cmd *que; - size_t retValue = 0; -@@ -526,8 +498,7 @@ ssize_t novfs_daemon_recv_reply(struct f - char *vadr; - int i; - -- DbgPrint("buf=0x%p nbytes=%d ppos=%llx", buf, -- nbytes, *ppos); -+ DbgPrint("buf=0x%p nbytes=%d ppos=%llx", buf, nbytes, *ppos); - - /* - * Get sequence number from reply buffer -@@ -557,30 +528,22 @@ ssize_t novfs_daemon_recv_reply(struct f - int thiscopy, left = nbytes; - retValue = 0; - -- DbgPrint("dlist=0x%p count=%d", -- dlist, que->datalen); -- for (i = 0; -- (i < que->datalen) && (retValue < nbytes); -- i++, dlist++) { -+ DbgPrint("dlist=0x%p count=%d", dlist, que->datalen); -+ for (i = 0; (i < que->datalen) && (retValue < nbytes); i++, dlist++) { - __DbgPrint("\n" -- " dlist[%d].page: 0x%p\n" -- " dlist[%d].offset: 0x%p\n" -- " dlist[%d].len: 0x%x\n" -- " dlist[%d].rwflag: 0x%x\n", -- i, dlist->page, i, -- dlist->offset, i, dlist->len, -- i, dlist->rwflag); -+ " dlist[%d].page: 0x%p\n" -+ " dlist[%d].offset: 0x%p\n" -+ " dlist[%d].len: 0x%x\n" -+ " dlist[%d].rwflag: 0x%x\n", -+ i, dlist->page, i, dlist->offset, i, dlist->len, i, dlist->rwflag); - - if (DLWRITE == dlist->rwflag) { - void *km_adr = NULL; - - if (dlist->page) { -- km_adr = -- kmap(dlist->page); -+ km_adr = kmap(dlist->page); - vadr = km_adr; -- vadr += -- (unsigned long) dlist-> -- offset; -+ vadr += (unsigned long)dlist->offset; - } else { - vadr = dlist->offset; - } -@@ -590,9 +553,7 @@ ssize_t novfs_daemon_recv_reply(struct f - thiscopy = left; - dlist->len = left; - } -- cpylen = -- copy_from_user(vadr, buf, -- thiscopy); -+ cpylen = copy_from_user(vadr, buf, thiscopy); - - if (thiscopy > 0x80) - novfs_dump(0x80, vadr); -@@ -617,9 +578,7 @@ ssize_t novfs_daemon_recv_reply(struct f - que->reply = reply; - que->replen = nbytes; - -- retValue -= -- copy_from_user(reply, buf, -- retValue); -+ retValue -= 
copy_from_user(reply, buf, retValue); - if (retValue > 0x80) - novfs_dump(0x80, reply); - else -@@ -646,7 +605,7 @@ ssize_t novfs_daemon_recv_reply(struct f - } - - int novfs_do_login(struct ncl_string *Server, struct ncl_string *Username, --struct ncl_string *Password, void **lgnId, struct novfs_schandle *Session) -+ struct ncl_string *Password, void **lgnId, struct novfs_schandle *Session) - { - struct novfs_login_user_request *cmd; - struct novfs_login_user_reply *reply; -@@ -660,31 +619,30 @@ struct ncl_string *Password, void **lgnI - if (!cmd) - return -ENOMEM; - -- data = (unsigned char *) cmd + sizeof(*cmd); -+ data = (unsigned char *)cmd + sizeof(*cmd); - cmd->Command.CommandType = VFS_COMMAND_LOGIN_USER; - cmd->Command.SequenceNumber = 0; - memcpy(&cmd->Command.SessionId, Session, sizeof(*Session)); - - cmd->srvNameType = Server->type; - cmd->serverLength = Server->len; -- cmd->serverOffset = (unsigned long) (data - (unsigned char *) cmd); -+ cmd->serverOffset = (unsigned long)(data - (unsigned char *)cmd); - memcpy(data, Server->buffer, Server->len); - data += Server->len; - - cmd->usrNameType = Username->type; - cmd->userNameLength = Username->len; -- cmd->userNameOffset = (unsigned long) (data - (unsigned char *) cmd); -+ cmd->userNameOffset = (unsigned long)(data - (unsigned char *)cmd); - memcpy(data, Username->buffer, Username->len); - data += Username->len; - - cmd->pwdNameType = Password->type; - cmd->passwordLength = Password->len; -- cmd->passwordOffset = (unsigned long) (data - (unsigned char *) cmd); -+ cmd->passwordOffset = (unsigned long)(data - (unsigned char *)cmd); - memcpy(data, Password->buffer, Password->len); - data += Password->len; - -- retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - if (reply->Reply.ErrorCode) { - retCode = reply->Reply.ErrorCode; -@@ -720,8 +678,7 @@ 
int novfs_daemon_logout(struct qstr *Ser - cmd->length = Server->len; - memcpy(cmd->Name, Server->name, Server->len); - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - if (reply->Reply.ErrorCode) { - retCode = -EIO; -@@ -745,18 +702,15 @@ int novfs_daemon_getpwuid(uid_t uid, int - SC_INITIALIZE(cmd.Command.SessionId); - cmd.uid = uid; - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - if (reply->Reply.ErrorCode) { - retCode = -EIO; - } else { - retCode = 0; - memset(uname, 0, unamelen); -- replylen = -- replylen - offsetof(struct -- novfs_getpwuid_reply, UserName); -+ replylen = replylen - offsetof(struct -+ novfs_getpwuid_reply, UserName); - if (replylen) { - if (replylen > unamelen) { - retCode = -EINVAL; -@@ -782,15 +736,13 @@ int novfs_daemon_getversion(char *Buf, i - cmd.Command.SequenceNumber = 0; - SC_INITIALIZE(cmd.Command.SessionId); - -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - if (reply->Reply.ErrorCode) { - retVal = -EIO; - } else { -- retVal = -- replylen - offsetof(struct -- novfs_get_version_reply, Version); -+ retVal = replylen - offsetof(struct -+ novfs_get_version_reply, Version); - if (retVal < length) { - memcpy(Buf, reply->Version, retVal); - Buf[retVal] = '\0'; -@@ -819,18 +771,19 @@ static int daemon_login(struct novfs_log - server.len = lLogin.Server.length; - server.type = NWC_STRING_TYPE_ASCII; - if (!copy_from_user((void *)server.buffer, lLogin.Server.data, server.len)) { -- username.buffer = kmalloc(lLogin.UserName.length, 
GFP_KERNEL); -+ username.buffer = kmalloc(lLogin.UserName.length, GFP_KERNEL); - if (username.buffer) { - username.len = lLogin.UserName.length; - username.type = NWC_STRING_TYPE_ASCII; - if (!copy_from_user((void *)username.buffer, lLogin.UserName.data, username.len)) { -- password.buffer = kmalloc(lLogin.Password.length, GFP_KERNEL); -- if (password.buffer) -- { -+ password.buffer = kmalloc(lLogin.Password.length, GFP_KERNEL); -+ if (password.buffer) { - password.len = lLogin.Password.length; - password.type = NWC_STRING_TYPE_ASCII; -- if (!copy_from_user((void *)password.buffer, lLogin.Password.data, password.len)) { -- retCode = novfs_do_login (&server, &username, &password, NULL, Session); -+ if (!copy_from_user -+ ((void *)password.buffer, lLogin.Password.data, password.len)) { -+ retCode = -+ novfs_do_login(&server, &username, &password, NULL, Session); - if (!retCode) { - char *username; - username = novfs_scope_get_username(); -@@ -874,7 +827,7 @@ exit: - return (retCode); - } - --int novfs_daemon_create_sessionId(struct novfs_schandle * SessionId) -+int novfs_daemon_create_sessionId(struct novfs_schandle *SessionId) - { - struct novfs_create_context_request cmd; - struct novfs_create_context_reply *reply; -@@ -887,12 +840,9 @@ int novfs_daemon_create_sessionId(struct - cmd.Command.SequenceNumber = 0; - SC_INITIALIZE(cmd.Command.SessionId); - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { -- if (!reply->Reply.ErrorCode -- && replylen > sizeof(struct novfs_command_reply_header)) { -+ if (!reply->Reply.ErrorCode && replylen > sizeof(struct novfs_command_reply_header)) { - *SessionId = reply->SessionId; - retCode = 0; - } else { -@@ -913,16 +863,13 @@ int novfs_daemon_destroy_sessionId(struc - unsigned long replylen = 0; - int retCode = 0; - -- DbgPrint("0x%p:%p", 
SessionId.hTypeId, -- SessionId.hId); -+ DbgPrint("0x%p:%p", SessionId.hTypeId, SessionId.hId); - - cmd.Command.CommandType = VFS_COMMAND_DESTROY_CONTEXT; - cmd.Command.SequenceNumber = 0; - cmd.Command.SessionId = SessionId; - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - if (!reply->Reply.ErrorCode) { - struct drive_map *dm; -@@ -956,24 +903,20 @@ int novfs_daemon_destroy_sessionId(struc - } - - int novfs_daemon_get_userspace(struct novfs_schandle SessionId, uint64_t * TotalSize, -- uint64_t * Free, uint64_t * TotalEnties, -- uint64_t * FreeEnties) -+ uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties) - { - struct novfs_get_user_space cmd; - struct novfs_get_user_space_reply *reply; - unsigned long replylen = 0; - int retCode = 0; - -- DbgPrint("0x%p:%p", SessionId.hTypeId, -- SessionId.hId); -+ DbgPrint("0x%p:%p", SessionId.hTypeId, SessionId.hId); - - cmd.Command.CommandType = VFS_COMMAND_GET_USER_SPACE; - cmd.Command.SequenceNumber = 0; - cmd.Command.SessionId = SessionId; - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - if (!reply->Reply.ErrorCode) { - -@@ -1024,9 +967,7 @@ int novfs_daemon_set_mnt_point(char *Pat - - replylen = 0; - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - if (!reply->Reply.ErrorCode) { - retCode = 0; -@@ -1070,9 +1011,7 @@ int novfs_daemon_debug_cmd_send(char *Co - - replylen = 0; - -- retCode = -- Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, (void *)&reply, -- &replylen, 
INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - kfree(reply); - } -@@ -1092,7 +1031,7 @@ int novfs_daemon_ioctl(struct inode *ino - - switch (cmd) { - case IOC_LOGIN: -- retCode = daemon_login((struct novfs_login *) arg, &session_id); -+ retCode = daemon_login((struct novfs_login *)arg, &session_id); - break; - - case IOC_LOGOUT: -@@ -1113,9 +1052,7 @@ int novfs_daemon_ioctl(struct inode *ino - buf = kmalloc(io.length + 1, GFP_KERNEL); - if (buf) { - buf[0] = 0; -- cpylen = -- copy_from_user(buf, io.data, -- io.length); -+ cpylen = copy_from_user(buf, io.data, io.length); - buf[io.length] = '\0'; - DbgPrint("%s", buf); - kfree(buf); -@@ -1129,8 +1066,7 @@ int novfs_daemon_ioctl(struct inode *ino - { - struct novfs_xplat data; - -- cpylen = -- copy_from_user(&data, (void *)arg, sizeof(data)); -+ cpylen = copy_from_user(&data, (void *)arg, sizeof(data)); - retCode = ((data.xfunction & 0x0000FFFF) | 0xCC000000); - - switch (data.xfunction) { -@@ -1148,18 +1084,16 @@ int novfs_daemon_ioctl(struct inode *ino - return (retCode); - } - --static int daemon_added_resource(struct daemon_handle * DHandle, int Type, void *CHandle, -- unsigned char * FHandle, unsigned long Mode, u_long Size) -+static int daemon_added_resource(struct daemon_handle *DHandle, int Type, void *CHandle, -+ unsigned char *FHandle, unsigned long Mode, u_long Size) - { - struct daemon_resource *resource; - - if (FHandle) - DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p FHandle=0x%x " -- "Mode=0x%x Size=%d", DHandle, Type, CHandle, -- *(u32 *) & FHandle[2], Mode, Size); -+ "Mode=0x%x Size=%d", DHandle, Type, CHandle, *(u32 *) & FHandle[2], Mode, Size); - else -- DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p\n", -- DHandle, Type, CHandle); -+ DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p\n", DHandle, Type, CHandle); - - resource = kmalloc(sizeof(struct daemon_resource), GFP_KERNEL); - if (!resource) -@@ -1168,8 +1102,7 @@ 
static int daemon_added_resource(struct - resource->type = Type; - resource->connection = CHandle; - if (FHandle) -- memcpy(resource->handle, FHandle, -- sizeof(resource->handle)); -+ memcpy(resource->handle, FHandle, sizeof(resource->handle)); - else - memset(resource->handle, 0, sizeof(resource->handle)); - resource->mode = Mode; -@@ -1181,23 +1114,20 @@ static int daemon_added_resource(struct - return 0; - } - --static int daemon_remove_resource(struct daemon_handle * DHandle, int Type, void *CHandle, -- unsigned long FHandle) -+static int daemon_remove_resource(struct daemon_handle *DHandle, int Type, void *CHandle, unsigned long FHandle) - { - struct daemon_resource *resource; - struct list_head *l; - int retVal = -ENOMEM; - -- DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p FHandle=0x%x", -- DHandle, Type, CHandle, FHandle); -+ DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p FHandle=0x%x", DHandle, Type, CHandle, FHandle); - - write_lock(&DHandle->lock); - - list_for_each(l, &DHandle->list) { - resource = list_entry(l, struct daemon_resource, list); - -- if ((Type == resource->type) && -- (resource->connection == CHandle)) { -+ if ((Type == resource->type) && (resource->connection == CHandle)) { - DbgPrint("Found resource=0x%p", resource); - l = l->prev; - list_del(&resource->list); -@@ -1240,34 +1170,26 @@ int novfs_daemon_lib_close(struct inode - - DbgPrint("inode=0x%p file=0x%p", inode, file); - if (file->private_data) { -- dh = (struct daemon_handle *) file->private_data; -+ dh = (struct daemon_handle *)file->private_data; - - list_for_each(l, &dh->list) { - resource = list_entry(l, struct daemon_resource, list); - - if (DH_TYPE_STREAM == resource->type) { -- novfs_close_stream(resource->connection, -- resource->handle, -- dh->session); -+ novfs_close_stream(resource->connection, resource->handle, dh->session); - } else if (DH_TYPE_CONNECTION == resource->type) { -- cmd = (struct novfs_xplat_call_request *) commanddata; -- cmdlen = -- offsetof(struct 
novfs_xplat_call_request, -- data) + sizeof(struct nwd_close_conn); -- cmd->Command.CommandType = -- VFS_COMMAND_XPLAT_CALL; -+ cmd = (struct novfs_xplat_call_request *)commanddata; -+ cmdlen = offsetof(struct novfs_xplat_call_request, data) + sizeof(struct nwd_close_conn); -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; - cmd->Command.SequenceNumber = 0; - cmd->Command.SessionId = dh->session; - cmd->NwcCommand = NWC_CLOSE_CONN; - - cmd->dataLen = sizeof(struct nwd_close_conn); -- nwdClose = (struct nwd_close_conn *) cmd->data; -- nwdClose->ConnHandle = -- (void *) resource->connection; -- -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, -- 0, (void **)&reply, -- &replylen, 0); -+ nwdClose = (struct nwd_close_conn *)cmd->data; -+ nwdClose->ConnHandle = (void *)resource->connection; -+ -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 0); - if (reply) - kfree(reply); - } -@@ -1282,8 +1204,7 @@ int novfs_daemon_lib_close(struct inode - return (0); - } - --ssize_t novfs_daemon_lib_read(struct file * file, char *buf, size_t len, -- loff_t * off) -+ssize_t novfs_daemon_lib_read(struct file * file, char *buf, size_t len, loff_t * off) - { - struct daemon_handle *dh; - struct daemon_resource *resource; -@@ -1297,16 +1218,13 @@ ssize_t novfs_daemon_lib_read(struct fil - dh = file->private_data; - read_lock(&dh->lock); - if (&dh->list != dh->list.next) { -- resource = -- list_entry(dh->list.next, struct daemon_resource, list); -+ resource = list_entry(dh->list.next, struct daemon_resource, list); - - if (DH_TYPE_STREAM == resource->type) { - while (len > 0 && (offset < resource->size)) { - thisread = len; - if (novfs_read_stream -- (resource->connection, -- resource->handle, buf, &thisread, -- &offset, 1, dh->session) -+ (resource->connection, resource->handle, buf, &thisread, &offset, 1, dh->session) - || !thisread) { - break; - } -@@ -1324,8 +1242,7 @@ ssize_t novfs_daemon_lib_read(struct fil - return (totalread); - } - --ssize_t 
novfs_daemon_lib_write(struct file * file, const char *buf, size_t len, -- loff_t * off) -+ssize_t novfs_daemon_lib_write(struct file * file, const char *buf, size_t len, loff_t * off) - { - struct daemon_handle *dh; - struct daemon_resource *resource; -@@ -1340,21 +1257,15 @@ ssize_t novfs_daemon_lib_write(struct fi - dh = file->private_data; - write_lock(&dh->lock); - if (&dh->list != dh->list.next) { -- resource = -- list_entry(dh->list.next, struct daemon_resource, list); -+ resource = list_entry(dh->list.next, struct daemon_resource, list); - - if ((DH_TYPE_STREAM == resource->type) && (len >= 0)) { - totalwrite = 0; - do { - thiswrite = len; - status = -- novfs_write_stream(resource-> -- connection, -- resource->handle, -- (void *)buf, -- &thiswrite, -- &offset, -- dh->session); -+ novfs_write_stream(resource->connection, -+ resource->handle, (void *)buf, &thiswrite, &offset, dh->session); - if (status || !thiswrite) { - /* - * If len is zero then the file will have just been -@@ -1397,8 +1308,7 @@ loff_t novfs_daemon_lib_llseek(struct fi - dh = file->private_data; - read_lock(&dh->lock); - if (&dh->list != dh->list.next) { -- resource = -- list_entry(dh->list.next, struct daemon_resource, list); -+ resource = list_entry(dh->list.next, struct daemon_resource, list); - - if (DH_TYPE_STREAM == resource->type) { - switch (origin) { -@@ -1457,20 +1367,14 @@ int novfs_daemon_lib_ioctl(struct inode - } io; - char *buf; - io.length = 0; -- cpylen = -- copy_from_user(&io, (void *)arg, -- sizeof(io)); -+ cpylen = copy_from_user(&io, (void *)arg, sizeof(io)); - if (io.length <= 0 || io.length > 1024) - return -EINVAL; - if (io.length) { -- buf = -- kmalloc(io.length + 1, -- GFP_KERNEL); -+ buf = kmalloc(io.length + 1, GFP_KERNEL); - if (buf) { - buf[0] = 0; -- cpylen = -- copy_from_user(buf, io.data, -- io.length); -+ cpylen = copy_from_user(buf, io.data, io.length); - buf[io.length] = '\0'; - __DbgPrint("%s", buf); - kfree(buf); -@@ -1484,243 +1388,177 @@ int 
novfs_daemon_lib_ioctl(struct inode - { - struct novfs_xplat data; - -- cpylen = -- copy_from_user(&data, (void *)arg, -- sizeof(data)); -+ cpylen = copy_from_user(&data, (void *)arg, sizeof(data)); - retCode = ((data.xfunction & 0x0000FFFF) | 0xCC000000); - - switch (data.xfunction) { - case NWC_OPEN_CONN_BY_NAME: - DbgIocCall("NwOpenConnByName"); -- retCode = -- novfs_open_conn_by_name(&data, -- &handle, dh->session); -+ retCode = novfs_open_conn_by_name(&data, &handle, dh->session); - if (!retCode) -- daemon_added_resource(dh, -- DH_TYPE_CONNECTION,handle, 0, 0, 0); -+ daemon_added_resource(dh, DH_TYPE_CONNECTION, handle, 0, 0, 0); - break; - - case NWC_OPEN_CONN_BY_ADDRESS: - DbgIocCall("NwOpenConnByAddress"); -- retCode = -- novfs_open_conn_by_addr(&data, &handle, -- dh->session); -+ retCode = novfs_open_conn_by_addr(&data, &handle, dh->session); - if (!retCode) -- daemon_added_resource(dh, -- DH_TYPE_CONNECTION, -- handle, 0, -- 0, 0); -+ daemon_added_resource(dh, DH_TYPE_CONNECTION, handle, 0, 0, 0); - break; - - case NWC_OPEN_CONN_BY_REFERENCE: - - DbgIocCall("NwOpenConnByReference"); -- retCode = -- novfs_open_conn_by_ref(&data, &handle, -- dh->session); -+ retCode = novfs_open_conn_by_ref(&data, &handle, dh->session); - if (!retCode) -- daemon_added_resource(dh, -- DH_TYPE_CONNECTION, -- handle, 0, -- 0, 0); -+ daemon_added_resource(dh, DH_TYPE_CONNECTION, handle, 0, 0, 0); - break; - - case NWC_SYS_CLOSE_CONN: - DbgIocCall("NwSysCloseConn"); -- retCode = -- novfs_sys_conn_close(&data, (unsigned long *)&handle, dh->session); -+ retCode = novfs_sys_conn_close(&data, (unsigned long *)&handle, dh->session); - daemon_remove_resource(dh, DH_TYPE_CONNECTION, handle, 0); - break; - - case NWC_CLOSE_CONN: - DbgIocCall("NwCloseConn"); -- retCode = -- novfs_conn_close(&data, &handle, -- dh->session); -- daemon_remove_resource(dh, -- DH_TYPE_CONNECTION, -- handle, 0); -+ retCode = novfs_conn_close(&data, &handle, dh->session); -+ daemon_remove_resource(dh, 
DH_TYPE_CONNECTION, handle, 0); - break; - - case NWC_LOGIN_IDENTITY: -- DbgIocCall("" -- "NwLoginIdentity"); -- retCode = -- novfs_login_id(&data, dh->session); -+ DbgIocCall("" "NwLoginIdentity"); -+ retCode = novfs_login_id(&data, dh->session); - break; - - case NWC_RAW_NCP_REQUEST: -- DbgIocCall("[VFS XPLAT] Send Raw " -- "NCP Request"); -+ DbgIocCall("[VFS XPLAT] Send Raw " "NCP Request"); - retCode = novfs_raw_send(&data, dh->session); - break; - - case NWC_AUTHENTICATE_CONN_WITH_ID: -- DbgIocCall("[VFS XPLAT] Authenticate " -- "Conn With ID"); -- retCode = -- novfs_auth_conn(&data, -- dh->session); -+ DbgIocCall("[VFS XPLAT] Authenticate " "Conn With ID"); -+ retCode = novfs_auth_conn(&data, dh->session); - break; - - case NWC_UNAUTHENTICATE_CONN: -- DbgIocCall("[VFS XPLAT] UnAuthenticate " -- "Conn With ID"); -- retCode = -- novfs_unauthenticate(&data, -- dh->session); -+ DbgIocCall("[VFS XPLAT] UnAuthenticate " "Conn With ID"); -+ retCode = novfs_unauthenticate(&data, dh->session); - break; - - case NWC_LICENSE_CONN: - DbgIocCall("Call NwLicenseConn"); -- retCode = -- novfs_license_conn(&data, dh->session); -+ retCode = novfs_license_conn(&data, dh->session); - break; - - case NWC_LOGOUT_IDENTITY: - DbgIocCall("NwLogoutIdentity"); -- retCode = -- novfs_logout_id(&data, -- dh->session); -+ retCode = novfs_logout_id(&data, dh->session); - break; - - case NWC_UNLICENSE_CONN: - DbgIocCall("NwUnlicense"); -- retCode = -- novfs_unlicense_conn(&data, dh->session); -+ retCode = novfs_unlicense_conn(&data, dh->session); - break; - - case NWC_GET_CONN_INFO: - DbgIocCall("NwGetConnInfo"); -- retCode = -- novfs_get_conn_info(&data, dh->session); -+ retCode = novfs_get_conn_info(&data, dh->session); - break; - - case NWC_SET_CONN_INFO: - DbgIocCall("NwSetConnInfo"); -- retCode = -- novfs_set_conn_info(&data, dh->session); -+ retCode = novfs_set_conn_info(&data, dh->session); - break; - - case NWC_SCAN_CONN_INFO: - DbgIocCall("NwScanConnInfo"); -- retCode = -- 
novfs_scan_conn_info(&data, dh->session); -+ retCode = novfs_scan_conn_info(&data, dh->session); - break; - - case NWC_GET_IDENTITY_INFO: - DbgIocCall("NwGetIdentityInfo"); -- retCode = -- novfs_get_id_info(&data, -- dh->session); -+ retCode = novfs_get_id_info(&data, dh->session); - break; - - case NWC_GET_REQUESTER_VERSION: - DbgIocCall("NwGetDaemonVersion"); -- retCode = -- novfs_get_daemon_ver(&data, -- dh->session); -+ retCode = novfs_get_daemon_ver(&data, dh->session); - break; - - case NWC_GET_PREFERRED_DS_TREE: - DbgIocCall("NwcGetPreferredDsTree"); -- retCode = -- novfs_get_preferred_DS_tree(&data, -- dh->session); -+ retCode = novfs_get_preferred_DS_tree(&data, dh->session); - break; - - case NWC_SET_PREFERRED_DS_TREE: - DbgIocCall("NwcSetPreferredDsTree"); -- retCode = -- novfs_set_preferred_DS_tree(&data, -- dh->session); -+ retCode = novfs_set_preferred_DS_tree(&data, dh->session); - break; - - case NWC_GET_DEFAULT_NAME_CONTEXT: - DbgIocCall("NwcGetDefaultNameContext"); -- retCode = -- novfs_get_default_ctx(&data, -- dh->session); -+ retCode = novfs_get_default_ctx(&data, dh->session); - break; - - case NWC_SET_DEFAULT_NAME_CONTEXT: - DbgIocCall("NwcSetDefaultNameContext"); -- retCode = -- novfs_set_default_ctx(&data, -- dh->session); -+ retCode = novfs_set_default_ctx(&data, dh->session); - break; - - case NWC_QUERY_FEATURE: - DbgIocCall("NwQueryFeature"); -- retCode = -- novfs_query_feature(&data, dh->session); -+ retCode = novfs_query_feature(&data, dh->session); - break; - - case NWC_GET_TREE_MONITORED_CONN_REF: - DbgIocCall("NwcGetTreeMonitoredConn"); -- retCode = -- novfs_get_tree_monitored_conn(&data, -- dh-> -- session); -+ retCode = novfs_get_tree_monitored_conn(&data, dh->session); - break; - - case NWC_ENUMERATE_IDENTITIES: - DbgIocCall("NwcEnumerateIdentities"); -- retCode = -- novfs_enum_ids(&data, -- dh->session); -+ retCode = novfs_enum_ids(&data, dh->session); - break; - - case NWC_CHANGE_KEY: - DbgIocCall("NwcChangeAuthKey"); -- 
retCode = -- novfs_change_auth_key(&data, -- dh->session); -+ retCode = novfs_change_auth_key(&data, dh->session); - break; - - case NWC_CONVERT_LOCAL_HANDLE: - DbgIocCall("NwdConvertLocalHandle"); -- retCode = -- NwdConvertLocalHandle(&data, dh); -+ retCode = NwdConvertLocalHandle(&data, dh); - break; - - case NWC_CONVERT_NETWARE_HANDLE: - DbgIocCall("NwdConvertNetwareHandle"); -- retCode = -- NwdConvertNetwareHandle(&data, dh); -+ retCode = NwdConvertNetwareHandle(&data, dh); - break; - - case NWC_SET_PRIMARY_CONN: - DbgIocCall("NwcSetPrimaryConn"); -- retCode = -- novfs_set_pri_conn(&data, -- dh->session); -+ retCode = novfs_set_pri_conn(&data, dh->session); - break; - - case NWC_GET_PRIMARY_CONN: - DbgIocCall("NwcGetPrimaryConn"); -- retCode = -- novfs_get_pri_conn(&data, -- dh->session); -+ retCode = novfs_get_pri_conn(&data, dh->session); - break; - - case NWC_MAP_DRIVE: - DbgIocCall("NwcMapDrive"); -- retCode = -- set_map_drive(&data, dh->session); -+ retCode = set_map_drive(&data, dh->session); - break; - - case NWC_UNMAP_DRIVE: - DbgIocCall("NwcUnMapDrive"); -- retCode = -- unmap_drive(&data, dh->session); -+ retCode = unmap_drive(&data, dh->session); - break; - - case NWC_ENUMERATE_DRIVES: - DbgIocCall("NwcEnumerateDrives"); -- retCode = -- novfs_enum_drives(&data, -- dh->session); -+ retCode = novfs_enum_drives(&data, dh->session); - break; - - case NWC_GET_MOUNT_PATH: -@@ -1730,22 +1568,17 @@ int novfs_daemon_lib_ioctl(struct inode - - case NWC_GET_BROADCAST_MESSAGE: - DbgIocCall("NwdGetBroadcastMessage"); -- retCode = -- novfs_get_bcast_msg(&data, -- dh->session); -+ retCode = novfs_get_bcast_msg(&data, dh->session); - break; - - case NWC_SET_KEY: - DbgIocCall("NwdSetKey"); -- retCode = -- novfs_set_key_value(&data, dh->session); -+ retCode = novfs_set_key_value(&data, dh->session); - break; - - case NWC_VERIFY_KEY: - DbgIocCall("NwdVerifyKey"); -- retCode = -- novfs_verify_key_value(&data, -- dh->session); -+ retCode = novfs_verify_key_value(&data, 
dh->session); - break; - - case NWC_RAW_NCP_REQUEST_ALL: -@@ -1757,8 +1590,7 @@ int novfs_daemon_lib_ioctl(struct inode - - } - -- DbgPrint("[NOVFS XPLAT] status Code = %X\n", -- retCode); -+ DbgPrint("[NOVFS XPLAT] status Code = %X\n", retCode); - break; - } - } -@@ -1767,8 +1599,7 @@ int novfs_daemon_lib_ioctl(struct inode - return (retCode); - } - --unsigned int novfs_daemon_poll(struct file *file, -- struct poll_table_struct *poll_table) -+unsigned int novfs_daemon_poll(struct file *file, struct poll_table_struct *poll_table) - { - struct daemon_cmd *que; - unsigned int mask = POLLOUT | POLLWRNORM; -@@ -1779,7 +1610,7 @@ unsigned int novfs_daemon_poll(struct fi - return mask; - } - --static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle) -+static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle) - { - int retVal; - struct nwc_convert_netware_handle nh; -@@ -1787,20 +1618,16 @@ static int NwdConvertNetwareHandle(struc - - DbgPrint("DHandle=0x%p", DHandle); - -- cpylen = -- copy_from_user(&nh, pdata->reqData, -- sizeof(struct nwc_convert_netware_handle)); -+ cpylen = copy_from_user(&nh, pdata->reqData, sizeof(struct nwc_convert_netware_handle)); - - retVal = - daemon_added_resource(DHandle, DH_TYPE_STREAM, -- Uint32toHandle(nh.ConnHandle), -- nh.NetWareHandle, nh.uAccessMode, -- nh.uFileSize); -+ Uint32toHandle(nh.ConnHandle), nh.NetWareHandle, nh.uAccessMode, nh.uFileSize); - - return (retVal); - } - --static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle) -+static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle *DHandle) - { - int retVal = NWE_REQUESTER_FAILURE; - struct daemon_resource *resource; -@@ -1816,14 +1643,12 @@ static int NwdConvertLocalHandle(struct - resource = list_entry(l, struct daemon_resource, list); - - if (DH_TYPE_STREAM == resource->type) { -- lh.uConnReference = -- HandletoUint32(resource->connection); 
-+ lh.uConnReference = HandletoUint32(resource->connection); - - //sgled memcpy(lh.NwWareHandle, resource->handle, sizeof(resource->handle)); - memcpy(lh.NetWareHandle, resource->handle, sizeof(resource->handle)); //sgled - if (pdata->repLen >= sizeof(struct nwc_convert_local_handle)) { -- cpylen = copy_to_user(pdata->repData, &lh, -- sizeof(struct nwc_convert_local_handle)); -+ cpylen = copy_to_user(pdata->repData, &lh, sizeof(struct nwc_convert_local_handle)); - retVal = 0; - } else { - retVal = NWE_BUFFER_OVERFLOW; -@@ -1855,9 +1680,7 @@ static int NwdGetMountPath(struct novfs_ - retVal = NWE_BUFFER_OVERFLOW; - } else { - if (mp.pMountPath) { -- cpylen = -- copy_to_user(mp.pMountPath, -- novfs_current_mnt, len); -+ cpylen = copy_to_user(mp.pMountPath, novfs_current_mnt, len); - } - retVal = 0; - } -@@ -1888,8 +1711,7 @@ static int set_map_drive(struct novfs_xp - return -EFAULT; - if (symInfo.linkOffsetLength > MAX_NAME_LEN) - return -EINVAL; -- drivemap = kmalloc(sizeof(struct drive_map) + symInfo.linkOffsetLength, -- GFP_KERNEL); -+ drivemap = kmalloc(sizeof(struct drive_map) + symInfo.linkOffsetLength, GFP_KERNEL); - if (!drivemap) - return -ENOMEM; - -@@ -1898,22 +1720,19 @@ static int set_map_drive(struct novfs_xp - cpylen = copy_from_user(drivemap->name, path, symInfo.linkOffsetLength); - - drivemap->session = Session; -- drivemap->hash = full_name_hash(drivemap->name, -- symInfo.linkOffsetLength - 1); -+ drivemap->hash = full_name_hash(drivemap->name, symInfo.linkOffsetLength - 1); - drivemap->namelen = symInfo.linkOffsetLength - 1; - DbgPrint("hash=0x%lx path=%s", drivemap->hash, drivemap->name); - -- dm = (struct drive_map *) & DriveMapList.next; -+ dm = (struct drive_map *)&DriveMapList.next; - - down(&DriveMapLock); - - list_for_each(list, &DriveMapList) { - dm = list_entry(list, struct drive_map, list); - __DbgPrint("%s: dm=0x%p\n" -- " hash: 0x%lx\n" -- " namelen: %d\n" -- " name: %s\n", __func__, -- dm, dm->hash, dm->namelen, dm->name); -+ " hash: 
0x%lx\n" -+ " namelen: %d\n" " name: %s\n", __func__, dm, dm->hash, dm->namelen, dm->name); - - if (drivemap->hash == dm->hash) { - if (0 == strcmp(dm->name, drivemap->name)) { -@@ -1926,15 +1745,12 @@ static int set_map_drive(struct novfs_xp - } - - if (dm) { -- if ((dm == (struct drive_map *) & DriveMapList) || -- (dm->hash < drivemap->hash)) { -+ if ((dm == (struct drive_map *)&DriveMapList) || (dm->hash < drivemap->hash)) { - list_add(&drivemap->list, &dm->list); - } else { -- list_add_tail(&drivemap->list, -- &dm->list); -+ list_add_tail(&drivemap->list, &dm->list); - } -- } -- else -+ } else - kfree(drivemap); - up(&DriveMapLock); - return (retVal); -@@ -1949,7 +1765,6 @@ static int unmap_drive(struct novfs_xpla - struct list_head *list; - unsigned long hash; - -- - retVal = novfs_unmap_drive(pdata, Session); - if (retVal) - return retVal; -@@ -1960,7 +1775,7 @@ static int unmap_drive(struct novfs_xpla - path = kmalloc(symInfo.linkLen, GFP_KERNEL); - if (!path) - return -ENOMEM; -- if (copy_from_user(path,((struct nwc_unmap_drive_ex *) pdata->reqData)->linkData, symInfo.linkLen)) { -+ if (copy_from_user(path, ((struct nwc_unmap_drive_ex *)pdata->reqData)->linkData, symInfo.linkLen)) { - kfree(path); - return -EFAULT; - } -@@ -1975,9 +1790,7 @@ static int unmap_drive(struct novfs_xpla - list_for_each(list, &DriveMapList) { - dm = list_entry(list, struct drive_map, list); - __DbgPrint("%s: dm=0x%p %s\n" -- " hash: 0x%x\n" -- " namelen: %d\n", __func__, -- dm, dm->name, dm->hash, dm->namelen); -+ " hash: 0x%x\n" " namelen: %d\n", __func__, dm, dm->name, dm->hash, dm->namelen); - - if (hash == dm->hash) { - if (0 == strcmp(dm->name, path)) { -@@ -1991,9 +1804,7 @@ static int unmap_drive(struct novfs_xpla - - if (dm) { - __DbgPrint("%s: Remove dm=0x%p %s\n" -- " hash: 0x%x\n" -- " namelen: %d\n", __func__, -- dm, dm->name, dm->hash, dm->namelen); -+ " hash: 0x%x\n" " namelen: %d\n", __func__, dm, dm->name, dm->hash, dm->namelen); - list_del(&dm->list); - 
kfree(dm); - } -@@ -2012,10 +1823,8 @@ static void RemoveDriveMaps(void) - dm = list_entry(list, struct drive_map, list); - - __DbgPrint("%s: dm=0x%p\n" -- " hash: 0x%x\n" -- " namelen: %d\n" -- " name: %s\n", __func__, -- dm, dm->hash, dm->namelen, dm->name); -+ " hash: 0x%x\n" -+ " namelen: %d\n" " name: %s\n", __func__, dm, dm->hash, dm->namelen, dm->name); - local_unlink(dm->name); - list = list->prev; - list_del(&dm->list); -@@ -2044,10 +1853,10 @@ static long local_unlink(const char *pat - goto exit1; - mutex_lock(&nd.path.dentry->d_inode->i_mutex); - /* Get the filename of pathname */ -- name=c=(char *)pathname; -- while (*c!='\0') { -- if (*c=='/') -- name=++c; -+ name = c = (char *)pathname; -+ while (*c != '\0') { -+ if (*c == '/') -+ name = ++c; - c++; - } - dentry = lookup_one_len(name, nd.path.dentry, strlen(name)); -@@ -2057,7 +1866,7 @@ static long local_unlink(const char *pat - DbgPrint("dentry %p", dentry); - if (!(dentry->d_inode->i_mode & S_IFLNK)) { - DbgPrint("%s not a link", name); -- error=-ENOENT; -+ error = -ENOENT; - goto exit1; - } - /* Why not before? Because we want correct error value */ -@@ -2072,7 +1881,7 @@ static long local_unlink(const char *pat - goto exit2; - error = vfs_unlink(nd.path.dentry->d_inode, dentry); - mnt_drop_write(nd.path.mnt); -- exit2: -+exit2: - dput(dentry); - } - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); -@@ -2084,8 +1893,6 @@ exit1: - return error; - - slashes: -- error = !dentry->d_inode ? -ENOENT : -- S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR; -+ error = !dentry->d_inode ? -ENOENT : S_ISDIR(dentry->d_inode->i_mode) ? 
-EISDIR : -ENOTDIR; - goto exit2; - } -- ---- a/fs/novfs/file.c -+++ b/fs/novfs/file.c -@@ -27,11 +27,11 @@ - #include "commands.h" - #include "nwerror.h" - --static ssize_t novfs_tree_read(struct file * file, char *buf, size_t len, loff_t * off); -+static ssize_t novfs_tree_read(struct file *file, char *buf, size_t len, loff_t * off); - extern struct dentry_operations novfs_dentry_operations; - - static struct file_operations novfs_tree_operations = { -- read:novfs_tree_read, -+read: novfs_tree_read, - }; - - /* -@@ -44,7 +44,7 @@ static int StripTrailingDots = 1; - int novfs_get_alltrees(struct dentry *parent) - { - unsigned char *p; -- struct novfs_command_reply_header * reply = NULL; -+ struct novfs_command_reply_header *reply = NULL; - unsigned long replylen = 0; - struct novfs_command_request_header cmd; - int retCode; -@@ -63,8 +63,7 @@ int novfs_get_alltrees(struct dentry *pa - DbgPrint("reply=0x%p replylen=%d", reply, replylen); - if (reply) { - novfs_dump(replylen, reply); -- if (!reply->ErrorCode -- && (replylen > sizeof(struct novfs_command_reply_header))) { -+ if (!reply->ErrorCode && (replylen > sizeof(struct novfs_command_reply_header))) { - p = (char *)reply + 8; - while (*p) { - DbgPrint("%s", p); -@@ -92,7 +91,7 @@ int novfs_get_alltrees(struct dentry *pa - return (retCode); - } - --static ssize_t novfs_tree_read(struct file * file, char *buf, size_t len, loff_t * off) -+static ssize_t novfs_tree_read(struct file *file, char *buf, size_t len, loff_t * off) - { - if (file->f_pos != 0) { - return (0); -@@ -103,7 +102,7 @@ static ssize_t novfs_tree_read(struct fi - return (5); - } - --int novfs_get_servers(unsigned char ** ServerList, struct novfs_schandle SessionId) -+int novfs_get_servers(unsigned char **ServerList, struct novfs_schandle SessionId) - { - struct novfs_get_connected_server_list req; - struct novfs_get_connected_server_list_reply *reply = NULL; -@@ -115,15 +114,13 @@ int novfs_get_servers(unsigned char ** S - req.Command.CommandType = 
VFS_COMMAND_GET_CONNECTED_SERVER_LIST; - req.Command.SessionId = SessionId; - -- retCode = -- Queue_Daemon_Command(&req, sizeof(req), NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&req, sizeof(req), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - DbgPrint("reply"); - replylen -= sizeof(struct novfs_command_reply_header); - if (!reply->Reply.ErrorCode && replylen) { - memcpy(reply, reply->List, replylen); -- *ServerList = (unsigned char *) reply; -+ *ServerList = (unsigned char *)reply; - retCode = 0; - } else { - kfree(reply); -@@ -133,8 +130,7 @@ int novfs_get_servers(unsigned char ** S - return (retCode); - } - --int novfs_get_vols(struct qstr *Server, unsigned char ** VolumeList, -- struct novfs_schandle SessionId) -+int novfs_get_vols(struct qstr *Server, unsigned char **VolumeList, struct novfs_schandle SessionId) - { - struct novfs_get_server_volume_list *req; - struct novfs_get_server_volume_list_reply *reply = NULL; -@@ -151,9 +147,7 @@ int novfs_get_vols(struct qstr *Server, - memcpy(req->Name, Server->name, Server->len); - req->Command.SessionId = SessionId; - -- retCode = -- Queue_Daemon_Command(req, reqlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(req, reqlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - DbgPrint("reply"); - novfs_dump(replylen, reply); -@@ -161,7 +155,7 @@ int novfs_get_vols(struct qstr *Server, - - if (!reply->Reply.ErrorCode && replylen) { - memcpy(reply, reply->List, replylen); -- *VolumeList = (unsigned char *) reply; -+ *VolumeList = (unsigned char *)reply; - retCode = 0; - } else { - kfree(reply); -@@ -172,11 +166,11 @@ int novfs_get_vols(struct qstr *Server, - return (retCode); - } - --int novfs_get_file_info(unsigned char * Path, struct novfs_entry_info * Info, struct novfs_schandle SessionId) -+int novfs_get_file_info(unsigned char *Path, struct novfs_entry_info *Info, struct novfs_schandle 
SessionId) - { - struct novfs_verify_file_reply *reply = NULL; - unsigned long replylen = 0; -- struct novfs_verify_file_request * cmd; -+ struct novfs_verify_file_request *cmd; - int cmdlen; - int retCode = -ENOENT; - int pathlen; -@@ -195,7 +189,7 @@ int novfs_get_file_info(unsigned char * - if ('.' == Path[pathlen - 1]) - pathlen--; - } -- cmdlen = offsetof(struct novfs_verify_file_request,path) + pathlen; -+ cmdlen = offsetof(struct novfs_verify_file_request, path) + pathlen; - cmd = kmalloc(cmdlen, GFP_KERNEL); - if (cmd) { - cmd->Command.CommandType = VFS_COMMAND_VERIFY_FILE; -@@ -204,10 +198,7 @@ int novfs_get_file_info(unsigned char * - cmd->pathLen = pathlen; - memcpy(cmd->path, Path, cmd->pathLen); - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, -- (void *)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - -@@ -217,38 +208,31 @@ int novfs_get_file_info(unsigned char * - Info->type = 3; - Info->mode = S_IRWXU; - -- if (reply-> -- fileMode & NW_ATTRIBUTE_DIRECTORY) { -+ if (reply->fileMode & NW_ATTRIBUTE_DIRECTORY) { - Info->mode |= S_IFDIR; - } else { - Info->mode |= S_IFREG; - } - -- if (reply-> -- fileMode & NW_ATTRIBUTE_READ_ONLY) { -+ if (reply->fileMode & NW_ATTRIBUTE_READ_ONLY) { - Info->mode &= ~(S_IWUSR); - } - - Info->uid = current_euid(); - Info->gid = current_egid(); - Info->size = reply->fileSize; -- Info->atime.tv_sec = -- reply->lastAccessTime; -+ Info->atime.tv_sec = reply->lastAccessTime; - Info->atime.tv_nsec = 0; - Info->mtime.tv_sec = reply->modifyTime; - Info->mtime.tv_nsec = 0; - Info->ctime.tv_sec = reply->createTime; - Info->ctime.tv_nsec = 0; - DbgPrint("replylen=%d sizeof(VERIFY_FILE_REPLY)=%d", -- replylen, -- sizeof(struct novfs_verify_file_reply)); -- if (replylen > -- sizeof(struct novfs_verify_file_reply)) { -- unsigned int *lp = -- &reply->fileMode; -+ replylen, sizeof(struct novfs_verify_file_reply)); -+ if 
(replylen > sizeof(struct novfs_verify_file_reply)) { -+ unsigned int *lp = &reply->fileMode; - lp++; -- DbgPrint("extra data 0x%x", -- *lp); -+ DbgPrint("extra data 0x%x", *lp); - Info->mtime.tv_nsec = *lp; - } - retCode = 0; -@@ -265,8 +249,7 @@ int novfs_get_file_info(unsigned char * - } - - int novfs_getx_file_info(char *Path, const char *Name, char *buffer, -- ssize_t buffer_size, ssize_t * dataLen, -- struct novfs_schandle SessionId) -+ ssize_t buffer_size, ssize_t * dataLen, struct novfs_schandle SessionId) - { - struct novfs_xa_get_reply *reply = NULL; - unsigned long replylen = 0; -@@ -277,14 +260,13 @@ int novfs_getx_file_info(char *Path, con - int namelen = strlen(Name); - int pathlen = strlen(Path); - -- DbgPrint("xattr: Path = %s, pathlen = %i, Name = %s, namelen = %i", -- Path, pathlen, Name, namelen); -+ DbgPrint("xattr: Path = %s, pathlen = %i, Name = %s, namelen = %i", Path, pathlen, Name, namelen); - - if (namelen > MAX_XATTR_NAME_LEN) - return -ENOATTR; - -- cmdlen = offsetof(struct novfs_xa_get_request, data) + pathlen + 1 + namelen + 1; // two '\0' -- cmd = (struct novfs_xa_get_request *) kmalloc(cmdlen, GFP_KERNEL); -+ cmdlen = offsetof(struct novfs_xa_get_request, data)+pathlen + 1 + namelen + 1; // two '\0' -+ cmd = (struct novfs_xa_get_request *)kmalloc(cmdlen, GFP_KERNEL); - if (cmd) { - cmd->Command.CommandType = VFS_COMMAND_GET_EXTENDED_ATTRIBUTE; - cmd->Command.SequenceNumber = 0; -@@ -297,35 +279,27 @@ int novfs_getx_file_info(char *Path, con - memcpy(cmd->data + cmd->pathLen + 1, Name, cmd->nameLen + 1); - - DbgPrint("xattr: PXA_GET_REQUEST BEGIN"); -- DbgPrint("xattr: Queue_Daemon_Command %d", -- cmd->Command.CommandType); -- DbgPrint("xattr: Command.SessionId = %d", -- cmd->Command.SessionId); -+ DbgPrint("xattr: Queue_Daemon_Command %d", cmd->Command.CommandType); -+ DbgPrint("xattr: Command.SessionId = %d", cmd->Command.SessionId); - DbgPrint("xattr: pathLen = %d", cmd->pathLen); - DbgPrint("xattr: Path = %s", cmd->data); - 
DbgPrint("xattr: nameLen = %d", cmd->nameLen); - DbgPrint("xattr: name = %s", (cmd->data + cmd->pathLen + 1)); - DbgPrint("xattr: PXA_GET_REQUEST END"); - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - - if (reply->Reply.ErrorCode) { -- DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", -- reply->Reply.ErrorCode, -- reply->Reply.ErrorCode); -+ DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", reply->Reply.ErrorCode, reply->Reply.ErrorCode); - DbgPrint("xattr: replylen=%d", replylen); - - retCode = -ENOATTR; - } else { - -- *dataLen = -- replylen - sizeof(struct novfs_command_reply_header); -- DbgPrint("xattr: replylen=%u, dataLen=%u", -- replylen, *dataLen); -+ *dataLen = replylen - sizeof(struct novfs_command_reply_header); -+ DbgPrint("xattr: replylen=%u, dataLen=%u", replylen, *dataLen); - - if (buffer_size >= *dataLen) { - DbgPrint("xattr: copying to buffer from &reply->pData"); -@@ -353,8 +327,7 @@ int novfs_getx_file_info(char *Path, con - } - - int novfs_setx_file_info(char *Path, const char *Name, const void *Value, -- unsigned long valueLen, unsigned long *bytesWritten, -- int flags, struct novfs_schandle SessionId) -+ unsigned long valueLen, unsigned long *bytesWritten, int flags, struct novfs_schandle SessionId) - { - struct novfs_xa_set_reply *reply = NULL; - unsigned long replylen = 0; -@@ -371,8 +344,8 @@ int novfs_setx_file_info(char *Path, con - if (namelen > MAX_XATTR_NAME_LEN) - return -ENOATTR; - -- cmdlen = offsetof(struct novfs_xa_set_request, data) + pathlen + 1 + namelen + 1 + valueLen; -- cmd = (struct novfs_xa_set_request *) kmalloc(cmdlen, GFP_KERNEL); -+ cmdlen = offsetof(struct novfs_xa_set_request, data)+pathlen + 1 + namelen + 1 + valueLen; -+ cmd = (struct novfs_xa_set_request *)kmalloc(cmdlen, GFP_KERNEL); - if (cmd) { - cmd->Command.CommandType = 
VFS_COMMAND_SET_EXTENDED_ATTRIBUTE; - cmd->Command.SequenceNumber = 0; -@@ -386,14 +359,11 @@ int novfs_setx_file_info(char *Path, con - memcpy(cmd->data + cmd->pathLen + 1, Name, cmd->nameLen + 1); - - cmd->valueLen = valueLen; -- memcpy(cmd->data + cmd->pathLen + 1 + cmd->nameLen + 1, Value, -- valueLen); -+ memcpy(cmd->data + cmd->pathLen + 1 + cmd->nameLen + 1, Value, valueLen); - - DbgPrint("xattr: PXA_SET_REQUEST BEGIN"); -- DbgPrint("attr: Queue_Daemon_Command %d", -- cmd->Command.CommandType); -- DbgPrint("xattr: Command.SessionId = %d", -- cmd->Command.SessionId); -+ DbgPrint("attr: Queue_Daemon_Command %d", cmd->Command.CommandType); -+ DbgPrint("xattr: Command.SessionId = %d", cmd->Command.SessionId); - DbgPrint("xattr: pathLen = %d", cmd->pathLen); - DbgPrint("xattr: Path = %s", cmd->data); - DbgPrint("xattr: nameLen = %d", cmd->nameLen); -@@ -402,26 +372,20 @@ int novfs_setx_file_info(char *Path, con - - DbgPrint("xattr: PXA_SET_REQUEST END"); - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - - if (reply->Reply.ErrorCode) { -- DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", -- reply->Reply.ErrorCode, -- reply->Reply.ErrorCode); -+ DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", reply->Reply.ErrorCode, reply->Reply.ErrorCode); - DbgPrint("xattr: replylen=%d", replylen); - - retCode = -reply->Reply.ErrorCode; //-ENOENT; - } else { - - DbgPrint("xattr: replylen=%u, real len = %u", -- replylen, -- replylen - sizeof(struct novfs_command_reply_header)); -- memcpy(bytesWritten, &reply->pData, -- replylen - sizeof(struct novfs_command_reply_header)); -+ replylen, replylen - sizeof(struct novfs_command_reply_header)); -+ memcpy(bytesWritten, &reply->pData, replylen - sizeof(struct novfs_command_reply_header)); - - retCode = 0; - } -@@ -437,8 +401,7 @@ int novfs_setx_file_info(char *Path, 
con - return retCode; - } - --int novfs_listx_file_info(char *Path, char *buffer, ssize_t buffer_size, -- ssize_t * dataLen, struct novfs_schandle SessionId) -+int novfs_listx_file_info(char *Path, char *buffer, ssize_t buffer_size, ssize_t * dataLen, struct novfs_schandle SessionId) - { - struct novfs_xa_list_reply *reply = NULL; - unsigned long replylen = 0; -@@ -451,7 +414,7 @@ int novfs_listx_file_info(char *Path, ch - - *dataLen = 0; - cmdlen = offsetof(struct novfs_verify_file_request, path) + pathlen; -- cmd = (struct novfs_verify_file_request *) kmalloc(cmdlen, GFP_KERNEL); -+ cmd = (struct novfs_verify_file_request *)kmalloc(cmdlen, GFP_KERNEL); - if (cmd) { - cmd->Command.CommandType = VFS_COMMAND_LIST_EXTENDED_ATTRIBUTES; - cmd->Command.SequenceNumber = 0; -@@ -459,40 +422,30 @@ int novfs_listx_file_info(char *Path, ch - cmd->pathLen = pathlen; - memcpy(cmd->path, Path, cmd->pathLen + 1); //+ '\0' - DbgPrint("xattr: PVERIFY_FILE_REQUEST BEGIN"); -- DbgPrint("xattr: Queue_Daemon_Command %d", -- cmd->Command.CommandType); -- DbgPrint("xattr: Command.SessionId = %d", -- cmd->Command.SessionId); -+ DbgPrint("xattr: Queue_Daemon_Command %d", cmd->Command.CommandType); -+ DbgPrint("xattr: Command.SessionId = %d", cmd->Command.SessionId); - DbgPrint("xattr: pathLen = %d", cmd->pathLen); - DbgPrint("xattr: Path = %s", cmd->path); - DbgPrint("xattr: PVERIFY_FILE_REQUEST END"); - -- retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, -- (void *)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - - if (reply->Reply.ErrorCode) { -- DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", -- reply->Reply.ErrorCode, -- reply->Reply.ErrorCode); -+ DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", reply->Reply.ErrorCode, reply->Reply.ErrorCode); - DbgPrint("xattr: replylen=%d", replylen); - - retCode = -ENOENT; - } else { -- *dataLen = -- replylen - sizeof(struct 
novfs_command_reply_header); -- DbgPrint("xattr: replylen=%u, dataLen=%u", -- replylen, *dataLen); -+ *dataLen = replylen - sizeof(struct novfs_command_reply_header); -+ DbgPrint("xattr: replylen=%u, dataLen=%u", replylen, *dataLen); - - if (buffer_size >= *dataLen) { -- DbgPrint("xattr: copying to buffer " -- "from &reply->pData"); -+ DbgPrint("xattr: copying to buffer " "from &reply->pData"); - memcpy(buffer, &reply->pData, *dataLen); - } else { -- DbgPrint("xattr: (!!!) buffer is " -- "smaller then reply\n"); -+ DbgPrint("xattr: (!!!) buffer is " "smaller then reply\n"); - retCode = -ERANGE; - } - DbgPrint("xattr: /dumping buffer"); -@@ -513,8 +466,7 @@ int novfs_listx_file_info(char *Path, ch - return retCode; - } - --static int begin_directory_enumerate(unsigned char * Path, int PathLen, void ** EnumHandle, -- struct novfs_schandle SessionId) -+static int begin_directory_enumerate(unsigned char *Path, int PathLen, void **EnumHandle, struct novfs_schandle SessionId) - { - struct novfs_begin_enumerate_directory_request *cmd; - struct novfs_begin_enumerate_directory_reply *reply = NULL; -@@ -524,7 +476,7 @@ static int begin_directory_enumerate(uns - *EnumHandle = 0; - - cmdlen = offsetof(struct -- novfs_begin_enumerate_directory_request, path) + PathLen; -+ novfs_begin_enumerate_directory_request, path) + PathLen; - cmd = kmalloc(cmdlen, GFP_KERNEL); - if (cmd) { - cmd->Command.CommandType = VFS_COMMAND_START_ENUMERATE; -@@ -534,9 +486,7 @@ static int begin_directory_enumerate(uns - cmd->pathLen = PathLen; - memcpy(cmd->path, Path, PathLen); - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - /* - * retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, 0); - */ -@@ -569,9 +519,7 @@ int novfs_end_directory_enumerate(void * - - cmd.enumerateHandle = EnumHandle; - -- retCode = -- 
Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, 0); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, 0); - if (reply) { - retCode = 0; - if (reply->Reply.ErrorCode) { -@@ -583,14 +531,14 @@ int novfs_end_directory_enumerate(void * - return (retCode); - } - --static int directory_enumerate_ex(void ** EnumHandle, struct novfs_schandle SessionId, int *Count, -- struct novfs_entry_info **PInfo, int Interrupt) -+static int directory_enumerate_ex(void **EnumHandle, struct novfs_schandle SessionId, int *Count, -+ struct novfs_entry_info **PInfo, int Interrupt) - { - struct novfs_enumerate_directory_ex_request cmd; - struct novfs_enumerate_directory_ex_reply *reply = NULL; - unsigned long replylen = 0; - int retCode = 0; -- struct novfs_entry_info * info; -+ struct novfs_entry_info *info; - struct novfs_enumerate_directory_ex_data *data; - int isize; - -@@ -606,9 +554,7 @@ static int directory_enumerate_ex(void * - cmd.pathLen = 0; - cmd.path[0] = '\0'; - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, Interrupt); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, Interrupt); - - if (reply) { - retCode = 0; -@@ -617,88 +563,60 @@ static int directory_enumerate_ex(void * - * error but there could still be valid data. 
- */ - -- if (!reply->Reply.ErrorCode || -- ((replylen > sizeof(struct novfs_command_reply_header)) && -- (reply->enumCount > 0))) { -+ if (!reply->Reply.ErrorCode || ((replylen > sizeof(struct novfs_command_reply_header)) && (reply->enumCount > 0))) { - DbgPrint("isize=%d", replylen); - data = -- (struct novfs_enumerate_directory_ex_data *) ((char *)reply + -- sizeof -- (struct novfs_enumerate_directory_ex_reply)); -- isize = -- replylen - sizeof(struct novfs_enumerate_directory_ex_reply *) - -- reply->enumCount * -- offsetof(struct -- novfs_enumerate_directory_ex_data, name); -- isize += -- (reply->enumCount * -- offsetof(struct novfs_entry_info, name)); -+ (struct novfs_enumerate_directory_ex_data *)((char *)reply + -+ sizeof(struct novfs_enumerate_directory_ex_reply)); -+ isize = replylen - sizeof(struct novfs_enumerate_directory_ex_reply *) - reply->enumCount * offsetof(struct -+ novfs_enumerate_directory_ex_data, -+ name); -+ isize += (reply->enumCount * offsetof(struct novfs_entry_info, name)); - - if (PInfo) { - *PInfo = info = kmalloc(isize, GFP_KERNEL); - if (*PInfo) { -- DbgPrint("data=0x%p info=0x%p", -- data, info); -+ DbgPrint("data=0x%p info=0x%p", data, info); - *Count = reply->enumCount; - do { -- DbgPrint("data=0x%p length=%d", -- data); -+ DbgPrint("data=0x%p length=%d", data); - - info->type = 3; - info->mode = S_IRWXU; - -- if (data-> -- mode & -- NW_ATTRIBUTE_DIRECTORY) { -+ if (data->mode & NW_ATTRIBUTE_DIRECTORY) { - info->mode |= S_IFDIR; - info->mode |= S_IXUSR; - } else { - info->mode |= S_IFREG; - } - -- if (data-> -- mode & -- NW_ATTRIBUTE_READ_ONLY) { -- info->mode &= -- ~(S_IWUSR); -+ if (data->mode & NW_ATTRIBUTE_READ_ONLY) { -+ info->mode &= ~(S_IWUSR); - } - -- if (data-> -- mode & NW_ATTRIBUTE_EXECUTE) -- { -+ if (data->mode & NW_ATTRIBUTE_EXECUTE) { - info->mode |= S_IXUSR; - } - - info->uid = current_euid(); - info->gid = current_egid(); - info->size = data->size; -- info->atime.tv_sec = -- data->lastAccessTime; -+ 
info->atime.tv_sec = data->lastAccessTime; - info->atime.tv_nsec = 0; -- info->mtime.tv_sec = -- data->modifyTime; -+ info->mtime.tv_sec = data->modifyTime; - info->mtime.tv_nsec = 0; -- info->ctime.tv_sec = -- data->createTime; -+ info->ctime.tv_sec = data->createTime; - info->ctime.tv_nsec = 0; -- info->namelength = -- data->nameLen; -- memcpy(info->name, data->name, -- data->nameLen); -- data = -- (struct novfs_enumerate_directory_ex_data *) -- & data->name[data->nameLen]; -- replylen = -- (int)((char *)&info-> -- name[info-> -- namelength] - -- (char *)info); -+ info->namelength = data->nameLen; -+ memcpy(info->name, data->name, data->nameLen); -+ data = (struct novfs_enumerate_directory_ex_data *) -+ &data->name[data->nameLen]; -+ replylen = (int)((char *)&info->name[info->namelength] - (char *)info); - DbgPrint("info=0x%p", info); - novfs_dump(replylen, info); - -- info = -- (struct novfs_entry_info *) & info-> -- name[info->namelength]; -+ info = (struct novfs_entry_info *)&info->name[info->namelength]; - - } while (--reply->enumCount); - } -@@ -717,9 +635,8 @@ static int directory_enumerate_ex(void * - return (retCode); - } - --int novfs_get_dir_listex(unsigned char * Path, void ** EnumHandle, int *Count, -- struct novfs_entry_info **Info, -- struct novfs_schandle SessionId) -+int novfs_get_dir_listex(unsigned char *Path, void **EnumHandle, int *Count, -+ struct novfs_entry_info **Info, struct novfs_schandle SessionId) - { - int retCode = -ENOENT; - -@@ -728,20 +645,16 @@ int novfs_get_dir_listex(unsigned char * - if (Info) - *Info = NULL; - -- if ((void *) - 1 == *EnumHandle) { -+ if ((void *)-1 == *EnumHandle) { - return (-ENODATA); - } - - if (0 == *EnumHandle) { -- retCode = -- begin_directory_enumerate(Path, strlen(Path), EnumHandle, -- SessionId); -+ retCode = begin_directory_enumerate(Path, strlen(Path), EnumHandle, SessionId); - } - - if (*EnumHandle) { -- retCode = -- directory_enumerate_ex(EnumHandle, SessionId, Count, Info, -- INTERRUPTIBLE); -+ 
retCode = directory_enumerate_ex(EnumHandle, SessionId, Count, Info, INTERRUPTIBLE); - if (retCode) { - novfs_end_directory_enumerate(*EnumHandle, SessionId); - retCode = 0; -@@ -751,9 +664,7 @@ int novfs_get_dir_listex(unsigned char * - return (retCode); - } - --int novfs_open_file(unsigned char * Path, int Flags, struct novfs_entry_info * Info, -- void ** Handle, -- struct novfs_schandle SessionId) -+int novfs_open_file(unsigned char *Path, int Flags, struct novfs_entry_info *Info, void **Handle, struct novfs_schandle SessionId) - { - struct novfs_open_file_request *cmd; - struct novfs_open_file_reply *reply; -@@ -817,19 +728,15 @@ int novfs_open_file(unsigned char * Path - cmd->pathLen = pathlen; - memcpy(cmd->path, Path, pathlen); - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - if (reply->Reply.ErrorCode) { - if (NWE_OBJECT_EXISTS == reply->Reply.ErrorCode) { - retCode = -EEXIST; -- } else if (NWE_ACCESS_DENIED == -- reply->Reply.ErrorCode) { -+ } else if (NWE_ACCESS_DENIED == reply->Reply.ErrorCode) { - retCode = -EACCES; -- } else if (NWE_FILE_IN_USE == -- reply->Reply.ErrorCode) { -+ } else if (NWE_FILE_IN_USE == reply->Reply.ErrorCode) { - retCode = -EBUSY; - } else { - retCode = -ENOENT; -@@ -847,7 +754,7 @@ int novfs_open_file(unsigned char * Path - return (retCode); - } - --int novfs_create(unsigned char * Path, int DirectoryFlag, struct novfs_schandle SessionId) -+int novfs_create(unsigned char *Path, int DirectoryFlag, struct novfs_schandle SessionId) - { - struct novfs_create_file_request *cmd; - struct novfs_create_file_reply *reply; -@@ -875,9 +782,7 @@ int novfs_create(unsigned char * Path, i - cmd->pathlength = pathlen; - memcpy(cmd->path, Path, pathlen); - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = 
Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - retCode = 0; -@@ -906,9 +811,7 @@ int novfs_close_file(void *Handle, struc - - cmd.handle = Handle; - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, 0); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, 0); - if (reply) { - retCode = 0; - if (reply->Reply.ErrorCode) { -@@ -919,11 +822,10 @@ int novfs_close_file(void *Handle, struc - return (retCode); - } - --int novfs_read_file(void *Handle, unsigned char * Buffer, size_t * Bytes, -- loff_t * Offset, struct novfs_schandle SessionId) -+int novfs_read_file(void *Handle, unsigned char *Buffer, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId) - { - struct novfs_read_file_request cmd; -- struct novfs_read_file_reply * reply = NULL; -+ struct novfs_read_file_reply *reply = NULL; - unsigned long replylen = 0; - int retCode = 0; - size_t len; -@@ -931,10 +833,9 @@ int novfs_read_file(void *Handle, unsign - len = *Bytes; - *Bytes = 0; - -- if (offsetof(struct novfs_read_file_reply, data) + len -- > novfs_max_iosize) { -+ if (offsetof(struct novfs_read_file_reply, data) + len > novfs_max_iosize) { - len = novfs_max_iosize - offsetof(struct -- novfs_read_file_reply, data); -+ novfs_read_file_reply, data); - len = (len / PAGE_SIZE) * PAGE_SIZE; - } - -@@ -946,9 +847,7 @@ int novfs_read_file(void *Handle, unsign - cmd.len = len; - cmd.offset = *Offset; - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - - DbgPrint("Queue_Daemon_Command 0x%x replylen=%d", retCode, replylen); - -@@ -961,11 +860,10 @@ int novfs_read_file(void *Handle, unsign - } - } else { - replylen -= offsetof(struct -- novfs_read_file_reply, data); -+ novfs_read_file_reply, data); - - if 
(replylen > 0) { -- replylen -= -- copy_to_user(Buffer, reply->data, replylen); -+ replylen -= copy_to_user(Buffer, reply->data, replylen); - *Bytes = replylen; - } - } -@@ -981,11 +879,10 @@ int novfs_read_file(void *Handle, unsign - } - - int novfs_read_pages(void *Handle, struct novfs_data_list *DList, -- int DList_Cnt, size_t * Bytes, loff_t * Offset, -- struct novfs_schandle SessionId) -+ int DList_Cnt, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId) - { - struct novfs_read_file_request cmd; -- struct novfs_read_file_reply * reply = NULL; -+ struct novfs_read_file_reply *reply = NULL; - struct novfs_read_file_reply lreply; - unsigned long replylen = 0; - int retCode = 0; -@@ -995,8 +892,7 @@ int novfs_read_pages(void *Handle, struc - *Bytes = 0; - - DbgPrint("Handle=0x%p Dlst=0x%p Dlcnt=%d Bytes=%d Offset=%lld " -- "SessionId=0x%p:%p", Handle, DList, DList_Cnt, len, *Offset, -- SessionId.hTypeId, SessionId.hId); -+ "SessionId=0x%p:%p", Handle, DList, DList_Cnt, len, *Offset, SessionId.hTypeId, SessionId.hId); - - cmd.Command.CommandType = VFS_COMMAND_READ_FILE; - cmd.Command.SequenceNumber = 0; -@@ -1014,9 +910,7 @@ int novfs_read_pages(void *Handle, struc - DList[0].len = offsetof(struct novfs_read_file_reply, data); - DList[0].rwflag = DLWRITE; - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), DList, DList_Cnt, -- (void *)&reply, &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), DList, DList_Cnt, (void *)&reply, &replylen, INTERRUPTIBLE); - - DbgPrint("Queue_Daemon_Command 0x%x", retCode); - -@@ -1033,7 +927,7 @@ int novfs_read_pages(void *Handle, struc - } - } - *Bytes = replylen - offsetof(struct -- novfs_read_file_reply, data); -+ novfs_read_file_reply, data); - } - - if (reply) { -@@ -1045,8 +939,7 @@ int novfs_read_pages(void *Handle, struc - return (retCode); - } - --int novfs_write_file(void *Handle, unsigned char * Buffer, size_t * Bytes, -- loff_t * Offset, struct novfs_schandle SessionId) 
-+int novfs_write_file(void *Handle, unsigned char *Buffer, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId) - { - struct novfs_write_file_request cmd; - struct novfs_write_file_reply *reply = NULL; -@@ -1082,9 +975,7 @@ int novfs_write_file(void *Handle, unsig - - DbgPrint("cmdlen=%ld len=%ld", cmdlen, len); - -- npage = -- (((unsigned long)Buffer & ~PAGE_MASK) + len + -- (PAGE_SIZE - 1)) >> PAGE_SHIFT; -+ npage = (((unsigned long)Buffer & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; - - dlist = kmalloc(sizeof(struct novfs_data_list) * (npage + 1), GFP_KERNEL); - if (NULL == dlist) { -@@ -1121,8 +1012,7 @@ int novfs_write_file(void *Handle, unsig - dlist[0].len = len; - } - -- DbgPrint("page=0x%p offset=0x%p len=%d", -- dlist[0].page, dlist[0].offset, dlist[0].len); -+ DbgPrint("page=0x%p offset=0x%p len=%d", dlist[0].page, dlist[0].offset, dlist[0].len); - - boff = dlist[0].len; - -@@ -1140,8 +1030,7 @@ int novfs_write_file(void *Handle, unsig - dlist[i].rwflag = DLREAD; - - boff += dlist[i].len; -- DbgPrint("%d: page=0x%p offset=0x%p len=%d", i, -- dlist[i].page, dlist[i].offset, dlist[i].len); -+ DbgPrint("%d: page=0x%p offset=0x%p len=%d", i, dlist[i].page, dlist[i].offset, dlist[i].len); - } - - dlist[i].page = NULL; -@@ -1152,10 +1041,7 @@ int novfs_write_file(void *Handle, unsig - - DbgPrint("Buffer=0x%p boff=0x%x len=%d", Buffer, boff, len); - -- retCode = -- Queue_Daemon_Command(&cmd, cmdlen, dlist, res, -- (void *)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, cmdlen, dlist, res, (void *)&reply, &replylen, INTERRUPTIBLE); - - } else { - char *kdata; -@@ -1175,10 +1061,7 @@ int novfs_write_file(void *Handle, unsig - dlist[1].len = sizeof(lreply); - dlist[1].rwflag = DLWRITE; - -- retCode = -- Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, -- (void *)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, (void *)&reply, &replylen, INTERRUPTIBLE); - - kfree(kdata); 
- } -@@ -1218,8 +1101,7 @@ int novfs_write_file(void *Handle, unsig - kfree(pages); - kfree(dlist); - -- DbgPrint("*Bytes=0x%x retCode=0x%x", *Bytes, -- retCode); -+ DbgPrint("*Bytes=0x%x retCode=0x%x", *Bytes, retCode); - - return (retCode); - } -@@ -1245,8 +1127,7 @@ int novfs_write_page(void *Handle, struc - int retCode = 0, cmdlen; - struct novfs_data_list dlst[2]; - -- DbgPrint("Handle=0x%p Page=0x%p Index=%lu SessionId=0x%llx", -- Handle, Page, Page->index, SessionId); -+ DbgPrint("Handle=0x%p Page=0x%p Index=%lu SessionId=0x%llx", Handle, Page, Page->index, SessionId); - - dlst[0].page = NULL; - dlst[0].offset = &lreply; -@@ -1268,9 +1149,7 @@ int novfs_write_page(void *Handle, struc - cmd.len = PAGE_CACHE_SIZE; - cmd.offset = (loff_t) Page->index << PAGE_CACHE_SHIFT;; - -- retCode = -- Queue_Daemon_Command(&cmd, cmdlen, &dlst, 2, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, cmdlen, &dlst, 2, (void *)&reply, &replylen, INTERRUPTIBLE); - if (!retCode) { - if (reply) { - memcpy(&lreply, reply, sizeof(lreply)); -@@ -1314,8 +1193,7 @@ int novfs_write_pages(void *Handle, stru - size_t len; - - DbgPrint("Handle=0x%p Dlst=0x%p Dlcnt=%d Bytes=%d Offset=%lld " -- "SessionId=0x%llx\n", Handle, DList, DList_Cnt, Bytes, -- Offset, SessionId); -+ "SessionId=0x%llx\n", Handle, DList, DList_Cnt, Bytes, Offset, SessionId); - - DList[0].page = NULL; - DList[0].offset = &lreply; -@@ -1334,10 +1212,7 @@ int novfs_write_pages(void *Handle, stru - cmd.len = len; - cmd.offset = Offset; - -- retCode = -- Queue_Daemon_Command(&cmd, cmdlen, DList, DList_Cnt, -- (void *)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, cmdlen, DList, DList_Cnt, (void *)&reply, &replylen, INTERRUPTIBLE); - if (!retCode) { - if (reply) { - memcpy(&lreply, reply, sizeof(lreply)); -@@ -1369,9 +1244,8 @@ int novfs_write_pages(void *Handle, stru - return (retCode); - } - --int novfs_read_stream(void *ConnHandle, unsigned char * Handle, 
u_char * Buffer, -- size_t * Bytes, loff_t * Offset, int User, -- struct novfs_schandle SessionId) -+int novfs_read_stream(void *ConnHandle, unsigned char *Handle, u_char * Buffer, -+ size_t * Bytes, loff_t * Offset, int User, struct novfs_schandle SessionId) - { - struct novfs_read_stream_request cmd; - struct novfs_read_stream_reply *reply = NULL; -@@ -1382,10 +1256,9 @@ int novfs_read_stream(void *ConnHandle, - len = *Bytes; - *Bytes = 0; - -- if (offsetof(struct novfs_read_file_reply, data) + len -- > novfs_max_iosize) { -+ if (offsetof(struct novfs_read_file_reply, data) + len > novfs_max_iosize) { - len = novfs_max_iosize - offsetof(struct -- novfs_read_file_reply, data); -+ novfs_read_file_reply, data); - len = (len / PAGE_SIZE) * PAGE_SIZE; - } - -@@ -1398,9 +1271,7 @@ int novfs_read_stream(void *ConnHandle, - cmd.len = len; - cmd.offset = *Offset; - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - - DbgPrint("Queue_Daemon_Command 0x%x replylen=%d", retCode, replylen); - -@@ -1410,12 +1281,10 @@ int novfs_read_stream(void *ConnHandle, - retCode = -EIO; - } else { - replylen -= offsetof(struct -- novfs_read_stream_reply, data); -+ novfs_read_stream_reply, data); - if (replylen > 0) { - if (User) { -- replylen -= -- copy_to_user(Buffer, reply->data, -- replylen); -+ replylen -= copy_to_user(Buffer, reply->data, replylen); - } else { - memcpy(Buffer, reply->data, replylen); - } -@@ -1431,11 +1300,11 @@ int novfs_read_stream(void *ConnHandle, - return (retCode); - } - --int novfs_write_stream(void *ConnHandle, unsigned char * Handle, u_char * Buffer, -+int novfs_write_stream(void *ConnHandle, unsigned char *Handle, u_char * Buffer, - size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId) - { -- struct novfs_write_stream_request * cmd; -- struct novfs_write_stream_reply * reply = NULL; 
-+ struct novfs_write_stream_request *cmd; -+ struct novfs_write_stream_reply *reply = NULL; - unsigned long replylen = 0; - int retCode = 0, cmdlen; - size_t len; -@@ -1449,7 +1318,7 @@ int novfs_write_stream(void *ConnHandle, - if (cmdlen > novfs_max_iosize) { - cmdlen = novfs_max_iosize; - len = cmdlen - offsetof(struct -- novfs_write_stream_request, data); -+ novfs_write_stream_request, data); - } - - DbgPrint("cmdlen=%d len=%d", cmdlen, len); -@@ -1472,9 +1341,7 @@ int novfs_write_stream(void *ConnHandle, - cmd->len = len; - cmd->offset = *Offset; - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - switch (reply->Reply.ErrorCode) { - case 0: -@@ -1493,8 +1360,7 @@ int novfs_write_stream(void *ConnHandle, - retCode = -EIO; - break; - } -- DbgPrint("reply->bytesWritten=0x%lx", -- reply->bytesWritten); -+ DbgPrint("reply->bytesWritten=0x%lx", reply->bytesWritten); - *Bytes = reply->bytesWritten; - kfree(reply); - } -@@ -1505,7 +1371,7 @@ int novfs_write_stream(void *ConnHandle, - return (retCode); - } - --int novfs_close_stream(void *ConnHandle, unsigned char * Handle, struct novfs_schandle SessionId) -+int novfs_close_stream(void *ConnHandle, unsigned char *Handle, struct novfs_schandle SessionId) - { - struct novfs_close_stream_request cmd; - struct novfs_close_stream_reply *reply; -@@ -1519,9 +1385,7 @@ int novfs_close_stream(void *ConnHandle, - cmd.connection = ConnHandle; - memcpy(cmd.handle, Handle, sizeof(cmd.handle)); - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, 0); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, 0); - if (reply) { - retCode = 0; - if (reply->Reply.ErrorCode) { -@@ -1532,7 +1396,7 @@ int novfs_close_stream(void *ConnHandle, - return (retCode); - } - --int novfs_delete(unsigned 
char * Path, int DirectoryFlag, struct novfs_schandle SessionId) -+int novfs_delete(unsigned char *Path, int DirectoryFlag, struct novfs_schandle SessionId) - { - struct novfs_delete_file_request *cmd; - struct novfs_delete_file_reply *reply; -@@ -1557,9 +1421,7 @@ int novfs_delete(unsigned char * Path, i - cmd->pathlength = pathlen; - memcpy(cmd->path, Path, pathlen); - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = 0; - if (reply->Reply.ErrorCode) { -@@ -1571,7 +1433,7 @@ int novfs_delete(unsigned char * Path, i - retCode = -EACCES; - else if ((reply->Reply.ErrorCode & 0xFFFF) == 0x0513) - retCode = -ENOTEMPTY; -- else -+ else - retCode = -EIO; - } - kfree(reply); -@@ -1583,8 +1445,7 @@ int novfs_delete(unsigned char * Path, i - return (retCode); - } - --int novfs_trunc(unsigned char * Path, int PathLen, -- struct novfs_schandle SessionId) -+int novfs_trunc(unsigned char *Path, int PathLen, struct novfs_schandle SessionId) - { - struct novfs_truncate_file_request *cmd; - struct novfs_truncate_file_reply *reply = NULL; -@@ -1596,7 +1457,7 @@ int novfs_trunc(unsigned char * Path, in - PathLen--; - } - cmdlen = offsetof(struct novfs_truncate_file_request, path) -- + PathLen; -+ + PathLen; - cmd = kmalloc(cmdlen, GFP_KERNEL); - if (cmd) { - cmd->Command.CommandType = VFS_COMMAND_TRUNCATE_FILE; -@@ -1606,9 +1467,7 @@ int novfs_trunc(unsigned char * Path, in - cmd->pathLen = PathLen; - memcpy(cmd->path, Path, PathLen); - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - if (reply->Reply.ErrorCode) { - retCode = -EIO; -@@ -1622,8 +1481,7 @@ int novfs_trunc(unsigned char * Path, in - return (retCode); - } - --int 
novfs_trunc_ex(void *Handle, loff_t Offset, -- struct novfs_schandle SessionId) -+int novfs_trunc_ex(void *Handle, loff_t Offset, struct novfs_schandle SessionId) - { - struct novfs_write_file_request cmd; - struct novfs_write_file_reply *reply = NULL; -@@ -1641,9 +1499,7 @@ int novfs_trunc_ex(void *Handle, loff_t - cmd.len = 0; - cmd.offset = Offset; - -- retCode = -- Queue_Daemon_Command(&cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - - DbgPrint("retCode=0x%x reply=0x%p", retCode, reply); - -@@ -1680,9 +1536,8 @@ int novfs_trunc_ex(void *Handle, loff_t - return (retCode); - } - --int novfs_rename_file(int DirectoryFlag, unsigned char * OldName, int OldLen, -- unsigned char * NewName, int NewLen, -- struct novfs_schandle SessionId) -+int novfs_rename_file(int DirectoryFlag, unsigned char *OldName, int OldLen, -+ unsigned char *NewName, int NewLen, struct novfs_schandle SessionId) - { - struct novfs_rename_file_request cmd; - struct novfs_rename_file_reply *reply; -@@ -1690,11 +1545,10 @@ int novfs_rename_file(int DirectoryFlag, - int retCode; - - __DbgPrint("%s:\n" -- " DirectoryFlag: %d\n" -- " OldName: %.*s\n" -- " NewName: %.*s\n" -- " SessionId: 0x%llx\n", __func__, -- DirectoryFlag, OldLen, OldName, NewLen, NewName, SessionId); -+ " DirectoryFlag: %d\n" -+ " OldName: %.*s\n" -+ " NewName: %.*s\n" -+ " SessionId: 0x%llx\n", __func__, DirectoryFlag, OldLen, OldName, NewLen, NewName, SessionId); - - cmd.Command.CommandType = VFS_COMMAND_RENAME_FILE; - cmd.Command.SequenceNumber = 0; -@@ -1715,9 +1569,7 @@ int novfs_rename_file(int DirectoryFlag, - cmd.oldnameLen = OldLen; - memcpy(cmd.oldname, OldName, OldLen); - -- retCode = -- Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - 
retCode = 0; - if (reply->Reply.ErrorCode) { -@@ -1728,8 +1580,7 @@ int novfs_rename_file(int DirectoryFlag, - return (retCode); - } - --int novfs_set_attr(unsigned char * Path, struct iattr *Attr, -- struct novfs_schandle SessionId) -+int novfs_set_attr(unsigned char *Path, struct iattr *Attr, struct novfs_schandle SessionId) - { - struct novfs_set_file_info_request *cmd; - struct novfs_set_file_info_reply *reply; -@@ -1743,7 +1594,7 @@ int novfs_set_attr(unsigned char * Path, - pathlen--; - } - -- cmdlen = offsetof(struct novfs_set_file_info_request,path) + pathlen; -+ cmdlen = offsetof(struct novfs_set_file_info_request, path) + pathlen; - cmd = kmalloc(cmdlen, GFP_KERNEL); - if (cmd) { - cmd->Command.CommandType = VFS_COMMAND_SET_FILE_INFO; -@@ -1765,9 +1616,7 @@ int novfs_set_attr(unsigned char * Path, - cmd->pathlength = pathlen; - memcpy(cmd->path, Path, pathlen); - -- retCode = -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -- &replylen, INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - switch (reply->Reply.ErrorCode) { - case 0: -@@ -1795,8 +1644,7 @@ int novfs_set_attr(unsigned char * Path, - return (retCode); - } - --int novfs_get_file_cache_flag(unsigned char * Path, -- struct novfs_schandle SessionId) -+int novfs_get_file_cache_flag(unsigned char *Path, struct novfs_schandle SessionId) - { - struct novfs_get_cache_flag *cmd; - struct novfs_get_cache_flag_reply *reply = NULL; -@@ -1813,10 +1661,9 @@ int novfs_get_file_cache_flag(unsigned c - if ('.' 
== Path[pathlen - 1]) - pathlen--; - } -- cmdlen = offsetof(struct novfs_get_cache_flag, path) + -- pathlen; -+ cmdlen = offsetof(struct novfs_get_cache_flag, path) + pathlen; - cmd = (struct novfs_get_cache_flag *) -- kmalloc(cmdlen, GFP_KERNEL); -+ kmalloc(cmdlen, GFP_KERNEL); - if (cmd) { - cmd->Command.CommandType = VFS_COMMAND_GET_CACHE_FLAG; - cmd->Command.SequenceNumber = 0; -@@ -1824,9 +1671,7 @@ int novfs_get_file_cache_flag(unsigned c - cmd->pathLen = pathlen; - memcpy(cmd->path, Path, cmd->pathLen); - -- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, -- (void *)&reply, &replylen, -- INTERRUPTIBLE); -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - -@@ -1851,8 +1696,7 @@ int novfs_get_file_cache_flag(unsigned c - * - * Notes: lock type - fcntl - */ --int novfs_set_file_lock(struct novfs_schandle SessionId, void *Handle, -- unsigned char fl_type, loff_t fl_start, loff_t fl_len) -+int novfs_set_file_lock(struct novfs_schandle SessionId, void *Handle, unsigned char fl_type, loff_t fl_start, loff_t fl_len) - { - struct novfs_set_file_lock_request *cmd; - struct novfs_set_file_lock_reply *reply = NULL; -@@ -1863,8 +1707,7 @@ int novfs_set_file_lock(struct novfs_sch - - DbgPrint("SessionId: 0x%llx\n", SessionId); - -- cmd = -- (struct novfs_set_file_lock_request *) kmalloc(sizeof(struct novfs_set_file_lock_request), GFP_KERNEL); -+ cmd = (struct novfs_set_file_lock_request *)kmalloc(sizeof(struct novfs_set_file_lock_request), GFP_KERNEL); - - if (cmd) { - DbgPrint("2"); -@@ -1887,20 +1730,17 @@ int novfs_set_file_lock(struct novfs_sch - DbgPrint("3"); - - DbgPrint("BEGIN dump arguments"); -- DbgPrint("Queue_Daemon_Command %d", -- cmd->Command.CommandType); -+ DbgPrint("Queue_Daemon_Command %d", cmd->Command.CommandType); - DbgPrint("cmd->handle = 0x%p", cmd->handle); - DbgPrint("cmd->fl_type = %u", cmd->fl_type); - DbgPrint("cmd->fl_start = 0x%X", cmd->fl_start); - DbgPrint("cmd->fl_len = 0x%X", 
cmd->fl_len); -- DbgPrint("sizeof(SET_FILE_LOCK_REQUEST) = %u", -- sizeof(struct novfs_set_file_lock_request)); -+ DbgPrint("sizeof(SET_FILE_LOCK_REQUEST) = %u", sizeof(struct novfs_set_file_lock_request)); - DbgPrint("END dump arguments"); - - retCode = - Queue_Daemon_Command(cmd, sizeof(struct novfs_set_file_lock_request), -- NULL, 0, (void *)&reply, &replylen, -- INTERRUPTIBLE); -+ NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); - DbgPrint("4"); - - if (reply) { ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -36,7 +36,6 @@ - /*===[ Include files specific to this module ]============================*/ - #include "vfs.h" - -- - struct inode_data { - void *Scope; - unsigned long Flags; -@@ -45,7 +44,7 @@ struct inode_data { - unsigned long cntDC; - struct list_head DirCache; - struct semaphore DirCacheLock; -- void * FileHandle; -+ void *FileHandle; - int CacheFlag; - char Name[1]; /* Needs to be last entry */ - }; -@@ -57,12 +56,10 @@ struct inode_data { - static unsigned long novfs_internal_hash(struct qstr *name); - static int novfs_d_add(struct dentry *p, struct dentry *d, struct inode *i, int add); - --static int novfs_get_sb(struct file_system_type *Fstype, int Flags, -- const char *Dev_name, void *Data, struct vfsmount *Mnt); -+static int novfs_get_sb(struct file_system_type *Fstype, int Flags, const char *Dev_name, void *Data, struct vfsmount *Mnt); - - static void novfs_kill_sb(struct super_block *SB); - -- - /* - * Declared dentry_operations - */ -@@ -81,8 +78,7 @@ int novfs_dir_release(struct inode *inod - loff_t novfs_dir_lseek(struct file *file, loff_t offset, int origin); - ssize_t novfs_dir_read(struct file *file, char *buf, size_t len, loff_t * off); - void addtodentry(struct dentry *Parent, unsigned char *List, int Level); --int novfs_filldir(void *data, const char *name, int namelen, loff_t off, -- ino_t ino, unsigned ftype); -+int novfs_filldir(void *data, const char *name, int namelen, loff_t off, ino_t ino, unsigned ftype); - int 
novfs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir); - int novfs_dir_fsync(struct file *file, int datasync); - -@@ -90,19 +86,14 @@ int novfs_dir_fsync(struct file *file, i - * Declared address space operations - */ - int novfs_a_writepage(struct page *page, struct writeback_control *wbc); --int novfs_a_writepages(struct address_space *mapping, -- struct writeback_control *wbc); -+int novfs_a_writepages(struct address_space *mapping, struct writeback_control *wbc); - int novfs_a_write_begin(struct file *file, struct address_space *mapping, -- loff_t pos, unsigned len, unsigned flags, -- struct page **pagep, void **fsdata); -+ loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); - int novfs_a_write_end(struct file *file, struct address_space *mapping, -- loff_t pos, unsigned len, unsigned copied, -- struct page *pagep, void *fsdata); -+ loff_t pos, unsigned len, unsigned copied, struct page *pagep, void *fsdata); - int novfs_a_readpage(struct file *file, struct page *page); --int novfs_a_readpages(struct file *file, struct address_space *mapping, -- struct list_head *page_lst, unsigned nr_pages); --ssize_t novfs_a_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov, -- loff_t offset, unsigned long nr_segs); -+int novfs_a_readpages(struct file *file, struct address_space *mapping, struct list_head *page_lst, unsigned nr_pages); -+ssize_t novfs_a_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs); - - /* - * Declared file_operations -@@ -122,14 +113,12 @@ int novfs_f_lock(struct file *, int, str - * Declared inode_operations - */ - int novfs_i_create(struct inode *, struct dentry *, int, struct nameidata *); --struct dentry *novfs_i_lookup(struct inode *, struct dentry *, -- struct nameidata *); -+struct dentry *novfs_i_lookup(struct inode *, struct dentry *, struct nameidata *); - int novfs_i_mkdir(struct inode *, struct dentry *, int); - int 
novfs_i_unlink(struct inode *dir, struct dentry *dentry); - int novfs_i_rmdir(struct inode *, struct dentry *); - int novfs_i_mknod(struct inode *, struct dentry *, int, dev_t); --int novfs_i_rename(struct inode *, struct dentry *, struct inode *, -- struct dentry *); -+int novfs_i_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); - int novfs_i_setattr(struct dentry *, struct iattr *); - int novfs_i_getattr(struct vfsmount *mnt, struct dentry *, struct kstat *); - int novfs_i_revalidate(struct dentry *dentry); -@@ -138,10 +127,8 @@ int novfs_i_revalidate(struct dentry *de - * Extended attributes operations - */ - --ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer, -- size_t size); --int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, -- size_t value_size, int flags); -+ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); -+int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, size_t value_size, int flags); - ssize_t novfs_i_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); - - void update_inode(struct inode *Inode, struct novfs_entry_info *Info); -@@ -160,38 +147,27 @@ int novfs_statfs(struct dentry *de, stru - /* - * Declared control interface functions - */ --ssize_t --novfs_control_Read(struct file *file, char *buf, size_t nbytes, loff_t * ppos); -+ssize_t novfs_control_Read(struct file *file, char *buf, size_t nbytes, loff_t * ppos); - --ssize_t --novfs_control_write(struct file *file, const char *buf, size_t nbytes, -- loff_t * ppos); -+ssize_t novfs_control_write(struct file *file, const char *buf, size_t nbytes, loff_t * ppos); - --int novfs_control_ioctl(struct inode *inode, struct file *file, -- unsigned int cmd, unsigned long arg); -+int novfs_control_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); - - int __init init_novfs(void); - void __exit 
exit_novfs(void); - - int novfs_lock_inode_cache(struct inode *i); - void novfs_unlock_inode_cache(struct inode *i); --int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, -- ino_t * ino, struct novfs_entry_info *info); --int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, -- struct novfs_entry_info *info); --int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, -- struct novfs_entry_info *info); --int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, -- struct novfs_entry_info *info, u64 * EntryTime); -+int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, ino_t * ino, struct novfs_entry_info *info); -+int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info); -+int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, struct novfs_entry_info *info); -+int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info, u64 * EntryTime); - int novfs_get_remove_entry(struct inode *i, ino_t * ino, struct novfs_entry_info *info); - void novfs_invalidate_inode_cache(struct inode *i); --struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, -- ino_t ino); -+struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, ino_t ino); - int novfs_lookup_validate(struct inode *i, struct qstr *name, ino_t ino); --int novfs_add_inode_entry(struct inode *i, struct qstr *name, ino_t ino, -- struct novfs_entry_info *info); --int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, -- struct novfs_entry_info *info); -+int novfs_add_inode_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info); -+int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info); - void novfs_remove_inode_entry(struct inode *i, struct qstr *name, ino_t ino); - void 
novfs_free_invalid_entries(struct inode *i); - void novfs_free_inode_cache(struct inode *i); -@@ -294,7 +270,6 @@ static struct file_operations novfs_Cont - - static atomic_t novfs_Inode_Number = ATOMIC_INIT(0); - -- - struct dentry *novfs_root = NULL; - char *novfs_current_mnt = NULL; - -@@ -325,18 +300,13 @@ static void PRINT_DENTRY(const char *s, - __DbgPrint(" d_lock: 0x%x\n", d->d_lock); - __DbgPrint(" d_inode: 0x%x\n", d->d_inode); - __DbgPrint(" d_lru: 0x%p\n" -- " next: 0x%p\n" -- " prev: 0x%p\n", &d->d_lru, d->d_lru.next, -- d->d_lru.prev); -+ " next: 0x%p\n" " prev: 0x%p\n", &d->d_lru, d->d_lru.next, d->d_lru.prev); - __DbgPrint(" d_child: 0x%p\n" " next: 0x%p\n" -- " prev: 0x%p\n", &d->d_u.d_child, -- d->d_u.d_child.next, d->d_u.d_child.prev); -+ " prev: 0x%p\n", &d->d_u.d_child, d->d_u.d_child.next, d->d_u.d_child.prev); - __DbgPrint(" d_subdirs: 0x%p\n" " next: 0x%p\n" -- " prev: 0x%p\n", &d->d_subdirs, d->d_subdirs.next, -- d->d_subdirs.prev); -+ " prev: 0x%p\n", &d->d_subdirs, d->d_subdirs.next, d->d_subdirs.prev); - __DbgPrint(" d_alias: 0x%p\n" " next: 0x%p\n" -- " prev: 0x%p\n", &d->d_alias, d->d_alias.next, -- d->d_alias.prev); -+ " prev: 0x%p\n", &d->d_alias, d->d_alias.next, d->d_alias.prev); - __DbgPrint(" d_time: 0x%x\n", d->d_time); - __DbgPrint(" d_op: 0x%p\n", d->d_op); - __DbgPrint(" d_sb: 0x%p\n", d->d_sb); -@@ -345,14 +315,11 @@ static void PRINT_DENTRY(const char *s, - __DbgPrint(" d_fsdata: 0x%p\n", d->d_fsdata); - /* DbgPrint(" d_cookie: 0x%x\n", d->d_cookie); */ - __DbgPrint(" d_parent: 0x%p\n", d->d_parent); -- __DbgPrint(" d_name: 0x%p %.*s\n", &d->d_name, d->d_name.len, -- d->d_name.name); -+ __DbgPrint(" d_name: 0x%p %.*s\n", &d->d_name, d->d_name.len, d->d_name.name); - __DbgPrint(" name: 0x%p\n" " len: %d\n" -- " hash: 0x%x\n", d->d_name.name, d->d_name.len, -- d->d_name.hash); -+ " hash: 0x%x\n", d->d_name.name, d->d_name.len, d->d_name.hash); - __DbgPrint(" d_hash: 0x%x\n" " next: 0x%x\n" -- " pprev: 0x%x\n", d->d_hash, 
d->d_hash.next, -- d->d_hash.pprev); -+ " pprev: 0x%x\n", d->d_hash, d->d_hash.next, d->d_hash.pprev); - } - - /*++======================================================================*/ -@@ -370,8 +337,7 @@ int novfs_remove_from_root(char *RemoveN - dentry = d_lookup(novfs_root, &name); - if (dentry) { - if (dentry->d_inode && dentry->d_inode->i_private) { -- struct inode_data *n_inode = -- dentry->d_inode->i_private; -+ struct inode_data *n_inode = dentry->d_inode->i_private; - n_inode->Scope = NULL; - } - dput(dentry); -@@ -410,7 +376,7 @@ int novfs_add_to_root(char *AddName) - info.size = 0; - info.atime = info.ctime = info.mtime = CURRENT_TIME; - -- ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); -+ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number); - novfs_add_inode_entry(dir, &name, ino, &info); - } - -@@ -446,20 +412,16 @@ int novfs_Add_to_Root2(char *AddName) - /* - * done in novfs_d_add now... entry->d_fsdata = (void *)novfs_internal_hash( &name ); - */ -- inode = -- novfs_get_inode(novfs_root->d_sb, S_IFDIR | 0700, 0, novfs_scope_get_uid(scope), 0, &name); -+ inode = novfs_get_inode(novfs_root->d_sb, S_IFDIR | 0700, 0, novfs_scope_get_uid(scope), 0, &name); - DbgPrint("Inode=0x%p", inode); - if (inode) { -- inode->i_atime = -- inode->i_ctime = -- inode->i_mtime = CURRENT_TIME; -+ inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME; - if (!novfs_d_add(novfs_root, entry, inode, 1)) { - if (inode->i_private) { - struct inode_data *n_inode = inode->i_private; - n_inode->Flags = USER_INODE; - } -- PRINT_DENTRY("After novfs_d_add", -- entry); -+ PRINT_DENTRY("After novfs_d_add", entry); - } else { - dput(entry); - iput(inode); -@@ -524,8 +486,7 @@ int verify_dentry(struct dentry *dentry, - return (0); - } - -- if (dentry && dentry->d_parent && -- (dir = dentry->d_parent->d_inode) && (id = dir->i_private)) { -+ if (dentry && dentry->d_parent && (dir = dentry->d_parent->d_inode) && (id = dir->i_private)) { - parent = dget_parent(dentry); - - 
info = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL); -@@ -538,11 +499,8 @@ int verify_dentry(struct dentry *dentry, - if (!novfs_get_entry_time(dir, &name, &ino, info, &ctime)) { - inode = dentry->d_inode; - if (inode && inode->i_private && -- ((inode->i_size != info->size) || -- (inode->i_mtime.tv_sec != -- info->mtime.tv_sec) -- || (inode->i_mtime.tv_nsec != -- info->mtime.tv_nsec))) { -+ ((inode->i_size != info->size) || (inode->i_mtime.tv_sec != info->mtime.tv_sec) -+ || (inode->i_mtime.tv_nsec != info->mtime.tv_nsec))) { - /* - * Values don't match so update. - */ -@@ -563,8 +521,7 @@ int verify_dentry(struct dentry *dentry, - } - - if (IS_ROOT(dentry->d_parent)) { -- session = novfs_scope_get_sessionId( -- novfs_get_scope_from_name(&dentry->d_name)); -+ session = novfs_scope_get_sessionId(novfs_get_scope_from_name(&dentry->d_name)); - } else - session = novfs_scope_get_sessionId(id->Scope); - -@@ -595,7 +552,7 @@ int verify_dentry(struct dentry *dentry, - info->mode = S_IFDIR | 0700; - info->size = 0; - info->atime = info->ctime = info->mtime = CURRENT_TIME; -- ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); -+ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number); - novfs_add_inode_entry(dir, &name, ino, info); - } - } -@@ -603,75 +560,48 @@ int verify_dentry(struct dentry *dentry, - novfs_free_invalid_entries(dir); - } else { - -- path = -- novfs_dget_path(dentry, info->name, -- PATH_LENGTH_BUFFER); -+ path = novfs_dget_path(dentry, info->name, PATH_LENGTH_BUFFER); - if (path) { -- if (dentry->d_name.len <= -- NW_MAX_PATH_LENGTH) { -- name.hash = -- novfs_internal_hash -- (&dentry->d_name); -+ if (dentry->d_name.len <= NW_MAX_PATH_LENGTH) { -+ name.hash = novfs_internal_hash(&dentry->d_name); - name.len = dentry->d_name.len; - name.name = dentry->d_name.name; - -- retVal = -- novfs_get_file_info(path, -- info, -- session); -+ retVal = novfs_get_file_info(path, info, session); - if (0 == retVal) { -- dentry->d_time = -- 
jiffies + -- (novfs_update_timeout -- * HZ); -- iLock = -- novfs_lock_inode_cache -- (dir); -- if (novfs_update_entry -- (dir, &name, 0, -- info)) { -- if (dentry-> -- d_inode) { -+ dentry->d_time = jiffies + (novfs_update_timeout * HZ); -+ iLock = novfs_lock_inode_cache(dir); -+ if (novfs_update_entry(dir, &name, 0, info)) { -+ if (dentry->d_inode) { - ino = dentry->d_inode->i_ino; - } else { -- ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); -+ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number); - } -- novfs_add_inode_entry -- (dir, &name, -- ino, info); -+ novfs_add_inode_entry(dir, &name, ino, info); - } - if (dentry->d_inode) { -- update_inode -- (dentry-> -- d_inode, -- info); -- id->Flags &= -- ~UPDATE_INODE; -- -- dentry-> -- d_inode-> -- i_flags &= -- ~S_DEAD; -- if (dentry-> -- d_inode-> -- i_private) { -- ((struct inode_data *) dentry->d_inode->i_private)->Scope = id->Scope; -+ update_inode(dentry->d_inode, info); -+ id->Flags &= ~UPDATE_INODE; -+ -+ dentry->d_inode->i_flags &= ~S_DEAD; -+ if (dentry->d_inode->i_private) { -+ ((struct inode_data *)dentry->d_inode->i_private)->Scope = -+ id->Scope; - } - } - } else if (-EINTR != retVal) { - retVal = 0; - iLock = novfs_lock_inode_cache(dir); - novfs_remove_inode_entry(dir, &name, 0); -- if (dentry->d_inode -- && !(dentry->d_inode->i_flags & S_DEAD)) { -+ if (dentry->d_inode && !(dentry->d_inode->i_flags & S_DEAD)) { - dentry->d_inode->i_flags |= S_DEAD; -- dentry->d_inode-> i_size = 0; -+ dentry->d_inode->i_size = 0; - dentry->d_inode->i_atime.tv_sec = -- dentry->d_inode->i_atime.tv_nsec = -- dentry->d_inode->i_ctime.tv_sec = -- dentry->d_inode->i_ctime.tv_nsec = -- dentry->d_inode->i_mtime.tv_sec = -- dentry->d_inode->i_mtime.tv_nsec = 0; -+ dentry->d_inode->i_atime.tv_nsec = -+ dentry->d_inode->i_ctime.tv_sec = -+ dentry->d_inode->i_ctime.tv_nsec = -+ dentry->d_inode->i_mtime.tv_sec = -+ dentry->d_inode->i_mtime.tv_nsec = 0; - dentry->d_inode->i_blocks = 0; - d_delete(dentry); /* Remove from cache 
*/ - } -@@ -700,7 +630,6 @@ int verify_dentry(struct dentry *dentry, - return (retVal); - } - -- - static int novfs_d_add(struct dentry *Parent, struct dentry *d, struct inode *i, int a) - { - void *scope; -@@ -712,14 +641,13 @@ static int novfs_d_add(struct dentry *Pa - if (buf) { - path = novfs_dget_path(d, buf, PATH_LENGTH_BUFFER); - if (path) { -- DbgPrint("inode=0x%p ino=%d path %s", i, -- i->i_ino, path); -+ DbgPrint("inode=0x%p ino=%d path %s", i, i->i_ino, path); - } - kfree(buf); - } - - if (Parent && Parent->d_inode && Parent->d_inode->i_private) { -- id = (struct inode_data *) Parent->d_inode->i_private; -+ id = (struct inode_data *)Parent->d_inode->i_private; - } - - if (id && id->Scope) { -@@ -728,7 +656,7 @@ static int novfs_d_add(struct dentry *Pa - scope = novfs_get_scope(d); - } - -- ((struct inode_data *) i->i_private)->Scope = scope; -+ ((struct inode_data *)i->i_private)->Scope = scope; - - d->d_time = jiffies + (novfs_update_timeout * HZ); - if (a) { -@@ -750,16 +678,12 @@ int novfs_d_revalidate(struct dentry *de - __DbgPrint("%s: 0x%p %.*s\n" - " d_count: %d\n" - " d_inode: 0x%p\n", __func__, -- dentry, dentry->d_name.len, dentry->d_name.name, -- dentry->d_count, dentry->d_inode); -+ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_count, dentry->d_inode); - - if (IS_ROOT(dentry)) { - retCode = 1; - } else { -- if (dentry->d_inode && -- dentry->d_parent && -- (dir = dentry->d_parent->d_inode) && -- (id = dir->i_private)) { -+ if (dentry->d_inode && dentry->d_parent && (dir = dentry->d_parent->d_inode) && (id = dir->i_private)) { - /* - * Check timer to see if in valid time limit - */ -@@ -769,18 +693,13 @@ int novfs_d_revalidate(struct dentry *de - */ - name.len = dentry->d_name.len; - name.name = dentry->d_name.name; -- name.hash = -- novfs_internal_hash(&dentry->d_name); -+ name.hash = novfs_internal_hash(&dentry->d_name); - dentry->d_time = 0; - - if (0 == verify_dentry(dentry, 0)) { - if (novfs_lock_inode_cache(dir)) { -- if 
(novfs_lookup_inode_cache -- (dir, &name, 0)) { -- dentry->d_time = -- jiffies + -- (novfs_update_timeout -- * HZ); -+ if (novfs_lookup_inode_cache(dir, &name, 0)) { -+ dentry->d_time = jiffies + (novfs_update_timeout * HZ); - retCode = 1; - } - novfs_unlock_inode_cache(dir); -@@ -800,8 +719,7 @@ int novfs_d_revalidate(struct dentry *de - */ - } - -- DbgPrint("return 0x%x %.*s", retCode, -- dentry->d_name.len, dentry->d_name.name); -+ DbgPrint("return 0x%x %.*s", retCode, dentry->d_name.len, dentry->d_name.name); - - return (retCode); - } -@@ -837,8 +755,7 @@ int novfs_d_strcmp(struct qstr *s1, stru - unsigned char *str1, *str2; - unsigned int len; - -- DbgPrint("s1=%.*s s2=%.*s", s1->len, s1->name, -- s2->len, s2->name); -+ DbgPrint("s1=%.*s s2=%.*s", s1->len, s1->name, s2->len, s2->name); - - if (s1->len && (s1->len == s2->len) && (s1->hash == s2->hash)) { - len = s1->len; -@@ -873,8 +790,7 @@ int novfs_d_delete(struct dentry *dentry - int retVal = 0; - - DbgPrint("0x%p %.*s; d_count: %d; d_inode: 0x%p", -- dentry, dentry->d_name.len, dentry->d_name.name, -- dentry->d_count, dentry->d_inode); -+ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_count, dentry->d_inode); - - if (dentry->d_inode && (dentry->d_inode->i_flags & S_DEAD)) { - retVal = 1; -@@ -887,15 +803,13 @@ int novfs_d_delete(struct dentry *dentry - - void novfs_d_release(struct dentry *dentry) - { -- DbgPrint("0x%p %.*s", dentry, dentry->d_name.len, -- dentry->d_name.name); -+ DbgPrint("0x%p %.*s", dentry, dentry->d_name.len, dentry->d_name.name); - } - - void novfs_d_iput(struct dentry *dentry, struct inode *inode) - { - DbgPrint("Inode=0x%p Ino=%d Dentry=0x%p i_state=%d Name=%.*s", -- inode, inode->i_ino, dentry, inode->i_state, dentry->d_name.len, -- dentry->d_name.name); -+ inode, inode->i_ino, dentry, inode->i_state, dentry->d_name.len, dentry->d_name.name); - - iput(inode); - -@@ -906,8 +820,7 @@ int novfs_dir_open(struct inode *dir, st - char *path, *buf; - struct file_private 
*file_private = NULL; - -- DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, -- file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, file->f_dentry->d_name.len, file->f_dentry->d_name.name); - - buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); - if (buf) { -@@ -933,8 +846,7 @@ int novfs_dir_release(struct inode *dir, - struct inode *inode = file->f_dentry->d_inode; - struct novfs_schandle sessionId; - -- DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, -- file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, file->f_dentry->d_name.len, file->f_dentry->d_name.name); - - if (file_private) { - if (file_private->enumHandle && (file_private->enumHandle != ((void *)-1))) { -@@ -956,8 +868,7 @@ loff_t novfs_dir_lseek(struct file * fil - { - struct file_private *file_private = NULL; - -- DbgPrint("offset %lld %d Name %.*s", offset, origin, -- file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ DbgPrint("offset %lld %d Name %.*s", offset, origin, file->f_dentry->d_name.len, file->f_dentry->d_name.name); - //printk("<1> seekdir file = %.*s offset = %i\n", file->f_dentry->d_name.len, file->f_dentry->d_name.name, (int)offset); - - if (0 != offset) { -@@ -966,7 +877,7 @@ loff_t novfs_dir_lseek(struct file * fil - - file->f_pos = 0; - -- file_private = (struct file_private *) file->private_data; -+ file_private = (struct file_private *)file->private_data; - file_private->listedall = 0; - if (file_private->enumHandle && (file_private->enumHandle != ((void *)-1))) { - struct novfs_schandle sessionId; -@@ -999,8 +910,7 @@ ssize_t novfs_dir_read(struct file * fil - } - return(rlen); - */ -- DbgPrint("%lld %d Name %.*s", *off, len, -- file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ DbgPrint("%lld %d Name %.*s", *off, len, file->f_dentry->d_name.len, file->f_dentry->d_name.name); - return (generic_read_dir(file, buf, len, off)); - } - -@@ 
-1039,8 +949,7 @@ static void novfs_Dump_Info(struct novfs - DbgPrint("name = %s", namebuf); - } - --void processList(struct file *file, void *dirent, filldir_t filldir, char *list, -- int type, struct novfs_schandle SessionId) -+void processList(struct file *file, void *dirent, filldir_t filldir, char *list, int type, struct novfs_schandle SessionId) - { - unsigned char *path, *buf = NULL, *cp; - struct qstr name; -@@ -1066,20 +975,16 @@ void processList(struct file *file, void - name.hash = novfs_internal_hash(&name); - cp += (name.len + 1); - -- pinfo = -- kmalloc(sizeof(struct novfs_entry_info) + -- PATH_LENGTH_BUFFER, GFP_KERNEL); -+ pinfo = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL); - pinfo->mode = S_IFDIR | 0700; - pinfo->size = 0; -- pinfo->atime = pinfo->ctime = pinfo->mtime = -- CURRENT_TIME; -+ pinfo->atime = pinfo->ctime = pinfo->mtime = CURRENT_TIME; - strcpy(pinfo->name, name.name); - pinfo->namelength = name.len; - - novfs_Dump_Info(pinfo); - -- filldir(dirent, pinfo->name, pinfo->namelength, -- file->f_pos, file->f_pos, pinfo->mode >> 12); -+ filldir(dirent, pinfo->name, pinfo->namelength, file->f_pos, file->f_pos, pinfo->mode >> 12); - file->f_pos += 1; - - kfree(pinfo); -@@ -1091,8 +996,7 @@ void processList(struct file *file, void - } - } - --int processEntries(struct file *file, void *dirent, filldir_t filldir, -- void ** enumHandle, struct novfs_schandle sessionId) -+int processEntries(struct file *file, void *dirent, filldir_t filldir, void **enumHandle, struct novfs_schandle sessionId) - { - unsigned char *path = NULL, *buf = NULL; - int count = 0, status = 0; -@@ -1111,9 +1015,7 @@ int processEntries(struct file *file, vo - } - //NWSearchfiles - count = 0; -- status = -- novfs_get_dir_listex(path, enumHandle, &count, &pinfo, -- sessionId); -+ status = novfs_get_dir_listex(path, enumHandle, &count, &pinfo, sessionId); - pInfoMem = pinfo; - - if ((count == -1) || (count == 0) || (status != 0)) { -@@ -1123,11 
+1025,10 @@ int processEntries(struct file *file, vo - } - // parse resultset - while (pinfo && count--) { -- filldir(dirent, pinfo->name, pinfo->namelength, file->f_pos, -- file->f_pos, pinfo->mode >> 12); -+ filldir(dirent, pinfo->name, pinfo->namelength, file->f_pos, file->f_pos, pinfo->mode >> 12); - file->f_pos += 1; - -- pinfo = (struct novfs_entry_info *) (pinfo->name + pinfo->namelength); -+ pinfo = (struct novfs_entry_info *)(pinfo->name + pinfo->namelength); - } - - kfree(pInfoMem); -@@ -1146,9 +1047,8 @@ int novfs_dir_readdir(struct file *file, - struct file_private *file_private = NULL; - int lComm; - -- file_private = (struct file_private *) file->private_data; -- DbgPrint("Name %.*s", file->f_dentry->d_name.len, -- file->f_dentry->d_name.name); -+ file_private = (struct file_private *)file->private_data; -+ DbgPrint("Name %.*s", file->f_dentry->d_name.len, file->f_dentry->d_name.name); - - //printk("<1> file = %.*s\n", file->f_dentry->d_name.len, file->f_dentry->d_name.name); - -@@ -1168,8 +1068,7 @@ int novfs_dir_readdir(struct file *file, - file_private->listedall = 1; - } else { - if (inHAX) { -- if (get_nanosecond_time() - inHAXTime > -- 100 * 1000 * 1000) { -+ if (get_nanosecond_time() - inHAXTime > 100 * 1000 * 1000) { - //printk("<1> xoverhack: it was long, long, long ago...\n"); - inHAX = 0; - } else { -@@ -1187,17 +1086,14 @@ int novfs_dir_readdir(struct file *file, - #endif - - if (file->f_pos == 0) { -- if (filldir(dirent, ".", 1, file->f_pos, inode->i_ino, DT_DIR) < -- 0) -+ if (filldir(dirent, ".", 1, file->f_pos, inode->i_ino, DT_DIR) < 0) - return 1; - file->f_pos++; - return 1; - } - - if (file->f_pos == 1) { -- if (filldir -- (dirent, "..", 2, file->f_pos, -- file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) -+ if (filldir(dirent, "..", 2, file->f_pos, file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) - return 1; - file->f_pos++; - return 1; -@@ -1209,17 +1105,12 @@ int novfs_dir_readdir(struct file *file, - - inode = 
file->f_dentry->d_inode; - if (inode && inode->i_private) { -- sessionId = -- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> -- Scope); -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - if (0 == SC_PRESENT(sessionId)) { -- ((struct inode_data *) inode->i_private)->Scope = -- novfs_get_scope(file->f_dentry); -- sessionId = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry); -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - } -- uid = novfs_scope_get_uid(((struct inode_data *) inode->i_private)->Scope); -+ uid = novfs_scope_get_uid(((struct inode_data *)inode->i_private)->Scope); - } else { - SC_INITIALIZE(sessionId); - uid = current_euid(); -@@ -1239,17 +1130,14 @@ int novfs_dir_readdir(struct file *file, - type = SERVER_LIST; - } else { - DbgPrint("Parent-Parent is Root directory"); -- novfs_get_vols(&file->f_dentry->d_name, -- &list, sessionId); -+ novfs_get_vols(&file->f_dentry->d_name, &list, sessionId); - type = VOLUME_LIST; - } - - processList(file, dirent, filldir, list, type, sessionId); - file_private->listedall = 1; - } else { -- status = -- processEntries(file, dirent, filldir, -- &file_private->enumHandle, sessionId); -+ status = processEntries(file, dirent, filldir, &file_private->enumHandle, sessionId); - - if (status != 0) { - file_private->listedall = 1; -@@ -1257,15 +1145,10 @@ int novfs_dir_readdir(struct file *file, - // Hack for crossover part 2 - begin - lComm = strlen(current->comm); - if ((lComm > 4) -- && (0 == -- strcmp(current->comm + lComm - 4, ".EXE"))) { -- if (filldir -- (dirent, " !xover", 7, file->f_pos, -- inode->i_ino, DT_DIR) < 0) -+ && (0 == strcmp(current->comm + lComm - 4, ".EXE"))) { -+ if (filldir(dirent, " !xover", 7, file->f_pos, inode->i_ino, DT_DIR) < 0) - return 1; -- if (filldir -- (dirent, "z!xover", 
7, file->f_pos, -- inode->i_ino, DT_DIR) < 0) -+ if (filldir(dirent, "z!xover", 7, file->f_pos, inode->i_ino, DT_DIR) < 0) - return 1; - file->f_pos += 2; - } -@@ -1293,33 +1176,24 @@ ssize_t novfs_f_read(struct file * file, - struct novfs_schandle session; - struct inode_data *id; - -- if (file->f_dentry && -- (inode = file->f_dentry->d_inode) && -- (id = (struct inode_data *) inode->i_private)) { -+ if (file->f_dentry && (inode = file->f_dentry->d_inode) && (id = (struct inode_data *)inode->i_private)) { - - DbgPrint("(0x%p 0x%p %d %lld %.*s)", -- file->private_data, -- buf, len, offset, -- file->f_dentry->d_name.len, -- file->f_dentry->d_name.name); -+ file->private_data, buf, len, offset, file->f_dentry->d_name.len, file->f_dentry->d_name.name); - - if (novfs_page_cache && !(file->f_flags & O_DIRECT) && id->CacheFlag) { - totalread = do_sync_read(file, buf, len, off); - } else { - session = novfs_scope_get_sessionId(id->Scope); - if (0 == SC_PRESENT(session)) { -- id->Scope = -- novfs_get_scope(file->f_dentry); -+ id->Scope = novfs_get_scope(file->f_dentry); - session = novfs_scope_get_sessionId(id->Scope); - } - - while (len > 0 && (offset < i_size_read(inode))) { - int retval; - thisread = len; -- retval = -- novfs_read_file(file->private_data, buf, -- &thisread, &offset, -- session); -+ retval = novfs_read_file(file->private_data, buf, &thisread, &offset, session); - if (retval || !thisread) { - if (retval) { - totalread = retval; -@@ -1340,8 +1214,7 @@ ssize_t novfs_f_read(struct file * file, - return (totalread); - } - --ssize_t novfs_f_write(struct file * file, const char *buf, size_t len, -- loff_t * off) -+ssize_t novfs_f_write(struct file * file, const char *buf, size_t len, loff_t * off) - { - ssize_t thiswrite, totalwrite = 0; - loff_t offset = *off; -@@ -1350,30 +1223,23 @@ ssize_t novfs_f_write(struct file * file - int status; - struct inode_data *id; - -- if (file->f_dentry && -- (inode = file->f_dentry->d_inode) && -- (id = 
file->f_dentry->d_inode->i_private)) { -+ if (file->f_dentry && (inode = file->f_dentry->d_inode) && (id = file->f_dentry->d_inode->i_private)) { - DbgPrint("(0x%p 0x%p 0x%p %d %lld %.*s)", - file->private_data, inode, id->FileHandle, len, offset, -- file->f_dentry->d_name.len, -- file->f_dentry->d_name.name); -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name); - -- if (novfs_page_cache && -- !(file->f_flags & O_DIRECT) && -- id->CacheFlag && !(file->f_flags & O_WRONLY)) { -+ if (novfs_page_cache && !(file->f_flags & O_DIRECT) && id->CacheFlag && !(file->f_flags & O_WRONLY)) { - totalwrite = do_sync_write(file, buf, len, off); - } else { - if (file->f_flags & O_APPEND) { - offset = i_size_read(inode); - DbgPrint("appending to end %lld %.*s", -- offset, file->f_dentry->d_name.len, -- file->f_dentry->d_name.name); -+ offset, file->f_dentry->d_name.len, file->f_dentry->d_name.name); - } - - session = novfs_scope_get_sessionId(id->Scope); - if (0 == SC_PRESENT(session)) { -- id->Scope = -- novfs_get_scope(file->f_dentry); -+ id->Scope = novfs_get_scope(file->f_dentry); - session = novfs_scope_get_sessionId(id->Scope); - } - -@@ -1381,23 +1247,18 @@ ssize_t novfs_f_write(struct file * file - thiswrite = len; - if ((status = - novfs_write_file(file->private_data, -- (unsigned char *)buf, -- &thiswrite, &offset, -- session)) || !thiswrite) { -+ (unsigned char *)buf, &thiswrite, &offset, session)) || !thiswrite) { - totalwrite = status; - break; - } -- DbgPrint("thiswrite = 0x%x", -- thiswrite); -+ DbgPrint("thiswrite = 0x%x", thiswrite); - len -= thiswrite; - buf += thiswrite; - offset += thiswrite; - totalwrite += thiswrite; - if (offset > i_size_read(inode)) { - i_size_write(inode, offset); -- inode->i_blocks = -- (offset + inode->i_sb->s_blocksize - -- 1) >> inode->i_blkbits; -+ inode->i_blocks = (offset + inode->i_sb->s_blocksize - 1) >> inode->i_blkbits; - } - inode->i_mtime = inode->i_atime = CURRENT_TIME; - id->Flags |= UPDATE_INODE; -@@ -1416,8 +1277,7 @@ 
int novfs_f_readdir(struct file *file, v - return -EISDIR; - } - --int novfs_f_ioctl(struct inode *inode, struct file *file, unsigned int cmd, -- unsigned long arg) -+int novfs_f_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) - { - DbgPrint("file=0x%p cmd=0x%x arg=0x%p", file, cmd, arg); - -@@ -1428,8 +1288,7 @@ int novfs_f_mmap(struct file *file, stru - { - int retCode = -EINVAL; - -- DbgPrint("file=0x%p %.*s", file, file->f_dentry->d_name.len, -- file->f_dentry->d_name.name); -+ DbgPrint("file=0x%p %.*s", file, file->f_dentry->d_name.len, file->f_dentry->d_name.name); - - retCode = generic_file_mmap(file, vma); - -@@ -1449,80 +1308,56 @@ int novfs_f_open(struct inode *inode, st - int errInfo; - - DbgPrint("inode=0x%p file=0x%p dentry=0x%p dentry->d_inode=0x%p %.*s", -- inode, file, file->f_dentry, file->f_dentry->d_inode, -- file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ inode, file, file->f_dentry, file->f_dentry->d_inode, file->f_dentry->d_name.len, file->f_dentry->d_name.name); - if (file->f_dentry) { - DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o", -- file->f_dentry->d_name.len, -- file->f_dentry->d_name.name, -- file->f_flags, file->f_mode, inode->i_mode); -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name, file->f_flags, file->f_mode, inode->i_mode); - } - - if (inode && inode->i_private) { -- id = (struct inode_data *) file->f_dentry->d_inode->i_private; -+ id = (struct inode_data *)file->f_dentry->d_inode->i_private; - session = novfs_scope_get_sessionId(id->Scope); - if (0 == SC_PRESENT(session)) { - id->Scope = novfs_get_scope(file->f_dentry); - session = novfs_scope_get_sessionId(id->Scope); - } - -- info = kmalloc(sizeof(struct novfs_entry_info) + -- PATH_LENGTH_BUFFER, GFP_KERNEL); -+ info = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL); - if (info) { -- path = -- novfs_dget_path(file->f_dentry, info->name, -- PATH_LENGTH_BUFFER); -+ path = 
novfs_dget_path(file->f_dentry, info->name, PATH_LENGTH_BUFFER); - if (path) { - if (file->f_flags & O_TRUNC) { -- errInfo = -- novfs_get_file_info(path, info, -- session); -+ errInfo = novfs_get_file_info(path, info, session); - - if (errInfo || info->size == 0) { - // clear O_TRUNC flag, bug #275366 -- file->f_flags = -- file->f_flags & (~O_TRUNC); -+ file->f_flags = file->f_flags & (~O_TRUNC); - } - } - - DbgPrint("%s", path); -- retCode = novfs_open_file(path, -- file-> -- f_flags & ~O_EXCL, -- info, -- &file->private_data, -- session); -+ retCode = novfs_open_file(path, file->f_flags & ~O_EXCL, info, &file->private_data, session); - -- DbgPrint("0x%x 0x%p", retCode, -- file->private_data); -+ DbgPrint("0x%x 0x%p", retCode, file->private_data); - if (!retCode) { - /* - *update_inode(inode, &info); - */ - //id->FileHandle = file->private_data; -- id->CacheFlag = -- novfs_get_file_cache_flag(path, -- session); -+ id->CacheFlag = novfs_get_file_cache_flag(path, session); - -- if (!novfs_get_file_info -- (path, info, session)) { -+ if (!novfs_get_file_info(path, info, session)) { - update_inode(inode, info); - } - - parent = dget_parent(file->f_dentry); - - if (parent && parent->d_inode) { -- struct inode *dir = -- parent->d_inode; -+ struct inode *dir = parent->d_inode; - novfs_lock_inode_cache(dir); - ino = 0; -- if (novfs_get_entry -- (dir, -- &file->f_dentry->d_name, -- &ino, info)) { -- ((struct inode_data *) inode-> -- i_private)->Flags |= -- UPDATE_INODE; -+ if (novfs_get_entry(dir, &file->f_dentry->d_name, &ino, info)) { -+ ((struct inode_data *)inode->i_private)->Flags |= UPDATE_INODE; - } - - novfs_unlock_inode_cache(dir); -@@ -1537,8 +1372,7 @@ int novfs_f_open(struct inode *inode, st - return (retCode); - } - --int novfs_flush_mapping(void *Handle, struct address_space *mapping, -- struct novfs_schandle Session) -+int novfs_flush_mapping(void *Handle, struct address_space *mapping, struct novfs_schandle Session) - { - struct pagevec pagevec; - unsigned 
nrpages; -@@ -1549,10 +1383,7 @@ int novfs_flush_mapping(void *Handle, st - - do { - done = 1; -- nrpages = pagevec_lookup_tag(&pagevec, -- mapping, -- &index, -- PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE); -+ nrpages = pagevec_lookup_tag(&pagevec, mapping, &index, PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE); - - if (nrpages) { - struct page *page; -@@ -1570,16 +1401,11 @@ int novfs_flush_mapping(void *Handle, st - page_cache_get(page); - if (page->mapping == mapping) { - if (clear_page_dirty_for_io(page)) { -- rc = novfs_write_page(Handle, -- page, -- Session); -+ rc = novfs_write_page(Handle, page, Session); - if (!rc) { - //ClearPageDirty(page); - radix_tree_tag_clear -- (&mapping-> -- page_tree, -- page_index(page), -- PAGECACHE_TAG_DIRTY); -+ (&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); - } - } - } -@@ -1613,41 +1439,27 @@ int novfs_f_flush(struct file *file, fl_ - inode = file->f_dentry->d_inode; - DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o", - file->f_dentry->d_name.len, -- file->f_dentry->d_name.name, file->f_flags, -- file->f_mode, inode->i_mode); -+ file->f_dentry->d_name.name, file->f_flags, file->f_mode, inode->i_mode); - - session = novfs_scope_get_sessionId(id->Scope); - if (0 == SC_PRESENT(session)) { -- id->Scope = -- novfs_get_scope(file->f_dentry); -+ id->Scope = novfs_get_scope(file->f_dentry); - session = novfs_scope_get_sessionId(id->Scope); - } - -- if (inode && -- inode->i_mapping && inode->i_mapping->nrpages) { -+ if (inode && inode->i_mapping && inode->i_mapping->nrpages) { - - DbgPrint("%.*s pages=%lu", -- file->f_dentry->d_name.len, -- file->f_dentry->d_name.name, -- inode->i_mapping->nrpages); -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name, inode->i_mapping->nrpages); - - if (file->f_dentry && - file->f_dentry->d_inode && - file->f_dentry->d_inode->i_mapping && - file->f_dentry->d_inode->i_mapping->a_ops && -- file->f_dentry->d_inode->i_mapping->a_ops-> -- writepage) { -- rc = filemap_fdatawrite(file->f_dentry-> -- 
d_inode-> -- i_mapping); -+ file->f_dentry->d_inode->i_mapping->a_ops->writepage) { -+ rc = filemap_fdatawrite(file->f_dentry->d_inode->i_mapping); - } else { -- rc = novfs_flush_mapping(file-> -- private_data, -- file-> -- f_dentry-> -- d_inode-> -- i_mapping, -- session); -+ rc = novfs_flush_mapping(file->private_data, file->f_dentry->d_inode->i_mapping, session); - } - } - } -@@ -1662,9 +1474,7 @@ int novfs_f_release(struct inode *inode, - struct novfs_schandle session; - struct inode_data *id; - -- DbgPrint("path=%.*s handle=%p", -- file->f_dentry->d_name.len, -- file->f_dentry->d_name.name, file->private_data); -+ DbgPrint("path=%.*s handle=%p", file->f_dentry->d_name.len, file->f_dentry->d_name.name, file->private_data); - - if (inode && (id = inode->i_private)) { - session = novfs_scope_get_sessionId(id->Scope); -@@ -1675,26 +1485,18 @@ int novfs_f_release(struct inode *inode, - - if ((file->f_flags & O_ACCMODE) != O_RDONLY) { - DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o", -- file->f_dentry->d_name.len, -- file->f_dentry->d_name.name, file->f_flags, -- file->f_mode, inode->i_mode); -+ file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name, file->f_flags, file->f_mode, inode->i_mode); - - if (inode->i_mapping && inode->i_mapping->nrpages) { - - DbgPrint("%.*s pages=%lu", -- file->f_dentry->d_name.len, -- file->f_dentry->d_name.name, -- inode->i_mapping->nrpages); -- -- if (inode->i_mapping->a_ops && -- inode->i_mapping->a_ops->writepage) { -- filemap_fdatawrite(file->f_dentry-> -- d_inode->i_mapping); -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name, inode->i_mapping->nrpages); -+ -+ if (inode->i_mapping->a_ops && inode->i_mapping->a_ops->writepage) { -+ filemap_fdatawrite(file->f_dentry->d_inode->i_mapping); - } else { -- novfs_flush_mapping(file->private_data, -- file->f_dentry-> -- d_inode->i_mapping, -- session); -+ novfs_flush_mapping(file->private_data, file->f_dentry->d_inode->i_mapping, session); - } - } - } -@@ -1717,8 +1519,7 @@ 
int novfs_f_fsync(struct file *file, int - int novfs_f_llseek(struct file *file, loff_t offset, int origin) - { - DbgPrint("File=0x%p Name=%.*s offset=%lld origin=%d", -- file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, -- offset, origin); -+ file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, offset, origin); - return (generic_file_llseek(file, offset, origin)); - } - -@@ -1756,12 +1557,9 @@ int novfs_f_lock(struct file *file, int - struct inode_data *id; - loff_t len; - -- DbgPrint("(0x%p): begin in novfs_f_lock 0x%p", -- __builtin_return_address(0), file->private_data); -- DbgPrint("cmd = %d, F_GETLK = %d, F_SETLK = %d, F_SETLKW = %d", -- cmd, F_GETLK, F_SETLK, F_SETLKW); -- DbgPrint("lock->fl_start = 0x%llX, lock->fl_end = 0x%llX", -- lock->fl_start, lock->fl_end); -+ DbgPrint("(0x%p): begin in novfs_f_lock 0x%p", __builtin_return_address(0), file->private_data); -+ DbgPrint("cmd = %d, F_GETLK = %d, F_SETLK = %d, F_SETLKW = %d", cmd, F_GETLK, F_SETLK, F_SETLKW); -+ DbgPrint("lock->fl_start = 0x%llX, lock->fl_end = 0x%llX", lock->fl_start, lock->fl_end); - - err_code = -1; - if (lock->fl_start <= lock->fl_end) { -@@ -1771,18 +1569,13 @@ int novfs_f_lock(struct file *file, int - len = 0; - } - -- if (file->f_dentry && -- (inode = file->f_dentry->d_inode) && -- (id = (struct inode_data *) inode->i_private)) { -+ if (file->f_dentry && (inode = file->f_dentry->d_inode) && (id = (struct inode_data *)inode->i_private)) { - DbgPrint("(0x%p 0x%p %.*s)", -- file->private_data, inode, -- file->f_dentry->d_name.len, -- file->f_dentry->d_name.name); -+ file->private_data, inode, file->f_dentry->d_name.len, file->f_dentry->d_name.name); - - session = novfs_scope_get_sessionId(id->Scope); - if (0 == SC_PRESENT(session)) { -- id->Scope = -- novfs_get_scope(file->f_dentry); -+ id->Scope = novfs_get_scope(file->f_dentry); - session = novfs_scope_get_sessionId(id->Scope); - } - -@@ -1793,22 +1586,14 @@ int novfs_f_lock(struct file *file, int - case 
F_SETLK64: - #endif - -- err_code = -- novfs_set_file_lock(session, -- file->private_data, -- lock->fl_type, -- lock->fl_start, len); -+ err_code = novfs_set_file_lock(session, file->private_data, lock->fl_type, lock->fl_start, len); - break; - - case F_SETLKW: - #ifdef F_GETLK64 - case F_SETLKW64: - #endif -- err_code = -- novfs_set_file_lock(session, -- file->private_data, -- lock->fl_type, -- lock->fl_start, len); -+ err_code = novfs_set_file_lock(session, file->private_data, lock->fl_type, lock->fl_start, len); - break; - - case F_GETLK: -@@ -1822,17 +1607,13 @@ int novfs_f_lock(struct file *file, int - break; - - default: -- printk -- ("<1> novfs in novfs_f_lock, not implemented cmd = %d\n", -- cmd); -- DbgPrint("novfs in novfs_f_lock, not implemented cmd = %d", -- cmd); -+ printk("<1> novfs in novfs_f_lock, not implemented cmd = %d\n", cmd); -+ DbgPrint("novfs in novfs_f_lock, not implemented cmd = %d", cmd); - break; - } - } - -- DbgPrint("lock->fl_type = %u, err_code 0x%X", -- lock->fl_type, err_code); -+ DbgPrint("lock->fl_type = %u, err_code 0x%X", lock->fl_type, err_code); - - if ((err_code != 0) && (err_code != -1) - && (err_code != -ENOSYS)) { -@@ -1847,8 +1628,7 @@ int novfs_f_lock(struct file *file, int - - /*++======================================================================*/ - static void novfs_copy_cache_pages(struct address_space *mapping, -- struct list_head *pages, int bytes_read, -- char *data, struct pagevec *plru_pvec) -+ struct list_head *pages, int bytes_read, char *data, struct pagevec *plru_pvec) - { - struct page *page; - char *target; -@@ -1872,8 +1652,7 @@ static void novfs_copy_cache_pages(struc - if (PAGE_CACHE_SIZE > bytes_read) { - memcpy(target, data, bytes_read); - /* zero the tail end of this partial page */ -- memset(target + bytes_read, 0, -- PAGE_CACHE_SIZE - bytes_read); -+ memset(target + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read); - bytes_read = 0; - } else { - memcpy(target, data, PAGE_CACHE_SIZE); -@@ -1901,7 
+1680,7 @@ int novfs_a_writepage(struct page *page, - struct novfs_data_list dlst[2]; - size_t len = PAGE_CACHE_SIZE; - -- session = novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)->Scope); -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - - page_cache_get(page); - -@@ -1933,8 +1712,7 @@ int novfs_a_writepage(struct page *page, - return (retCode); - } - --int novfs_a_writepages(struct address_space *mapping, -- struct writeback_control *wbc) -+int novfs_a_writepages(struct address_space *mapping, struct writeback_control *wbc) - { - int retCode = 0; - struct inode *inode = mapping->host; -@@ -1953,31 +1731,24 @@ int novfs_a_writepages(struct address_sp - size_t tsize; - - SC_INITIALIZE(session); -- DbgPrint("inode=0x%p mapping=0x%p wbc=0x%p nr_to_write=%d", -- inode, mapping, wbc, wbc->nr_to_write); -+ DbgPrint("inode=0x%p mapping=0x%p wbc=0x%p nr_to_write=%d", inode, mapping, wbc, wbc->nr_to_write); - - if (inode) { -- DbgPrint("Inode=0x%p Ino=%d Id=0x%p", inode, inode->i_ino, -- inode->i_private); -+ DbgPrint("Inode=0x%p Ino=%d Id=0x%p", inode, inode->i_ino, inode->i_private); - - if (NULL != (id = inode->i_private)) { -- session = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -- fh = ((struct inode_data *) inode->i_private)->FileHandle; -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); -+ fh = ((struct inode_data *)inode->i_private)->FileHandle; - } - } - - dlist = kmalloc(sizeof(struct novfs_data_list) * max_page_lookup, GFP_KERNEL); -- pages = -- kmalloc(sizeof(struct page *) * max_page_lookup, GFP_KERNEL); -+ pages = kmalloc(sizeof(struct page *) * max_page_lookup, GFP_KERNEL); - - if (id) -- DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p %s", -- inode, fh, dlist, pages, id->Name); -+ DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p %s", inode, fh, dlist, pages, id->Name); - else -- DbgPrint("inode=0x%p fh=0x%p dlist=0x%p 
pages=0x%p", -- inode, fh, dlist, pages); -+ DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p", inode, fh, dlist, pages); - - if (dlist && pages) { - struct backing_dev_info *bdi = mapping->backing_dev_info; -@@ -2005,8 +1776,7 @@ int novfs_a_writepages(struct address_sp - - DbgPrint("nr_pages=%d", nr_pages); - if (!nr_pages) { -- memset(pages, 0, -- sizeof(struct page *) * max_page_lookup); -+ memset(pages, 0, sizeof(struct page *) * max_page_lookup); - - spin_lock_irq(&mapping->tree_lock); - -@@ -2016,13 +1786,8 @@ int novfs_a_writepages(struct address_sp - * the first entry for the reply buffer. - */ - nr_pages = -- radix_tree_gang_lookup_tag(&mapping-> -- page_tree, -- (void **)pages, -- index, -- max_page_lookup - -- 1, -- PAGECACHE_TAG_DIRTY); -+ radix_tree_gang_lookup_tag(&mapping->page_tree, -+ (void **)pages, index, max_page_lookup - 1, PAGECACHE_TAG_DIRTY); - - DbgPrint("2; nr_pages=%d\n", nr_pages); - /* -@@ -2044,9 +1809,7 @@ int novfs_a_writepages(struct address_sp - - if (nr_pages) { - index = pages[nr_pages - 1]->index + 1; -- pos = -- (loff_t) pages[0]-> -- index << PAGE_CACHE_SHIFT; -+ pos = (loff_t) pages[0]->index << PAGE_CACHE_SHIFT; - } - - if (!nr_pages) { -@@ -2062,9 +1825,7 @@ int novfs_a_writepages(struct address_sp - } - } else { - if (pages[i]) { -- pos = -- (loff_t) pages[i]-> -- index << PAGE_CACHE_SHIFT; -+ pos = (loff_t) pages[i]->index << PAGE_CACHE_SHIFT; - } - } - -@@ -2094,8 +1855,7 @@ int novfs_a_writepages(struct address_sp - if (wbc->sync_mode != WB_SYNC_NONE) - wait_on_page_writeback(page); - -- if (page->mapping != mapping -- || PageWriteback(page) -+ if (page->mapping != mapping || PageWriteback(page) - || !clear_page_dirty_for_io(page)) { - unlock_page(page); - continue; -@@ -2106,8 +1866,7 @@ int novfs_a_writepages(struct address_sp - dlptr[dlist_idx].len = PAGE_CACHE_SIZE; - dlptr[dlist_idx].rwflag = DLREAD; - dlist_idx++; -- DbgPrint("Add page=0x%p index=0x%lx", -- page, page->index); -+ DbgPrint("Add page=0x%p 
index=0x%lx", page, page->index); - } - - DbgPrint("dlist_idx=%d", dlist_idx); -@@ -2117,13 +1876,10 @@ int novfs_a_writepages(struct address_sp - * Check size so we don't write pass end of file. - */ - if ((pos + tsize) > i_size_read(inode)) { -- tsize = -- (size_t) (i_size_read(inode) - pos); -+ tsize = (size_t) (i_size_read(inode) - pos); - } - -- retCode = -- novfs_write_pages(fh, dlist, dlist_idx + 1, -- tsize, pos, session); -+ retCode = novfs_write_pages(fh, dlist, dlist_idx + 1, tsize, pos, session); - switch (retCode) { - case 0: - wbc->nr_to_write -= dlist_idx; -@@ -2144,13 +1900,11 @@ int novfs_a_writepages(struct address_sp - unlock_page((struct page *) - dlptr[dlist_idx - 1].page); - page_cache_release((struct page *) -- dlptr[dlist_idx - -- 1].page); -- DbgPrint("release page=0x%p index=0x%lx", -- dlptr[dlist_idx - 1].page, -- ((struct page *) -- dlptr[dlist_idx - -- 1].page)->index); -+ dlptr[dlist_idx - 1].page); -+ DbgPrint("release page=0x%p index=0x%lx", dlptr[dlist_idx - 1].page, ((struct page *) -+ dlptr[dlist_idx - -+ 1].page)-> -+ index); - if (!retCode) { - wbc->nr_to_write--; - } -@@ -2189,14 +1943,12 @@ int novfs_a_readpage(struct file *file, - struct novfs_schandle session; - - SC_INITIALIZE(session); -- DbgPrint("File=0x%p Name=%.*s Page=0x%p", file, -- file->f_dentry->d_name.len, file->f_dentry->d_name.name, page); -+ DbgPrint("File=0x%p Name=%.*s Page=0x%p", file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, page); - - dentry = file->f_dentry; - - if (dentry) { -- DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, -- dentry->d_name.name); -+ DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, dentry->d_name.name); - if (dentry->d_inode) { - inode = dentry->d_inode; - } -@@ -2206,15 +1958,10 @@ int novfs_a_readpage(struct file *file, - DbgPrint("Inode=0x%p Ino=%d", inode, inode->i_ino); - - if (inode->i_private) { -- session = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- 
i_private)->Scope); -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - if (0 == SC_PRESENT(session)) { -- ((struct inode_data *) inode->i_private)->Scope = -- novfs_get_scope(file->f_dentry); -- session = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry); -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - } - } - } -@@ -2233,11 +1980,8 @@ int novfs_a_readpage(struct file *file, - dlst[1].len = PAGE_CACHE_SIZE; - dlst[1].rwflag = DLWRITE; - -- DbgPrint("calling= novfs_Read_Pages %lld", -- offset); -- retCode = -- novfs_read_pages(file->private_data, dlst, 2, &len, &offset, -- session); -+ DbgPrint("calling= novfs_Read_Pages %lld", offset); -+ retCode = novfs_read_pages(file->private_data, dlst, 2, &len, &offset, session); - if (len && (len < PAGE_CACHE_SIZE)) { - pbuf = kmap_atomic(page, KM_USER0); - memset(&((char *)pbuf)[len], 0, PAGE_CACHE_SIZE - len); -@@ -2254,8 +1998,7 @@ int novfs_a_readpage(struct file *file, - - } - --int novfs_a_readpages(struct file *file, struct address_space *mapping, -- struct list_head *page_lst, unsigned nr_pages) -+int novfs_a_readpages(struct file *file, struct address_space *mapping, struct list_head *page_lst, unsigned nr_pages) - { - int retCode = 0; - struct inode *inode = NULL; -@@ -2271,15 +2014,12 @@ int novfs_a_readpages(struct file *file, - char *rbuf, done = 0; - SC_INITIALIZE(session); - -- DbgPrint("File=0x%p Name=%.*s Pages=%d", file, -- file->f_dentry->d_name.len, file->f_dentry->d_name.name, -- nr_pages); -+ DbgPrint("File=0x%p Name=%.*s Pages=%d", file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, nr_pages); - - dentry = file->f_dentry; - - if (dentry) { -- DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, -- dentry->d_name.name); -+ DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, 
dentry->d_name.name); - if (dentry->d_inode) { - inode = dentry->d_inode; - } -@@ -2289,15 +2029,10 @@ int novfs_a_readpages(struct file *file, - DbgPrint("Inode=0x%p Ino=%d", inode, inode->i_ino); - - if (inode->i_private) { -- session = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - if (0 == SC_PRESENT(session)) { -- ((struct inode_data *) inode->i_private)->Scope = -- novfs_get_scope(file->f_dentry); -- session = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry); -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - } - } - } -@@ -2321,8 +2056,7 @@ int novfs_a_readpages(struct file *file, - * Count number of contiguous pages. - */ - list_for_each_entry_reverse(tpage, page_lst, lru) { -- if ((next_index != tpage->index) || -- (len >= novfs_max_iosize - PAGE_SIZE)) { -+ if ((next_index != tpage->index) || (len >= novfs_max_iosize - PAGE_SIZE)) { - break; - } - len += PAGE_SIZE; -@@ -2337,14 +2071,9 @@ int novfs_a_readpages(struct file *file, - dllst[1].len = len; - dllst[1].rwflag = DLWRITE; - -- DbgPrint("calling novfs_Read_Pages %lld", -- offset); -- if (!novfs_read_pages -- (file->private_data, dllst, 2, &len, -- &offset, session)) { -- novfs_copy_cache_pages(mapping, -- page_lst, len, -- rbuf, &lru_pvec); -+ DbgPrint("calling novfs_Read_Pages %lld", offset); -+ if (!novfs_read_pages(file->private_data, dllst, 2, &len, &offset, session)) { -+ novfs_copy_cache_pages(mapping, page_lst, len, rbuf, &lru_pvec); - page_idx += len >> PAGE_CACHE_SHIFT; - if ((int)(len & PAGE_CACHE_MASK) != len) { - page_idx++; -@@ -2362,8 +2091,7 @@ int novfs_a_readpages(struct file *file, - * Free any remaining pages. 
- */ - while (!list_empty(page_lst)) { -- struct page *page = -- list_entry(page_lst->prev, struct page, lru); -+ struct page *page = list_entry(page_lst->prev, struct page, lru); - - list_del(&page->lru); - page_cache_release(page); -@@ -2381,8 +2109,7 @@ int novfs_a_readpages(struct file *file, - } - - int novfs_a_write_begin(struct file *file, struct address_space *mapping, -- loff_t pos, unsigned len, unsigned flags, -- struct page **pagep, void **fsdata) -+ loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) - { - int retVal = 0; - loff_t offset = pos; -@@ -2405,8 +2132,7 @@ int novfs_a_write_begin(struct file *fil - *pagep = page; - - DbgPrint("File=0x%p Page=0x%p offset=0x%llx From=%u To=%u " -- "filesize=%lld\n", file, page, offset, from, to, -- i_size_read(file->f_dentry->d_inode)); -+ "filesize=%lld\n", file, page, offset, from, to, i_size_read(file->f_dentry->d_inode)); - if (!PageUptodate(page)) { - /* - * Check to see if whole page -@@ -2424,17 +2150,11 @@ int novfs_a_write_begin(struct file *fil - */ - if (file->f_dentry && file->f_dentry->d_inode) { - if (file->f_dentry->d_inode->i_private) { -- session = -- novfs_scope_get_sessionId(((struct inode_data *) -- inode-> -- i_private)-> -- Scope); -+ session = novfs_scope_get_sessionId(((struct inode_data *) -+ inode->i_private)->Scope); - if (0 == SC_PRESENT(session)) { -- ((struct inode_data *) inode-> -- i_private)->Scope = -- novfs_get_scope(file->f_dentry); -- session = -- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)->Scope); -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry); -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - } - } - } -@@ -2456,10 +2176,8 @@ int novfs_a_write_begin(struct file *fil - dllst[1].len = len; - dllst[1].rwflag = DLWRITE; - -- DbgPrint("calling novfs_Read_Pages %lld", -- offset); -- novfs_read_pages(file->private_data, dllst, 2, -- &len, &offset, 
session); -+ DbgPrint("calling novfs_Read_Pages %lld", offset); -+ novfs_read_pages(file->private_data, dllst, 2, &len, &offset, session); - - /* - * Zero unnsed page. -@@ -2490,8 +2208,7 @@ int novfs_a_write_begin(struct file *fil - } - - int novfs_a_write_end(struct file *file, struct address_space *mapping, -- loff_t pos, unsigned len, unsigned copied, -- struct page *page, void *fsdata) -+ loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) - { - int retCode = 0; - struct inode *inode = page->mapping->host; -@@ -2507,11 +2224,9 @@ int novfs_a_write_end(struct file *file, - from = pos & (PAGE_CACHE_SIZE - 1); - to = from + len; - -- - DbgPrint("File=0x%p Page=0x%p offset=0x%x To=%u filesize=%lld", - file, page, offset, to, i_size_read(file->f_dentry->d_inode)); -- if (file->f_dentry->d_inode -- && (id = file->f_dentry->d_inode->i_private)) { -+ if (file->f_dentry->d_inode && (id = file->f_dentry->d_inode->i_private)) { - session = novfs_scope_get_sessionId(id->Scope); - if (0 == SC_PRESENT(session)) { - id->Scope = novfs_get_scope(file->f_dentry); -@@ -2528,20 +2243,17 @@ int novfs_a_write_end(struct file *file, - } - - if (!PageUptodate(page)) { -- pos = -- ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset; -+ pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset; - - if (to < offset) { - return (retCode); - } - dlst[0].page = page; -- dlst[0].offset = (void *)(unsigned long) offset; -+ dlst[0].offset = (void *)(unsigned long)offset; - dlst[0].len = len; - dlst[0].rwflag = DLREAD; - -- retCode = -- novfs_write_pages(id->FileHandle, dlst, 1, len, pos, -- session); -+ retCode = novfs_write_pages(id->FileHandle, dlst, 1, len, pos, session); - - } else { - set_page_dirty(page); -@@ -2552,9 +2264,7 @@ int novfs_a_write_end(struct file *file, - } - - /*++======================================================================*/ --ssize_t novfs_a_direct_IO(int rw, struct kiocb * kiocb, -- const struct iovec * iov, -- loff_t offset, unsigned 
long nr_segs) -+ssize_t novfs_a_direct_IO(int rw, struct kiocb * kiocb, const struct iovec * iov, loff_t offset, unsigned long nr_segs) - /* - * - * Notes: This is a dummy function so that we can allow a file -@@ -2568,8 +2278,7 @@ ssize_t novfs_a_direct_IO(int rw, struct - } - - /*++======================================================================*/ --int novfs_i_create(struct inode *dir, struct dentry *dentry, int mode, -- struct nameidata *nd) -+int novfs_i_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) - { - char *path, *buf; - struct novfs_entry_info info; -@@ -2577,8 +2286,7 @@ int novfs_i_create(struct inode *dir, st - struct novfs_schandle session; - int retCode = -EACCES; - -- DbgPrint("mode=0%o flags=0%o %.*s", mode, -- nd->NDOPENFLAGS, dentry->d_name.len, dentry->d_name.name); -+ DbgPrint("mode=0%o flags=0%o %.*s", mode, nd->NDOPENFLAGS, dentry->d_name.len, dentry->d_name.name); - - if (IS_ROOT(dentry) || /* Root */ - IS_ROOT(dentry->d_parent) || /* User */ -@@ -2589,41 +2297,23 @@ int novfs_i_create(struct inode *dir, st - - if (mode | S_IFREG) { - if (dir->i_private) { -- session = -- novfs_scope_get_sessionId(((struct inode_data *) dir->i_private)-> -- Scope); -+ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope); - if (0 == SC_PRESENT(session)) { -- ((struct inode_data *) dir->i_private)->Scope = -- novfs_get_scope(dentry); -- session = -- novfs_scope_get_sessionId(((struct inode_data *) dir-> -- i_private)->Scope); -+ ((struct inode_data *)dir->i_private)->Scope = novfs_get_scope(dentry); -+ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope); - } - - buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); - if (buf) { -- path = -- novfs_dget_path(dentry, buf, -- PATH_LENGTH_BUFFER); -+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); - if (path) { -- retCode = -- novfs_open_file(path, -- nd-> -- NDOPENFLAGS | -- O_RDWR, &info, -- &handle, session); -+ 
retCode = novfs_open_file(path, nd->NDOPENFLAGS | O_RDWR, &info, &handle, session); - if (!retCode && handle) { -- novfs_close_file(handle, -- session); -- if (!novfs_i_mknod -- (dir, dentry, -- mode | S_IFREG, 0)) { -+ novfs_close_file(handle, session); -+ if (!novfs_i_mknod(dir, dentry, mode | S_IFREG, 0)) { - if (dentry->d_inode) { - ((struct inode_data *) -- dentry-> -- d_inode-> -- i_private)-> -- Flags |= UPDATE_INODE; -+ dentry->d_inode->i_private)->Flags |= UPDATE_INODE; - } - } - } -@@ -2643,21 +2333,16 @@ void update_inode(struct inode *Inode, s - - DbgPrint("atime=%s", ctime_r(&Info->atime.tv_sec, dbuf)); - DbgPrint("ctime=%s", ctime_r(&Info->ctime.tv_sec, dbuf)); -- DbgPrint("mtime=%s %d", ctime_r(&Info->mtime.tv_sec, dbuf), -- Info->mtime.tv_nsec); -+ DbgPrint("mtime=%s %d", ctime_r(&Info->mtime.tv_sec, dbuf), Info->mtime.tv_nsec); - DbgPrint("size=%lld", Info->size); - DbgPrint("mode=0%o", Info->mode); - - if (Inode && - ((Inode->i_size != Info->size) || -- (Inode->i_mtime.tv_sec != Info->mtime.tv_sec) || -- (Inode->i_mtime.tv_nsec != Info->mtime.tv_nsec))) { -- DbgPrint ("calling invalidate_remote_inode sz %d %d", -- Inode->i_size, Info->size); -- DbgPrint ("calling invalidate_remote_inode sec %d %d", -- Inode->i_mtime.tv_sec, Info->mtime.tv_sec); -- DbgPrint ("calling invalidate_remote_inode ns %d %d", -- Inode->i_mtime.tv_nsec, Info->mtime.tv_nsec); -+ (Inode->i_mtime.tv_sec != Info->mtime.tv_sec) || (Inode->i_mtime.tv_nsec != Info->mtime.tv_nsec))) { -+ DbgPrint("calling invalidate_remote_inode sz %d %d", Inode->i_size, Info->size); -+ DbgPrint("calling invalidate_remote_inode sec %d %d", Inode->i_mtime.tv_sec, Info->mtime.tv_sec); -+ DbgPrint("calling invalidate_remote_inode ns %d %d", Inode->i_mtime.tv_nsec, Info->mtime.tv_nsec); - - if (Inode && Inode->i_mapping) { - invalidate_remote_inode(Inode); -@@ -2679,8 +2364,8 @@ void update_inode(struct inode *Inode, s - * - * Update i_blocks to have the number of 512 blocks - */ -- Inode->i_blocks = 
(((loff_t)Info->size) + Inode->i_sb->s_blocksize - 1) -- >> (loff_t)Inode->i_blkbits; -+ Inode->i_blocks = (((loff_t) Info->size) + Inode->i_sb->s_blocksize - 1) -+ >> (loff_t) Inode->i_blkbits; - Inode->i_blocks = Inode->i_blocks << (PAGE_CACHE_SHIFT - 9); - Inode->i_bytes = Info->size & (Inode->i_sb->s_blocksize - 1); - -@@ -2691,8 +2376,7 @@ void update_inode(struct inode *Inode, s - } - } - --struct dentry *novfs_i_lookup(struct inode *dir, struct dentry *dentry, -- struct nameidata *nd) -+struct dentry *novfs_i_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) - { - struct dentry *retVal = ERR_PTR(-ENOENT); - struct dentry *parent; -@@ -2709,15 +2393,12 @@ struct dentry *novfs_i_lookup(struct ino - char *path; - path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); - if (path) { -- DbgPrint("dir 0x%p %d hash %d inode 0x%0p %s", -- dir, dir->i_ino, dentry->d_name.hash, -- dentry->d_inode, path); -+ DbgPrint("dir 0x%p %d hash %d inode 0x%0p %s", dir, dir->i_ino, dentry->d_name.hash, dentry->d_inode, path); - } - kfree(buf); - } else { - DbgPrint("dir 0x%p %d name %.*s hash %d inode 0x%0p", -- dir, dir->i_ino, dentry->d_name.len, dentry->d_name.name, -- dentry->d_name.hash, dentry->d_inode); -+ dir, dir->i_ino, dentry->d_name.len, dentry->d_name.name, dentry->d_name.hash, dentry->d_inode); - } - - if ((dentry->d_name.len == 7) -@@ -2742,27 +2423,18 @@ struct dentry *novfs_i_lookup(struct ino - inode = novfs_root->d_inode; - return (0); - } else { -- info = -- kmalloc(sizeof(struct novfs_entry_info) + -- PATH_LENGTH_BUFFER, GFP_KERNEL); -+ info = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, GFP_KERNEL); - if (info) { -- if (NULL == -- (retVal = -- ERR_PTR(verify_dentry(dentry, 1)))) { -+ if (NULL == (retVal = ERR_PTR(verify_dentry(dentry, 1)))) { - name.name = dentry->d_name.name; - name.len = dentry->d_name.len; - name.hash = novfs_internal_hash(&name); - - if (novfs_lock_inode_cache(dir)) { -- if (!novfs_get_entry -- 
(dir, &name, &ino, info)) { -- inode = -- ilookup(dentry-> -- d_sb, ino); -+ if (!novfs_get_entry(dir, &name, &ino, info)) { -+ inode = ilookup(dentry->d_sb, ino); - if (inode) { -- update_inode -- (inode, -- info); -+ update_inode(inode, info); - } - } - novfs_unlock_inode_cache(dir); -@@ -2775,16 +2447,13 @@ struct dentry *novfs_i_lookup(struct ino - uid = novfs_scope_get_uid(novfs_get_scope(dentry)); - } - if (novfs_lock_inode_cache(dir)) { -- inode = novfs_get_inode (dentry->d_sb, info->mode, 0, uid, ino, &name); -+ inode = novfs_get_inode(dentry->d_sb, info->mode, 0, uid, ino, &name); - if (inode) { - if (!novfs_get_entry(dir, &dentry->d_name, &ino, info)) { -- update_inode -- (inode, -- info); -+ update_inode(inode, info); - } - } -- novfs_unlock_inode_cache -- (dir); -+ novfs_unlock_inode_cache(dir); - } - } - } -@@ -2806,8 +2475,7 @@ struct dentry *novfs_i_lookup(struct ino - if (info) - kfree(info); - -- DbgPrint("inode=0x%p dentry->d_inode=0x%p return=0x%p", -- dir, dentry->d_inode, retVal); -+ DbgPrint("inode=0x%p dentry->d_inode=0x%p return=0x%p", dir, dentry->d_inode, retVal); - - return (retVal); - } -@@ -2820,15 +2488,11 @@ int novfs_i_unlink(struct inode *dir, st - char *path, *buf; - uint64_t t64; - -- DbgPrint("dir=0x%p dir->i_ino=%d %.*s", dir, -- dir->i_ino, dentry->d_name.len, dentry->d_name.name); -+ DbgPrint("dir=0x%p dir->i_ino=%d %.*s", dir, dir->i_ino, dentry->d_name.len, dentry->d_name.name); - DbgPrint("IS_ROOT(dentry)=%d", IS_ROOT(dentry)); -- DbgPrint("IS_ROOT(dentry->d_parent)=%d", -- IS_ROOT(dentry->d_parent)); -- DbgPrint("IS_ROOT(dentry->d_parent->d_parent)=%d", -- IS_ROOT(dentry->d_parent->d_parent)); -- DbgPrint("IS_ROOT(dentry->d_parent->d_parent->d_parent)=%d", -- IS_ROOT(dentry->d_parent->d_parent->d_parent)); -+ DbgPrint("IS_ROOT(dentry->d_parent)=%d", IS_ROOT(dentry->d_parent)); -+ DbgPrint("IS_ROOT(dentry->d_parent->d_parent)=%d", IS_ROOT(dentry->d_parent->d_parent)); -+ 
DbgPrint("IS_ROOT(dentry->d_parent->d_parent->d_parent)=%d", IS_ROOT(dentry->d_parent->d_parent->d_parent)); - - if (IS_ROOT(dentry) || /* Root */ - IS_ROOT(dentry->d_parent) || /* User */ -@@ -2839,34 +2503,23 @@ int novfs_i_unlink(struct inode *dir, st - - inode = dentry->d_inode; - if (inode) { -- DbgPrint("dir=0x%p dir->i_ino=%d inode=0x%p ino=%d", -- dir, dir->i_ino, inode, inode->i_ino); -+ DbgPrint("dir=0x%p dir->i_ino=%d inode=0x%p ino=%d", dir, dir->i_ino, inode, inode->i_ino); - if (inode->i_private) { -- session = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - if (0 == SC_PRESENT(session)) { -- ((struct inode_data *) inode->i_private)->Scope = -- novfs_get_scope(dentry); -- session = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry); -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - } - - buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); - if (buf) { -- path = -- novfs_dget_path(dentry, buf, -- PATH_LENGTH_BUFFER); -+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); - if (path) { -- DbgPrint("path %s mode 0%o", -- path, inode->i_mode); -+ DbgPrint("path %s mode 0%o", path, inode->i_mode); - if (IS_ROOT(dentry->d_parent->d_parent)) { - retCode = novfs_daemon_logout(&dentry->d_name, &session); - } else { -- retCode = -- novfs_delete(path, -- S_ISDIR(inode->i_mode), session); -+ retCode = novfs_delete(path, S_ISDIR(inode->i_mode), session); - if (retCode) { - struct iattr ia; - memset(&ia, 0, sizeof(ia)); -@@ -2877,14 +2530,10 @@ int novfs_i_unlink(struct inode *dir, st - } - } - if (!retCode || IS_DEADDIR(inode)) { -- novfs_remove_inode_entry(dir, -- &dentry-> -- d_name, -- 0); -+ novfs_remove_inode_entry(dir, &dentry->d_name, 0); - dentry->d_time = 0; - t64 = 0; -- 
novfs_scope_set_userspace(&t64, &t64, -- &t64, &t64); -+ novfs_scope_set_userspace(&t64, &t64, &t64, &t64); - retCode = 0; - } - } -@@ -2907,8 +2556,7 @@ int novfs_i_mkdir(struct inode *dir, str - uid_t uid; - - DbgPrint("dir=0x%p ino=%d dentry=0x%p %.*s mode=0%lo", -- dir, dir->i_ino, dentry, dentry->d_name.len, -- dentry->d_name.name, mode); -+ dir, dir->i_ino, dentry, dentry->d_name.len, dentry->d_name.name, mode); - - if (IS_ROOT(dentry) || /* Root */ - IS_ROOT(dentry->d_parent) || /* User */ -@@ -2920,61 +2568,35 @@ int novfs_i_mkdir(struct inode *dir, str - mode |= S_IFDIR; - mode &= (S_IFMT | S_IRWXU); - if (dir->i_private) { -- session = -- novfs_scope_get_sessionId(((struct inode_data *) dir->i_private)->Scope); -+ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope); - if (0 == SC_PRESENT(session)) { -- ((struct inode_data *) dir->i_private)->Scope = -- novfs_get_scope(dentry); -- session = -- novfs_scope_get_sessionId(((struct inode_data *) dir->i_private)-> -- Scope); -+ ((struct inode_data *)dir->i_private)->Scope = novfs_get_scope(dentry); -+ session = novfs_scope_get_sessionId(((struct inode_data *)dir->i_private)->Scope); - } - -- uid = novfs_scope_get_uid(((struct inode_data *) dir->i_private)->Scope); -+ uid = novfs_scope_get_uid(((struct inode_data *)dir->i_private)->Scope); - buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); - if (buf) { - path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); - if (path) { - DbgPrint("path %s", path); -- retCode = -- novfs_create(path, S_ISDIR(mode), session); -+ retCode = novfs_create(path, S_ISDIR(mode), session); - if (!retCode) { -- retCode = -- novfs_get_file_info(path, &info, -- session); -+ retCode = novfs_get_file_info(path, &info, session); - if (!retCode) { -- retCode = -- novfs_i_mknod(dir, dentry, -- mode, 0); -+ retCode = novfs_i_mknod(dir, dentry, mode, 0); - inode = dentry->d_inode; - if (inode) { -- update_inode(inode, -- &info); -- ((struct inode_data *) inode-> -- 
i_private)->Flags &= -- ~UPDATE_INODE; -- -- dentry->d_time = -- jiffies + -- (novfs_update_timeout -- * HZ); -- -- novfs_lock_inode_cache -- (dir); -- if (novfs_update_entry -- (dir, -- &dentry->d_name, 0, -- &info)) { -- novfs_add_inode_entry -- (dir, -- &dentry-> -- d_name, -- inode-> -- i_ino, -- &info); -+ update_inode(inode, &info); -+ ((struct inode_data *)inode->i_private)->Flags &= ~UPDATE_INODE; -+ -+ dentry->d_time = jiffies + (novfs_update_timeout * HZ); -+ -+ novfs_lock_inode_cache(dir); -+ if (novfs_update_entry(dir, &dentry->d_name, 0, &info)) { -+ novfs_add_inode_entry(dir, &dentry->d_name, inode->i_ino, &info); - } -- novfs_unlock_inode_cache -- (dir); -+ novfs_unlock_inode_cache(dir); - } - - } -@@ -3006,11 +2628,10 @@ int novfs_i_mknod(struct inode *dir, str - return (-EACCES); - } - -- if (((struct inode_data *) dir->i_private)) { -- uid = novfs_scope_get_uid(((struct inode_data *) dir->i_private)->Scope); -+ if (((struct inode_data *)dir->i_private)) { -+ uid = novfs_scope_get_uid(((struct inode_data *)dir->i_private)->Scope); - if (mode & (S_IFREG | S_IFDIR)) { -- inode = -- novfs_get_inode(dir->i_sb, mode, dev, uid, 0, &dentry->d_name); -+ inode = novfs_get_inode(dir->i_sb, mode, dev, uid, 0, &dentry->d_name); - } - } - if (inode) { -@@ -3022,8 +2643,7 @@ int novfs_i_mknod(struct inode *dir, str - memset(&info, 0, sizeof(info)); - info.mode = inode->i_mode; - novfs_lock_inode_cache(dir); -- novfs_add_inode_entry(dir, &dentry->d_name, inode->i_ino, -- &info); -+ novfs_add_inode_entry(dir, &dentry->d_name, inode->i_ino, &info); - novfs_unlock_inode_cache(dir); - - dput(parent); -@@ -3034,8 +2654,7 @@ int novfs_i_mknod(struct inode *dir, str - return retCode; - } - --int novfs_i_rename(struct inode *odir, struct dentry *od, struct inode *ndir, -- struct dentry *nd) -+int novfs_i_rename(struct inode *odir, struct dentry *od, struct inode *ndir, struct dentry *nd) - { - int retCode = -ENOTEMPTY; - char *newpath, *newbuf, *newcon; -@@ -3053,8 
+2672,7 @@ int novfs_i_rename(struct inode *odir, s - return (-EACCES); - } - -- DbgPrint("odir=0x%p ino=%d ndir=0x%p ino=%d", odir, -- odir->i_ino, ndir, ndir->i_ino); -+ DbgPrint("odir=0x%p ino=%d ndir=0x%p ino=%d", odir, odir->i_ino, ndir, ndir->i_ino); - - oldbuf = kmalloc(PATH_LENGTH_BUFFER * 2, GFP_KERNEL); - newbuf = oldbuf + PATH_LENGTH_BUFFER; -@@ -3065,15 +2683,12 @@ int novfs_i_rename(struct inode *odir, s - oldlen = PATH_LENGTH_BUFFER - (int)(oldpath - oldbuf); - newlen = PATH_LENGTH_BUFFER - (int)(newpath - newbuf); - -- DbgPrint("od=0x%p od->inode=0x%p od->inode->i_ino=%d %s", -- od, od->d_inode, od->d_inode->i_ino, oldpath); -+ DbgPrint("od=0x%p od->inode=0x%p od->inode->i_ino=%d %s", od, od->d_inode, od->d_inode->i_ino, oldpath); - if (nd->d_inode) { - DbgPrint("nd=0x%p nd->inode=0x%p nd->inode->i_ino=%d %s", -- nd, nd->d_inode, nd->d_inode->i_ino, -- newpath); -+ nd, nd->d_inode, nd->d_inode->i_ino, newpath); - } else { -- DbgPrint("nd=0x%p nd->inode=0x%p %s", -- nd, nd->d_inode, newpath); -+ DbgPrint("nd=0x%p nd->inode=0x%p %s", nd, nd->d_inode, newpath); - } - - /* -@@ -3084,17 +2699,12 @@ int novfs_i_rename(struct inode *odir, s - DbgPrint("newcon=0x%p newpath=0x%p", newcon, newpath); - DbgPrint("oldcon=0x%p oldpath=0x%p", oldcon, oldpath); - retCode = -EXDEV; -- if (newcon && oldcon -- && ((int)(newcon - newpath) == -- (int)(oldcon - oldpath))) { -+ if (newcon && oldcon && ((int)(newcon - newpath) == (int)(oldcon - oldpath))) { - newcon = strchr(newcon + 1, '\\'); - oldcon = strchr(oldcon + 1, '\\'); -- DbgPrint("2; newcon=0x%p newpath=0x%p", -- newcon, newpath); -- DbgPrint("2; oldcon=0x%p oldpath=0x%p", -- oldcon, oldpath); -- if (newcon && oldcon && -- ((int)(newcon - newpath) == (int)(oldcon - oldpath))) { -+ DbgPrint("2; newcon=0x%p newpath=0x%p", newcon, newpath); -+ DbgPrint("2; oldcon=0x%p oldpath=0x%p", oldcon, oldpath); -+ if (newcon && oldcon && ((int)(newcon - newpath) == (int)(oldcon - oldpath))) { - newname.name = newpath; - 
newname.len = (int)(newcon - newpath); - newname.hash = 0; -@@ -3104,23 +2714,18 @@ int novfs_i_rename(struct inode *odir, s - oldname.hash = 0; - if (!novfs_d_strcmp(&newname, &oldname)) { - -- if (od->d_inode -- && od->d_inode->i_private) { -+ if (od->d_inode && od->d_inode->i_private) { - -- if (nd->d_inode -- && nd->d_inode-> -- i_private) { -+ if (nd->d_inode && nd->d_inode->i_private) { - session = - novfs_scope_get_sessionId -- (((struct inode_data *) ndir->i_private)->Scope); -- if (0 == -- SC_PRESENT -- (session)) { -- ((struct inode_data *) ndir->i_private)->Scope = novfs_get_scope(nd); -- session -- = -- novfs_scope_get_sessionId -- (((struct inode_data *) ndir->i_private)->Scope); -+ (((struct inode_data *)ndir->i_private)->Scope); -+ if (0 == SC_PRESENT(session)) { -+ ((struct inode_data *)ndir->i_private)->Scope = -+ novfs_get_scope(nd); -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *)ndir-> -+ i_private)->Scope); - } - - retCode = -@@ -3131,20 +2736,28 @@ int novfs_i_rename(struct inode *odir, s - ia.ia_valid = ATTR_MODE; - ia.ia_mode = S_IRWXU; - novfs_set_attr(newpath, &ia, session); -- retCode = novfs_delete(newpath, S_ISDIR(nd->d_inode->i_mode), session); -+ retCode = -+ novfs_delete(newpath, S_ISDIR(nd->d_inode->i_mode), -+ session); - } - - } - -- session = novfs_scope_get_sessionId(((struct inode_data *) ndir->i_private)->Scope); -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *)ndir->i_private)-> -+ Scope); - if (0 == SC_PRESENT(session)) { - ((struct inode_data *)ndir->i_private)->Scope = novfs_get_scope(nd); -- session = novfs_scope_get_sessionId(((struct inode_data *) ndir->i_private)->Scope); -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *)ndir-> -+ i_private)->Scope); - } -- retCode = novfs_rename_file(S_ISDIR(od->d_inode->i_mode), oldpath, oldlen - 1, newpath, newlen - 1, session); -+ retCode = -+ novfs_rename_file(S_ISDIR(od->d_inode->i_mode), oldpath, oldlen - 1, -+ newpath, newlen - 1, 
session); - - if (!retCode) { -- info = (struct novfs_entry_info *) oldbuf; -+ info = (struct novfs_entry_info *)oldbuf; - od->d_time = 0; - novfs_remove_inode_entry(odir, &od->d_name, 0); - novfs_remove_inode_entry(ndir, &nd->d_name, 0); -@@ -3152,9 +2765,9 @@ int novfs_i_rename(struct inode *odir, s - nd->d_time = jiffies + (novfs_update_timeout * HZ); - - if (od->d_inode && od->d_inode->i_ino) { -- ino = od->d_inode-> i_ino; -+ ino = od->d_inode->i_ino; - } else { -- ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); -+ ino = (ino_t) atomic_inc_return(&novfs_Inode_Number); - } - novfs_add_inode_entry(ndir, &nd->d_name, ino, info); - } -@@ -3172,7 +2785,6 @@ int novfs_i_rename(struct inode *odir, s - return (retCode); - } - -- - int novfs_i_setattr(struct dentry *dentry, struct iattr *attr) - { - char *path, *buf; -@@ -3192,15 +2804,10 @@ int novfs_i_setattr(struct dentry *dentr - } - - if (inode && inode->i_private) { -- session = -- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> -- Scope); -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - if (0 == SC_PRESENT(session)) { -- ((struct inode_data *) inode->i_private)->Scope = -- novfs_get_scope(dentry); -- session = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry); -+ session = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - } - - buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -@@ -3211,40 +2818,31 @@ int novfs_i_setattr(struct dentry *dentr - strcpy(mtime_buf, "Unspecified"); - strcpy(ctime_buf, "Unspecified"); - if (attr->ia_valid & ATTR_ATIME) { -- ctime_r(&attr->ia_atime.tv_sec, -- atime_buf); -+ ctime_r(&attr->ia_atime.tv_sec, atime_buf); - } - if (attr->ia_valid & ATTR_MTIME) { -- ctime_r(&attr->ia_mtime.tv_sec, -- mtime_buf); -+ ctime_r(&attr->ia_mtime.tv_sec, mtime_buf); - } - if (attr->ia_valid & 
ATTR_CTIME) { -- ctime_r(&attr->ia_ctime.tv_sec, -- ctime_buf); -+ ctime_r(&attr->ia_ctime.tv_sec, ctime_buf); - } - /* Removed for Bug 132374. jlt */ - __DbgPrint("%s: %s\n" -- " ia_valid: 0x%x\n" -- " ia_mode: 0%o\n" -- " ia_uid: %d\n" -- " ia_gid: %d\n" -- " ia_size: %lld\n" -- " ia_atime: %s\n" -- " ia_mtime: %s\n" -- " ia_ctime: %s\n", __func__, -- path, -- attr->ia_valid, -- attr->ia_mode, -- attr->ia_uid, -- attr->ia_gid, -- attr->ia_size, -- atime_buf, mtime_buf, ctime_buf); -- -- if (ia_valid -- && !(retVal = -- novfs_set_attr(path, attr, session))) { -- ((struct inode_data *) inode->i_private)-> -- Flags |= UPDATE_INODE; -+ " ia_valid: 0x%x\n" -+ " ia_mode: 0%o\n" -+ " ia_uid: %d\n" -+ " ia_gid: %d\n" -+ " ia_size: %lld\n" -+ " ia_atime: %s\n" -+ " ia_mtime: %s\n" -+ " ia_ctime: %s\n", __func__, -+ path, -+ attr->ia_valid, -+ attr->ia_mode, -+ attr->ia_uid, attr->ia_gid, attr->ia_size, atime_buf, mtime_buf, ctime_buf); -+ -+ if (ia_valid && !(retVal = novfs_set_attr(path, attr, session))) { -+ ((struct inode_data *)inode->i_private)->Flags |= UPDATE_INODE; - - if (ia_valid & ATTR_ATIME) - inode->i_atime = attr->ia_atime; -@@ -3253,10 +2851,7 @@ int novfs_i_setattr(struct dentry *dentr - if (ia_valid & ATTR_CTIME) - inode->i_ctime = attr->ia_ctime; - if (ia_valid & ATTR_MODE) { -- inode->i_mode = -- attr-> -- ia_mode & (S_IFMT | -- S_IRWXU); -+ inode->i_mode = attr->ia_mode & (S_IFMT | S_IRWXU); - } - } - } -@@ -3268,8 +2863,7 @@ int novfs_i_setattr(struct dentry *dentr - return (retVal); - } - --int novfs_i_getattr(struct vfsmount *mnt, struct dentry *dentry, -- struct kstat *kstat) -+int novfs_i_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *kstat) - { - int retCode = 0; - char atime_buf[32]; -@@ -3296,13 +2890,9 @@ int novfs_i_getattr(struct vfsmount *mnt - - buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); - if (buf) { -- path = -- novfs_dget_path(dentry, buf, -- PATH_LENGTH_BUFFER); -+ path = novfs_dget_path(dentry, buf, 
PATH_LENGTH_BUFFER); - if (path) { -- retCode = -- novfs_get_file_info(path, &info, -- session); -+ retCode = novfs_get_file_info(path, &info, session); - if (!retCode) { - update_inode(inode, &info); - id->Flags &= ~UPDATE_INODE; -@@ -3334,35 +2924,30 @@ int novfs_i_getattr(struct vfsmount *mnt - ctime_r(&kstat->ctime.tv_sec, ctime_buf); - - __DbgPrint("%s: 0x%x 0x%p <%.*s>\n" -- " ino: %d\n" -- " dev: 0x%x\n" -- " mode: 0%o\n" -- " nlink: 0x%x\n" -- " uid: 0x%x\n" -- " gid: 0x%x\n" -- " rdev: 0x%x\n" -- " size: 0x%llx\n" -- " atime: %s\n" -- " mtime: %s\n" -- " ctime: %s\n" -- " blksize: 0x%x\n" -- " blocks: 0x%x\n", __func__, -- retCode, dentry, dentry->d_name.len, dentry->d_name.name, -- kstat->ino, -- kstat->dev, -- kstat->mode, -- kstat->nlink, -- kstat->uid, -- kstat->gid, -- kstat->rdev, -- kstat->size, -- atime_buf, -- mtime_buf, ctime_buf, kstat->blksize, kstat->blocks); -+ " ino: %d\n" -+ " dev: 0x%x\n" -+ " mode: 0%o\n" -+ " nlink: 0x%x\n" -+ " uid: 0x%x\n" -+ " gid: 0x%x\n" -+ " rdev: 0x%x\n" -+ " size: 0x%llx\n" -+ " atime: %s\n" -+ " mtime: %s\n" -+ " ctime: %s\n" -+ " blksize: 0x%x\n" -+ " blocks: 0x%x\n", __func__, -+ retCode, dentry, dentry->d_name.len, dentry->d_name.name, -+ kstat->ino, -+ kstat->dev, -+ kstat->mode, -+ kstat->nlink, -+ kstat->uid, -+ kstat->gid, kstat->rdev, kstat->size, atime_buf, mtime_buf, ctime_buf, kstat->blksize, kstat->blocks); - return (retCode); - } - --ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer, -- size_t buffer_size) -+ssize_t novfs_i_getxattr(struct dentry * dentry, const char *name, void *buffer, size_t buffer_size) - { - struct inode *inode = dentry->d_inode; - struct novfs_schandle sessionId; -@@ -3374,23 +2959,17 @@ ssize_t novfs_i_getxattr(struct dentry * - SC_INITIALIZE(sessionId); - - DbgPrint("Ian"); /*%.*s\n", dentry->d_name.len, dentry->d_name.name); */ -- DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", -- dentry->d_name.len, dentry->d_name.name); -+ 
DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", dentry->d_name.len, dentry->d_name.name); - DbgPrint("name %s", name); - DbgPrint("size %u", buffer_size); - - if (inode && inode->i_private) { -- sessionId = -- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> -- Scope); -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - DbgPrint("SessionId = %u", sessionId); - //if (0 == sessionId) - if (0 == SC_PRESENT(sessionId)) { -- ((struct inode_data *) inode->i_private)->Scope = -- novfs_get_scope(dentry); -- sessionId = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry); -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - DbgPrint("SessionId = %u", sessionId); - } - } -@@ -3402,22 +2981,16 @@ ssize_t novfs_i_getxattr(struct dentry * - if (path) { - bufRead = kmalloc(XA_BUFFER, GFP_KERNEL); - if (bufRead) { -- retxcode = -- novfs_getx_file_info(path, name, bufRead, -- XA_BUFFER, &dataLen, -- sessionId); -- DbgPrint("after novfs_GetX_File_Info retxcode = %d", -- retxcode); -+ retxcode = novfs_getx_file_info(path, name, bufRead, XA_BUFFER, &dataLen, sessionId); -+ DbgPrint("after novfs_GetX_File_Info retxcode = %d", retxcode); - if (!retxcode) { - novfs_dump(64, bufRead); - if (buffer_size != 0) { - if (buffer_size >= dataLen) { -- memcpy(buffer, bufRead, -- dataLen); -+ memcpy(buffer, bufRead, dataLen); - } else { - DbgPrint("(!!!) not enough buffer_size. 
buffer_size = %d, dataLen = %d", -- buffer_size, -- dataLen); -+ buffer_size, dataLen); - retxcode = -ERANGE; - } - } -@@ -3439,8 +3012,7 @@ ssize_t novfs_i_getxattr(struct dentry * - return (dataLen); - } - --int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, -- size_t value_size, int flags) -+int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, size_t value_size, int flags) - { - - struct inode *inode = dentry->d_inode; -@@ -3453,24 +3025,18 @@ int novfs_i_setxattr(struct dentry *dent - SC_INITIALIZE(sessionId); - - DbgPrint("Ian"); /*%.*s\n", dentry->d_name.len, dentry->d_name.name); */ -- DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", -- dentry->d_name.len, dentry->d_name.name); -+ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", dentry->d_name.len, dentry->d_name.name); - DbgPrint("name %s", name); - DbgPrint("value_size %u", value_size); - DbgPrint("flags %d", flags); - - if (inode && inode->i_private) { -- sessionId = -- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> -- Scope); -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - DbgPrint("SessionId = %u", sessionId); - //if (0 == sessionId) - if (0 == SC_PRESENT(sessionId)) { -- ((struct inode_data *) inode->i_private)->Scope = -- novfs_get_scope(dentry); -- sessionId = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry); -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - DbgPrint("SessionId = %u", sessionId); - } - } -@@ -3479,10 +3045,7 @@ int novfs_i_setxattr(struct dentry *dent - if (buf) { - path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); - if (path) { -- retxcode = -- novfs_setx_file_info(path, name, value, value_size, -- &bytesWritten, flags, -- sessionId); -+ retxcode = novfs_setx_file_info(path, name, value, 
value_size, &bytesWritten, flags, sessionId); - if (!retxcode) { - DbgPrint("bytesWritten = %u", bytesWritten); - } -@@ -3500,7 +3063,7 @@ int novfs_i_setxattr(struct dentry *dent - return (retError); - } - --ssize_t novfs_i_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) -+ssize_t novfs_i_listxattr(struct dentry * dentry, char *buffer, size_t buffer_size) - { - struct inode *inode = dentry->d_inode; - struct novfs_schandle sessionId; -@@ -3511,22 +3074,16 @@ ssize_t novfs_i_listxattr(struct dentry - SC_INITIALIZE(sessionId); - - DbgPrint("Ian"); //%.*s\n", dentry->d_name.len, dentry->d_name.name); -- DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", -- dentry->d_name.len, dentry->d_name.name); -+ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", dentry->d_name.len, dentry->d_name.name); - DbgPrint("size %u", buffer_size); - - if (inode && inode->i_private) { -- sessionId = -- novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> -- Scope); -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - DbgPrint("SessionId = %u", sessionId); - //if (0 == sessionId) - if (0 == SC_PRESENT(sessionId)) { -- ((struct inode_data *) inode->i_private)->Scope = -- novfs_get_scope(dentry); -- sessionId = -- novfs_scope_get_sessionId(((struct inode_data *) inode-> -- i_private)->Scope); -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(dentry); -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); - DbgPrint("SessionId = %u", sessionId); - } - } -@@ -3538,19 +3095,15 @@ ssize_t novfs_i_listxattr(struct dentry - if (path) { - bufList = kmalloc(XA_BUFFER, GFP_KERNEL); - if (bufList) { -- retxcode = -- novfs_listx_file_info(path, bufList, -- XA_BUFFER, &dataLen, -- sessionId); -+ retxcode = novfs_listx_file_info(path, bufList, XA_BUFFER, &dataLen, sessionId); - - novfs_dump(64, bufList); - if (buffer_size != 0) { - if (buffer_size >= dataLen) { -- 
memcpy(buffer, bufList, -- dataLen); -+ memcpy(buffer, bufList, dataLen); - } else { - DbgPrint("(!!!) not enough buffer_size. buffer_size = %d, dataLen = %d", -- buffer_size, dataLen); -+ buffer_size, dataLen); - retxcode = -1; - } - } -@@ -3598,8 +3151,7 @@ int novfs_notify_change(struct dentry *d - struct inode *inode = dentry->d_inode; - - DbgPrint("Dentry=0x%p Name=%.*s Inode=0x%p Ino=%d ia_valid=0x%x", -- dentry, dentry->d_name.len, dentry->d_name.name, inode, -- inode->i_ino, attr->ia_valid); -+ dentry, dentry->d_name.len, dentry->d_name.name, inode, inode->i_ino, attr->ia_valid); - return (0); - } - -@@ -3610,8 +3162,7 @@ void novfs_clear_inode(struct inode *ino - if (inode->i_private) { - struct inode_data *id = inode->i_private; - -- DbgPrint("inode=0x%p ino=%d Scope=0x%p Name=%s", -- inode, inode->i_ino, id->Scope, id->Name); -+ DbgPrint("inode=0x%p ino=%d Scope=0x%p Name=%s", inode, inode->i_ino, id->Scope, id->Name); - - novfs_free_inode_cache(inode); - -@@ -3641,17 +3192,12 @@ int novfs_show_options(struct seq_file * - my_path.dentry = m->mnt_root; - path = d_path(&my_path, buf, PATH_LENGTH_BUFFER); - if (path) { -- if (!novfs_current_mnt -- || (novfs_current_mnt -- && strcmp(novfs_current_mnt, path))) { -+ if (!novfs_current_mnt || (novfs_current_mnt && strcmp(novfs_current_mnt, path))) { - DbgPrint("%.*s %.*s %s", - m->mnt_root->d_name.len, - m->mnt_root->d_name.name, -- m->mnt_mountpoint->d_name.len, -- m->mnt_mountpoint->d_name.name, path); -- tmp = kmalloc(PATH_LENGTH_BUFFER - -- (int)(path - buf), -- GFP_KERNEL); -+ m->mnt_mountpoint->d_name.len, m->mnt_mountpoint->d_name.name, path); -+ tmp = kmalloc(PATH_LENGTH_BUFFER - (int)(path - buf), GFP_KERNEL); - if (tmp) { - strcpy(tmp, path); - path = novfs_current_mnt; -@@ -3692,10 +3238,7 @@ int novfs_statfs(struct dentry *de, stru - buf->f_type = sb->s_magic; - buf->f_bsize = sb->s_blocksize; - buf->f_namelen = NW_MAX_PATH_LENGTH; -- buf->f_blocks = -- (sector_t) (td + -- (uint64_t) 
(sb->s_blocksize - -- 1)) >> (uint64_t) sb->s_blocksize_bits; -+ buf->f_blocks = (sector_t) (td + (uint64_t) (sb->s_blocksize - 1)) >> (uint64_t) sb->s_blocksize_bits; - buf->f_bfree = (sector_t) fd >> (uint64_t) sb->s_blocksize_bits; - buf->f_bavail = (sector_t) buf->f_bfree; - buf->f_files = (sector_t) te; -@@ -3720,8 +3263,7 @@ int novfs_statfs(struct dentry *de, stru - return 0; - } - --struct inode *novfs_get_inode(struct super_block *sb, int mode, int dev, -- uid_t Uid, ino_t ino, struct qstr *name) -+struct inode *novfs_get_inode(struct super_block *sb, int mode, int dev, uid_t Uid, ino_t ino, struct qstr *name) - { - struct inode *inode = new_inode(sb); - -@@ -3733,7 +3275,7 @@ struct inode *novfs_get_inode(struct sup - inode->i_blkbits = sb->s_blocksize_bits; - inode->i_blocks = 0; - inode->i_rdev = 0; -- inode->i_ino = (ino) ? ino : (ino_t)atomic_inc_return(&novfs_Inode_Number); -+ inode->i_ino = (ino) ? ino : (ino_t) atomic_inc_return(&novfs_Inode_Number); - if (novfs_page_cache) { - inode->i_mapping->a_ops = &novfs_aops; - } else { -@@ -3744,13 +3286,9 @@ struct inode *novfs_get_inode(struct sup - inode->i_atime.tv_nsec = 0; - inode->i_mtime = inode->i_ctime = inode->i_atime; - -- DbgPrint("Inode=0x%p I_ino=%d len=%d", -- inode, inode->i_ino, name->len); -+ DbgPrint("Inode=0x%p I_ino=%d len=%d", inode, inode->i_ino, name->len); - -- if (NULL != -- (inode->i_private = -- kmalloc(sizeof(struct inode_data) + name->len, -- GFP_KERNEL))) { -+ if (NULL != (inode->i_private = kmalloc(sizeof(struct inode_data) + name->len, GFP_KERNEL))) { - struct inode_data *id; - id = inode->i_private; - -@@ -3863,20 +3401,15 @@ int novfs_fill_super(struct super_block - inode->i_size = info.size = 0; - inode->i_uid = info.uid = 0; - inode->i_gid = info.gid = 0; -- inode->i_atime = info.atime = -- inode->i_ctime = info.ctime = -- inode->i_mtime = info.mtime = CURRENT_TIME; -+ inode->i_atime = info.atime = inode->i_ctime = info.ctime = inode->i_mtime = info.mtime = 
CURRENT_TIME; - - server = d_alloc(novfs_root, &name); - if (server) { - server->d_op = &novfs_dentry_operations; - server->d_time = 0xffffffff; - d_add(server, inode); -- DbgPrint("d_add %s 0x%p", -- SERVER_DIRECTORY_NAME, server); -- novfs_add_inode_entry(novfs_root->d_inode, -- &name, inode->i_ino, -- &info); -+ DbgPrint("d_add %s 0x%p", SERVER_DIRECTORY_NAME, server); -+ novfs_add_inode_entry(novfs_root->d_inode, &name, inode->i_ino, &info); - } - } - -@@ -3891,20 +3424,15 @@ int novfs_fill_super(struct super_block - inode->i_size = info.size = 0; - inode->i_uid = info.uid = 0; - inode->i_gid = info.gid = 0; -- inode->i_atime = info.atime = -- inode->i_ctime = info.ctime = -- inode->i_mtime = info.mtime = CURRENT_TIME; -+ inode->i_atime = info.atime = inode->i_ctime = info.ctime = inode->i_mtime = info.mtime = CURRENT_TIME; - tree = d_alloc(novfs_root, &name); - if (tree) { - tree->d_op = &novfs_dentry_operations; - tree->d_time = 0xffffffff; - - d_add(tree, inode); -- DbgPrint("d_add %s 0x%p", -- TREE_DIRECTORY_NAME, tree); -- novfs_add_inode_entry(novfs_root->d_inode, -- &name, inode->i_ino, -- &info); -+ DbgPrint("d_add %s 0x%p", TREE_DIRECTORY_NAME, tree); -+ novfs_add_inode_entry(novfs_root->d_inode, &name, inode->i_ino, &info); - } - } - } -@@ -3912,8 +3440,7 @@ int novfs_fill_super(struct super_block - return (0); - } - --static int novfs_get_sb(struct file_system_type *Fstype, int Flags, -- const char *Dev_name, void *Data, struct vfsmount *Mnt) -+static int novfs_get_sb(struct file_system_type *Fstype, int Flags, const char *Dev_name, void *Data, struct vfsmount *Mnt) - { - DbgPrint("Fstype=0x%x Dev_name=%s", Fstype, Dev_name); - return get_sb_nodev(Fstype, Flags, Data, novfs_fill_super, Mnt); -@@ -3925,8 +3452,7 @@ static void novfs_kill_sb(struct super_b - kill_litter_super(super); - } - --ssize_t novfs_Control_read(struct file *file, char *buf, size_t nbytes, -- loff_t * ppos) -+ssize_t novfs_Control_read(struct file *file, char *buf, size_t nbytes, 
loff_t * ppos) - { - ssize_t retval = 0; - -@@ -3935,8 +3461,7 @@ ssize_t novfs_Control_read(struct file * - return retval; - } - --ssize_t novfs_Control_write(struct file * file, const char *buf, size_t nbytes, -- loff_t * ppos) -+ssize_t novfs_Control_write(struct file * file, const char *buf, size_t nbytes, loff_t * ppos) - { - ssize_t retval = 0; - -@@ -3947,8 +3472,7 @@ ssize_t novfs_Control_write(struct file - return (retval); - } - --int novfs_Control_ioctl(struct inode *inode, struct file *file, -- unsigned int cmd, unsigned long arg) -+int novfs_Control_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) - { - int retval = 0; - -@@ -3976,15 +3500,13 @@ int __init init_novfs(void) - - retCode = bdi_init(&novfs_backing_dev_info); - -- if(!retCode) -+ if (!retCode) - retCode = bdi_register(&novfs_backing_dev_info, NULL, "novfs-map"); - if (retCode) { - bdi_destroy(&novfs_backing_dev_info); - goto bdi_fail; - } - -- -- - retCode = novfs_proc_init(); - - novfs_profile_init(); -@@ -4044,8 +3566,7 @@ void novfs_unlock_inode_cache(struct ino - } - } - --int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, -- ino_t * ino, struct novfs_entry_info *info) -+int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, ino_t * ino, struct novfs_entry_info *info) - /* - * Arguments: struct inode *i - pointer to directory inode - * -@@ -4094,8 +3615,7 @@ int novfs_enumerate_inode_cache(struct i - } - - /* DirCacheLock should be held before calling this routine. */ --int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, -- struct novfs_entry_info *info) -+int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info) - { - struct inode_data *id; - struct novfs_dir_cache *dc; -@@ -4133,8 +3653,7 @@ int novfs_get_entry(struct inode *i, str - } - - /*DirCacheLock should be held before calling this routine. 
*/ --int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, -- struct novfs_entry_info *info) -+int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, struct novfs_entry_info *info) - { - int retVal = -1; - loff_t count = 0; -@@ -4153,8 +3672,7 @@ int novfs_get_entry_by_pos(struct inode - } - - /* DirCacheLock should be held before calling this routine. */ --int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, -- struct novfs_entry_info *info, u64 * EntryTime) -+int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, struct novfs_entry_info *info, u64 * EntryTime) - { - struct inode_data *id; - struct novfs_dir_cache *dc; -@@ -4255,8 +3773,7 @@ void novfs_invalidate_inode_cache(struct - } - - /*++======================================================================*/ --struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, -- ino_t ino) -+struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, ino_t ino) - /* - * Returns: struct novfs_dir_cache entry if match - * NULL - if there is no match. 
-@@ -4282,8 +3799,7 @@ struct novfs_dir_cache *novfs_lookup_ino - n = (char *)name->name; - hash = name->hash; - } -- DbgPrint("inode: 0x%p; name: %.*s; hash: 0x%x;\n" -- " len: %d; ino: %d", i, nl, n, hash, nl, ino); -+ DbgPrint("inode: 0x%p; name: %.*s; hash: 0x%x;\n" " len: %d; ino: %d", i, nl, n, hash, nl, ino); - - list_for_each(l, &id->DirCache) { - dc = list_entry(l, struct novfs_dir_cache, list); -@@ -4297,9 +3813,7 @@ struct novfs_dir_cache *novfs_lookup_ino - dc, dc->ino, dc->hash, dc->nameLen, dc->nameLen, dc->name); - */ - if ((name->hash == dc->hash) && -- (name->len == dc->nameLen) && -- (0 == -- memcmp(name->name, dc->name, name->len))) { -+ (name->len == dc->nameLen) && (0 == memcmp(name->name, dc->name, name->len))) { - retVal = dc; - break; - } -@@ -4351,8 +3865,7 @@ int novfs_lookup_validate(struct inode * - * - * DirCacheLock should be held before calling this routine. - */ --int novfs_add_inode_entry(struct inode *i, -- struct qstr *name, ino_t ino, struct novfs_entry_info *info) -+int novfs_add_inode_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info) - { - struct inode_data *id; - struct novfs_dir_cache *new; -@@ -4375,8 +3888,7 @@ int novfs_add_inode_entry(struct inode * - DCCount++; - DbgPrint("inode: 0x%p; id: 0x%p; DC: 0x%p; new: 0x%p; " - "name: %.*s; ino: %d; size: %lld; mode: 0x%x", -- i, id, &id->DirCache, new, name->len, -- name->name, ino, info->size, info->mode); -+ i, id, &id->DirCache, new, name->len, name->name, ino, info->size, info->mode); - - retVal = 0; - new->flags = ENTRY_VALID; -@@ -4400,8 +3912,7 @@ int novfs_add_inode_entry(struct inode * - /* - * DirCacheLock should be held before calling this routine. 
- */ --int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, -- struct novfs_entry_info *info) -+int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, struct novfs_entry_info *info) - { - struct inode_data *id; - struct novfs_dir_cache *dc; -@@ -4422,9 +3933,7 @@ int novfs_update_entry(struct inode *i, - ctime_r(&info->mtime.tv_sec, mtime_buf); - ctime_r(&info->ctime.tv_sec, ctime_buf); - DbgPrint("inode: 0x%p; name: %.*s; ino: %d; size: %lld; " -- "atime: %s; mtime: %s; ctime: %s", -- i, nl, n, ino, info->size, atime_buf, mtime_buf, -- ctime_buf); -+ "atime: %s; mtime: %s; ctime: %s", i, nl, n, ino, info->size, atime_buf, mtime_buf, ctime_buf); - - dc = novfs_lookup_inode_cache(i, name, ino); - if (dc) { -@@ -4445,9 +3954,7 @@ int novfs_update_entry(struct inode *i, - "mtime: %s %d; ctime: %s; hash: 0x%x; " - " nameLen: %d; name: %s", - dc, dc->flags, dc->jiffies, dc->ino, dc->size, -- dc->mode, atime_buf, mtime_buf, -- dc->mtime.tv_nsec, ctime_buf, dc->hash, -- dc->nameLen, dc->name); -+ dc->mode, atime_buf, mtime_buf, dc->mtime.tv_nsec, ctime_buf, dc->hash, dc->nameLen, dc->name); - } - } - DbgPrint("return %d", retVal); -@@ -4479,8 +3986,7 @@ void novfs_remove_inode_entry(struct ino - "[name: %.*s; ino: %d; next: 0x%p; " - "prev: 0x%p]", - i, id, &id->DirCache, nl, n, ino, dc, -- dc->nameLen, dc->name, dc->ino, dc->list.next, -- dc->list.prev); -+ dc->nameLen, dc->name, dc->ino, dc->list.next, dc->list.prev); - list_del(&dc->list); - kfree(dc); - DCCount--; -@@ -4506,9 +4012,7 @@ void novfs_free_invalid_entries(struct i - dc = list_entry(l, struct novfs_dir_cache, list); - if (0 == (dc->flags & ENTRY_VALID)) { - DbgPrint("inode: 0x%p; id: 0x%p; entry: 0x%p; " -- "name: %.*s; ino: %d", -- i, id, dc, dc->nameLen, dc->name, -- dc->ino); -+ "name: %.*s; ino: %d", i, id, dc, dc->nameLen, dc->name, dc->ino); - l = l->prev; - list_del(&dc->list); - kfree(dc); -@@ -4565,25 +4069,20 @@ void novfs_dump_inode(void *pf) - - pfunc("Inode=0x%p 
I_ino=%d\n", inode, inode->i_ino); - -- pfunc(" atime=%s\n", -- ctime_r(&inode->i_atime.tv_sec, atime_buf)); -- pfunc(" ctime=%s\n", -- ctime_r(&inode->i_mtime.tv_sec, atime_buf)); -- pfunc(" mtime=%s\n", -- ctime_r(&inode->i_ctime.tv_sec, atime_buf)); -+ pfunc(" atime=%s\n", ctime_r(&inode->i_atime.tv_sec, atime_buf)); -+ pfunc(" ctime=%s\n", ctime_r(&inode->i_mtime.tv_sec, atime_buf)); -+ pfunc(" mtime=%s\n", ctime_r(&inode->i_ctime.tv_sec, atime_buf)); - pfunc(" size=%lld\n", inode->i_size); - pfunc(" mode=0%o\n", inode->i_mode); - pfunc(" count=0%o\n", atomic_read(&inode->i_count)); - } - -- pfunc(" nofs_inode_data: 0x%p Name=%s Scope=0x%p\n", id, id->Name, -- id->Scope); -+ pfunc(" nofs_inode_data: 0x%p Name=%s Scope=0x%p\n", id, id->Name, id->Scope); - - if (id->DirCache.next) { - list_for_each(l, &id->DirCache) { - dccnt++; -- dc = list_entry(l, struct novfs_dir_cache, -- list); -+ dc = list_entry(l, struct novfs_dir_cache, list); - ctime_r(&dc->atime.tv_sec, atime_buf); - ctime_r(&dc->mtime.tv_sec, mtime_buf); - ctime_r(&dc->ctime.tv_sec, ctime_buf); -@@ -4602,15 +4101,13 @@ void novfs_dump_inode(void *pf) - " name: %s\n", - dc, dc->flags, dc->jiffies, - dc->ino, dc->size, dc->mode, -- atime_buf, mtime_buf, ctime_buf, -- dc->hash, dc->nameLen, dc->name); -+ atime_buf, mtime_buf, ctime_buf, dc->hash, dc->nameLen, dc->name); - } - } - } - up(&InodeList_lock); - -- pfunc("Inodes: %d(%d) DirCache: %d(%d)\n", InodeCount, icnt, DCCount, -- dccnt); -+ pfunc("Inodes: %d(%d) DirCache: %d(%d)\n", InodeCount, icnt, DCCount, dccnt); - - } - ---- a/fs/novfs/nwcapi.c -+++ b/fs/novfs/nwcapi.c -@@ -31,11 +31,13 @@ - #define strlen_user(str) strnlen_user(str, ~0UL >> 1) - #endif - --static void GetUserData(struct nwc_scan_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply); --static void GetConnData(struct nwc_get_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply); -+static void 
GetUserData(struct nwc_scan_conn_info *connInfo, struct novfs_xplat_call_request *cmd, -+ struct novfs_xplat_call_reply *reply); -+static void GetConnData(struct nwc_get_conn_info *connInfo, struct novfs_xplat_call_request *cmd, -+ struct novfs_xplat_call_reply *reply); - - /*++======================================================================*/ --int novfs_open_conn_by_name(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) -+int novfs_open_conn_by_name(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session) - { - struct novfs_xplat_call_request *cmd = NULL; - struct novfs_xplat_call_reply *reply = NULL; -@@ -63,7 +65,7 @@ int novfs_open_conn_by_name(struct novfs - cmd->NwcCommand = NWC_OPEN_CONN_BY_NAME; - - cmd->dataLen = datalen; -- openConn = (struct nwd_open_conn_by_name *) cmd->data; -+ openConn = (struct nwd_open_conn_by_name *)cmd->data; - - openConn->nameLen = pnamelen; - openConn->serviceLen = stypelen; -@@ -78,21 +80,18 @@ int novfs_open_conn_by_name(struct novfs - data += openConn->nameLen; - cpylen = copy_from_user(data, ocbn.pServiceType, openConn->serviceLen); - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - /* - * we got reply data from the daemon - */ -- connReply = (struct nwd_open_conn_by_name *) reply->data; -+ connReply = (struct nwd_open_conn_by_name *)reply->data; - retCode = reply->Reply.ErrorCode; - if (!retCode) { - /* - * we got valid data. 
- */ -- connReply = (struct nwd_open_conn_by_name *) reply->data; -+ connReply = (struct nwd_open_conn_by_name *)reply->data; - ocbn.RetConnHandle = HandletoUint32(connReply->newConnHandle); - *Handle = connReply->newConnHandle; - -@@ -107,7 +106,7 @@ int novfs_open_conn_by_name(struct novfs - - } - --int novfs_open_conn_by_addr(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) -+int novfs_open_conn_by_addr(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session) - { - struct novfs_xplat_call_request *cmd = NULL; - struct novfs_xplat_call_reply *reply = NULL; -@@ -131,7 +130,7 @@ int novfs_open_conn_by_addr(struct novfs - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_OPEN_CONN_BY_ADDRESS; - cmd->dataLen = datalen; -- openConn = (struct nwd_open_conn_by_addr *) cmd->data; -+ openConn = (struct nwd_open_conn_by_addr *)cmd->data; - - cpylen = copy_from_user(&tranAddr, ocba.pTranAddr, sizeof(tranAddr)); - if (tranAddr.uAddressLength > sizeof(addr)) { -@@ -151,28 +150,23 @@ int novfs_open_conn_by_addr(struct novfs - DbgPrint("addr"); - novfs_dump(sizeof(addr), addr); - -- openConn->TranAddr.oAddress = *(unsigned int *) (&addr[2]); -+ openConn->TranAddr.oAddress = *(unsigned int *)(&addr[2]); - -- retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - /* - * we got reply data from the daemon - */ -- connReply = (struct nwd_open_conn_by_addr *) reply->data; -+ connReply = (struct nwd_open_conn_by_addr *)reply->data; - retCode = reply->Reply.ErrorCode; - if (!retCode) { - /* - * we got valid data. 
- */ -- connReply = (struct nwd_open_conn_by_addr *) reply->data; -- ocba.ConnHandle = -- HandletoUint32(connReply->ConnHandle); -+ connReply = (struct nwd_open_conn_by_addr *)reply->data; -+ ocba.ConnHandle = HandletoUint32(connReply->ConnHandle); - *Handle = connReply->ConnHandle; -- cpylen = -- copy_to_user(pdata->reqData, &ocba, -- sizeof(ocba)); -+ cpylen = copy_to_user(pdata->reqData, &ocba, sizeof(ocba)); - DbgPrint("New Conn Handle = %X", connReply->ConnHandle); - } - kfree(reply); -@@ -184,7 +178,7 @@ out: - - } - --int novfs_open_conn_by_ref(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) -+int novfs_open_conn_by_ref(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session) - { - struct novfs_xplat_call_request *cmd = NULL; - struct novfs_xplat_call_reply *reply = NULL; -@@ -205,20 +199,17 @@ int novfs_open_conn_by_ref(struct novfs_ - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_OPEN_CONN_BY_REFERENCE; - cmd->dataLen = datalen; -- openConn = (struct nwd_open_conn_by_ref *) cmd->data; -+ openConn = (struct nwd_open_conn_by_ref *)cmd->data; - -- openConn->uConnReference = (void *) (unsigned long) ocbr.uConnReference; -+ openConn->uConnReference = (void *)(unsigned long)ocbr.uConnReference; - openConn->uConnFlags = ocbr.uConnFlags; - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - /* - * we got reply data from the daemon - */ -- openConn = (struct nwd_open_conn_by_ref *) reply->data; -+ openConn = (struct nwd_open_conn_by_ref *)reply->data; - retCode = reply->Reply.ErrorCode; - if (!retCode) { - /* -@@ -306,8 +297,7 @@ int novfs_raw_send(struct novfs_xplat *p - * Allocate the cmd Request - */ - cmdlen = datalen + sizeof(*cmd) + sizeof(*ncpData); -- DbgPrint("[XPLAT RawNCP] - Frag Count 0x%X", -- 
xRequest.uNumRequestFrags); -+ DbgPrint("[XPLAT RawNCP] - Frag Count 0x%X", xRequest.uNumRequestFrags); - DbgPrint("[XPLAT RawNCP] - Total Command Data Len = %x", cmdlen); - - cmd = kmalloc(cmdlen, GFP_KERNEL); -@@ -325,10 +315,10 @@ int novfs_raw_send(struct novfs_xplat *p - * build the NCP Request - */ - cmd->dataLen = cmdlen - sizeof(*cmd); -- ncpData = (struct nwd_ncp_req *) cmd->data; -+ ncpData = (struct nwd_ncp_req *)cmd->data; - ncpData->replyLen = totalLen; - ncpData->requestLen = datalen; -- ncpData->ConnHandle = (void *) (unsigned long) xRequest.ConnHandle; -+ ncpData->ConnHandle = (void *)(unsigned long)xRequest.ConnHandle; - ncpData->function = xRequest.uFunction; - - reqData = ncpData->data; -@@ -340,10 +330,7 @@ int novfs_raw_send(struct novfs_xplat *p - cFrag++; - } - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - DbgPrint("RawNCP - reply = %x", reply); - DbgPrint("RawNCP - retCode = %x", retCode); - -@@ -351,11 +338,10 @@ int novfs_raw_send(struct novfs_xplat *p - /* - * we got reply data from the daemon - */ -- ncpReply = (struct nwd_ncp_rep *) reply->data; -+ ncpReply = (struct nwd_ncp_rep *)reply->data; - retCode = reply->Reply.ErrorCode; - -- DbgPrint("RawNCP - Reply Frag Count 0x%X", -- xRequest.uNumReplyFrags); -+ DbgPrint("RawNCP - Reply Frag Count 0x%X", xRequest.uNumReplyFrags); - - /* - * We need to copy the reply frags to the packet. 
-@@ -366,10 +352,9 @@ int novfs_raw_send(struct novfs_xplat *p - totalLen = ncpReply->replyLen; - for (x = 0; x < xRequest.uNumReplyFrags; x++) { - -- DbgPrint("RawNCP - Copy Frag %d: 0x%X", x, -- cFrag->uLength); -+ DbgPrint("RawNCP - Copy Frag %d: 0x%X", x, cFrag->uLength); - -- datalen = min((unsigned long) cFrag->uLength, totalLen); -+ datalen = min((unsigned long)cFrag->uLength, totalLen); - - cpylen = copy_to_user(cFrag->pData, reqData, datalen); - totalLen -= datalen; -@@ -384,7 +369,6 @@ int novfs_raw_send(struct novfs_xplat *p - retCode = -EIO; - } - -- - xRequest.uActualReplyLength = actualReplyLength; - cpylen = copy_to_user(pdata->reqData, &xRequest, sizeof(xRequest)); - -@@ -396,7 +380,7 @@ out: - return (retCode); - } - --int novfs_conn_close(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) -+int novfs_conn_close(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session) - { - struct novfs_xplat_call_request *cmd = NULL; - struct novfs_xplat_call_reply *reply = NULL; -@@ -417,16 +401,14 @@ int novfs_conn_close(struct novfs_xplat - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_CLOSE_CONN; - -- nwdClose = (struct nwd_close_conn *) cmd->data; -+ nwdClose = (struct nwd_close_conn *)cmd->data; - cmd->dataLen = sizeof(*nwdClose); - *Handle = nwdClose->ConnHandle = Uint32toHandle(cc.ConnHandle); - - /* - * send the request - */ -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, 0); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 0); - if (reply) { - retCode = reply->Reply.ErrorCode; - kfree(reply); -@@ -457,16 +439,15 @@ int novfs_sys_conn_close(struct novfs_xp - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_SYS_CLOSE_CONN; - -- nwdClose = (struct nwd_close_conn *) cmd->data; -+ nwdClose = (struct nwd_close_conn *)cmd->data; - cmd->dataLen = sizeof(*nwdClose); -- nwdClose->ConnHandle = (void *) (unsigned 
long) cc.ConnHandle; -- *Handle = (unsigned long) cc.ConnHandle; -+ nwdClose->ConnHandle = (void *)(unsigned long)cc.ConnHandle; -+ *Handle = (unsigned long)cc.ConnHandle; - - /* - * send the request - */ -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 0); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 0); - if (reply) { - retCode = reply->Reply.ErrorCode; - kfree(reply); -@@ -486,7 +467,6 @@ int novfs_login_id(struct novfs_xplat *p - unsigned long cpylen; - struct nwc_string nwcStr; - -- - memset(&server, 0, sizeof(server)); - memset(&username, 0, sizeof(username)); - memset(&password, 0, sizeof(password)); -@@ -536,13 +516,17 @@ int novfs_login_id(struct novfs_xplat *p - password.type = nwcStr.DataType; - password.len = nwcStr.DataLen; - if (!copy_from_user((void *)password.buffer, nwcStr.pBuffer, password.len)) { -- retCode = novfs_do_login(&server, &username, &password, (void **)&lgn.AuthenticationId, &Session); -+ retCode = -+ novfs_do_login(&server, &username, &password, -+ (void **)&lgn.AuthenticationId, &Session); - if (retCode) { - lgn.AuthenticationId = 0; - } - - plgn = (struct nwc_login_id *)pdata->reqData; -- cpylen = copy_to_user(&plgn->AuthenticationId, &lgn.AuthenticationId, sizeof(plgn->AuthenticationId)); -+ cpylen = -+ copy_to_user(&plgn->AuthenticationId, &lgn.AuthenticationId, -+ sizeof(plgn->AuthenticationId)); - } - memset(password.buffer, 0, password.len); - -@@ -581,15 +565,12 @@ int novfs_auth_conn(struct novfs_xplat * - - cpylen = copy_from_user(&pauth, pdata->reqData, sizeof(pauth)); - -- pDauth = (struct nwc_auth_wid *) cmd->data; -+ pDauth = (struct nwc_auth_wid *)cmd->data; - cmd->dataLen = datalen; - pDauth->AuthenticationId = pauth.AuthenticationId; -- pDauth->ConnHandle = (void *) (unsigned long) pauth.ConnHandle; -+ pDauth->ConnHandle = (void *)(unsigned long)pauth.ConnHandle; - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, 
NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - kfree(reply); -@@ -602,7 +583,7 @@ int novfs_license_conn(struct novfs_xpla - struct novfs_xplat_call_request *cmd = NULL; - struct novfs_xplat_call_reply *reply = NULL; - struct nwc_license_conn lisc; -- struct nwc_lisc_id * pDLisc = NULL; -+ struct nwc_lisc_id *pDLisc = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; - -@@ -619,14 +600,11 @@ int novfs_license_conn(struct novfs_xpla - - cpylen = copy_from_user(&lisc, pdata->reqData, sizeof(lisc)); - -- pDLisc = (struct nwc_lisc_id *) cmd->data; -+ pDLisc = (struct nwc_lisc_id *)cmd->data; - cmd->dataLen = datalen; -- pDLisc->ConnHandle = (void *) (unsigned long) lisc.ConnHandle; -+ pDLisc->ConnHandle = (void *)(unsigned long)lisc.ConnHandle; - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - kfree(reply); -@@ -654,17 +632,13 @@ int novfs_logout_id(struct novfs_xplat * - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_LOGOUT_IDENTITY; - -- cpylen = -- copy_from_user(&logout, pdata->reqData, sizeof(logout)); -+ cpylen = copy_from_user(&logout, pdata->reqData, sizeof(logout)); - -- pDLogout = (struct nwc_lo_id *) cmd->data; -+ pDLogout = (struct nwc_lo_id *)cmd->data; - cmd->dataLen = datalen; - pDLogout->AuthenticationId = logout.AuthenticationId; - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - 
kfree(reply); -@@ -693,13 +667,10 @@ int novfs_unlicense_conn(struct novfs_xp - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_UNLICENSE_CONN; - cmd->dataLen = datalen; -- pUconn = (struct nwc_unlic_conn *) cmd->data; -+ pUconn = (struct nwc_unlic_conn *)cmd->data; - -- pUconn->ConnHandle = (void *) (unsigned long) ulc.ConnHandle; -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ pUconn->ConnHandle = (void *)(unsigned long)ulc.ConnHandle; -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - /* - * we got reply data from the daemon -@@ -732,15 +703,12 @@ int novfs_unauthenticate(struct novfs_xp - - cpylen = copy_from_user(&auth, pdata->reqData, sizeof(auth)); - -- pDAuth = (struct nwc_unauthenticate *) cmd->data; -+ pDAuth = (struct nwc_unauthenticate *)cmd->data; - cmd->dataLen = datalen; - pDAuth->AuthenticationId = auth.AuthenticationId; -- pDAuth->ConnHandle = (void *) (unsigned long) auth.ConnHandle; -+ pDAuth->ConnHandle = (void *)(unsigned long)auth.ConnHandle; - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - kfree(reply); -@@ -761,8 +729,7 @@ int novfs_get_conn_info(struct novfs_xpl - - cmdlen = sizeof(*cmd) + sizeof(*pDConnInfo); - cmd = kmalloc(cmdlen, GFP_KERNEL); -- cpylen = -- copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_get_conn_info)); -+ cpylen = copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_get_conn_info)); - - if (!cmd) - return -ENOMEM; -@@ -777,17 +744,14 @@ int novfs_get_conn_info(struct novfs_xpl - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_GET_CONN_INFO; - -- pDConnInfo = (struct nwd_conn_info *) cmd->data; -+ pDConnInfo = 
(struct nwd_conn_info *)cmd->data; - -- pDConnInfo->ConnHandle = (void *) (unsigned long) connInfo.ConnHandle; -+ pDConnInfo->ConnHandle = (void *)(unsigned long)connInfo.ConnHandle; - pDConnInfo->uInfoLevel = connInfo.uInfoLevel; - pDConnInfo->uInfoLength = connInfo.uInfoLength; - cmd->dataLen = sizeof(*pDConnInfo); - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - if (!retCode) { -@@ -827,17 +791,14 @@ int novfs_set_conn_info(struct novfs_xpl - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_SET_CONN_INFO; - -- pDConnInfo = (struct nwd_set_conn_info *) cmd->data; -+ pDConnInfo = (struct nwd_set_conn_info *)cmd->data; - -- pDConnInfo->ConnHandle = (void *) (unsigned long) connInfo.ConnHandle; -+ pDConnInfo->ConnHandle = (void *)(unsigned long)connInfo.ConnHandle; - pDConnInfo->uInfoLevel = connInfo.uInfoLevel; - pDConnInfo->uInfoLength = connInfo.uInfoLength; - cmd->dataLen = sizeof(*pDConnInfo); - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - kfree(reply); -@@ -872,13 +833,11 @@ int novfs_get_id_info(struct novfs_xplat - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_GET_IDENTITY_INFO; - -- idInfo = (struct nwd_get_id_info *) cmd->data; -+ idInfo = (struct nwd_get_id_info *)cmd->data; - idInfo->AuthenticationId = qidInfo.AuthenticationId; - cmd->dataLen = sizeof(*idInfo); - -- retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 
INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - -@@ -887,32 +846,20 @@ int novfs_get_id_info(struct novfs_xplat - * Save the return info to the user structure. - */ - gId = pdata->reqData; -- idInfo = (struct nwd_get_id_info *) reply->data; -- cpylen = copy_to_user(&gId->AuthenticationId, -- &idInfo->AuthenticationId, -- sizeof(idInfo-> -- AuthenticationId)); -- cpylen = copy_to_user(&gId->AuthType, -- &idInfo->AuthType, -- sizeof(idInfo->AuthType)); -- cpylen = copy_to_user(&gId->IdentityFlags, -- &idInfo->IdentityFlags, -- sizeof(idInfo->IdentityFlags)); -- cpylen = copy_to_user(&gId->NameType, -- &idInfo->NameType, -- sizeof(idInfo->NameType)); -- cpylen = copy_to_user(&gId->ObjectType, -- &idInfo->ObjectType, -- sizeof(idInfo->ObjectType)); -+ idInfo = (struct nwd_get_id_info *)reply->data; -+ cpylen = copy_to_user(&gId->AuthenticationId, &idInfo->AuthenticationId, sizeof(idInfo->AuthenticationId)); -+ cpylen = copy_to_user(&gId->AuthType, &idInfo->AuthType, sizeof(idInfo->AuthType)); -+ cpylen = copy_to_user(&gId->IdentityFlags, &idInfo->IdentityFlags, sizeof(idInfo->IdentityFlags)); -+ cpylen = copy_to_user(&gId->NameType, &idInfo->NameType, sizeof(idInfo->NameType)); -+ cpylen = copy_to_user(&gId->ObjectType, &idInfo->ObjectType, sizeof(idInfo->ObjectType)); - -- cpylen = copy_from_user(&xferStr, gId->pDomainName, -- sizeof(struct nwc_string)); -+ cpylen = copy_from_user(&xferStr, gId->pDomainName, sizeof(struct nwc_string)); - if (idInfo->pDomainNameOffset >= reply->dataLen) { - retCode = -EINVAL; - goto out; - } - str = (char *)((char *)reply->data + idInfo->pDomainNameOffset); -- if (idInfo->domainLen > reply->dataLen - idInfo->pDomainNameOffset ) { -+ if (idInfo->domainLen > reply->dataLen - idInfo->pDomainNameOffset) { - retCode = -EINVAL; - goto out; - } -@@ -937,10 +884,10 @@ int novfs_get_id_info(struct novfs_xplat - xferStr.DataType = NWC_STRING_TYPE_ASCII; - cpylen = copy_to_user(gId->pObjectName, &xferStr, sizeof(struct 
nwc_string)); - } -- } -+ } - - out: -- kfree(reply); -+ kfree(reply); - kfree(cmd); - return (retCode); - } -@@ -970,20 +917,15 @@ int novfs_scan_conn_info(struct novfs_xp - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_SCAN_CONN_INFO; - -- pDConnInfo = (struct nwd_scan_conn_info *) cmd->data; -+ pDConnInfo = (struct nwd_scan_conn_info *)cmd->data; - - DbgPrint("Input Data"); - __DbgPrint(" connInfo.uScanIndex = 0x%X\n", connInfo.uScanIndex); -- __DbgPrint(" connInfo.uConnectionReference = 0x%X\n", -- connInfo.uConnectionReference); -- __DbgPrint(" connInfo.uScanInfoLevel = 0x%X\n", -- connInfo.uScanInfoLevel); -- __DbgPrint(" connInfo.uScanInfoLen = 0x%X\n", -- connInfo.uScanInfoLen); -- __DbgPrint(" connInfo.uReturnInfoLength = 0x%X\n", -- connInfo.uReturnInfoLength); -- __DbgPrint(" connInfo.uReturnInfoLevel = 0x%X\n", -- connInfo.uReturnInfoLevel); -+ __DbgPrint(" connInfo.uConnectionReference = 0x%X\n", connInfo.uConnectionReference); -+ __DbgPrint(" connInfo.uScanInfoLevel = 0x%X\n", connInfo.uScanInfoLevel); -+ __DbgPrint(" connInfo.uScanInfoLen = 0x%X\n", connInfo.uScanInfoLen); -+ __DbgPrint(" connInfo.uReturnInfoLength = 0x%X\n", connInfo.uReturnInfoLength); -+ __DbgPrint(" connInfo.uReturnInfoLevel = 0x%X\n", connInfo.uReturnInfoLevel); - __DbgPrint(" connInfo.uScanFlags = 0x%X\n", connInfo.uScanFlags); - - pDConnInfo->uScanIndex = connInfo.uScanIndex; -@@ -995,53 +937,38 @@ int novfs_scan_conn_info(struct novfs_xp - pDConnInfo->uScanFlags = connInfo.uScanFlags; - - if (pDConnInfo->uScanInfoLen) { -- localData = (unsigned char *) pDConnInfo; -+ localData = (unsigned char *)pDConnInfo; - pDConnInfo->uScanConnInfoOffset = sizeof(*pDConnInfo); - localData += pDConnInfo->uScanConnInfoOffset; -- cpylen = copy_from_user(localData, connInfo.pScanConnInfo, -- connInfo.uScanInfoLen); -+ cpylen = copy_from_user(localData, connInfo.pScanConnInfo, connInfo.uScanInfoLen); - } else { - pDConnInfo->uScanConnInfoOffset = 0; - } - - cmd->dataLen = 
sizeof(*pDConnInfo); - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - DbgPrint("Reply recieved"); - __DbgPrint(" NextIndex = %x\n", connInfo.uScanIndex); - __DbgPrint(" ErrorCode = %x\n", reply->Reply.ErrorCode); - __DbgPrint(" data = %p\n", reply->data); - -- pDConnInfo = (struct nwd_scan_conn_info *) reply->data; -- retCode = (unsigned long) reply->Reply.ErrorCode; -+ pDConnInfo = (struct nwd_scan_conn_info *)reply->data; -+ retCode = (unsigned long)reply->Reply.ErrorCode; - if (!retCode) { - GetUserData(&connInfo, cmd, reply); -- rInfo = (struct nwc_scan_conn_info *) pdata->repData; -- cpylen = -- copy_to_user(pdata->repData, -- &pDConnInfo->uScanIndex, -- sizeof(pDConnInfo-> -- uScanIndex)); -+ rInfo = (struct nwc_scan_conn_info *)pdata->repData; -+ cpylen = copy_to_user(pdata->repData, &pDConnInfo->uScanIndex, sizeof(pDConnInfo->uScanIndex)); - cpylen = -- copy_to_user(&rInfo->uConnectionReference, -- &pDConnInfo-> -- uConnectionReference, -- sizeof(pDConnInfo-> -- uConnectionReference)); -+ copy_to_user(&rInfo->uConnectionReference, -+ &pDConnInfo->uConnectionReference, sizeof(pDConnInfo->uConnectionReference)); - } else { - unsigned long x; - - x = 0; -- rInfo = (struct nwc_scan_conn_info *) pdata->reqData; -- cpylen = -- copy_to_user(&rInfo->uConnectionReference, -- &x, -- sizeof(rInfo-> -- uConnectionReference)); -+ rInfo = (struct nwc_scan_conn_info *)pdata->reqData; -+ cpylen = copy_to_user(&rInfo->uConnectionReference, &x, sizeof(rInfo->uConnectionReference)); - } - - kfree(reply); -@@ -1055,17 +982,17 @@ int novfs_scan_conn_info(struct novfs_xp - /* - * Copies the user data out of the scan conn info call. 
- */ --static void GetUserData(struct nwc_scan_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply) -+static void GetUserData(struct nwc_scan_conn_info *connInfo, struct novfs_xplat_call_request *cmd, -+ struct novfs_xplat_call_reply *reply) - { - unsigned long uLevel; - struct nwd_scan_conn_info *pDConnInfo = NULL; - unsigned char *srcData = NULL; - unsigned long dataLen = 0, cpylen; - -- pDConnInfo = (struct nwd_scan_conn_info *) reply->data; -+ pDConnInfo = (struct nwd_scan_conn_info *)reply->data; - uLevel = pDConnInfo->uReturnInfoLevel; -- DbgPrint("uLevel = %d, reply = 0x%p, reply->data = 0x%X", -- uLevel, reply, reply->data); -+ DbgPrint("uLevel = %d, reply = 0x%p, reply->data = 0x%X", uLevel, reply, reply->data); - - switch (uLevel) { - case NWC_CONN_INFO_RETURN_ALL: -@@ -1088,7 +1015,7 @@ static void GetUserData(struct nwc_scan_ - case NWC_CONN_INFO_TREE_NAME: - case NWC_CONN_INFO_SERVER_NAME: - case NWC_CONN_INFO_VERSION: -- srcData = (unsigned char *) pDConnInfo; -+ srcData = (unsigned char *)pDConnInfo; - srcData += pDConnInfo->uReturnConnInfoOffset; - dataLen = pDConnInfo->uReturnInfoLength; - break; -@@ -1098,14 +1025,13 @@ static void GetUserData(struct nwc_scan_ - unsigned char *dstData = connInfo->pReturnConnInfo; - struct nwc_tran_addr tranAddr; - -- srcData = (unsigned char *) reply->data; -+ srcData = (unsigned char *)reply->data; - dataLen = reply->dataLen; - -- DbgPrint("NWC_CONN_INFO_TRAN_ADDR 0x%p -> 0x%p :: 0x%X", -- srcData, connInfo->pReturnConnInfo, dataLen); -+ DbgPrint("NWC_CONN_INFO_TRAN_ADDR 0x%p -> 0x%p :: 0x%X", srcData, connInfo->pReturnConnInfo, dataLen); - - cpylen = copy_from_user(&tranAddr, dstData, sizeof(tranAddr)); -- if (((struct nwd_scan_conn_info *) srcData)->uReturnConnInfoOffset >= reply->dataLen) -+ if (((struct nwd_scan_conn_info *)srcData)->uReturnConnInfoOffset >= reply->dataLen) - goto out; - srcData += ((struct nwd_scan_conn_info *)srcData)->uReturnConnInfoOffset; - 
tranAddr.uTransportType = ((struct nwd_tran_addr *)srcData)->uTransportType; -@@ -1114,7 +1040,7 @@ static void GetUserData(struct nwc_scan_ - goto out; - cpylen = copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); - cpylen = copy_to_user(tranAddr.puAddress, -- ((struct tagNwdTranAddrEx *) srcData)->Buffer, tranAddr.uAddressLength); -+ ((struct tagNwdTranAddrEx *)srcData)->Buffer, tranAddr.uAddressLength); - dataLen = 0; - break; - } -@@ -1129,8 +1055,7 @@ static void GetUserData(struct nwc_scan_ - } - - if (srcData && dataLen && dataLen <= reply->dataLen) { -- DbgPrint("Copy Data 0x%p -> 0x%p :: 0x%X", -- srcData, connInfo->pReturnConnInfo, dataLen); -+ DbgPrint("Copy Data 0x%p -> 0x%p :: 0x%X", srcData, connInfo->pReturnConnInfo, dataLen); - cpylen = copy_to_user(connInfo->pReturnConnInfo, srcData, dataLen); - } - -@@ -1141,7 +1066,8 @@ out: - /* - * Copies the user data out of the scan conn info call. - */ --static void GetConnData(struct nwc_get_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply) -+static void GetConnData(struct nwc_get_conn_info *connInfo, struct novfs_xplat_call_request *cmd, -+ struct novfs_xplat_call_reply *reply) - { - unsigned long uLevel; - struct nwd_conn_info *pDConnInfo = NULL; -@@ -1149,12 +1075,12 @@ static void GetConnData(struct nwc_get_c - unsigned char *srcData = NULL; - unsigned long dataLen = 0, cpylen; - -- pDConnInfo = (struct nwd_conn_info *) cmd->data; -+ pDConnInfo = (struct nwd_conn_info *)cmd->data; - uLevel = pDConnInfo->uInfoLevel; - - switch (uLevel) { - case NWC_CONN_INFO_RETURN_ALL: -- srcData = (unsigned char *) reply->data; -+ srcData = (unsigned char *)reply->data; - dataLen = reply->dataLen; - break; - -@@ -1167,19 +1093,16 @@ static void GetConnData(struct nwc_get_c - unsigned char *dstData = connInfo->pConnInfo; - struct nwc_tran_addr tranAddr; - -- srcData = (unsigned char *) reply->data; -+ srcData = (unsigned char *)reply->data; - - cpylen = 
copy_from_user(&tranAddr, dstData, sizeof(tranAddr)); -- tranAddr.uTransportType = -- ((struct tagNwdTranAddrEx *) srcData)->uTransportType; -- tranAddr.uAddressLength = -- ((struct tagNwdTranAddrEx *) srcData)->uAddressLength; -+ tranAddr.uTransportType = ((struct tagNwdTranAddrEx *)srcData)->uTransportType; -+ tranAddr.uAddressLength = ((struct tagNwdTranAddrEx *)srcData)->uAddressLength; - if (tranAddr.uAddressLength > MAX_ADDRESS_LENGTH) - goto out; - cpylen = copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); - cpylen = copy_to_user(tranAddr.puAddress, -- ((struct tagNwdTranAddrEx *) srcData)->Buffer, -- tranAddr.uAddressLength); -+ ((struct tagNwdTranAddrEx *)srcData)->Buffer, tranAddr.uAddressLength); - dataLen = 0; - break; - } -@@ -1202,7 +1125,7 @@ static void GetConnData(struct nwc_get_c - case NWC_CONN_INFO_VERSION: - case NWC_CONN_INFO_SERVER_NAME: - case NWC_CONN_INFO_TREE_NAME: -- srcData = (unsigned char *) reply->data; -+ srcData = (unsigned char *)reply->data; - dataLen = reply->dataLen; - break; - -@@ -1250,16 +1173,11 @@ int novfs_get_daemon_ver(struct novfs_xp - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_GET_REQUESTER_VERSION; - cmdlen = sizeof(*cmd); -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; -- pDVersion = (struct nwd_get_reqversion *) reply->data; -- cpylen = -- copy_to_user(pDVersion, pdata->reqData, -- sizeof(*pDVersion)); -+ pDVersion = (struct nwd_get_reqversion *)reply->data; -+ cpylen = copy_to_user(pDVersion, pdata->reqData, sizeof(*pDVersion)); - kfree(reply); - } - kfree(cmd); -@@ -1277,8 +1195,7 @@ int novfs_get_preferred_DS_tree(struct n - unsigned long cmdlen, datalen, replylen, cpylen; - unsigned char *dPtr = NULL; - -- cpylen = copy_from_user(&xplatCall, pdata->reqData, -- 
sizeof(struct nwc_get_pref_ds_tree)); -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_get_pref_ds_tree)); - if (xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) - return -EINVAL; - datalen = sizeof(*pDGetTree) + xplatCall.uTreeLength; -@@ -1294,24 +1211,20 @@ int novfs_get_preferred_DS_tree(struct n - cmd->NwcCommand = NWC_GET_PREFERRED_DS_TREE; - cmdlen = sizeof(*cmd); - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - if (!retCode) { -- pDGetTree = (struct nwd_get_pref_ds_tree *) reply->data; -+ pDGetTree = (struct nwd_get_pref_ds_tree *)reply->data; - if (pDGetTree->DsTreeNameOffset >= reply->dataLen) { - retCode = -EINVAL; - goto out; - } - dPtr = reply->data + pDGetTree->DsTreeNameOffset; -- p = (struct nwc_get_pref_ds_tree *) pdata->reqData; -+ p = (struct nwc_get_pref_ds_tree *)pdata->reqData; - - DbgPrint("Reply recieved"); -- __DbgPrint(" TreeLen = %x\n", -- pDGetTree->uTreeLength); -+ __DbgPrint(" TreeLen = %x\n", pDGetTree->uTreeLength); - __DbgPrint(" TreeName = %s\n", dPtr); - - if (pDGetTree->uTreeLength > reply->dataLen - pDGetTree->DsTreeNameOffset) { -@@ -1355,18 +1268,14 @@ int novfs_set_preferred_DS_tree(struct n - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_SET_PREFERRED_DS_TREE; - -- pDSetTree = (struct nwd_set_pref_ds_tree *) cmd->data; -+ pDSetTree = (struct nwd_set_pref_ds_tree *)cmd->data; - pDSetTree->DsTreeNameOffset = sizeof(*pDSetTree); - pDSetTree->uTreeLength = xplatCall.uTreeLength; - - dPtr = cmd->data + sizeof(*pDSetTree); -- cpylen = copy_from_user(dPtr, xplatCall.pDsTreeName, -- xplatCall.uTreeLength); -+ cpylen = copy_from_user(dPtr, xplatCall.pDsTreeName, xplatCall.uTreeLength); - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, 
&replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - kfree(reply); -@@ -1376,19 +1285,17 @@ int novfs_set_preferred_DS_tree(struct n - - } - --int novfs_set_default_ctx(struct novfs_xplat *pdata, -- struct novfs_schandle Session) -+int novfs_set_default_ctx(struct novfs_xplat *pdata, struct novfs_schandle Session) - { - struct novfs_xplat_call_request *cmd = NULL; - struct novfs_xplat_call_reply *reply = NULL; - struct nwc_set_def_name_ctx xplatCall; -- struct nwd_set_def_name_ctx * pDSet = NULL; -+ struct nwd_set_def_name_ctx *pDSet = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; - unsigned char *dPtr = NULL; - -- cpylen = copy_from_user(&xplatCall, pdata->reqData, -- sizeof(struct nwc_set_def_name_ctx)); -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_set_def_name_ctx)); - if (xplatCall.uNameLength > MAX_NAME_LEN || xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) - return -EINVAL; - datalen = sizeof(*pDSet) + xplatCall.uTreeLength + xplatCall.uNameLength; -@@ -1403,7 +1310,7 @@ int novfs_set_default_ctx(struct novfs_x - cmd->NwcCommand = NWC_SET_DEFAULT_NAME_CONTEXT; - cmd->dataLen = sizeof(struct nwd_set_def_name_ctx) + xplatCall.uTreeLength + xplatCall.uNameLength; - -- pDSet = (struct nwd_set_def_name_ctx *) cmd->data; -+ pDSet = (struct nwd_set_def_name_ctx *)cmd->data; - dPtr = cmd->data; - - pDSet->TreeOffset = sizeof(struct nwd_set_def_name_ctx); -@@ -1413,14 +1320,9 @@ int novfs_set_default_ctx(struct novfs_x - - //sgled cpylen = copy_from_user(dPtr+pDSet->TreeOffset, xplatCall.pTreeName, xplatCall.uTreeLength); - cpylen = copy_from_user(dPtr + pDSet->TreeOffset, xplatCall.pDsTreeName, xplatCall.uTreeLength); //sgled -- cpylen = copy_from_user(dPtr + pDSet->NameContextOffset, -- xplatCall.pNameContext, -- xplatCall.uNameLength); -- -- retCode = -- 
Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ cpylen = copy_from_user(dPtr + pDSet->NameContextOffset, xplatCall.pNameContext, xplatCall.uNameLength); -+ -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - kfree(reply); -@@ -1430,23 +1332,21 @@ int novfs_set_default_ctx(struct novfs_x - - } - --int novfs_get_default_ctx(struct novfs_xplat *pdata, -- struct novfs_schandle Session) -+int novfs_get_default_ctx(struct novfs_xplat *pdata, struct novfs_schandle Session) - { - struct novfs_xplat_call_request *cmd = NULL; - struct novfs_xplat_call_reply *reply = NULL; - struct nwc_get_def_name_ctx xplatCall; -- struct nwd_get_def_name_ctx * pGet = NULL; -+ struct nwd_get_def_name_ctx *pGet = NULL; - char *dPtr = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, replylen, cpylen; - -- cpylen = copy_from_user(&xplatCall, pdata->reqData, -- sizeof(struct nwc_get_def_name_ctx)); -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_get_def_name_ctx)); - if (xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) - return -EINVAL; - -- cmdlen = sizeof(*cmd) + sizeof(struct nwd_get_def_name_ctx ) + xplatCall.uTreeLength; -+ cmdlen = sizeof(*cmd) + sizeof(struct nwd_get_def_name_ctx) + xplatCall.uTreeLength; - cmd = kmalloc(cmdlen, GFP_KERNEL); - - if (!cmd) -@@ -1455,40 +1355,31 @@ int novfs_get_default_ctx(struct novfs_x - cmd->Command.SequenceNumber = 0; - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_GET_DEFAULT_NAME_CONTEXT; -- cmd->dataLen = -- sizeof(struct nwd_get_def_name_ctx) + xplatCall.uTreeLength; -+ cmd->dataLen = sizeof(struct nwd_get_def_name_ctx) + xplatCall.uTreeLength; - -- pGet = (struct nwd_get_def_name_ctx *) cmd->data; -+ pGet = (struct nwd_get_def_name_ctx *)cmd->data; - dPtr = cmd->data; - -- pGet->TreeOffset = sizeof(struct nwd_get_def_name_ctx ); -+ 
pGet->TreeOffset = sizeof(struct nwd_get_def_name_ctx); - pGet->uTreeLength = xplatCall.uTreeLength; - - //sgled cpylen = copy_from_user( dPtr + pGet->TreeOffset, xplatCall.pTreeName, xplatCall.uTreeLength); - cpylen = copy_from_user(dPtr + pGet->TreeOffset, xplatCall.pDsTreeName, xplatCall.uTreeLength); //sgled - dPtr[pGet->TreeOffset + pGet->uTreeLength] = 0; - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - retCode = reply->Reply.ErrorCode; - if (!retCode) { -- pGet = (struct nwd_get_def_name_ctx *) reply->data; -+ pGet = (struct nwd_get_def_name_ctx *)reply->data; - -- DbgPrint("retCode=0x%x uNameLength1=%d uNameLength2=%d", -- retCode, pGet->uNameLength, -- xplatCall.uNameLength); -+ DbgPrint("retCode=0x%x uNameLength1=%d uNameLength2=%d", retCode, pGet->uNameLength, xplatCall.uNameLength); - if (xplatCall.uNameLength < pGet->uNameLength) { -- pGet->uNameLength = -- xplatCall.uNameLength; -+ pGet->uNameLength = xplatCall.uNameLength; - retCode = NWE_BUFFER_OVERFLOW; - } - dPtr = (char *)pGet + pGet->NameContextOffset; -- cpylen = -- copy_to_user(xplatCall.pNameContext, dPtr, -- pGet->uNameLength); -+ cpylen = copy_to_user(xplatCall.pNameContext, dPtr, pGet->uNameLength); - } - - kfree(reply); -@@ -1504,8 +1395,7 @@ int novfs_query_feature(struct novfs_xpl - int status = 0; - unsigned long cpylen; - -- cpylen = -- copy_from_user(&xpCall, pdata->reqData, sizeof(struct nwc_query_feature)); -+ cpylen = copy_from_user(&xpCall, pdata->reqData, sizeof(struct nwc_query_feature)); - switch (xpCall.Feature) { - case NWC_FEAT_NDS: - case NWC_FEAT_NDS_MTREE: -@@ -1518,8 +1408,7 @@ int novfs_query_feature(struct novfs_xpl - return (status); - } - --int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, -- struct novfs_schandle Session) -+int 
novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) - { - struct novfs_xplat_call_request *cmd = NULL; - struct novfs_xplat_call_reply *reply = NULL; -@@ -1528,9 +1417,7 @@ int novfs_get_tree_monitored_conn(struct - char *dPtr = NULL; - unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; - -- cpylen = -- copy_from_user(&xplatCall, pdata->reqData, -- sizeof(struct nwc_get_tree_monitored_conn_ref)); -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_get_tree_monitored_conn_ref)); - if (!access_ok(VERIFY_READ, xplatCall.pTreeName, sizeof(struct nwc_string))) - return -EINVAL; - if (xplatCall.pTreeName->DataLen > NW_MAX_TREE_NAME_LEN) -@@ -1547,26 +1434,19 @@ int novfs_get_tree_monitored_conn(struct - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_GET_TREE_MONITORED_CONN_REF; - -- pDConnRef = (struct nwd_get_tree_monitored_conn_ref *) cmd->data; -+ pDConnRef = (struct nwd_get_tree_monitored_conn_ref *)cmd->data; - pDConnRef->TreeName.boffset = sizeof(*pDConnRef); - pDConnRef->TreeName.len = xplatCall.pTreeName->DataLen; - pDConnRef->TreeName.type = xplatCall.pTreeName->DataType; - - dPtr = cmd->data + sizeof(*pDConnRef); -- cpylen = -- copy_from_user(dPtr, xplatCall.pTreeName->pBuffer, -- pDConnRef->TreeName.len); -- status = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ cpylen = copy_from_user(dPtr, xplatCall.pTreeName->pBuffer, pDConnRef->TreeName.len); -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { -- pDConnRef = (struct nwd_get_tree_monitored_conn_ref *) reply->data; -+ pDConnRef = (struct nwd_get_tree_monitored_conn_ref *)reply->data; - dPtr = reply->data + pDConnRef->TreeName.boffset; -- p = (struct nwc_get_tree_monitored_conn_ref *) pdata->reqData; -- cpylen = -- copy_to_user(&p->uConnReference, -- &pDConnRef->uConnReference, 4); -+ p = (struct 
nwc_get_tree_monitored_conn_ref *)pdata->reqData; -+ cpylen = copy_to_user(&p->uConnReference, &pDConnRef->uConnReference, 4); - - status = reply->Reply.ErrorCode; - kfree(reply); -@@ -1585,9 +1465,7 @@ int novfs_enum_ids(struct novfs_xplat *p - char *str = NULL; - unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; - -- cpylen = -- copy_from_user(&xplatCall, pdata->reqData, -- sizeof(struct nwc_enum_ids)); -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_enum_ids)); - datalen = sizeof(*pEnum); - cmdlen = datalen + sizeof(*cmd); - cmd = kmalloc(cmdlen, GFP_KERNEL); -@@ -1604,66 +1482,40 @@ int novfs_enum_ids(struct novfs_xplat *p - __DbgPrint(" iterator = %x\n", xplatCall.Iterator); - __DbgPrint(" cmdlen = %d\n", cmdlen); - -- pEnum = (struct nwd_enum_ids *) cmd->data; -+ pEnum = (struct nwd_enum_ids *)cmd->data; - pEnum->Iterator = xplatCall.Iterator; -- status = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - status = reply->Reply.ErrorCode; - - eId = pdata->repData; -- pEnum = (struct nwd_enum_ids *) reply->data; -- cpylen = -- copy_to_user(&eId->Iterator, &pEnum->Iterator, -- sizeof(pEnum->Iterator)); -- DbgPrint("[XPLAT NWCAPI] Found AuthId 0x%X", -- pEnum->AuthenticationId); -- cpylen = -- copy_to_user(&eId->AuthenticationId, -- &pEnum->AuthenticationId, -- sizeof(pEnum->AuthenticationId)); -- cpylen = -- copy_to_user(&eId->AuthType, &pEnum->AuthType, -- sizeof(pEnum->AuthType)); -- cpylen = -- copy_to_user(&eId->IdentityFlags, -- &pEnum->IdentityFlags, -- sizeof(pEnum->IdentityFlags)); -- cpylen = -- copy_to_user(&eId->NameType, &pEnum->NameType, -- sizeof(pEnum->NameType)); -- cpylen = -- copy_to_user(&eId->ObjectType, &pEnum->ObjectType, -- sizeof(pEnum->ObjectType)); -+ pEnum = (struct nwd_enum_ids *)reply->data; -+ cpylen = 
copy_to_user(&eId->Iterator, &pEnum->Iterator, sizeof(pEnum->Iterator)); -+ DbgPrint("[XPLAT NWCAPI] Found AuthId 0x%X", pEnum->AuthenticationId); -+ cpylen = copy_to_user(&eId->AuthenticationId, &pEnum->AuthenticationId, sizeof(pEnum->AuthenticationId)); -+ cpylen = copy_to_user(&eId->AuthType, &pEnum->AuthType, sizeof(pEnum->AuthType)); -+ cpylen = copy_to_user(&eId->IdentityFlags, &pEnum->IdentityFlags, sizeof(pEnum->IdentityFlags)); -+ cpylen = copy_to_user(&eId->NameType, &pEnum->NameType, sizeof(pEnum->NameType)); -+ cpylen = copy_to_user(&eId->ObjectType, &pEnum->ObjectType, sizeof(pEnum->ObjectType)); - - if (!status) { -- cpylen = -- copy_from_user(&xferStr, eId->pDomainName, -- sizeof(struct nwc_string)); -+ cpylen = copy_from_user(&xferStr, eId->pDomainName, sizeof(struct nwc_string)); - if (pEnum->domainNameOffset >= reply->dataLen) { - status = -EINVAL; - goto out; - } -- str = -- (char *)((char *)reply->data + -- pEnum->domainNameOffset); -+ str = (char *)((char *)reply->data + pEnum->domainNameOffset); - DbgPrint("[XPLAT NWCAPI] Found Domain %s", str); - if (pEnum->domainNameLen > reply->dataLen - pEnum->domainNameOffset) { - status = -EINVAL; - goto out; - } -- cpylen = -- copy_to_user(xferStr.pBuffer, str, -- pEnum->domainNameLen); -+ cpylen = copy_to_user(xferStr.pBuffer, str, pEnum->domainNameLen); - xferStr.DataType = NWC_STRING_TYPE_ASCII; - xferStr.DataLen = pEnum->domainNameLen - 1; -- cpylen = copy_to_user(eId->pDomainName, &xferStr, -- sizeof(struct nwc_string)); -+ cpylen = copy_to_user(eId->pDomainName, &xferStr, sizeof(struct nwc_string)); - -- cpylen = copy_from_user(&xferStr, eId->pObjectName, -- sizeof(struct nwc_string)); -+ cpylen = copy_from_user(&xferStr, eId->pObjectName, sizeof(struct nwc_string)); - if (pEnum->objectNameOffset >= reply->dataLen) { - status = -EINVAL; - goto out; -@@ -1704,14 +1556,12 @@ int novfs_change_auth_key(struct novfs_x - return -EINVAL; - if (xplatCall.pDomainName->DataLen > MAX_DOMAIN_LEN || - 
xplatCall.pObjectName->DataLen > MAX_OBJECT_NAME_LENGTH || -- xplatCall.pNewPassword->DataLen > MAX_PASSWORD_LENGTH || -- xplatCall.pVerifyPassword->DataLen > MAX_PASSWORD_LENGTH) -+ xplatCall.pNewPassword->DataLen > MAX_PASSWORD_LENGTH || xplatCall.pVerifyPassword->DataLen > MAX_PASSWORD_LENGTH) - return -EINVAL; - - datalen = - sizeof(struct nwd_change_key) + xplatCall.pDomainName->DataLen + -- xplatCall.pObjectName->DataLen + xplatCall.pNewPassword->DataLen + -- xplatCall.pVerifyPassword->DataLen; -+ xplatCall.pObjectName->DataLen + xplatCall.pNewPassword->DataLen + xplatCall.pVerifyPassword->DataLen; - - cmdlen = sizeof(*cmd) + datalen; - cmd = kmalloc(cmdlen, GFP_KERNEL); -@@ -1719,7 +1569,7 @@ int novfs_change_auth_key(struct novfs_x - if (!cmd) - return -ENOMEM; - -- pNewKey = (struct nwd_change_key *) cmd->data; -+ pNewKey = (struct nwd_change_key *)cmd->data; - cmd->dataLen = datalen; - cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; - cmd->Command.SequenceNumber = 0; -@@ -1783,9 +1633,7 @@ int novfs_change_auth_key(struct novfs_x - cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); - pNewKey->verifyPasswordLen = xferStr.DataLen; - -- status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - status = reply->Reply.ErrorCode; - -@@ -1805,9 +1653,7 @@ int novfs_set_pri_conn(struct novfs_xpla - struct nwd_set_primary_conn *pConn = NULL; - unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; - -- cpylen = -- copy_from_user(&xplatCall, pdata->reqData, -- sizeof(struct nwc_set_primary_conn)); -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_set_primary_conn)); - - datalen = sizeof(struct nwd_set_primary_conn); - cmdlen = sizeof(*cmd) + datalen; -@@ -1815,17 +1661,14 @@ int novfs_set_pri_conn(struct novfs_xpla - if (!cmd) - return 
-ENOMEM; - -- pConn = (struct nwd_set_primary_conn *) cmd->data; -+ pConn = (struct nwd_set_primary_conn *)cmd->data; - cmd->dataLen = datalen; - cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; - cmd->Command.SequenceNumber = 0; - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_SET_PRIMARY_CONN; -- pConn->ConnHandle = (void *) (unsigned long) xplatCall.ConnHandle; -- status = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ pConn->ConnHandle = (void *)(unsigned long)xplatCall.ConnHandle; -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - status = reply->Reply.ErrorCode; -@@ -1841,7 +1684,7 @@ int novfs_get_pri_conn(struct novfs_xpla - struct novfs_xplat_call_reply *reply = NULL; - unsigned long status = -ENOMEM, cmdlen, replylen, cpylen; - -- cmdlen = (unsigned long) (&((struct novfs_xplat_call_request *) 0)->data); -+ cmdlen = (unsigned long)(&((struct novfs_xplat_call_request *)0)->data); - - cmd.dataLen = 0; - cmd.Command.CommandType = VFS_COMMAND_XPLAT_CALL; -@@ -1849,16 +1692,12 @@ int novfs_get_pri_conn(struct novfs_xpla - cmd.Command.SessionId = Session; - cmd.NwcCommand = NWC_GET_PRIMARY_CONN; - -- status = -- Queue_Daemon_Command((void *)&cmd, cmdlen, NULL, 0, (void **)&reply, -- &replylen, INTERRUPTIBLE); -+ status = Queue_Daemon_Command((void *)&cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - status = reply->Reply.ErrorCode; - if (!status) { -- cpylen = -- copy_to_user(pdata->repData, reply->data, -- sizeof(unsigned long)); -+ cpylen = copy_to_user(pdata->repData, reply->data, sizeof(unsigned long)); - } - - kfree(reply); -@@ -1881,13 +1720,11 @@ int novfs_set_map_drive(struct novfs_xpl - return -EFAULT; - if (symInfo.dirPathOffsetLength > MAX_OFFSET_LEN || symInfo.linkOffsetLength > MAX_OFFSET_LEN) - return -EINVAL; -- datalen = sizeof(symInfo) + symInfo.dirPathOffsetLength 
+ -- symInfo.linkOffsetLength; -+ datalen = sizeof(symInfo) + symInfo.dirPathOffsetLength + symInfo.linkOffsetLength; - - __DbgPrint(" cmdlen = %d\n", cmdlen); - __DbgPrint(" dataLen = %d\n", datalen); -- __DbgPrint(" symInfo.dirPathOffsetLength = %d\n", -- symInfo.dirPathOffsetLength); -+ __DbgPrint(" symInfo.dirPathOffsetLength = %d\n", symInfo.dirPathOffsetLength); - __DbgPrint(" symInfo.linkOffsetLength = %d\n", symInfo.linkOffsetLength); - __DbgPrint(" pdata->datalen = %d\n", pdata->reqLen); - -@@ -1909,10 +1746,7 @@ int novfs_set_map_drive(struct novfs_xpl - kfree(cmd); - return -EFAULT; - } -- status = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - status = reply->Reply.ErrorCode; -@@ -1950,9 +1784,7 @@ int novfs_unmap_drive(struct novfs_xplat - cmd->NwcCommand = NWC_UNMAP_DRIVE; - - cpylen = copy_from_user(cmd->data, pdata->reqData, datalen); -- status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - status = reply->Reply.ErrorCode; -@@ -1982,29 +1814,23 @@ int novfs_enum_drives(struct novfs_xplat - cmd->Command.SequenceNumber = 0; - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_ENUMERATE_DRIVES; -- status = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - status = reply->Reply.ErrorCode; - DbgPrint("Status Code = 0x%X", status); - if (!status) { -- offset = sizeof(((struct nwc_get_mapped_drives *) pdata-> -- repData)->MapBuffLen); -+ offset = sizeof(((struct nwc_get_mapped_drives *) 
pdata->repData)->MapBuffLen); - cp = reply->data; -- replylen = ((struct nwc_get_mapped_drives *) pdata->repData)->MapBuffLen; -+ replylen = ((struct nwc_get_mapped_drives *)pdata->repData)->MapBuffLen; - if (offset > reply->dataLen) { - status = -EINVAL; - goto out; - } - cpylen = copy_to_user(pdata->repData, cp, offset); - cp += offset; -- cpylen = copy_to_user(((struct nwc_get_mapped_drives *) pdata-> -- repData)->MapBuffer, cp, -- min(replylen - offset, -- reply->dataLen - offset)); -+ cpylen = copy_to_user(((struct nwc_get_mapped_drives *)pdata->repData)->MapBuffer, cp, -+ min(replylen - offset, reply->dataLen - offset)); - } - } - out: -@@ -2034,13 +1860,10 @@ int novfs_get_bcast_msg(struct novfs_xpl - cmd->Command.SessionId = Session; - - cmd->NwcCommand = NWC_GET_BROADCAST_MESSAGE; -- dmsg = (struct nwd_get_bcast_notification *) cmd->data; -- dmsg->uConnReference = (void *) (unsigned long) msg.uConnReference; -+ dmsg = (struct nwd_get_bcast_notification *)cmd->data; -+ dmsg->uConnReference = (void *)(unsigned long)msg.uConnReference; - -- status = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - - if (reply) { - status = reply->Reply.ErrorCode; -@@ -2048,7 +1871,7 @@ int novfs_get_bcast_msg(struct novfs_xpl - if (!status) { - char *cp = pdata->repData; - -- dmsg = (struct nwd_get_bcast_notification *) reply->data; -+ dmsg = (struct nwd_get_bcast_notification *)reply->data; - if (pdata->repLen < dmsg->messageLen) { - dmsg->messageLen = pdata->repLen; - } -@@ -2085,7 +1908,7 @@ int novfs_set_key_value(struct novfs_xpl - - if (cstrObjectName.DataLen > MAX_OBJECT_NAME_LENGTH || cstrPassword.DataLen > MAX_PASSWORD_LENGTH) - return -EINVAL; -- datalen = sizeof(struct nwd_set_key ) + cstrObjectName.DataLen + cstrPassword.DataLen; -+ datalen = sizeof(struct nwd_set_key) + cstrObjectName.DataLen + 
cstrPassword.DataLen; - - cmdlen = sizeof(*cmd) + datalen; - cmd = kmalloc(cmdlen, GFP_KERNEL); -@@ -2093,7 +1916,7 @@ int novfs_set_key_value(struct novfs_xpl - if (!cmd) - return -ENOMEM; - -- pNewKey = (struct nwd_set_key *) cmd->data; -+ pNewKey = (struct nwd_set_key *)cmd->data; - cmd->dataLen = datalen; - cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; - cmd->Command.SequenceNumber = 0; -@@ -2102,19 +1925,17 @@ int novfs_set_key_value(struct novfs_xpl - - pNewKey->ObjectType = xplatCall.ObjectType; - pNewKey->AuthenticationId = xplatCall.AuthenticationId; -- pNewKey->ConnHandle = (void *) (unsigned long) xplatCall.ConnHandle; -+ pNewKey->ConnHandle = (void *)(unsigned long)xplatCall.ConnHandle; - str = (char *)pNewKey; - - /* - * Get the User Name - */ -- str += sizeof(struct nwd_set_key ); -- cpylen = -- copy_from_user(str, cstrObjectName.pBuffer, -- cstrObjectName.DataLen); -+ str += sizeof(struct nwd_set_key); -+ cpylen = copy_from_user(str, cstrObjectName.pBuffer, cstrObjectName.DataLen); - - str += pNewKey->objectNameLen = cstrObjectName.DataLen; -- pNewKey->objectNameOffset = sizeof(struct nwd_set_key ); -+ pNewKey->objectNameOffset = sizeof(struct nwd_set_key); - - /* - * Get the Verify Password -@@ -2124,9 +1945,7 @@ int novfs_set_key_value(struct novfs_xpl - pNewKey->newPasswordLen = cstrPassword.DataLen; - pNewKey->newPasswordOffset = pNewKey->objectNameOffset + pNewKey->objectNameLen; - -- status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - status = reply->Reply.ErrorCode; - kfree(reply); -@@ -2164,7 +1983,7 @@ int novfs_verify_key_value(struct novfs_ - if (!cmd) - return -ENOMEM; - -- pNewKey = (struct nwd_verify_key *) cmd->data; -+ pNewKey = (struct nwd_verify_key *)cmd->data; - cmd->dataLen = datalen; - cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; - 
cmd->Command.SequenceNumber = 0; -@@ -2180,9 +1999,7 @@ int novfs_verify_key_value(struct novfs_ - * Get the tree name - */ - str += sizeof(*pNewKey); -- cpylen = -- copy_from_user(&xferStr, xplatCall.pDomainName, -- sizeof(struct nwc_string)); -+ cpylen = copy_from_user(&xferStr, xplatCall.pDomainName, sizeof(struct nwc_string)); - pNewKey->domainNameOffset = sizeof(*pNewKey); - cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); - pNewKey->domainNameLen = xferStr.DataLen; -@@ -2191,8 +2008,7 @@ int novfs_verify_key_value(struct novfs_ - * Get the User Name - */ - str += pNewKey->domainNameLen; -- cpylen = copy_from_user(&xferStr, xplatCall.pObjectName, -- sizeof(struct nwc_string)); -+ cpylen = copy_from_user(&xferStr, xplatCall.pObjectName, sizeof(struct nwc_string)); - pNewKey->objectNameOffset = pNewKey->domainNameOffset + pNewKey->domainNameLen; - cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); - pNewKey->objectNameLen = xferStr.DataLen; -@@ -2201,16 +2017,12 @@ int novfs_verify_key_value(struct novfs_ - * Get the Verify Password - */ - str += pNewKey->objectNameLen; -- cpylen = copy_from_user(&xferStr, xplatCall.pVerifyPassword, -- sizeof(struct nwc_string)); -- pNewKey->verifyPasswordOffset = -- pNewKey->objectNameOffset + pNewKey->objectNameLen; -+ cpylen = copy_from_user(&xferStr, xplatCall.pVerifyPassword, sizeof(struct nwc_string)); -+ pNewKey->verifyPasswordOffset = pNewKey->objectNameOffset + pNewKey->objectNameLen; - cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); - pNewKey->verifyPasswordLen = xferStr.DataLen; - -- status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -- (void **)&reply, &replylen, -- INTERRUPTIBLE); -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, INTERRUPTIBLE); - if (reply) { - status = reply->Reply.ErrorCode; - kfree(reply); -@@ -2218,4 +2030,3 @@ int novfs_verify_key_value(struct novfs_ - kfree(cmd); - return (status); - } -- ---- 
a/fs/novfs/nwcapi.h -+++ b/fs/novfs/nwcapi.h -@@ -14,7 +14,7 @@ - #ifndef __NWCLNX_H__ - #define __NWCLNX_H__ - --#if 0 //sgled hack -+#if 0 //sgled hack - #else //sgled hack (up to endif) - - #define NW_MAX_TREE_NAME_LEN 33 -@@ -120,8 +120,7 @@ struct novfs_xplat { - - #if 0 - N_EXTERN_LIBRARY(NWRCODE) -- NWCLnxReq -- (nuint32 request, nptr pInBuf, nuint32 inLen, nptr pOutBuf, nuint32 outLen); -+ NWCLnxReq(nuint32 request, nptr pInBuf, nuint32 inLen, nptr pOutBuf, nuint32 outLen); - #endif - // - // Network Name Format Type -@@ -302,13 +301,12 @@ N_EXTERN_LIBRARY(NWRCODE) - #define MIN_NUM_REPLIES 1 - #define MAX_NUM_REQUESTS 4096 - #define MIN_NUM_REQUESTS 1 --#define MAX_FRAG_SIZE 4096 -+#define MAX_FRAG_SIZE 4096 - #define MIN_FRAG_SIZE 1 - #define MAX_INFO_LEN 4096 - #define MAX_DOMAIN_LEN MAX_NETWORK_NAME_LENGTH - #define MAX_OFFSET_LEN 4096 - -- - // - // Flags for the GetBroadcastMessage API - // -@@ -338,7 +336,6 @@ N_EXTERN_LIBRARY(NWRCODE) - - //===[ Type definitions ]================================================== - -- - // - // Structure for defining what a transport - // address looks like -@@ -350,7 +347,6 @@ struct nwc_tran_addr { - unsigned char *puAddress; - }; - -- - struct nwc_conn_string { - char *pString; - u32 uStringType; -@@ -501,7 +497,6 @@ struct nwc_convert_netware_handle { - u32 uFileSize; - }; - -- - //++======================================================================= - // API Name: NwcGetConnInfo - // -@@ -617,7 +612,6 @@ struct nwc_get_tree_monitored_conn_ref { - - }; - -- - //++======================================================================= - // API Name: NwcGetPreferredDsTree - // -@@ -678,7 +672,6 @@ struct nwc_license_conn { - u32 ConnHandle; - }; - -- - //++======================================================================= - // API Name: NWCGetMappedDrives - // -@@ -1171,7 +1164,6 @@ struct nwc_set_primary_conn { - - }; - -- - //++======================================================================= - // 
API Name: NwcQueryFeature - // -@@ -1316,7 +1308,6 @@ struct nwc_login_id { - - }; - -- - //++======================================================================= - // API Name: NWCSetPassword - // -@@ -1401,7 +1392,6 @@ struct nwc_auth_with_id { - - }; - -- - struct nwc_unmap_drive_ex { - // unsigned long connHdl; - unsigned int linkLen; ---- a/fs/novfs/nwerror.h -+++ b/fs/novfs/nwerror.h -@@ -14,7 +14,6 @@ - #ifndef __NOVFS_ERROR_H - #define __NOVFS_ERROR_H - -- - /* - * Network errors - * Decimal values at end of line are 32768 lower than actual -@@ -655,4 +654,4 @@ - #define NWE_LOCK_ERROR 0x89FF // 255 - #define NWE_FAILURE 0x89FF // 255 Generic Failure - --#endif /* __NOVFS_ERROR_H */ -+#endif /* __NOVFS_ERROR_H */ ---- a/fs/novfs/proc.c -+++ b/fs/novfs/proc.c -@@ -47,9 +47,7 @@ static int Novfs_Get_Version(char *page, - if (novfs_current_mnt) { - i = strlen(novfs_current_mnt); - if ((i > 0) && i < (count - len)) { -- len += -- sprintf(buf + len, "Novfs mount=%s\n", -- novfs_current_mnt); -+ len += sprintf(buf + len, "Novfs mount=%s\n", novfs_current_mnt); - } - } - DbgPrint("%s", buf); -@@ -69,9 +67,7 @@ int novfs_proc_init(void) - - if (Novfs_Control) { - Novfs_Control->size = 0; -- memcpy(&novfs_daemon_proc_fops, -- Novfs_Control->proc_fops, -- sizeof(struct file_operations)); -+ memcpy(&novfs_daemon_proc_fops, Novfs_Control->proc_fops, sizeof(struct file_operations)); - - /* - * Setup our functions -@@ -96,8 +92,7 @@ int novfs_proc_init(void) - /* - * Setup our file functions - */ -- memcpy(&novfs_lib_proc_fops, Novfs_Library->proc_fops, -- sizeof(struct file_operations)); -+ memcpy(&novfs_lib_proc_fops, Novfs_Library->proc_fops, sizeof(struct file_operations)); - novfs_lib_proc_fops.owner = THIS_MODULE; - novfs_lib_proc_fops.open = novfs_daemon_lib_open; - novfs_lib_proc_fops.release = novfs_daemon_lib_close; -@@ -112,9 +107,7 @@ int novfs_proc_init(void) - return (-ENOENT); - } - -- Novfs_Version = -- create_proc_read_entry("Version", 0444, 
novfs_procfs_dir, -- Novfs_Get_Version, NULL); -+ Novfs_Version = create_proc_read_entry("Version", 0444, novfs_procfs_dir, Novfs_Get_Version, NULL); - if (Novfs_Version) { - Novfs_Version->size = 0; - } else { -@@ -141,8 +134,7 @@ void novfs_proc_exit(void) - DbgPrint("remove_proc_entry(Library, NULL)\n"); - remove_proc_entry("Library", novfs_procfs_dir); - -- DbgPrint("remove_proc_entry(%s, NULL)\n", -- MODULE_NAME); -+ DbgPrint("remove_proc_entry(%s, NULL)\n", MODULE_NAME); - remove_proc_entry(MODULE_NAME, NULL); - - DbgPrint("done\n"); ---- a/fs/novfs/profile.c -+++ b/fs/novfs/profile.c -@@ -62,7 +62,7 @@ static struct proc_dir_entry *inode_file - - static DECLARE_MUTEX(LocalPrint_lock); - --static ssize_t User_proc_write_DbgBuffer(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) -+static ssize_t User_proc_write_DbgBuffer(struct file *file, const char __user * buf, size_t nbytes, loff_t * ppos) - { - ssize_t retval = nbytes; - u_char *lbuf, *p; -@@ -101,8 +101,7 @@ static ssize_t User_proc_write_DbgBuffer - } else if (!strcmp("novfsd", lbuf)) { - novfs_daemon_debug_cmd_send(p); - } else if (!strcmp("file_update_timeout", lbuf)) { -- novfs_update_timeout = -- simple_strtoul(p, NULL, 0); -+ novfs_update_timeout = simple_strtoul(p, NULL, 0); - } else if (!strcmp("cache", lbuf)) { - if (!strcmp("on", p)) { - novfs_page_cache = 1; -@@ -134,9 +133,7 @@ static ssize_t User_proc_read_DbgBuffer( - count = nbytes; - } - -- count -= -- copy_to_user(buf, &DbgPrintBuffer[DbgPrintBufferReadOffset], -- count); -+ count -= copy_to_user(buf, &DbgPrintBuffer[DbgPrintBufferReadOffset], count); - - if (count == 0) { - if (retval == 0) -@@ -144,8 +141,7 @@ static ssize_t User_proc_read_DbgBuffer( - } else { - DbgPrintBufferReadOffset += count; - if (DbgPrintBufferReadOffset >= DbgPrintBufferOffset) { -- DbgPrintBufferOffset = -- DbgPrintBufferReadOffset = 0; -+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; - } - retval = count; - } -@@ -158,7 +154,8 @@ 
static int proc_read_DbgBuffer(char *pag - { - int len; - -- printk(KERN_ALERT "proc_read_DbgBuffer: off=%ld count=%d DbgPrintBufferOffset=%lu DbgPrintBufferReadOffset=%lu\n", off, count, DbgPrintBufferOffset, DbgPrintBufferReadOffset); -+ printk(KERN_ALERT "proc_read_DbgBuffer: off=%ld count=%d DbgPrintBufferOffset=%lu DbgPrintBufferReadOffset=%lu\n", off, -+ count, DbgPrintBufferOffset, DbgPrintBufferReadOffset); - - len = DbgPrintBufferOffset - DbgPrintBufferReadOffset; - -@@ -187,9 +184,7 @@ static int LocalPrint(char *Fmt, ...) - - if (DbgPrintBuffer) { - va_start(args, Fmt); -- len += vsnprintf(DbgPrintBuffer + DbgPrintBufferOffset, -- DbgPrintBufferSize - DbgPrintBufferOffset, -- Fmt, args); -+ len += vsnprintf(DbgPrintBuffer + DbgPrintBufferOffset, DbgPrintBufferSize - DbgPrintBufferOffset, Fmt, args); - DbgPrintBufferOffset += len; - } - -@@ -209,8 +204,7 @@ int ___DbgPrint(const char *site, const - if (buf) { - va_start(args, Fmt); - len = snprintf(buf, DBG_BUFFER_SIZE, "[%d] %s ", current->pid, site); -- len += vsnprintf(buf + len, DBG_BUFFER_SIZE - len, Fmt, -- args); -+ len += vsnprintf(buf + len, DBG_BUFFER_SIZE - len, Fmt, args); - if (-1 == len) { - len = DBG_BUFFER_SIZE - 1; - buf[len] = '\0'; -@@ -226,25 +220,18 @@ int ___DbgPrint(const char *site, const - } - - if (DbgPrintBuffer && DbgPrintOn) { -- if ((DbgPrintBufferOffset + len) > -- DbgPrintBufferSize) { -+ if ((DbgPrintBufferOffset + len) > DbgPrintBufferSize) { - offset = DbgPrintBufferOffset; - DbgPrintBufferOffset = 0; -- memset(&DbgPrintBuffer[offset], -- 0, -- DbgPrintBufferSize - -- offset); -+ memset(&DbgPrintBuffer[offset], 0, DbgPrintBufferSize - offset); - } - - mb(); - -- if ((DbgPrintBufferOffset + len) < -- DbgPrintBufferSize) { -+ if ((DbgPrintBufferOffset + len) < DbgPrintBufferSize) { - DbgPrintBufferOffset += len; -- offset = -- DbgPrintBufferOffset - len; -- memcpy(&DbgPrintBuffer[offset], -- buf, len + 1); -+ offset = DbgPrintBufferOffset - len; -+ 
memcpy(&DbgPrintBuffer[offset], buf, len + 1); - } - } - } -@@ -317,8 +304,7 @@ static void NovfsGregorianDay(struct loc - int leapsToDate; - int lastYear; - int day; -- int MonthOffset[] = -- { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; -+ int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; - - lastYear = tm->tm_year - 1; - -@@ -333,9 +319,7 @@ static void NovfsGregorianDay(struct loc - * - * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be - */ -- if ((tm->tm_year % 4 == 0) && -- ((tm->tm_year % 100 != 0) || (tm->tm_year % 400 == 0)) && -- (tm->tm_mon > 2)) { -+ if ((tm->tm_year % 4 == 0) && ((tm->tm_year % 100 != 0) || (tm->tm_year % 400 == 0)) && (tm->tm_mon > 2)) { - /* - * We are past Feb. 29 in a leap year - */ -@@ -344,8 +328,7 @@ static void NovfsGregorianDay(struct loc - day = 0; - } - -- day += lastYear * 365 + leapsToDate + MonthOffset[tm->tm_mon - 1] + -- tm->tm_mday; -+ day += lastYear * 365 + leapsToDate + MonthOffset[tm->tm_mon - 1] + tm->tm_mday; - - tm->tm_wday = day % 7; - } -@@ -388,17 +371,15 @@ static void private_to_tm(int tim, struc - char *ctime_r(time_t * clock, char *buf) - { - struct local_rtc_time tm; -- static char *DAYOFWEEK[] = -- { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; -- static char *MONTHOFYEAR[] = -- { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", --"Oct", "Nov", "Dec" }; -+ static char *DAYOFWEEK[] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; -+ static char *MONTHOFYEAR[] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", -+ "Oct", "Nov", "Dec" -+ }; - - private_to_tm(*clock, &tm); - - sprintf(buf, "%s %s %d %d:%02d:%02d %d", DAYOFWEEK[tm.tm_wday], -- MONTHOFYEAR[tm.tm_mon - 1], tm.tm_mday, tm.tm_hour, tm.tm_min, -- tm.tm_sec, tm.tm_year); -+ MONTHOFYEAR[tm.tm_mon - 1], tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_year); - return (buf); - } - -@@ -421,8 +402,7 @@ static void dump(struct dentry *parent, - } - 
- if (parent) { -- pfunc("starting 0x%p %.*s\n", parent, parent->d_name.len, -- parent->d_name.name); -+ pfunc("starting 0x%p %.*s\n", parent, parent->d_name.len, parent->d_name.name); - if (parent->d_subdirs.next == &parent->d_subdirs) { - pfunc("No children...\n"); - } else { -@@ -434,18 +414,13 @@ static void dump(struct dentry *parent, - while (l) { - p = l->dentry->d_subdirs.next; - while (p != &l->dentry->d_subdirs) { -- d = list_entry(p, struct dentry, -- d_u.d_child); -+ d = list_entry(p, struct dentry, d_u.d_child); - p = p->next; - -- if (d->d_subdirs.next != -- &d->d_subdirs) { -- n = kmalloc(sizeof -- (*n), -- GFP_KERNEL); -+ if (d->d_subdirs.next != &d->d_subdirs) { -+ n = kmalloc(sizeof(*n), GFP_KERNEL); - if (n) { -- n->next = -- l->next; -+ n->next = l->next; - l->next = n; - n->dentry = d; - } -@@ -461,21 +436,11 @@ static void dump(struct dentry *parent, - " d_subdirs: 0x%p\n" - " d_inode: 0x%p\n", - d, path, -- d->d_name. -- len, -- d->d_name. -- name, -- d-> -- d_parent, -+ d->d_name.len, -+ d->d_name.name, -+ d->d_parent, - atomic_read -- (&d-> -- d_count), -- d->d_flags, -- d-> -- d_subdirs. 
-- next, -- d-> -- d_inode); -+ (&d->d_count), d->d_flags, d->d_subdirs.next, d->d_inode); - } - } - } -@@ -484,22 +449,15 @@ static void dump(struct dentry *parent, - l = start; - while (l) { - d = l->dentry; -- path = -- novfs_scope_dget_path(d, buf, -- PATH_LENGTH_BUFFER, -- 1); -+ path = novfs_scope_dget_path(d, buf, PATH_LENGTH_BUFFER, 1); - if (path) { - sd = " (None)"; -- if (&d->d_subdirs != -- d->d_subdirs.next) { -+ if (&d->d_subdirs != d->d_subdirs.next) { - sd = ""; - } - inode_number[0] = '\0'; - if (d->d_inode) { -- sprintf(inode_number, -- " (%lu)", -- d->d_inode-> -- i_ino); -+ sprintf(inode_number, " (%lu)", d->d_inode->i_ino); - } - pfunc("0x%p %s\n" - " d_parent: 0x%p\n" -@@ -509,9 +467,7 @@ static void dump(struct dentry *parent, - " d_inode: 0x%p%s\n", - d, path, d->d_parent, - atomic_read(&d->d_count), -- d->d_flags, -- d->d_subdirs.next, sd, -- d->d_inode, inode_number); -+ d->d_flags, d->d_subdirs.next, sd, d->d_inode, inode_number); - } - - n = l; -@@ -550,8 +506,7 @@ static ssize_t common_read(char *buf, si - - } - --static ssize_t novfs_profile_read_inode(struct file * file, char *buf, size_t len, -- loff_t * off) -+static ssize_t novfs_profile_read_inode(struct file *file, char *buf, size_t len, loff_t * off) - { - ssize_t retval = 0; - unsigned long offset = *off; -@@ -566,7 +521,6 @@ static ssize_t novfs_profile_read_inode( - novfs_dump_inode(LocalPrint); - } - -- - retval = common_read(buf, len, off); - - if (0 == retval) { -@@ -580,8 +534,7 @@ static ssize_t novfs_profile_read_inode( - - } - --static ssize_t novfs_profile_dentry_read(struct file * file, char *buf, size_t len, -- loff_t * off) -+static ssize_t novfs_profile_dentry_read(struct file *file, char *buf, size_t len, loff_t * off) - { - ssize_t retval = 0; - unsigned long offset = *off; -@@ -630,18 +583,12 @@ void novfs_profile_init() - dbg_dir = proc_mkdir(MODULE_NAME, NULL); - - if (dbg_dir) { -- dbg_file = create_proc_read_entry("Debug", -- 0600, -- dbg_dir, -- 
proc_read_DbgBuffer, NULL); -+ dbg_file = create_proc_read_entry("Debug", 0600, dbg_dir, proc_read_DbgBuffer, NULL); - if (dbg_file) { - dbg_file->size = DBGBUFFERSIZE; -- memcpy(&Dbg_proc_file_operations, dbg_file->proc_fops, -- sizeof(struct file_operations)); -- Dbg_proc_file_operations.read = -- User_proc_read_DbgBuffer; -- Dbg_proc_file_operations.write = -- User_proc_write_DbgBuffer; -+ memcpy(&Dbg_proc_file_operations, dbg_file->proc_fops, sizeof(struct file_operations)); -+ Dbg_proc_file_operations.read = User_proc_read_DbgBuffer; -+ Dbg_proc_file_operations.write = User_proc_write_DbgBuffer; - dbg_file->proc_fops = &Dbg_proc_file_operations; - } else { - remove_proc_entry(MODULE_NAME, NULL); -@@ -655,22 +602,16 @@ void novfs_profile_init() - inode_file = create_proc_entry("inode", 0600, dbg_dir); - if (inode_file) { - inode_file->size = 0; -- memcpy(&inode_proc_file_ops, -- inode_file->proc_fops, -- sizeof(struct file_operations)); -+ memcpy(&inode_proc_file_ops, inode_file->proc_fops, sizeof(struct file_operations)); - inode_proc_file_ops.owner = THIS_MODULE; -- inode_proc_file_ops.read = -- novfs_profile_read_inode; -+ inode_proc_file_ops.read = novfs_profile_read_inode; - inode_file->proc_fops = &inode_proc_file_ops; - } - -- dentry_file = create_proc_entry("dentry", -- 0600, dbg_dir); -+ dentry_file = create_proc_entry("dentry", 0600, dbg_dir); - if (dentry_file) { - dentry_file->size = 0; -- memcpy(&dentry_proc_file_ops, -- dentry_file->proc_fops, -- sizeof(struct file_operations)); -+ memcpy(&dentry_proc_file_ops, dentry_file->proc_fops, sizeof(struct file_operations)); - dentry_proc_file_ops.owner = THIS_MODULE; - dentry_proc_file_ops.read = novfs_profile_dentry_read; - dentry_file->proc_fops = &dentry_proc_file_ops; -@@ -686,19 +627,14 @@ void novfs_profile_init() - void novfs_profile_exit(void) - { - if (dbg_file) -- DbgPrint("Calling remove_proc_entry(Debug, NULL)\n"), -- remove_proc_entry("Debug", dbg_dir); -+ DbgPrint("Calling 
remove_proc_entry(Debug, NULL)\n"), remove_proc_entry("Debug", dbg_dir); - if (inode_file) -- DbgPrint("Calling remove_proc_entry(inode, NULL)\n"), -- remove_proc_entry("inode", dbg_dir); -+ DbgPrint("Calling remove_proc_entry(inode, NULL)\n"), remove_proc_entry("inode", dbg_dir); - if (dentry_file) -- DbgPrint("Calling remove_proc_entry(dentry, NULL)\n"), -- remove_proc_entry("dentry", dbg_dir); -+ DbgPrint("Calling remove_proc_entry(dentry, NULL)\n"), remove_proc_entry("dentry", dbg_dir); - - if (dbg_dir && (dbg_dir != novfs_procfs_dir)) { - DbgPrint("Calling remove_proc_entry(%s, NULL)\n", MODULE_NAME); - remove_proc_entry(MODULE_NAME, NULL); - } - } -- -- ---- a/fs/novfs/scope.c -+++ b/fs/novfs/scope.c -@@ -33,7 +33,6 @@ - #define CLEANUP_INTERVAL 10 - #define MAX_USERNAME_LENGTH 32 - -- - static struct list_head Scope_List; - static struct semaphore Scope_Lock; - static struct semaphore Scope_Thread_Delay; -@@ -41,16 +40,14 @@ static int Scope_Thread_Terminate = 0; - static struct timer_list Scope_Timer; - static unsigned int Scope_Hash_Val = 1; - --static struct novfs_scope_list *Scope_Search4Scope(struct novfs_schandle Id, -- int Session, int Locked) -+static struct novfs_scope_list *Scope_Search4Scope(struct novfs_schandle Id, int Session, int Locked) - { - struct novfs_scope_list *scope, *rscope = NULL; - struct novfs_schandle cur_scope; - struct list_head *sl; - int offset; - -- DbgPrint("Scope_Search4Scope: 0x%p:%p 0x%x 0x%x\n", Id.hTypeId, Id.hId, -- Session, Locked); -+ DbgPrint("Scope_Search4Scope: 0x%p:%p 0x%x 0x%x\n", Id.hTypeId, Id.hId, Session, Locked); - - if (Session) - offset = offsetof(struct novfs_scope_list, SessionId); -@@ -66,7 +63,7 @@ static struct novfs_scope_list *Scope_Se - while (sl != &Scope_List) { - scope = list_entry(sl, struct novfs_scope_list, ScopeList); - -- cur_scope = *(struct novfs_schandle *) ((char *)scope + offset); -+ cur_scope = *(struct novfs_schandle *)((char *)scope + offset); - if (SC_EQUAL(Id, cur_scope)) { - 
rscope = scope; - break; -@@ -92,8 +89,7 @@ static struct novfs_scope_list *Scope_Fi - - task = current; - -- DbgPrint("Scope_Find_Scope: %d %d %d %d\n", current_uid(), -- current_euid(), current_suid(), current_fsuid()); -+ DbgPrint("Scope_Find_Scope: %d %d %d %d\n", current_uid(), current_euid(), current_suid(), current_fsuid()); - - //scopeId = task->euid; - UID_TO_SCHANDLE(scopeId, current_euid()); -@@ -113,16 +109,11 @@ static struct novfs_scope_list *Scope_Fi - - if (!novfs_daemon_create_sessionId(&scope->SessionId)) { - DbgPrint("Scope_Find_Scope2: %d %d %d %d\n", -- current_uid(), current_euid(), -- current_suid(), current_fsuid()); -- memset(scope->ScopeUserName, 0, -- sizeof(scope->ScopeUserName)); -+ current_uid(), current_euid(), current_suid(), current_fsuid()); -+ memset(scope->ScopeUserName, 0, sizeof(scope->ScopeUserName)); - scope->ScopeUserNameLength = 0; -- novfs_daemon_getpwuid(current_euid(), -- sizeof(scope->ScopeUserName), -- scope->ScopeUserName); -- scope->ScopeUserNameLength = -- strlen(scope->ScopeUserName); -+ novfs_daemon_getpwuid(current_euid(), sizeof(scope->ScopeUserName), scope->ScopeUserName); -+ scope->ScopeUserNameLength = strlen(scope->ScopeUserName); - addscope = 1; - } - -@@ -141,27 +132,20 @@ static struct novfs_scope_list *Scope_Fi - scope->SessionId.hTypeId, scope->SessionId.hId, - scope->ScopePid, - scope->ScopeTask, -- scope->ScopeHash, -- scope->ScopeUid, -- scope->ScopeUserNameLength, -- scope->ScopeUserName); -+ scope->ScopeHash, scope->ScopeUid, scope->ScopeUserNameLength, scope->ScopeUserName); - - if (SC_PRESENT(scope->SessionId)) { - down(&Scope_Lock); -- pscope = -- Scope_Search4Scope(scopeId, 0, 1); -+ pscope = Scope_Search4Scope(scopeId, 0, 1); - - if (!pscope) { -- list_add(&scope->ScopeList, -- &Scope_List); -+ list_add(&scope->ScopeList, &Scope_List); - } - up(&Scope_Lock); - - if (pscope) { -- printk -- ("<6>Scope_Find_Scope scope not added because it was already there...\n"); -- 
novfs_daemon_destroy_sessionId(scope-> -- SessionId); -+ printk("<6>Scope_Find_Scope scope not added because it was already there...\n"); -+ novfs_daemon_destroy_sessionId(scope->SessionId); - kfree(scope); - scope = pscope; - addscope = 0; -@@ -170,7 +154,7 @@ static struct novfs_scope_list *Scope_Fi - kfree(scope); - scope = NULL; - } -- -+ - if (scope && addscope) - novfs_add_to_root(scope->ScopeUserName); - } -@@ -206,7 +190,7 @@ static int Scope_Validate_Scope(struct n - return (retVal); - } - --uid_t novfs_scope_get_uid(struct novfs_scope_list *scope) -+uid_t novfs_scope_get_uid(struct novfs_scope_list * scope) - { - uid_t uid = 0; - if (!scope) -@@ -231,7 +215,7 @@ char *novfs_scope_get_username(void) - } - - struct novfs_schandle novfs_scope_get_sessionId(struct novfs_scope_list -- *Scope) -+ *Scope) - { - struct novfs_schandle sessionId; - DbgPrint("Scope_Get_SessionId: 0x%p\n", Scope); -@@ -241,12 +225,11 @@ struct novfs_schandle novfs_scope_get_se - - if (Scope && Scope_Validate_Scope(Scope)) - sessionId = Scope->SessionId; -- DbgPrint("Scope_Get_SessionId: return 0x%p:%p\n", sessionId.hTypeId, -- sessionId.hId); -+ DbgPrint("Scope_Get_SessionId: return 0x%p:%p\n", sessionId.hTypeId, sessionId.hId); - return (sessionId); - } - --struct novfs_scope_list *novfs_get_scope_from_name(struct qstr * Name) -+struct novfs_scope_list *novfs_get_scope_from_name(struct qstr *Name) - { - struct novfs_scope_list *scope, *rscope = NULL; - struct list_head *sl; -@@ -259,9 +242,7 @@ struct novfs_scope_list *novfs_get_scope - while (sl != &Scope_List) { - scope = list_entry(sl, struct novfs_scope_list, ScopeList); - -- if ((Name->len == scope->ScopeUserNameLength) && -- (0 == strncmp(scope->ScopeUserName, Name->name, Name->len))) -- { -+ if ((Name->len == scope->ScopeUserNameLength) && (0 == strncmp(scope->ScopeUserName, Name->name, Name->len))) { - rscope = scope; - break; - } -@@ -274,8 +255,7 @@ struct novfs_scope_list *novfs_get_scope - return (rscope); - } - --int 
novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * Free, -- uint64_t * TotalEnties, uint64_t * FreeEnties) -+int novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties) - { - struct novfs_scope_list *scope; - int retVal = 0; -@@ -296,8 +276,7 @@ int novfs_scope_set_userspace(uint64_t * - return (retVal); - } - --int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, -- uint64_t * TotalEnties, uint64_t * FreeEnties) -+int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties) - { - struct novfs_scope_list *scope; - int retVal = 0; -@@ -309,8 +288,7 @@ int novfs_scope_get_userspace(uint64_t * - td = fd = te = fe = 0; - if (scope) { - -- retVal = -- novfs_daemon_get_userspace(scope->SessionId, &td, &fd, &te, &fe); -+ retVal = novfs_daemon_get_userspace(scope->SessionId, &td, &fd, &te, &fe); - - scope->ScopeUSize = td; - scope->ScopeUFree = fd; -@@ -330,7 +308,7 @@ int novfs_scope_get_userspace(uint64_t * - return (retVal); - } - --struct novfs_scope_list *novfs_get_scope(struct dentry * Dentry) -+struct novfs_scope_list *novfs_get_scope(struct dentry *Dentry) - { - struct novfs_scope_list *scope = NULL; - char *buf, *path, *cp; -@@ -404,8 +382,7 @@ char *novfs_get_scopeusers(void) - while ((sl != &Scope_List) && (cp < ep)) { - scope = list_entry(sl, struct novfs_scope_list, ScopeList); - -- DbgPrint("Scope_Get_ScopeUsers found 0x%p %s\n", -- scope, scope->ScopeUserName); -+ DbgPrint("Scope_Get_ScopeUsers found 0x%p %s\n", scope, scope->ScopeUserName); - - cp = add_to_list(scope->ScopeUserName, cp, ep); - -@@ -486,8 +463,7 @@ static int Scope_Cleanup_Thread(void *Ar - - if (!rscope) { - list_move(&scope->ScopeList, &cleanup); -- DbgPrint("Scope_Cleanup_Thread: Scope=0x%p\n", -- rscope); -+ DbgPrint("Scope_Cleanup_Thread: Scope=0x%p\n", rscope); - } - } - -@@ -509,10 +485,7 @@ static int Scope_Cleanup_Thread(void *Ar - scope, - 
scope->ScopeId, - scope->SessionId, -- scope->ScopePid, -- scope->ScopeTask, -- scope->ScopeHash, -- scope->ScopeUid, scope->ScopeUserName); -+ scope->ScopePid, scope->ScopeTask, scope->ScopeHash, scope->ScopeUid, scope->ScopeUserName); - if (!Scope_Search4Scope(scope->SessionId, 1, 0)) { - novfs_remove_from_root(scope->ScopeUserName); - novfs_daemon_destroy_sessionId(scope->SessionId); -@@ -569,10 +542,7 @@ void novfs_scope_cleanup(void) - scope, - scope->ScopeId, - scope->SessionId, -- scope->ScopePid, -- scope->ScopeTask, -- scope->ScopeHash, -- scope->ScopeUid, scope->ScopeUserName); -+ scope->ScopePid, scope->ScopeTask, scope->ScopeHash, scope->ScopeUid, scope->ScopeUserName); - if (!Scope_Search4Scope(scope->SessionId, 1, 1)) { - novfs_remove_from_root(scope->ScopeUserName); - novfs_daemon_destroy_sessionId(scope->SessionId); -@@ -587,8 +557,7 @@ void novfs_scope_cleanup(void) - /* - * Walks the dentry chain building a path. - */ --char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, unsigned int Buflen, -- int Flags) -+char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, unsigned int Buflen, int Flags) - { - char *retval = &Buf[Buflen]; - struct dentry *p = Dentry; -@@ -654,5 +623,3 @@ void novfs_scope_exit(void) - printk(KERN_INFO "Scope_Uninit: Exit\n"); - - } -- -- ---- a/fs/novfs/vfs.h -+++ b/fs/novfs/vfs.h -@@ -23,11 +23,10 @@ - - #include "nwcapi.h" - -- - #ifndef XTIER_SCHANDLE - struct novfs_schandle { -- void * hTypeId; -- void * hId; -+ void *hTypeId; -+ void *hId; - - }; - -@@ -46,7 +45,6 @@ struct novfs_schandle { - #define XTIER_SCHANDLE - #endif - -- - /*===[ Manifest constants ]===============================================*/ - #define NOVFS_MAGIC 0x4e574653 - #define MODULE_NAME "novfs" -@@ -191,15 +189,14 @@ struct novfs_data_list { - int rwflag; - }; - -- - extern char *ctime_r(time_t * clock, char *buf); - - /* - * Converts a HANDLE to a u32 type. 
- */ --static inline u32 HandletoUint32(void * h) -+static inline u32 HandletoUint32(void *h) - { -- return (u32) ((unsigned long) h); -+ return (u32) ((unsigned long)h); - } - - /* -@@ -207,7 +204,7 @@ static inline u32 HandletoUint32(void * - */ - static inline void *Uint32toHandle(u32 ui32) - { -- return ((void *) (unsigned long) ui32); -+ return ((void *)(unsigned long)ui32); - } - - /* Global variables */ -@@ -219,7 +216,6 @@ extern int novfs_page_cache; - extern char *novfs_current_mnt; - extern int novfs_max_iosize; - -- - /* Global functions */ - extern int novfs_remove_from_root(char *); - extern void novfs_dump_inode(void *pf); -@@ -227,9 +223,9 @@ extern void novfs_dump_inode(void *pf); - extern void novfs_dump(int size, void *dumpptr); - - extern int Queue_Daemon_Command(void *request, unsigned long reqlen, void *data, -- int dlen, void **reply, unsigned long * replen, -- int interruptible); --extern int novfs_do_login(struct ncl_string * Server, struct ncl_string* Username, struct ncl_string * Password, void **lgnId, struct novfs_schandle *Session); -+ int dlen, void **reply, unsigned long *replen, int interruptible); -+extern int novfs_do_login(struct ncl_string *Server, struct ncl_string *Username, struct ncl_string *Password, void **lgnId, -+ struct novfs_schandle *Session); - - extern int novfs_proc_init(void); - extern void novfs_proc_exit(void); -@@ -241,111 +237,71 @@ extern void novfs_daemon_queue_init(void - extern void novfs_daemon_queue_exit(void); - extern int novfs_daemon_logout(struct qstr *Server, struct novfs_schandle *Session); - extern int novfs_daemon_set_mnt_point(char *Path); --extern int novfs_daemon_create_sessionId(struct novfs_schandle * SessionId); -+extern int novfs_daemon_create_sessionId(struct novfs_schandle *SessionId); - extern int novfs_daemon_destroy_sessionId(struct novfs_schandle SessionId); - extern int novfs_daemon_getpwuid(uid_t uid, int unamelen, char *uname); - extern int novfs_daemon_get_userspace(struct 
novfs_schandle SessionId, -- uint64_t * TotalSize, uint64_t * TotalFree, -- uint64_t * TotalDirectoryEnties, -- uint64_t * FreeDirectoryEnties); -+ uint64_t * TotalSize, uint64_t * TotalFree, -+ uint64_t * TotalDirectoryEnties, uint64_t * FreeDirectoryEnties); - extern int novfs_daemon_debug_cmd_send(char *Command); --extern ssize_t novfs_daemon_recv_reply(struct file *file, -- const char *buf, size_t nbytes, loff_t * ppos); --extern ssize_t novfs_daemon_cmd_send(struct file *file, char *buf, -- size_t len, loff_t * off); --extern int novfs_daemon_ioctl(struct inode *inode, struct file *file, -- unsigned int cmd, unsigned long arg); -+extern ssize_t novfs_daemon_recv_reply(struct file *file, const char *buf, size_t nbytes, loff_t * ppos); -+extern ssize_t novfs_daemon_cmd_send(struct file *file, char *buf, size_t len, loff_t * off); -+extern int novfs_daemon_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); - extern int novfs_daemon_lib_close(struct inode *inode, struct file *file); --extern int novfs_daemon_lib_ioctl(struct inode *inode, struct file *file, -- unsigned int cmd, unsigned long arg); -+extern int novfs_daemon_lib_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); - extern int novfs_daemon_lib_open(struct inode *inode, struct file *file); --extern ssize_t novfs_daemon_lib_read(struct file *file, char *buf, -- size_t len, loff_t * off); --extern ssize_t novfs_daemon_lib_write(struct file *file, const char *buf, -- size_t len, loff_t * off); --extern loff_t novfs_daemon_lib_llseek(struct file *file, loff_t offset, -- int origin); -+extern ssize_t novfs_daemon_lib_read(struct file *file, char *buf, size_t len, loff_t * off); -+extern ssize_t novfs_daemon_lib_write(struct file *file, const char *buf, size_t len, loff_t * off); -+extern loff_t novfs_daemon_lib_llseek(struct file *file, loff_t offset, int origin); - extern int novfs_daemon_open_control(struct inode *Inode, struct file *File); 
- extern int novfs_daemon_close_control(struct inode *Inode, struct file *File); - extern int novfs_daemon_getversion(char *Buf, int Length); - -- - /* - * file.c functions - */ - extern int novfs_verify_file(struct qstr *Path, struct novfs_schandle SessionId); - extern int novfs_get_alltrees(struct dentry *parent); --extern int novfs_get_servers(unsigned char **ServerList, -- struct novfs_schandle SessionId); --extern int novfs_get_vols(struct qstr *Server, -- unsigned char **VolumeList, struct novfs_schandle SessionId); --extern int novfs_get_file_info(unsigned char *Path, -- struct novfs_entry_info *Info, struct novfs_schandle SessionId); -+extern int novfs_get_servers(unsigned char **ServerList, struct novfs_schandle SessionId); -+extern int novfs_get_vols(struct qstr *Server, unsigned char **VolumeList, struct novfs_schandle SessionId); -+extern int novfs_get_file_info(unsigned char *Path, struct novfs_entry_info *Info, struct novfs_schandle SessionId); - extern int novfs_getx_file_info(char *Path, const char *Name, -- char *buffer, ssize_t buffer_size, ssize_t *dataLen, -- struct novfs_schandle SessionId); --extern int novfs_listx_file_info(char *Path, char *buffer, -- ssize_t buffer_size, ssize_t *dataLen, -- struct novfs_schandle SessionId); -+ char *buffer, ssize_t buffer_size, ssize_t * dataLen, struct novfs_schandle SessionId); -+extern int novfs_listx_file_info(char *Path, char *buffer, ssize_t buffer_size, ssize_t * dataLen, struct novfs_schandle SessionId); - extern int novfs_setx_file_info(char *Path, const char *Name, const void *Value, -- unsigned long valueLen, -- unsigned long *bytesWritten, int flags, -- struct novfs_schandle SessionId); -+ unsigned long valueLen, unsigned long *bytesWritten, int flags, struct novfs_schandle SessionId); - - extern int novfs_get_dir_listex(unsigned char *Path, void **EnumHandle, -- int *Count, struct novfs_entry_info **Info, -- struct novfs_schandle SessionId); -+ int *Count, struct novfs_entry_info **Info, 
struct novfs_schandle SessionId); - extern int novfs_open_file(unsigned char *Path, int Flags, -- struct novfs_entry_info * Info, void **Handle, -- struct novfs_schandle SessionId); --extern int novfs_create(unsigned char *Path, int DirectoryFlag, -- struct novfs_schandle SessionId); --extern int novfs_close_file(void * Handle, struct novfs_schandle SessionId); --extern int novfs_read_file(void * Handle, unsigned char *Buffer, -- size_t * Bytes, loff_t * Offset, -- struct novfs_schandle SessionId); --extern int novfs_read_pages(void * Handle, struct novfs_data_list *DList, -- int DList_Cnt, size_t * Bytes, loff_t * Offset, -- struct novfs_schandle SessionId); --extern int novfs_write_file(void * Handle, unsigned char *Buffer, -- size_t * Bytes, loff_t * Offset, -- struct novfs_schandle SessionId); --extern int novfs_write_page(void * Handle, struct page *Page, -- struct novfs_schandle SessionId); --extern int novfs_write_pages(void * Handle, struct novfs_data_list *DList, -- int DList_Cnt, size_t Bytes, loff_t Offset, -- struct novfs_schandle SessionId); --extern int novfs_delete(unsigned char *Path, int DirectoryFlag, -- struct novfs_schandle SessionId); --extern int novfs_trunc(unsigned char *Path, int PathLen, -- struct novfs_schandle SessionId); --extern int novfs_trunc_ex(void * Handle, loff_t Offset, -- struct novfs_schandle SessionId); -+ struct novfs_entry_info *Info, void **Handle, struct novfs_schandle SessionId); -+extern int novfs_create(unsigned char *Path, int DirectoryFlag, struct novfs_schandle SessionId); -+extern int novfs_close_file(void *Handle, struct novfs_schandle SessionId); -+extern int novfs_read_file(void *Handle, unsigned char *Buffer, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId); -+extern int novfs_read_pages(void *Handle, struct novfs_data_list *DList, -+ int DList_Cnt, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId); -+extern int novfs_write_file(void *Handle, unsigned char *Buffer, size_t * 
Bytes, loff_t * Offset, struct novfs_schandle SessionId); -+extern int novfs_write_page(void *Handle, struct page *Page, struct novfs_schandle SessionId); -+extern int novfs_write_pages(void *Handle, struct novfs_data_list *DList, -+ int DList_Cnt, size_t Bytes, loff_t Offset, struct novfs_schandle SessionId); -+extern int novfs_delete(unsigned char *Path, int DirectoryFlag, struct novfs_schandle SessionId); -+extern int novfs_trunc(unsigned char *Path, int PathLen, struct novfs_schandle SessionId); -+extern int novfs_trunc_ex(void *Handle, loff_t Offset, struct novfs_schandle SessionId); - extern int novfs_rename_file(int DirectoryFlag, unsigned char *OldName, -- int OldLen, unsigned char *NewName, int NewLen, -- struct novfs_schandle SessionId); --extern int novfs_set_attr(unsigned char *Path, struct iattr *Attr, -- struct novfs_schandle SessionId); --extern int novfs_get_file_cache_flag(unsigned char * Path, -- struct novfs_schandle SessionId); --extern int novfs_set_file_lock(struct novfs_schandle SessionId, void * fhandle, -- unsigned char fl_type, loff_t fl_start, -- loff_t len); -- --extern struct inode *novfs_get_inode(struct super_block *sb, int mode, -- int dev, uid_t uid, ino_t ino, struct qstr *name); --extern int novfs_read_stream(void * ConnHandle, unsigned char * Handle, -- unsigned char * Buffer, size_t * Bytes, loff_t * Offset, -- int User, struct novfs_schandle SessionId); --extern int novfs_write_stream(void * ConnHandle, unsigned char * Handle, -- unsigned char * Buffer, size_t * Bytes, loff_t * Offset, -- struct novfs_schandle SessionId); --extern int novfs_close_stream(void * ConnHandle, unsigned char * Handle, -- struct novfs_schandle SessionId); -+ int OldLen, unsigned char *NewName, int NewLen, struct novfs_schandle SessionId); -+extern int novfs_set_attr(unsigned char *Path, struct iattr *Attr, struct novfs_schandle SessionId); -+extern int novfs_get_file_cache_flag(unsigned char *Path, struct novfs_schandle SessionId); -+extern int 
novfs_set_file_lock(struct novfs_schandle SessionId, void *fhandle, unsigned char fl_type, loff_t fl_start, loff_t len); -+ -+extern struct inode *novfs_get_inode(struct super_block *sb, int mode, int dev, uid_t uid, ino_t ino, struct qstr *name); -+extern int novfs_read_stream(void *ConnHandle, unsigned char *Handle, -+ unsigned char *Buffer, size_t * Bytes, loff_t * Offset, int User, struct novfs_schandle SessionId); -+extern int novfs_write_stream(void *ConnHandle, unsigned char *Handle, -+ unsigned char *Buffer, size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId); -+extern int novfs_close_stream(void *ConnHandle, unsigned char *Handle, struct novfs_schandle SessionId); - - extern int novfs_add_to_root(char *); --extern int novfs_end_directory_enumerate(void *EnumHandle, -- struct novfs_schandle SessionId); -+extern int novfs_end_directory_enumerate(void *EnumHandle, struct novfs_schandle SessionId); - - /* - * scope.c functions -@@ -355,14 +311,11 @@ extern void novfs_scope_exit(void); - extern void *novfs_scope_lookup(void); - extern uid_t novfs_scope_get_uid(struct novfs_scope_list *); - extern struct novfs_schandle novfs_scope_get_sessionId(struct -- novfs_scope_list *); -+ novfs_scope_list *); - extern char *novfs_get_scopeusers(void); --extern int novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * Free, -- uint64_t * TotalEnties, uint64_t * FreeEnties); --extern int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, -- uint64_t * TotalEnties, uint64_t * FreeEnties); --extern char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, -- unsigned int Buflen, int Flags); -+extern int novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties); -+extern int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, uint64_t * TotalEnties, uint64_t * FreeEnties); -+extern char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, unsigned int Buflen, int Flags); 
- extern void novfs_scope_cleanup(void); - extern struct novfs_scope_list *novfs_get_scope_from_name(struct qstr *); - extern struct novfs_scope_list *novfs_get_scope(struct dentry *); -@@ -382,73 +335,38 @@ extern void novfs_profile_exit(void); - /* - * nwcapi.c functions - */ --extern int novfs_auth_conn(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_conn_close(struct novfs_xplat *pdata, -- void **Handle, struct novfs_schandle Session); --extern int novfs_get_conn_info(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_set_conn_info(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_get_daemon_ver(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_get_id_info(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_license_conn(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_login_id(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_logout_id(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_open_conn_by_addr(struct novfs_xplat *pdata, -- void **Handle, struct novfs_schandle Session); --extern int novfs_open_conn_by_name(struct novfs_xplat *pdata, -- void **Handle, struct novfs_schandle Session); --extern int novfs_open_conn_by_ref(struct novfs_xplat *pdata, -- void **Handle, struct novfs_schandle Session); --extern int novfs_query_feature(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_raw_send(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_scan_conn_info(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_sys_conn_close(struct novfs_xplat *pdata, -- unsigned long *Handle, struct novfs_schandle Session); --extern int novfs_unauthenticate(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int 
novfs_unlicense_conn(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_change_auth_key(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_enum_ids(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_get_default_ctx(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_get_preferred_DS_tree(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_set_default_ctx(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_set_preferred_DS_tree(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_set_pri_conn(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_get_pri_conn(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_set_map_drive(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_unmap_drive(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_enum_drives(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_get_bcast_msg(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_set_key_value(struct novfs_xplat *pdata, -- struct novfs_schandle Session); --extern int novfs_verify_key_value(struct novfs_xplat *pdata, -- struct novfs_schandle Session); -- -- --#endif /* __NOVFS_H */ -+extern int novfs_auth_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_conn_close(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session); -+extern int novfs_get_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_set_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_get_daemon_ver(struct novfs_xplat *pdata, 
struct novfs_schandle Session); -+extern int novfs_get_id_info(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_license_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_login_id(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_logout_id(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_open_conn_by_addr(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session); -+extern int novfs_open_conn_by_name(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session); -+extern int novfs_open_conn_by_ref(struct novfs_xplat *pdata, void **Handle, struct novfs_schandle Session); -+extern int novfs_query_feature(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_raw_send(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_scan_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_sys_conn_close(struct novfs_xplat *pdata, unsigned long *Handle, struct novfs_schandle Session); -+extern int novfs_unauthenticate(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_unlicense_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_change_auth_key(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_enum_ids(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_get_default_ctx(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_get_preferred_DS_tree(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_set_default_ctx(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_set_preferred_DS_tree(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int 
novfs_set_pri_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_get_pri_conn(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_enum_drives(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_get_bcast_msg(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_set_key_value(struct novfs_xplat *pdata, struct novfs_schandle Session); -+extern int novfs_verify_key_value(struct novfs_xplat *pdata, struct novfs_schandle Session); - -+#endif /* __NOVFS_H */ diff --git a/patches.fixes/novfs-overflow-fixes b/patches.fixes/novfs-overflow-fixes deleted file mode 100644 index ff696c1..0000000 --- a/patches.fixes/novfs-overflow-fixes +++ /dev/null @@ -1,1720 +0,0 @@ -From: Sankar P -Subject: novfs: security: Add buffer overflow, integer wraparound fixes -Patch-mainline: no -References: bnc#594362 - -Security fixes that help in addressing buffer overflows, limiting -the amount of data that can be copied from user-space, etc. 
- -Signed-off-by: Sankar P -Signed-off-by: Marcus Meissner -Signed-off-by: Sebastian Krahmer ---- - fs/novfs/daemon.c | 36 +- - fs/novfs/file.c | 4 - fs/novfs/nwcapi.c | 787 +++++++++++++++++++++++++++--------------------------- - fs/novfs/nwcapi.h | 12 - 4 files changed, 439 insertions(+), 400 deletions(-) - ---- a/fs/novfs/daemon.c -+++ b/fs/novfs/daemon.c -@@ -811,6 +811,9 @@ static int daemon_login(struct novfs_log - struct ncl_string password; - - if (!copy_from_user(&lLogin, Login, sizeof(lLogin))) { -+ if (lLogin.Server.length > MAX_SERVER_NAME_LENGTH || lLogin.UserName.length > MAX_NAME_LEN || -+ lLogin.Password.length > MAX_PASSWORD_LENGTH) -+ return -EINVAL; - server.buffer = kmalloc(lLogin.Server.length, GFP_KERNEL); - if (server.buffer) { - server.len = lLogin.Server.length; -@@ -857,6 +860,8 @@ static int daemon_logout(struct novfs_lo - - if (copy_from_user(&lLogout, Logout, sizeof(lLogout))) - return -EFAULT; -+ if (lLogout.Server.length > MAX_SERVER_NAME_LENGTH) -+ return -EINVAL; - server.name = kmalloc(lLogout.Server.length, GFP_KERNEL); - if (!server.name) - return -ENOMEM; -@@ -1102,6 +1107,8 @@ int novfs_daemon_ioctl(struct inode *ino - char *buf; - io.length = 0; - cpylen = copy_from_user(&io, (char *)arg, sizeof(io)); -+ if (io.length <= 0 || io.length > 1024) -+ return -EINVAL; - if (io.length) { - buf = kmalloc(io.length + 1, GFP_KERNEL); - if (buf) { -@@ -1453,6 +1460,8 @@ int novfs_daemon_lib_ioctl(struct inode - cpylen = - copy_from_user(&io, (void *)arg, - sizeof(io)); -+ if (io.length <= 0 || io.length > 1024) -+ return -EINVAL; - if (io.length) { - buf = - kmalloc(io.length + 1, -@@ -1478,9 +1487,7 @@ int novfs_daemon_lib_ioctl(struct inode - cpylen = - copy_from_user(&data, (void *)arg, - sizeof(data)); -- retCode = -- ((data. 
-- xfunction & 0x0000FFFF) | 0xCC000000); -+ retCode = ((data.xfunction & 0x0000FFFF) | 0xCC000000); - - switch (data.xfunction) { - case NWC_OPEN_CONN_BY_NAME: -@@ -1815,8 +1822,7 @@ static int NwdConvertLocalHandle(struct - //sgled memcpy(lh.NwWareHandle, resource->handle, sizeof(resource->handle)); - memcpy(lh.NetWareHandle, resource->handle, sizeof(resource->handle)); //sgled - if (pdata->repLen >= sizeof(struct nwc_convert_local_handle)) { -- cpylen = -- copy_to_user(pdata->repData, &lh, -+ cpylen = copy_to_user(pdata->repData, &lh, - sizeof(struct nwc_convert_local_handle)); - retVal = 0; - } else { -@@ -1838,6 +1844,8 @@ static int NwdGetMountPath(struct novfs_ - unsigned long cpylen; - struct nwc_get_mount_path mp; - -+ if (pdata->reqLen != sizeof(mp)) -+ return -EINVAL; - cpylen = copy_from_user(&mp, pdata->reqData, pdata->reqLen); - - if (novfs_current_mnt) { -@@ -1878,21 +1886,19 @@ static int set_map_drive(struct novfs_xp - return retVal; - if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo))) - return -EFAULT; -- drivemap = -- kmalloc(sizeof(struct drive_map) + symInfo.linkOffsetLength, -+ if (symInfo.linkOffsetLength > MAX_NAME_LEN) -+ return -EINVAL; -+ drivemap = kmalloc(sizeof(struct drive_map) + symInfo.linkOffsetLength, - GFP_KERNEL); - if (!drivemap) - return -ENOMEM; - - path = (char *)pdata->reqData; - path += symInfo.linkOffset; -- cpylen = -- copy_from_user(drivemap->name, path, -- symInfo.linkOffsetLength); -+ cpylen = copy_from_user(drivemap->name, path, symInfo.linkOffsetLength); - - drivemap->session = Session; -- drivemap->hash = -- full_name_hash(drivemap->name, -+ drivemap->hash = full_name_hash(drivemap->name, - symInfo.linkOffsetLength - 1); - drivemap->namelen = symInfo.linkOffsetLength - 1; - DbgPrint("hash=0x%lx path=%s", drivemap->hash, drivemap->name); -@@ -1910,8 +1916,7 @@ static int set_map_drive(struct novfs_xp - dm, dm->hash, dm->namelen, dm->name); - - if (drivemap->hash == dm->hash) { -- if (0 == -- 
strcmp(dm->name, drivemap->name)) { -+ if (0 == strcmp(dm->name, drivemap->name)) { - dm = NULL; - break; - } -@@ -1950,7 +1955,8 @@ static int unmap_drive(struct novfs_xpla - return retVal; - if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo))) - return -EFAULT; -- -+ if (symInfo.linkLen > MAX_NAME_LEN || symInfo.linkLen == 0) -+ return -EINVAL; - path = kmalloc(symInfo.linkLen, GFP_KERNEL); - if (!path) - return -ENOMEM; ---- a/fs/novfs/file.c -+++ b/fs/novfs/file.c -@@ -1077,7 +1077,7 @@ int novfs_write_file(void *Handle, unsig - - DbgPrint("cmdlen=%ld len=%ld", cmdlen, len); - -- if ((cmdlen + len) > novfs_max_iosize) { -+ if (len > novfs_max_iosize - cmdlen) { - len = novfs_max_iosize - cmdlen; - len = (len / PAGE_SIZE) * PAGE_SIZE; - } -@@ -1449,6 +1449,8 @@ int novfs_write_stream(void *ConnHandle, - size_t len; - - len = *Bytes; -+ if (len > novfs_max_iosize) -+ len = novfs_max_iosize; - cmdlen = len + offsetof(struct novfs_write_stream_request, data); - *Bytes = 0; - ---- a/fs/novfs/nwcapi.c -+++ b/fs/novfs/nwcapi.c -@@ -37,16 +37,20 @@ static void GetConnData(struct nwc_get_c - /*++======================================================================*/ - int novfs_open_conn_by_name(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwd_open_conn_by_name *openConn, *connReply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwd_open_conn_by_name *openConn = NULL, *connReply = NULL; - struct nwc_open_conn_by_name ocbn; - int retCode = 0; -- unsigned long cmdlen, datalen, replylen, cpylen; -- char *data; -+ unsigned long cmdlen, datalen, replylen, cpylen, pnamelen, stypelen; -+ char *data = NULL; - - cpylen = copy_from_user(&ocbn, pdata->reqData, sizeof(ocbn)); -- datalen = sizeof(*openConn) + strlen_user(ocbn.pName->pString) + strlen_user(ocbn.pServiceType); -+ pnamelen = 
strlen_user(ocbn.pName->pString); -+ stypelen = strlen_user(ocbn.pServiceType); -+ if (pnamelen > MAX_NAME_LEN || stypelen > NW_MAX_SERVICE_TYPE_LEN) -+ return -EINVAL; -+ datalen = sizeof(*openConn) + pnamelen + stypelen; - cmdlen = datalen + sizeof(*cmd); - cmd = kmalloc(cmdlen, GFP_KERNEL); - -@@ -61,8 +65,8 @@ int novfs_open_conn_by_name(struct novfs - cmd->dataLen = datalen; - openConn = (struct nwd_open_conn_by_name *) cmd->data; - -- openConn->nameLen = strlen_user(ocbn.pName->pString); -- openConn->serviceLen = strlen_user(ocbn.pServiceType); -+ openConn->nameLen = pnamelen; -+ openConn->serviceLen = stypelen; - openConn->uConnFlags = ocbn.uConnFlags; - openConn->ConnHandle = Uint32toHandle(ocbn.ConnHandle); - data = (char *)openConn; -@@ -70,13 +74,9 @@ int novfs_open_conn_by_name(struct novfs - openConn->oName = sizeof(*openConn); - - openConn->oServiceType = openConn->oName + openConn->nameLen; -- cpylen = -- copy_from_user(data, ocbn.pName->pString, -- openConn->nameLen); -+ cpylen = copy_from_user(data, ocbn.pName->pString, openConn->nameLen); - data += openConn->nameLen; -- cpylen = -- copy_from_user(data, ocbn.pServiceType, -- openConn->serviceLen); -+ cpylen = copy_from_user(data, ocbn.pServiceType, openConn->serviceLen); - - retCode = - Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -@@ -109,9 +109,9 @@ int novfs_open_conn_by_name(struct novfs - - int novfs_open_conn_by_addr(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwd_open_conn_by_addr *openConn, *connReply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwd_open_conn_by_addr *openConn = NULL, *connReply = NULL; - struct nwc_open_conn_by_addr ocba; - struct nwc_tran_addr tranAddr; - int retCode = 0; -@@ -133,8 +133,11 @@ int novfs_open_conn_by_addr(struct novfs - cmd->dataLen = datalen; - openConn = (struct 
nwd_open_conn_by_addr *) cmd->data; - -- cpylen = -- copy_from_user(&tranAddr, ocba.pTranAddr, sizeof(tranAddr)); -+ cpylen = copy_from_user(&tranAddr, ocba.pTranAddr, sizeof(tranAddr)); -+ if (tranAddr.uAddressLength > sizeof(addr)) { -+ retCode = -EINVAL; -+ goto out; -+ } - - DbgPrint("tranAddr"); - novfs_dump(sizeof(tranAddr), &tranAddr); -@@ -143,17 +146,14 @@ int novfs_open_conn_by_addr(struct novfs - openConn->TranAddr.uAddressLength = tranAddr.uAddressLength; - memset(addr, 0xcc, sizeof(addr) - 1); - -- cpylen = -- copy_from_user(addr, tranAddr.puAddress, -- tranAddr.uAddressLength); -+ cpylen = copy_from_user(addr, tranAddr.puAddress, tranAddr.uAddressLength); - - DbgPrint("addr"); - novfs_dump(sizeof(addr), addr); - - openConn->TranAddr.oAddress = *(unsigned int *) (&addr[2]); - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, - (void **)&reply, &replylen, - INTERRUPTIBLE); - if (reply) { -@@ -178,17 +178,17 @@ int novfs_open_conn_by_addr(struct novfs - kfree(reply); - } - -+out: - kfree(cmd); -- - return (retCode); - - } - - int novfs_open_conn_by_ref(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwd_open_conn_by_ref *openConn; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwd_open_conn_by_ref *openConn = NULL; - struct nwc_open_conn_by_ref ocbr; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; -@@ -207,8 +207,7 @@ int novfs_open_conn_by_ref(struct novfs_ - cmd->dataLen = datalen; - openConn = (struct nwd_open_conn_by_ref *) cmd->data; - -- openConn->uConnReference = -- (void *) (unsigned long) ocbr.uConnReference; -+ openConn->uConnReference = (void *) (unsigned long) ocbr.uConnReference; - openConn->uConnFlags = ocbr.uConnFlags; - - retCode = -@@ -225,13 +224,10 @@ 
int novfs_open_conn_by_ref(struct novfs_ - /* - * we got valid data. - */ -- ocbr.ConnHandle = -- HandletoUint32(openConn->ConnHandle); -+ ocbr.ConnHandle = HandletoUint32(openConn->ConnHandle); - *Handle = openConn->ConnHandle; - -- cpylen = -- copy_to_user(pdata->reqData, &ocbr, -- sizeof(ocbr)); -+ cpylen = copy_to_user(pdata->reqData, &ocbr, sizeof(ocbr)); - DbgPrint("New Conn Handle = %X", openConn->ConnHandle); - } - kfree(reply); -@@ -245,59 +241,63 @@ int novfs_open_conn_by_ref(struct novfs_ - int novfs_raw_send(struct novfs_xplat *pdata, struct novfs_schandle Session) - { - struct nwc_request xRequest; -- struct nwc_frag *frag, *cFrag, *reqFrag; -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- int retCode = -ENOMEM; -+ struct nwc_frag *frag = NULL, *cFrag = NULL, *reqFrag = NULL; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ int retCode = 0; - unsigned long cmdlen, datalen, replylen, cpylen, totalLen; - unsigned int x; -- struct nwd_ncp_req *ncpData; -- struct nwd_ncp_rep *ncpReply; -- unsigned char *reqData; -+ struct nwd_ncp_req *ncpData = NULL; -+ struct nwd_ncp_rep *ncpReply = NULL; -+ unsigned char *reqData = NULL; - unsigned long actualReplyLength = 0; - - DbgPrint("[XPLAT] Process Raw NCP Send"); - cpylen = copy_from_user(&xRequest, pdata->reqData, sizeof(xRequest)); - -+ if (xRequest.uNumReplyFrags > MAX_NUM_REPLIES || xRequest.uNumReplyFrags < MIN_NUM_REPLIES || -+ xRequest.uNumRequestFrags > MAX_NUM_REQUESTS || xRequest.uNumRequestFrags < MIN_NUM_REQUESTS) -+ return -EINVAL; -+ - /* - * Figure out the length of the request - */ -- frag = -- kmalloc(xRequest.uNumReplyFrags * sizeof(struct nwc_frag), GFP_KERNEL); -+ frag = kmalloc(xRequest.uNumReplyFrags * sizeof(struct nwc_frag), GFP_KERNEL); - -- DbgPrint("[XPLAT RawNCP] - Reply Frag Count 0x%X", -- xRequest.uNumReplyFrags); -+ DbgPrint("[XPLAT RawNCP] - Reply Frag Count 0x%X", xRequest.uNumReplyFrags); - - 
if (!frag) -- return (retCode); -+ return -ENOMEM; - -- cpylen = -- copy_from_user(frag, xRequest.pReplyFrags, -- xRequest.uNumReplyFrags * sizeof(struct nwc_frag)); -+ cpylen = copy_from_user(frag, xRequest.pReplyFrags, xRequest.uNumReplyFrags * sizeof(struct nwc_frag)); - totalLen = 0; - - cFrag = frag; - for (x = 0; x < xRequest.uNumReplyFrags; x++) { - DbgPrint("[XPLAT - RawNCP] - Frag Len = %d", cFrag->uLength); -+ if (cFrag->uLength > MAX_FRAG_SIZE || cFrag->uLength < MIN_FRAG_SIZE) { -+ retCode = -EINVAL; -+ goto out; -+ } - totalLen += cFrag->uLength; - cFrag++; - } - - DbgPrint("[XPLAT - RawNCP] - totalLen = %d", totalLen); - datalen = 0; -- reqFrag = -- kmalloc(xRequest.uNumRequestFrags * sizeof(struct nwc_frag), -- GFP_KERNEL); -+ reqFrag = kmalloc(xRequest.uNumRequestFrags * sizeof(struct nwc_frag), GFP_KERNEL); - if (!reqFrag) { -- kfree(frag); -- return (retCode); -+ retCode = -ENOMEM; -+ goto out; - } - -- cpylen = -- copy_from_user(reqFrag, xRequest.pRequestFrags, -- xRequest.uNumRequestFrags * sizeof(struct nwc_frag)); -+ cpylen = copy_from_user(reqFrag, xRequest.pRequestFrags, xRequest.uNumRequestFrags * sizeof(struct nwc_frag)); - cFrag = reqFrag; - for (x = 0; x < xRequest.uNumRequestFrags; x++) { -+ if (cFrag->uLength > MAX_FRAG_SIZE || cFrag->uLength < MIN_FRAG_SIZE) { -+ retCode = -EINVAL; -+ goto out; -+ } - datalen += cFrag->uLength; - cFrag++; - } -@@ -311,8 +311,10 @@ int novfs_raw_send(struct novfs_xplat *p - DbgPrint("[XPLAT RawNCP] - Total Command Data Len = %x", cmdlen); - - cmd = kmalloc(cmdlen, GFP_KERNEL); -- if (!cmd) -- return -ENOMEM; -+ if (!cmd) { -+ retCode = -ENOMEM; -+ goto out; -+ } - - cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; - cmd->Command.SequenceNumber = 0; -@@ -333,9 +335,7 @@ int novfs_raw_send(struct novfs_xplat *p - cFrag = reqFrag; - - for (x = 0; x < xRequest.uNumRequestFrags; x++) { -- cpylen = -- copy_from_user(reqData, cFrag->pData, -- cFrag->uLength); -+ cpylen = copy_from_user(reqData, 
cFrag->pData, cFrag->uLength); - reqData += cFrag->uLength; - cFrag++; - } -@@ -369,12 +369,9 @@ int novfs_raw_send(struct novfs_xplat *p - DbgPrint("RawNCP - Copy Frag %d: 0x%X", x, - cFrag->uLength); - -- datalen = -- min((unsigned long) cFrag->uLength, totalLen); -+ datalen = min((unsigned long) cFrag->uLength, totalLen); - -- cpylen = -- copy_to_user(cFrag->pData, reqData, -- datalen); -+ cpylen = copy_to_user(cFrag->pData, reqData, datalen); - totalLen -= datalen; - reqData += datalen; - actualReplyLength += datalen; -@@ -387,10 +384,12 @@ int novfs_raw_send(struct novfs_xplat *p - retCode = -EIO; - } - -- kfree(cmd); -+ - xRequest.uActualReplyLength = actualReplyLength; - cpylen = copy_to_user(pdata->reqData, &xRequest, sizeof(xRequest)); - -+out: -+ kfree(cmd); - kfree(reqFrag); - kfree(frag); - -@@ -399,10 +398,10 @@ int novfs_raw_send(struct novfs_xplat *p - - int novfs_conn_close(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - struct nwc_close_conn cc; -- struct nwd_close_conn *nwdClose; -+ struct nwd_close_conn *nwdClose = NULL; - int retCode = 0; - unsigned long cmdlen, datalen, replylen, cpylen; - -@@ -439,10 +438,10 @@ int novfs_conn_close(struct novfs_xplat - - int novfs_sys_conn_close(struct novfs_xplat *pdata, unsigned long *Handle, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - struct nwc_close_conn cc; -- struct nwd_close_conn *nwdClose; -+ struct nwd_close_conn *nwdClose = NULL; - unsigned int retCode = 0; - unsigned long cmdlen, datalen, replylen, cpylen; - -@@ -479,7 +478,7 @@ int novfs_sys_conn_close(struct novfs_xp - - int novfs_login_id(struct novfs_xplat *pdata, struct 
novfs_schandle Session) - { -- struct nwc_login_id lgn, *plgn; -+ struct nwc_login_id lgn, *plgn = NULL; - int retCode = -ENOMEM; - struct ncl_string server; - struct ncl_string username; -@@ -487,6 +486,11 @@ int novfs_login_id(struct novfs_xplat *p - unsigned long cpylen; - struct nwc_string nwcStr; - -+ -+ memset(&server, 0, sizeof(server)); -+ memset(&username, 0, sizeof(username)); -+ memset(&password, 0, sizeof(password)); -+ - cpylen = copy_from_user(&lgn, pdata->reqData, sizeof(lgn)); - - DbgPrint(""); -@@ -496,6 +500,9 @@ int novfs_login_id(struct novfs_xplat *p - DbgPrint("DomainName\n"); - novfs_dump(sizeof(nwcStr), &nwcStr); - -+ if (nwcStr.DataLen > MAX_NAME_LEN) -+ return -EINVAL; -+ - if ((server.buffer = kmalloc(nwcStr.DataLen, GFP_KERNEL))) { - server.type = nwcStr.DataType; - server.len = nwcStr.DataLen; -@@ -505,8 +512,11 @@ int novfs_login_id(struct novfs_xplat *p - - cpylen = copy_from_user(&nwcStr, lgn.pObjectName, sizeof(nwcStr)); - DbgPrint("ObjectName"); -+ if (nwcStr.DataLen > MAX_OBJECT_NAME_LENGTH) { -+ retCode = -EINVAL; -+ goto out; -+ } - novfs_dump(sizeof(nwcStr), &nwcStr); -- - if ((username.buffer = kmalloc(nwcStr.DataLen, GFP_KERNEL))) { - username.type = nwcStr.DataType; - username.len = nwcStr.DataLen; -@@ -516,6 +526,10 @@ int novfs_login_id(struct novfs_xplat *p - - cpylen = copy_from_user(&nwcStr, lgn.pPassword, sizeof(nwcStr)); - DbgPrint("Password"); -+ if (nwcStr.DataLen > MAX_PASSWORD_LENGTH) { -+ retCode = -EINVAL; -+ goto out; -+ } - novfs_dump(sizeof(nwcStr), &nwcStr); - - if ((password.buffer = kmalloc(nwcStr.DataLen, GFP_KERNEL))) { -@@ -531,24 +545,26 @@ int novfs_login_id(struct novfs_xplat *p - cpylen = copy_to_user(&plgn->AuthenticationId, &lgn.AuthenticationId, sizeof(plgn->AuthenticationId)); - } - memset(password.buffer, 0, password.len); -- kfree(password.buffer); -+ - } - } - memset(username.buffer, 0, username.len); -- kfree(username.buffer); - } - } -- kfree(server.buffer); - } -+out: -+ 
kfree(password.buffer); -+ kfree(username.buffer); -+ kfree(server.buffer); - return (retCode); - } - - int novfs_auth_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) - { - struct nwc_auth_with_id pauth; -- struct nwc_auth_wid *pDauth; -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct nwc_auth_wid *pDauth = NULL; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; - -@@ -583,10 +599,10 @@ int novfs_auth_conn(struct novfs_xplat * - - int novfs_license_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - struct nwc_license_conn lisc; -- struct nwc_lisc_id * pDLisc; -+ struct nwc_lisc_id * pDLisc = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; - -@@ -621,9 +637,9 @@ int novfs_license_conn(struct novfs_xpla - - int novfs_logout_id(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwc_lo_id logout, *pDLogout; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwc_lo_id logout, *pDLogout = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; - -@@ -659,9 +675,9 @@ int novfs_logout_id(struct novfs_xplat * - - int novfs_unlicense_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwc_unlic_conn *pUconn, ulc; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwc_unlic_conn *pUconn = NULL, ulc; - int retCode = -ENOMEM; - unsigned 
long cmdlen, datalen, replylen, cpylen; - -@@ -697,9 +713,9 @@ int novfs_unlicense_conn(struct novfs_xp - - int novfs_unauthenticate(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwc_unauthenticate auth, *pDAuth; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwc_unauthenticate auth, *pDAuth = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; - -@@ -736,10 +752,10 @@ int novfs_unauthenticate(struct novfs_xp - - int novfs_get_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - struct nwc_get_conn_info connInfo; -- struct nwd_conn_info *pDConnInfo; -+ struct nwd_conn_info *pDConnInfo = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, replylen, cpylen; - -@@ -751,6 +767,11 @@ int novfs_get_conn_info(struct novfs_xpl - if (!cmd) - return -ENOMEM; - -+ if (connInfo.uInfoLength > MAX_INFO_LEN) { -+ retCode = -EINVAL; -+ goto out; -+ } -+ - cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; - cmd->Command.SequenceNumber = 0; - cmd->Command.SessionId = Session; -@@ -775,6 +796,7 @@ int novfs_get_conn_info(struct novfs_xpl - - kfree(reply); - } -+out: - kfree(cmd); - return (retCode); - -@@ -782,20 +804,23 @@ int novfs_get_conn_info(struct novfs_xpl - - int novfs_set_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - struct nwc_set_conn_info connInfo; -- struct nwd_set_conn_info *pDConnInfo; -+ struct nwd_set_conn_info *pDConnInfo = NULL; - int retCode = -ENOMEM; - unsigned long 
cmdlen, replylen, cpylen; - - cmdlen = sizeof(*cmd) + sizeof(*pDConnInfo); - cmd = kmalloc(cmdlen, GFP_KERNEL); -- cpylen = -- copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_set_conn_info)); -+ cpylen = copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_set_conn_info)); - - if (!cmd) - return -ENOMEM; -+ if (connInfo.uInfoLength > MAX_INFO_LEN) { -+ retCode = -EINVAL; -+ goto out; -+ } - - cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; - cmd->Command.SequenceNumber = 0; -@@ -817,6 +842,8 @@ int novfs_set_conn_info(struct novfs_xpl - retCode = reply->Reply.ErrorCode; - kfree(reply); - } -+ -+out: - kfree(cmd); - return (retCode); - -@@ -824,12 +851,12 @@ int novfs_set_conn_info(struct novfs_xpl - - int novfs_get_id_info(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwc_get_id_info qidInfo, *gId; -- struct nwd_get_id_info *idInfo; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwc_get_id_info qidInfo, *gId = NULL; -+ struct nwd_get_id_info *idInfo = NULL; - struct nwc_string xferStr; -- char *str; -+ char *str = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, replylen, cpylen; - -@@ -846,12 +873,10 @@ int novfs_get_id_info(struct novfs_xplat - cmd->NwcCommand = NWC_GET_IDENTITY_INFO; - - idInfo = (struct nwd_get_id_info *) cmd->data; -- - idInfo->AuthenticationId = qidInfo.AuthenticationId; - cmd->dataLen = sizeof(*idInfo); - -- retCode = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ retCode = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, - (void **)&reply, &replylen, - INTERRUPTIBLE); - if (reply) { -@@ -863,77 +888,77 @@ int novfs_get_id_info(struct novfs_xplat - */ - gId = pdata->reqData; - idInfo = (struct nwd_get_id_info *) reply->data; -- cpylen = -- copy_to_user(&gId->AuthenticationId, -+ cpylen = copy_to_user(&gId->AuthenticationId, - 
&idInfo->AuthenticationId, - sizeof(idInfo-> - AuthenticationId)); -- cpylen = -- copy_to_user(&gId->AuthType, -+ cpylen = copy_to_user(&gId->AuthType, - &idInfo->AuthType, - sizeof(idInfo->AuthType)); -- cpylen = -- copy_to_user(&gId->IdentityFlags, -+ cpylen = copy_to_user(&gId->IdentityFlags, - &idInfo->IdentityFlags, - sizeof(idInfo->IdentityFlags)); -- cpylen = -- copy_to_user(&gId->NameType, -+ cpylen = copy_to_user(&gId->NameType, - &idInfo->NameType, - sizeof(idInfo->NameType)); -- cpylen = -- copy_to_user(&gId->ObjectType, -+ cpylen = copy_to_user(&gId->ObjectType, - &idInfo->ObjectType, - sizeof(idInfo->ObjectType)); - -- cpylen = -- copy_from_user(&xferStr, gId->pDomainName, -+ cpylen = copy_from_user(&xferStr, gId->pDomainName, - sizeof(struct nwc_string)); -- str = -- (char *)((char *)reply->data + -- idInfo->pDomainNameOffset); -- cpylen = -- copy_to_user(xferStr.pBuffer, str, -- idInfo->domainLen); -+ if (idInfo->pDomainNameOffset >= reply->dataLen) { -+ retCode = -EINVAL; -+ goto out; -+ } -+ str = (char *)((char *)reply->data + idInfo->pDomainNameOffset); -+ if (idInfo->domainLen > reply->dataLen - idInfo->pDomainNameOffset ) { -+ retCode = -EINVAL; -+ goto out; -+ } -+ -+ cpylen = copy_to_user(xferStr.pBuffer, str, idInfo->domainLen); - xferStr.DataType = NWC_STRING_TYPE_ASCII; - xferStr.DataLen = idInfo->domainLen; -- cpylen = -- copy_to_user(gId->pDomainName, &xferStr, -- sizeof(struct nwc_string)); -+ cpylen = copy_to_user(gId->pDomainName, &xferStr, sizeof(struct nwc_string)); -+ cpylen = copy_from_user(&xferStr, gId->pObjectName, sizeof(struct nwc_string)); - -- cpylen = -- copy_from_user(&xferStr, gId->pObjectName, -- sizeof(struct nwc_string)); -- str = -- (char *)((char *)reply->data + -- idInfo->pObjectNameOffset); -- cpylen = -- copy_to_user(xferStr.pBuffer, str, -- idInfo->objectLen); -+ if (idInfo->pObjectNameOffset >= reply->dataLen) { -+ retCode = -EINVAL; -+ goto out; -+ } -+ str = (char *)((char *)reply->data + 
idInfo->pObjectNameOffset); -+ if (idInfo->objectLen > reply->dataLen - idInfo->pObjectNameOffset) { -+ retCode = -EINVAL; -+ goto out; -+ } -+ cpylen = copy_to_user(xferStr.pBuffer, str, idInfo->objectLen); - xferStr.DataLen = idInfo->objectLen - 1; - xferStr.DataType = NWC_STRING_TYPE_ASCII; -- cpylen = -- copy_to_user(gId->pObjectName, &xferStr, -- sizeof(struct nwc_string)); -+ cpylen = copy_to_user(gId->pObjectName, &xferStr, sizeof(struct nwc_string)); -+ } - } - -+out: - kfree(reply); -- } - kfree(cmd); - return (retCode); - } - - int novfs_scan_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwc_scan_conn_info connInfo, *rInfo; -- struct nwd_scan_conn_info *pDConnInfo; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwc_scan_conn_info connInfo, *rInfo = NULL; -+ struct nwd_scan_conn_info *pDConnInfo = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, replylen, cpylen; -- unsigned char *localData; -+ unsigned char *localData = NULL; - -- cpylen = -- copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_scan_conn_info)); -+ cpylen = copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_scan_conn_info)); -+ -+ if (connInfo.uReturnInfoLength > MAX_INFO_LEN || connInfo.uScanInfoLen > MAX_INFO_LEN) -+ return -EINVAL; - - cmdlen = sizeof(*cmd) + sizeof(*pDConnInfo) + connInfo.uScanInfoLen; - cmd = kmalloc(cmdlen, GFP_KERNEL); -@@ -962,8 +987,7 @@ int novfs_scan_conn_info(struct novfs_xp - __DbgPrint(" connInfo.uScanFlags = 0x%X\n", connInfo.uScanFlags); - - pDConnInfo->uScanIndex = connInfo.uScanIndex; -- pDConnInfo->uConnectionReference = -- connInfo.uConnectionReference; -+ pDConnInfo->uConnectionReference = connInfo.uConnectionReference; - pDConnInfo->uScanInfoLevel = connInfo.uScanInfoLevel; - pDConnInfo->uScanInfoLen = connInfo.uScanInfoLen; - 
pDConnInfo->uReturnInfoLength = connInfo.uReturnInfoLength; -@@ -974,8 +998,7 @@ int novfs_scan_conn_info(struct novfs_xp - localData = (unsigned char *) pDConnInfo; - pDConnInfo->uScanConnInfoOffset = sizeof(*pDConnInfo); - localData += pDConnInfo->uScanConnInfoOffset; -- cpylen = -- copy_from_user(localData, connInfo.pScanConnInfo, -+ cpylen = copy_from_user(localData, connInfo.pScanConnInfo, - connInfo.uScanInfoLen); - } else { - pDConnInfo->uScanConnInfoOffset = 0; -@@ -1035,8 +1058,7 @@ int novfs_scan_conn_info(struct novfs_xp - static void GetUserData(struct nwc_scan_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply) - { - unsigned long uLevel; -- struct nwd_scan_conn_info *pDConnInfo; -- -+ struct nwd_scan_conn_info *pDConnInfo = NULL; - unsigned char *srcData = NULL; - unsigned long dataLen = 0, cpylen; - -@@ -1082,26 +1104,17 @@ static void GetUserData(struct nwc_scan_ - DbgPrint("NWC_CONN_INFO_TRAN_ADDR 0x%p -> 0x%p :: 0x%X", - srcData, connInfo->pReturnConnInfo, dataLen); - -- cpylen = -- copy_from_user(&tranAddr, dstData, -- sizeof(tranAddr)); -- -- srcData += -- ((struct nwd_scan_conn_info *) srcData)-> -- uReturnConnInfoOffset; -- -- tranAddr.uTransportType = -- ((struct nwd_tran_addr *) srcData)->uTransportType; -- tranAddr.uAddressLength = -- ((struct tagNwdTranAddrEx *) srcData)->uAddressLength; -- -- cpylen = -- copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); -- cpylen = -- copy_to_user(tranAddr.puAddress, -- ((struct tagNwdTranAddrEx *) srcData)->Buffer, -- ((struct tagNwdTranAddrEx *) srcData)-> -- uAddressLength); -+ cpylen = copy_from_user(&tranAddr, dstData, sizeof(tranAddr)); -+ if (((struct nwd_scan_conn_info *) srcData)->uReturnConnInfoOffset >= reply->dataLen) -+ goto out; -+ srcData += ((struct nwd_scan_conn_info *)srcData)->uReturnConnInfoOffset; -+ tranAddr.uTransportType = ((struct nwd_tran_addr *)srcData)->uTransportType; -+ tranAddr.uAddressLength = ((struct tagNwdTranAddrEx 
*)srcData)->uAddressLength; -+ if (tranAddr.uAddressLength > MAX_ADDRESS_LENGTH) -+ goto out; -+ cpylen = copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); -+ cpylen = copy_to_user(tranAddr.puAddress, -+ ((struct tagNwdTranAddrEx *) srcData)->Buffer, tranAddr.uAddressLength); - dataLen = 0; - break; - } -@@ -1115,13 +1128,13 @@ static void GetUserData(struct nwc_scan_ - break; - } - -- if (srcData && dataLen) { -+ if (srcData && dataLen && dataLen <= reply->dataLen) { - DbgPrint("Copy Data 0x%p -> 0x%p :: 0x%X", - srcData, connInfo->pReturnConnInfo, dataLen); -- cpylen = -- copy_to_user(connInfo->pReturnConnInfo, srcData, dataLen); -+ cpylen = copy_to_user(connInfo->pReturnConnInfo, srcData, dataLen); - } - -+out: - return; - } - -@@ -1131,7 +1144,7 @@ static void GetUserData(struct nwc_scan_ - static void GetConnData(struct nwc_get_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply) - { - unsigned long uLevel; -- struct nwd_conn_info * pDConnInfo; -+ struct nwd_conn_info *pDConnInfo = NULL; - - unsigned char *srcData = NULL; - unsigned long dataLen = 0, cpylen; -@@ -1156,21 +1169,17 @@ static void GetConnData(struct nwc_get_c - - srcData = (unsigned char *) reply->data; - -- cpylen = -- copy_from_user(&tranAddr, dstData, -- sizeof(tranAddr)); -+ cpylen = copy_from_user(&tranAddr, dstData, sizeof(tranAddr)); - tranAddr.uTransportType = - ((struct tagNwdTranAddrEx *) srcData)->uTransportType; - tranAddr.uAddressLength = - ((struct tagNwdTranAddrEx *) srcData)->uAddressLength; -- -- cpylen = -- copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); -- cpylen = -- copy_to_user(tranAddr.puAddress, -+ if (tranAddr.uAddressLength > MAX_ADDRESS_LENGTH) -+ goto out; -+ cpylen = copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); -+ cpylen = copy_to_user(tranAddr.puAddress, - ((struct tagNwdTranAddrEx *) srcData)->Buffer, -- ((struct tagNwdTranAddrEx *) srcData)-> -- uAddressLength); -+ tranAddr.uAddressLength); - dataLen = 0; - 
break; - } -@@ -1214,20 +1223,19 @@ static void GetConnData(struct nwc_get_c - break; - } - -- if (srcData && dataLen) { -- cpylen = -- copy_to_user(connInfo->pConnInfo, srcData, -- connInfo->uInfoLength); -+ if (srcData && dataLen && dataLen <= reply->dataLen) { -+ cpylen = copy_to_user(connInfo->pConnInfo, srcData, connInfo->uInfoLength); - } - -+out: - return; - } - - int novfs_get_daemon_ver(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwd_get_reqversion *pDVersion; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwd_get_reqversion *pDVersion = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; - -@@ -1261,17 +1269,18 @@ int novfs_get_daemon_ver(struct novfs_xp - - int novfs_get_preferred_DS_tree(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwd_get_pref_ds_tree *pDGetTree; -- struct nwc_get_pref_ds_tree xplatCall, *p; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwd_get_pref_ds_tree *pDGetTree = NULL; -+ struct nwc_get_pref_ds_tree xplatCall, *p = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; -- unsigned char *dPtr; -+ unsigned char *dPtr = NULL; - -- cpylen = -- copy_from_user(&xplatCall, pdata->reqData, -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, - sizeof(struct nwc_get_pref_ds_tree)); -+ if (xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) -+ return -EINVAL; - datalen = sizeof(*pDGetTree) + xplatCall.uTreeLength; - cmdlen = datalen + sizeof(*cmd); - cmd = kmalloc(cmdlen, GFP_KERNEL); -@@ -1292,10 +1301,12 @@ int novfs_get_preferred_DS_tree(struct n - if (reply) { - retCode = reply->Reply.ErrorCode; - if (!retCode) { -- pDGetTree = -- (struct 
nwd_get_pref_ds_tree *) reply->data; -- dPtr = -- reply->data + pDGetTree->DsTreeNameOffset; -+ pDGetTree = (struct nwd_get_pref_ds_tree *) reply->data; -+ if (pDGetTree->DsTreeNameOffset >= reply->dataLen) { -+ retCode = -EINVAL; -+ goto out; -+ } -+ dPtr = reply->data + pDGetTree->DsTreeNameOffset; - p = (struct nwc_get_pref_ds_tree *) pdata->reqData; - - DbgPrint("Reply recieved"); -@@ -1303,14 +1314,17 @@ int novfs_get_preferred_DS_tree(struct n - pDGetTree->uTreeLength); - __DbgPrint(" TreeName = %s\n", dPtr); - -- cpylen = -- copy_to_user(p, &pDGetTree->uTreeLength, 4); -- cpylen = -- copy_to_user(xplatCall.pDsTreeName, dPtr, -- pDGetTree->uTreeLength); -+ if (pDGetTree->uTreeLength > reply->dataLen - pDGetTree->DsTreeNameOffset) { -+ retCode = -EINVAL; -+ goto out; -+ } -+ cpylen = copy_to_user(p, &pDGetTree->uTreeLength, 4); -+ cpylen = copy_to_user(xplatCall.pDsTreeName, dPtr, pDGetTree->uTreeLength); - } -- kfree(reply); - } -+ -+out: -+ kfree(reply); - kfree(cmd); - return (retCode); - -@@ -1318,17 +1332,17 @@ int novfs_get_preferred_DS_tree(struct n - - int novfs_set_preferred_DS_tree(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwd_set_pref_ds_tree *pDSetTree; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwd_set_pref_ds_tree *pDSetTree = NULL; - struct nwc_set_pref_ds_tree xplatCall; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; -- unsigned char *dPtr; -+ unsigned char *dPtr = NULL; - -- cpylen = -- copy_from_user(&xplatCall, pdata->reqData, -- sizeof(struct nwc_set_pref_ds_tree)); -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_set_pref_ds_tree)); -+ if (xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) -+ return -EINVAL; - datalen = sizeof(*pDSetTree) + xplatCall.uTreeLength; - cmdlen = datalen + sizeof(*cmd); - cmd = kmalloc(cmdlen, 
GFP_KERNEL); -@@ -1346,8 +1360,7 @@ int novfs_set_preferred_DS_tree(struct n - pDSetTree->uTreeLength = xplatCall.uTreeLength; - - dPtr = cmd->data + sizeof(*pDSetTree); -- cpylen = -- copy_from_user(dPtr, xplatCall.pDsTreeName, -+ cpylen = copy_from_user(dPtr, xplatCall.pDsTreeName, - xplatCall.uTreeLength); - - retCode = -@@ -1366,19 +1379,19 @@ int novfs_set_preferred_DS_tree(struct n - int novfs_set_default_ctx(struct novfs_xplat *pdata, - struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - struct nwc_set_def_name_ctx xplatCall; -- struct nwd_set_def_name_ctx * pDSet; -+ struct nwd_set_def_name_ctx * pDSet = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, datalen, replylen, cpylen; -- unsigned char *dPtr; -+ unsigned char *dPtr = NULL; - -- cpylen = -- copy_from_user(&xplatCall, pdata->reqData, -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, - sizeof(struct nwc_set_def_name_ctx)); -- datalen = -- sizeof(*pDSet) + xplatCall.uTreeLength + xplatCall.uNameLength; -+ if (xplatCall.uNameLength > MAX_NAME_LEN || xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) -+ return -EINVAL; -+ datalen = sizeof(*pDSet) + xplatCall.uTreeLength + xplatCall.uNameLength; - cmdlen = datalen + sizeof(*cmd); - cmd = kmalloc(cmdlen, GFP_KERNEL); - -@@ -1388,23 +1401,19 @@ int novfs_set_default_ctx(struct novfs_x - cmd->Command.SequenceNumber = 0; - cmd->Command.SessionId = Session; - cmd->NwcCommand = NWC_SET_DEFAULT_NAME_CONTEXT; -- cmd->dataLen = -- sizeof(struct nwd_set_def_name_ctx) + -- xplatCall.uTreeLength + xplatCall.uNameLength; -+ cmd->dataLen = sizeof(struct nwd_set_def_name_ctx) + xplatCall.uTreeLength + xplatCall.uNameLength; - - pDSet = (struct nwd_set_def_name_ctx *) cmd->data; - dPtr = cmd->data; - - pDSet->TreeOffset = sizeof(struct nwd_set_def_name_ctx); - pDSet->uTreeLength = xplatCall.uTreeLength; 
-- pDSet->NameContextOffset = -- pDSet->TreeOffset + xplatCall.uTreeLength; -+ pDSet->NameContextOffset = pDSet->TreeOffset + xplatCall.uTreeLength; - pDSet->uNameLength = xplatCall.uNameLength; - - //sgled cpylen = copy_from_user(dPtr+pDSet->TreeOffset, xplatCall.pTreeName, xplatCall.uTreeLength); - cpylen = copy_from_user(dPtr + pDSet->TreeOffset, xplatCall.pDsTreeName, xplatCall.uTreeLength); //sgled -- cpylen = -- copy_from_user(dPtr + pDSet->NameContextOffset, -+ cpylen = copy_from_user(dPtr + pDSet->NameContextOffset, - xplatCall.pNameContext, - xplatCall.uNameLength); - -@@ -1424,20 +1433,20 @@ int novfs_set_default_ctx(struct novfs_x - int novfs_get_default_ctx(struct novfs_xplat *pdata, - struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - struct nwc_get_def_name_ctx xplatCall; -- struct nwd_get_def_name_ctx * pGet; -- char *dPtr; -+ struct nwd_get_def_name_ctx * pGet = NULL; -+ char *dPtr = NULL; - int retCode = -ENOMEM; - unsigned long cmdlen, replylen, cpylen; - -- cpylen = -- copy_from_user(&xplatCall, pdata->reqData, -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, - sizeof(struct nwc_get_def_name_ctx)); -- cmdlen = -- sizeof(*cmd) + sizeof(struct nwd_get_def_name_ctx ) + -- xplatCall.uTreeLength; -+ if (xplatCall.uTreeLength > NW_MAX_TREE_NAME_LEN) -+ return -EINVAL; -+ -+ cmdlen = sizeof(*cmd) + sizeof(struct nwd_get_def_name_ctx ) + xplatCall.uTreeLength; - cmd = kmalloc(cmdlen, GFP_KERNEL); - - if (!cmd) -@@ -1512,16 +1521,20 @@ int novfs_query_feature(struct novfs_xpl - int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, - struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwc_get_tree_monitored_conn_ref xplatCall, *p; -- struct nwd_get_tree_monitored_conn_ref *pDConnRef; -- char *dPtr; -+ struct 
novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwc_get_tree_monitored_conn_ref xplatCall, *p = NULL; -+ struct nwd_get_tree_monitored_conn_ref *pDConnRef = NULL; -+ char *dPtr = NULL; - unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; - - cpylen = - copy_from_user(&xplatCall, pdata->reqData, - sizeof(struct nwc_get_tree_monitored_conn_ref)); -+ if (!access_ok(VERIFY_READ, xplatCall.pTreeName, sizeof(struct nwc_string))) -+ return -EINVAL; -+ if (xplatCall.pTreeName->DataLen > NW_MAX_TREE_NAME_LEN) -+ return -EINVAL; - datalen = sizeof(*pDConnRef) + xplatCall.pTreeName->DataLen; - cmdlen = datalen + sizeof(*cmd); - cmd = kmalloc(cmdlen, GFP_KERNEL); -@@ -1564,12 +1577,12 @@ int novfs_get_tree_monitored_conn(struct - - int novfs_enum_ids(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -- struct nwc_enum_ids xplatCall, *eId; -- struct nwd_enum_ids *pEnum; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; -+ struct nwc_enum_ids xplatCall, *eId = NULL; -+ struct nwd_enum_ids *pEnum = NULL; - struct nwc_string xferStr; -- char *str; -+ char *str = NULL; - unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; - - cpylen = -@@ -1629,56 +1642,71 @@ int novfs_enum_ids(struct novfs_xplat *p - cpylen = - copy_from_user(&xferStr, eId->pDomainName, - sizeof(struct nwc_string)); -+ if (pEnum->domainNameOffset >= reply->dataLen) { -+ status = -EINVAL; -+ goto out; -+ } - str = - (char *)((char *)reply->data + - pEnum->domainNameOffset); -- DbgPrint("[XPLAT NWCAPI] Found Domain %s", -- str); -+ DbgPrint("[XPLAT NWCAPI] Found Domain %s", str); -+ if (pEnum->domainNameLen > reply->dataLen - pEnum->domainNameOffset) { -+ status = -EINVAL; -+ goto out; -+ } - cpylen = - copy_to_user(xferStr.pBuffer, str, - pEnum->domainNameLen); - xferStr.DataType = 
NWC_STRING_TYPE_ASCII; - xferStr.DataLen = pEnum->domainNameLen - 1; -- cpylen = -- copy_to_user(eId->pDomainName, &xferStr, -+ cpylen = copy_to_user(eId->pDomainName, &xferStr, - sizeof(struct nwc_string)); - -- cpylen = -- copy_from_user(&xferStr, eId->pObjectName, -+ cpylen = copy_from_user(&xferStr, eId->pObjectName, - sizeof(struct nwc_string)); -- str = -- (char *)((char *)reply->data + -- pEnum->objectNameOffset); -+ if (pEnum->objectNameOffset >= reply->dataLen) { -+ status = -EINVAL; -+ goto out; -+ } -+ str = (char *)((char *)reply->data + pEnum->objectNameOffset); - DbgPrint("[XPLAT NWCAPI] Found User %s", str); -- cpylen = -- copy_to_user(xferStr.pBuffer, str, -- pEnum->objectNameLen); -+ if (pEnum->objectNameLen > reply->dataLen - pEnum->objectNameOffset) { -+ status = -EINVAL; -+ goto out; -+ } -+ cpylen = copy_to_user(xferStr.pBuffer, str, pEnum->objectNameLen); - xferStr.DataType = NWC_STRING_TYPE_ASCII; - xferStr.DataLen = pEnum->objectNameLen - 1; -- cpylen = -- copy_to_user(eId->pObjectName, &xferStr, -- sizeof(struct nwc_string)); -+ cpylen = copy_to_user(eId->pObjectName, &xferStr, sizeof(struct nwc_string)); - } -- -- kfree(reply); -- - } -+out: -+ kfree(reply); - kfree(cmd); - return (status); - } - - int novfs_change_auth_key(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - struct nwc_change_key xplatCall; -- struct nwd_change_key *pNewKey; -+ struct nwd_change_key *pNewKey = NULL; - struct nwc_string xferStr; -- char *str; -- unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; -+ char *str = NULL; -+ unsigned long status = -ENOMEM, cmdlen = 0, datalen, replylen, cpylen; - -- cpylen = -- copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_change_key)); -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct 
nwc_change_key)); -+ if (!access_ok(VERIFY_READ, xplatCall.pDomainName, sizeof(struct nwc_string)) || -+ !access_ok(VERIFY_READ, xplatCall.pObjectName, sizeof(struct nwc_string)) || -+ !access_ok(VERIFY_READ, xplatCall.pNewPassword, sizeof(struct nwc_string)) || -+ !access_ok(VERIFY_READ, xplatCall.pVerifyPassword, sizeof(struct nwc_string))) -+ return -EINVAL; -+ if (xplatCall.pDomainName->DataLen > MAX_DOMAIN_LEN || -+ xplatCall.pObjectName->DataLen > MAX_OBJECT_NAME_LENGTH || -+ xplatCall.pNewPassword->DataLen > MAX_PASSWORD_LENGTH || -+ xplatCall.pVerifyPassword->DataLen > MAX_PASSWORD_LENGTH) -+ return -EINVAL; - - datalen = - sizeof(struct nwd_change_key) + xplatCall.pDomainName->DataLen + -@@ -1707,10 +1735,12 @@ int novfs_change_auth_key(struct novfs_x - * Get the tree name - */ - str += sizeof(*pNewKey); -- cpylen = -- copy_from_user(&xferStr, xplatCall.pDomainName, -- sizeof(struct nwc_string)); -+ cpylen = copy_from_user(&xferStr, xplatCall.pDomainName, sizeof(struct nwc_string)); - pNewKey->domainNameOffset = sizeof(*pNewKey); -+ if (xferStr.DataLen > MAX_DOMAIN_LEN) { -+ status = -EINVAL; -+ goto out; -+ } - cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); - pNewKey->domainNameLen = xferStr.DataLen; - -@@ -1718,11 +1748,12 @@ int novfs_change_auth_key(struct novfs_x - * Get the User Name - */ - str += pNewKey->domainNameLen; -- cpylen = -- copy_from_user(&xferStr, xplatCall.pObjectName, -- sizeof(struct nwc_string)); -- pNewKey->objectNameOffset = -- pNewKey->domainNameOffset + pNewKey->domainNameLen; -+ cpylen = copy_from_user(&xferStr, xplatCall.pObjectName, sizeof(struct nwc_string)); -+ pNewKey->objectNameOffset = pNewKey->domainNameOffset + pNewKey->domainNameLen; -+ if (xferStr.DataLen > MAX_OBJECT_NAME_LENGTH) { -+ status = -EINVAL; -+ goto out; -+ } - cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); - pNewKey->objectNameLen = xferStr.DataLen; - -@@ -1730,11 +1761,12 @@ int novfs_change_auth_key(struct novfs_x - * 
Get the New Password - */ - str += pNewKey->objectNameLen; -- cpylen = -- copy_from_user(&xferStr, xplatCall.pNewPassword, -- sizeof(struct nwc_string)); -- pNewKey->newPasswordOffset = -- pNewKey->objectNameOffset + pNewKey->objectNameLen; -+ cpylen = copy_from_user(&xferStr, xplatCall.pNewPassword, sizeof(struct nwc_string)); -+ pNewKey->newPasswordOffset = pNewKey->objectNameOffset + pNewKey->objectNameLen; -+ if (xferStr.DataLen > MAX_PASSWORD_LENGTH) { -+ status = -EINVAL; -+ goto out; -+ } - cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); - pNewKey->newPasswordLen = xferStr.DataLen; - -@@ -1742,34 +1774,35 @@ int novfs_change_auth_key(struct novfs_x - * Get the Verify Password - */ - str += pNewKey->newPasswordLen; -- cpylen = -- copy_from_user(&xferStr, xplatCall.pVerifyPassword, -- sizeof(struct nwc_string)); -- pNewKey->verifyPasswordOffset = -- pNewKey->newPasswordOffset + pNewKey->newPasswordLen; -+ cpylen = copy_from_user(&xferStr, xplatCall.pVerifyPassword, sizeof(struct nwc_string)); -+ pNewKey->verifyPasswordOffset = pNewKey->newPasswordOffset + pNewKey->newPasswordLen; -+ if (xferStr.DataLen > MAX_PASSWORD_LENGTH) { -+ status = -EINVAL; -+ goto out; -+ } - cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); - pNewKey->verifyPasswordLen = xferStr.DataLen; - -- status = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, - (void **)&reply, &replylen, - INTERRUPTIBLE); - if (reply) { - status = reply->Reply.ErrorCode; -- kfree(reply); -+ - } -+out: - memset(cmd, 0, cmdlen); -- -+ kfree(reply); - kfree(cmd); - return (status); - } - - int novfs_set_pri_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - struct nwc_set_primary_conn xplatCall; -- struct 
nwd_set_primary_conn *pConn; -+ struct nwd_set_primary_conn *pConn = NULL; - unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; - - cpylen = -@@ -1805,7 +1838,7 @@ int novfs_set_pri_conn(struct novfs_xpla - int novfs_get_pri_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) - { - struct novfs_xplat_call_request cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_reply *reply = NULL; - unsigned long status = -ENOMEM, cmdlen, replylen, cpylen; - - cmdlen = (unsigned long) (&((struct novfs_xplat_call_request *) 0)->data); -@@ -1837,8 +1870,8 @@ int novfs_get_pri_conn(struct novfs_xpla - int novfs_set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session) - { - -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - unsigned long status = 0, datalen, cmdlen, replylen; - struct nwc_map_drive_ex symInfo; - -@@ -1846,6 +1879,8 @@ int novfs_set_map_drive(struct novfs_xpl - cmdlen = sizeof(*cmd); - if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo))) - return -EFAULT; -+ if (symInfo.dirPathOffsetLength > MAX_OFFSET_LEN || symInfo.linkOffsetLength > MAX_OFFSET_LEN) -+ return -EINVAL; - datalen = sizeof(symInfo) + symInfo.dirPathOffsetLength + - symInfo.linkOffsetLength; - -@@ -1890,14 +1925,16 @@ int novfs_set_map_drive(struct novfs_xpl - - int novfs_unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - unsigned long status = 0, datalen, cmdlen, replylen, cpylen; - struct nwc_unmap_drive_ex symInfo; - - DbgPrint(""); - - cpylen = copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo)); -+ if (symInfo.linkLen > MAX_NAME_LEN) -+ return -EINVAL; - cmdlen = sizeof(*cmd); - datalen = 
sizeof(symInfo) + symInfo.linkLen; - -@@ -1913,8 +1950,7 @@ int novfs_unmap_drive(struct novfs_xplat - cmd->NwcCommand = NWC_UNMAP_DRIVE; - - cpylen = copy_from_user(cmd->data, pdata->reqData, datalen); -- status = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, - (void **)&reply, &replylen, - INTERRUPTIBLE); - -@@ -1928,11 +1964,11 @@ int novfs_unmap_drive(struct novfs_xplat - - int novfs_enum_drives(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - unsigned long status = 0, cmdlen, replylen, cpylen; - unsigned long offset; -- char *cp; -+ char *cp = NULL; - - DbgPrint(""); - -@@ -1955,37 +1991,36 @@ int novfs_enum_drives(struct novfs_xplat - status = reply->Reply.ErrorCode; - DbgPrint("Status Code = 0x%X", status); - if (!status) { -- offset = -- sizeof(((struct nwc_get_mapped_drives *) pdata-> -+ offset = sizeof(((struct nwc_get_mapped_drives *) pdata-> - repData)->MapBuffLen); - cp = reply->data; -- replylen = -- ((struct nwc_get_mapped_drives *) pdata->repData)-> -- MapBuffLen; -- cpylen = -- copy_to_user(pdata->repData, cp, offset); -+ replylen = ((struct nwc_get_mapped_drives *) pdata->repData)->MapBuffLen; -+ if (offset > reply->dataLen) { -+ status = -EINVAL; -+ goto out; -+ } -+ cpylen = copy_to_user(pdata->repData, cp, offset); - cp += offset; -- cpylen = -- copy_to_user(((struct nwc_get_mapped_drives *) pdata-> -+ cpylen = copy_to_user(((struct nwc_get_mapped_drives *) pdata-> - repData)->MapBuffer, cp, - min(replylen - offset, - reply->dataLen - offset)); - } -- -- kfree(reply); - } -+out: -+ kfree(reply); - kfree(cmd); - return (status); - } - - int novfs_get_bcast_msg(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct 
novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - unsigned long cmdlen, replylen; - int status = 0x8866, cpylen; - struct nwc_get_bcast_notification msg; -- struct nwd_get_bcast_notification *dmsg; -+ struct nwd_get_bcast_notification *dmsg = NULL; - - cmdlen = sizeof(*cmd) + sizeof(*dmsg); - cmd = kmalloc(cmdlen, GFP_KERNEL); -@@ -2013,33 +2048,21 @@ int novfs_get_bcast_msg(struct novfs_xpl - if (!status) { - char *cp = pdata->repData; - -- dmsg = -- (struct nwd_get_bcast_notification *) reply->data; -+ dmsg = (struct nwd_get_bcast_notification *) reply->data; - if (pdata->repLen < dmsg->messageLen) { - dmsg->messageLen = pdata->repLen; - } - msg.messageLen = dmsg->messageLen; -- cpylen = -- offsetof(struct -- nwc_get_bcast_notification, -- message); -+ cpylen = offsetof(struct nwc_get_bcast_notification, message); - cp += cpylen; -- cpylen = -- copy_to_user(pdata->repData, &msg, cpylen); -- cpylen = -- copy_to_user(cp, dmsg->message, -- msg.messageLen); -+ cpylen = copy_to_user(pdata->repData, &msg, cpylen); -+ cpylen = copy_to_user(cp, dmsg->message, msg.messageLen); - } else { - msg.messageLen = 0; - msg.message[0] = 0; -- cpylen = offsetof(struct -- nwc_get_bcast_notification, -- message); -- cpylen = -- copy_to_user(pdata->repData, &msg, -- sizeof(msg)); -+ cpylen = offsetof(struct nwc_get_bcast_notification, message); -+ cpylen = copy_to_user(pdata->repData, &msg, sizeof(msg)); - } -- - kfree(reply); - } - kfree(cmd); -@@ -2048,24 +2071,21 @@ int novfs_get_bcast_msg(struct novfs_xpl - - int novfs_set_key_value(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply = NULL; - struct nwc_set_key xplatCall; -- struct nwd_set_key *pNewKey; -+ struct nwd_set_key *pNewKey = NULL; - struct nwc_string 
cstrObjectName, cstrPassword; -- char *str; -+ char *str = NULL; - unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; - - cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_set_key)); -- cpylen = -- copy_from_user(&cstrObjectName, xplatCall.pObjectName, -- sizeof(struct nwc_string)); -- cpylen = -- copy_from_user(&cstrPassword, xplatCall.pNewPassword, -- sizeof(struct nwc_string)); -+ cpylen = copy_from_user(&cstrObjectName, xplatCall.pObjectName, sizeof(struct nwc_string)); -+ cpylen = copy_from_user(&cstrPassword, xplatCall.pNewPassword, sizeof(struct nwc_string)); - -- datalen = -- sizeof(struct nwd_set_key ) + cstrObjectName.DataLen + cstrPassword.DataLen; -+ if (cstrObjectName.DataLen > MAX_OBJECT_NAME_LENGTH || cstrPassword.DataLen > MAX_PASSWORD_LENGTH) -+ return -EINVAL; -+ datalen = sizeof(struct nwd_set_key ) + cstrObjectName.DataLen + cstrPassword.DataLen; - - cmdlen = sizeof(*cmd) + datalen; - cmd = kmalloc(cmdlen, GFP_KERNEL); -@@ -2099,16 +2119,12 @@ int novfs_set_key_value(struct novfs_xpl - /* - * Get the Verify Password - */ -- cpylen = -- copy_from_user(str, cstrPassword.pBuffer, -- cstrPassword.DataLen); -+ cpylen = copy_from_user(str, cstrPassword.pBuffer, cstrPassword.DataLen); - - pNewKey->newPasswordLen = cstrPassword.DataLen; -- pNewKey->newPasswordOffset = -- pNewKey->objectNameOffset + pNewKey->objectNameLen; -+ pNewKey->newPasswordOffset = pNewKey->objectNameOffset + pNewKey->objectNameLen; - -- status = -- Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, - (void **)&reply, &replylen, - INTERRUPTIBLE); - if (reply) { -@@ -2121,16 +2137,22 @@ int novfs_set_key_value(struct novfs_xpl - - int novfs_verify_key_value(struct novfs_xplat *pdata, struct novfs_schandle Session) - { -- struct novfs_xplat_call_request *cmd; -- struct novfs_xplat_call_reply *reply; -+ struct novfs_xplat_call_request *cmd = NULL; -+ struct novfs_xplat_call_reply *reply 
= NULL; - struct nwc_verify_key xplatCall; -- struct nwd_verify_key *pNewKey; -+ struct nwd_verify_key *pNewKey = NULL; - struct nwc_string xferStr; -- char *str; -+ char *str = NULL; - unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; - -- cpylen = -- copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_verify_key)); -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_verify_key)); -+ -+ if (!access_ok(VERIFY_READ, xplatCall.pDomainName, sizeof(struct nwc_string)) || -+ !access_ok(VERIFY_READ, xplatCall.pVerifyPassword, sizeof(struct nwc_string))) -+ return -EINVAL; -+ if (xplatCall.pDomainName->DataLen > MAX_NAME_LEN || xplatCall.pObjectName->DataLen > MAX_OBJECT_NAME_LENGTH || -+ xplatCall.pVerifyPassword->DataLen > MAX_PASSWORD_LENGTH) -+ return -EINVAL; - - datalen = - sizeof(struct nwd_verify_key) + xplatCall.pDomainName->DataLen + -@@ -2169,11 +2191,9 @@ int novfs_verify_key_value(struct novfs_ - * Get the User Name - */ - str += pNewKey->domainNameLen; -- cpylen = -- copy_from_user(&xferStr, xplatCall.pObjectName, -+ cpylen = copy_from_user(&xferStr, xplatCall.pObjectName, - sizeof(struct nwc_string)); -- pNewKey->objectNameOffset = -- pNewKey->domainNameOffset + pNewKey->domainNameLen; -+ pNewKey->objectNameOffset = pNewKey->domainNameOffset + pNewKey->domainNameLen; - cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); - pNewKey->objectNameLen = xferStr.DataLen; - -@@ -2181,16 +2201,14 @@ int novfs_verify_key_value(struct novfs_ - * Get the Verify Password - */ - str += pNewKey->objectNameLen; -- cpylen = -- copy_from_user(&xferStr, xplatCall.pVerifyPassword, -+ cpylen = copy_from_user(&xferStr, xplatCall.pVerifyPassword, - sizeof(struct nwc_string)); - pNewKey->verifyPasswordOffset = - pNewKey->objectNameOffset + pNewKey->objectNameLen; - cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); - pNewKey->verifyPasswordLen = xferStr.DataLen; - -- status = -- Queue_Daemon_Command((void 
*)cmd, cmdlen, NULL, 0, -+ status = Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, - (void **)&reply, &replylen, - INTERRUPTIBLE); - if (reply) { -@@ -2200,3 +2218,4 @@ int novfs_verify_key_value(struct novfs_ - kfree(cmd); - return (status); - } -+ ---- a/fs/novfs/nwcapi.h -+++ b/fs/novfs/nwcapi.h -@@ -297,6 +297,18 @@ N_EXTERN_LIBRARY(NWRCODE) - #define MAX_ADDRESS_LENGTH 32 - #define MAX_NAME_SERVICE_PROVIDERS 10 - -+#define MAX_NAME_LEN 1024 -+#define MAX_NUM_REPLIES 4096 -+#define MIN_NUM_REPLIES 1 -+#define MAX_NUM_REQUESTS 4096 -+#define MIN_NUM_REQUESTS 1 -+#define MAX_FRAG_SIZE 4096 -+#define MIN_FRAG_SIZE 1 -+#define MAX_INFO_LEN 4096 -+#define MAX_DOMAIN_LEN MAX_NETWORK_NAME_LENGTH -+#define MAX_OFFSET_LEN 4096 -+ -+ - // - // Flags for the GetBroadcastMessage API - // diff --git a/patches.fixes/novfs-return-ENOTEMPTY-when-deleting-nonempty-dir b/patches.fixes/novfs-return-ENOTEMPTY-when-deleting-nonempty-dir deleted file mode 100644 index 799f5ec..0000000 --- a/patches.fixes/novfs-return-ENOTEMPTY-when-deleting-nonempty-dir +++ /dev/null @@ -1,37 +0,0 @@ -From: Sankar P -Subject: fs: novfs: Return ENOTEMPTY when tyring to delete a non-empty folder -References: bnc#583964 -Patch-mainline: no - -The patch returns the ENOTEMPTY error code, when an user issues -delete command on a non-empty folder. This fix makes Nautilus -behave correctly in novfs, just like other file-systems. 
- -Signed-off-by: Sankar P -Acked-by: Jan Kara ---- - fs/novfs/file.c | 11 ++++++++--- - 1 file changed, 8 insertions(+), 3 deletions(-) - ---- a/fs/novfs/file.c -+++ b/fs/novfs/file.c -@@ -1569,11 +1569,16 @@ int novfs_delete(unsigned char * Path, i - if (reply) { - retCode = 0; - if (reply->Reply.ErrorCode) { -- if ((reply->Reply.ErrorCode & 0xFFFF) == 0x0006) { /* Access Denied Error */ -+ -+ /* Refer to the file ncp.c, in xtier's -+ * NCP89_08 Function for various error codes */ -+ -+ if ((reply->Reply.ErrorCode & 0xFFFF) == 0x0006) - retCode = -EACCES; -- } else { -+ else if ((reply->Reply.ErrorCode & 0xFFFF) == 0x0513) -+ retCode = -ENOTEMPTY; -+ else - retCode = -EIO; -- } - } - kfree(reply); - } diff --git a/patches.fixes/novfs-truncate-fix b/patches.fixes/novfs-truncate-fix deleted file mode 100644 index 97a4fbb..0000000 --- a/patches.fixes/novfs-truncate-fix +++ /dev/null @@ -1,58 +0,0 @@ -From: Sankar P -Subject: novfs: Fixes corruption of OO documents on NSS Volumes -References: bnc#508259 -Patch-mainline: no - -OpenOffice documents stored in NSS volumes, when accessed -via NOVFS, get corrupted while using the ftruncate64 call. - -Removed stale code which unsets the size in the setattr call. 
- -Signed-off-by: Sankar P -Acked-by: Jan Kara - ---- - fs/novfs/inode.c | 25 ------------------------- - 1 file changed, 25 deletions(-) - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -3183,7 +3183,6 @@ int novfs_i_setattr(struct dentry *dentr - unsigned int ia_valid = attr->ia_valid; - struct novfs_schandle session; - int retVal = 0; -- struct iattr mattr; - - if (IS_ROOT(dentry) || /* Root */ - IS_ROOT(dentry->d_parent) || /* User */ -@@ -3241,30 +3240,6 @@ int novfs_i_setattr(struct dentry *dentr - attr->ia_size, - atime_buf, mtime_buf, ctime_buf); - -- if ((attr->ia_valid & ATTR_FILE) -- && (attr->ia_valid & ATTR_SIZE)) { -- memcpy(&mattr, attr, sizeof(mattr)); -- mattr.ia_valid &= -- ~(ATTR_FILE | ATTR_SIZE); -- attr = &mattr; -- ia_valid = attr->ia_valid; --#if 0 // thanks to vfs changes in our tree... -- retVal = -- novfs_trunc_ex(attr-> -- ia_file-> -- private_data, -- attr-> -- ia_size, -- session); -- if (!retVal) { -- inode->i_size = attr->ia_size; -- ((struct inode_data *) inode-> -- i_private)->Flags |= -- UPDATE_INODE; -- } --#endif -- } -- - if (ia_valid - && !(retVal = - novfs_set_attr(path, attr, session))) { diff --git a/patches.fixes/novfs-unlink-oops b/patches.fixes/novfs-unlink-oops deleted file mode 100644 index 453d215..0000000 --- a/patches.fixes/novfs-unlink-oops +++ /dev/null @@ -1,36 +0,0 @@ -From: Sankar P -Subject: novfs: Fix for the issue of kernel dumps core on restart -References: bnc#641811 -Patch-mainline: No - -This patch fixes a bug that cause kernel to dump core on restart, -by rectifying the counter and dentry manipulation code. 
- -Signed-off-by: Sankar P -Acked-by: Jan Kara - -diff --git a/fs/novfs/daemon.c b/fs/novfs/daemon.c -index f0fd5d6..6e7fb5d 100644 ---- a/fs/novfs/daemon.c -+++ b/fs/novfs/daemon.c -@@ -1857,14 +1857,15 @@ static long local_unlink(const char *pathname) - while (*c != '\0') { - if (*c == '/') - name = ++c; -- c++; -+ else -+ c++; - } - dentry = lookup_one_len(name, nd.path.dentry, strlen(name)); - error = PTR_ERR(dentry); - - if (!IS_ERR(dentry)) { - DbgPrint("dentry %p", dentry); -- if (!(dentry->d_inode->i_mode & S_IFLNK)) { -+ if (!(dentry->d_inode) || !(dentry->d_inode->i_mode & S_IFLNK)) { - DbgPrint("%s not a link", name); - error = -ENOENT; - goto exit1; --- -1.7.1 - diff --git a/patches.fixes/novfs-xattr-errcode-cleanup b/patches.fixes/novfs-xattr-errcode-cleanup deleted file mode 100644 index 9b0a890..0000000 --- a/patches.fixes/novfs-xattr-errcode-cleanup +++ /dev/null @@ -1,40 +0,0 @@ -From: Sankar P -Subject: [PATCH] novfs: code cleanup for one case of novfs return value -References: bnc#624606 -Patch-mainline: No - -Code cleanup. One error case returns a positive error value. -Changed it to return in negative. 
- -Signed-off-by: Sankar P -Acked-by: Jan Kara ---- - fs/novfs/file.c | 10 ++++------ - 1 file changed, 4 insertions(+), 6 deletions(-) - ---- a/fs/novfs/file.c -+++ b/fs/novfs/file.c -@@ -280,9 +280,8 @@ int novfs_getx_file_info(char *Path, con - DbgPrint("xattr: Path = %s, pathlen = %i, Name = %s, namelen = %i", - Path, pathlen, Name, namelen); - -- if (namelen > MAX_XATTR_NAME_LEN) { -- return ENOATTR; -- } -+ if (namelen > MAX_XATTR_NAME_LEN) -+ return -ENOATTR; - - cmdlen = offsetof(struct novfs_xa_get_request, data) + pathlen + 1 + namelen + 1; // two '\0' - cmd = (struct novfs_xa_get_request *) kmalloc(cmdlen, GFP_KERNEL); -@@ -375,9 +374,8 @@ int novfs_setx_file_info(char *Path, con - DbgPrint("xattr: Path = %s, pathlen = %i, Name = %s, namelen = %i, " - "value len = %u", Path, pathlen, Name, namelen, valueLen); - -- if (namelen > MAX_XATTR_NAME_LEN) { -- return ENOATTR; -- } -+ if (namelen > MAX_XATTR_NAME_LEN) -+ return -ENOATTR; - - cmdlen = offsetof(struct novfs_xa_set_request, data) + pathlen + 1 + namelen + 1 + valueLen; - cmd = (struct novfs_xa_set_request *) kmalloc(cmdlen, GFP_KERNEL); diff --git a/patches.fixes/novfs-xattr-errcode-cleanup2 b/patches.fixes/novfs-xattr-errcode-cleanup2 deleted file mode 100644 index 3f2031f..0000000 --- a/patches.fixes/novfs-xattr-errcode-cleanup2 +++ /dev/null @@ -1,32 +0,0 @@ -From: Sankar P -Subject: [PATCH] novfs: Fix error codes for getxattr for novfs -Patch-mainline: No -References: bnc#529535 - -getxattr function should not use harsh error codes like ENOENT -if the named attribute cannot be obtained. This fixes makes -novfs return ENOATTR as suggested by the man page. 
- -Signed-off-by: Sankar P -Acked-by: Jan Kara ---- - fs/novfs/file.c | 8 +------- - 1 file changed, 1 insertion(+), 7 deletions(-) - ---- a/fs/novfs/file.c -+++ b/fs/novfs/file.c -@@ -319,13 +319,7 @@ int novfs_getx_file_info(char *Path, con - reply->Reply.ErrorCode); - DbgPrint("xattr: replylen=%d", replylen); - -- //0xC9 = EA not found (C9), 0xD1 = EA access denied -- if ((reply->Reply.ErrorCode == 0xC9) -- || (reply->Reply.ErrorCode == 0xD1)) { -- retCode = -ENOATTR; -- } else { -- retCode = -ENOENT; -- } -+ retCode = -ENOATTR; - } else { - - *dataLen = diff --git a/patches.fixes/novfs-xattr-memleak b/patches.fixes/novfs-xattr-memleak deleted file mode 100644 index 49affbe..0000000 --- a/patches.fixes/novfs-xattr-memleak +++ /dev/null @@ -1,30 +0,0 @@ -From: Sankar P -Subject: [PATCH] novfs: Fix a memory leak that causes an OOM condition -References: bnc#610828 -Patch-mainline: No - -The patch fixes a serious memory leak issue, that causes -machine go to OOM condition that makes it unusable. 
- -Signed-off-by: Sankar P -Acked-by: Jan Kara - ---- - fs/novfs/inode.c | 5 +---- - 1 file changed, 1 insertion(+), 4 deletions(-) - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -3421,11 +3421,8 @@ ssize_t novfs_i_getxattr(struct dentry * - retxcode = -ERANGE; - } - } -- -- if (bufRead) { -- kfree(bufRead); -- } - } -+ kfree(bufRead); - } - } - kfree(buf); diff --git a/patches.fixes/oom-warning b/patches.fixes/oom-warning deleted file mode 100644 index b2f7134..0000000 --- a/patches.fixes/oom-warning +++ /dev/null @@ -1,30 +0,0 @@ -From: Andrea Arcangeli -Subject: Tell the end user they should not worry about GFP_ATOMIC failures -Patch-mainline: no -References: SUSE48965 - -x - -Signed-off-by: Andrea Arcangeli - ---- - mm/page_alloc.c | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -2087,7 +2087,13 @@ rebalance: - - nopage: - if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { -- printk(KERN_WARNING "%s: page allocation failure." -+ if (!wait) { -+ printk(KERN_INFO "The following is only an harmless informational message.\n"); -+ printk(KERN_INFO "Unless you get a _continuous_flood_ of these messages it means\n"); -+ printk(KERN_INFO "everything is working fine. Allocations from irqs cannot be\n"); -+ printk(KERN_INFO "perfectly reliable and the kernel is designed to handle that.\n"); -+ } -+ printk(KERN_INFO "%s: page allocation failure." 
- " order:%d, mode:0x%x\n", - current->comm, order, gfp_mask); - dump_stack(); diff --git a/patches.fixes/oprofile_bios_ctr.patch b/patches.fixes/oprofile_bios_ctr.patch deleted file mode 100644 index 4debe02..0000000 --- a/patches.fixes/oprofile_bios_ctr.patch +++ /dev/null @@ -1,103 +0,0 @@ -From: Naga Chumbalkar -Subject: detect oprofile counters reserved by bios -References: FATE#307426 -Patch-mainline: pending -Signed-off-by: Tony Jones - -Currently, oprofile fails silently on platforms where a non-OS entity such as -the system firmware "enables" and uses a performance counter. The patch below -suggests a workaround to the user in such instances. - -The patch below has been well tested on AMD and Intel based platforms. It -improves on a previous version via better macro usage, use of rdmsrl and more -accurate/informative error messages. - -Signed-off-by: Naga Chumbalkar -Tested-by: Shashi Belur - ---- ---- - arch/x86/oprofile/nmi_int.c | 58 ++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 58 insertions(+) - ---- a/arch/x86/oprofile/nmi_int.c -+++ b/arch/x86/oprofile/nmi_int.c -@@ -27,6 +27,19 @@ - #include "op_counter.h" - #include "op_x86_model.h" - -+static const char RSVD_MSG[] = -+ KERN_INFO "oprofile: performance counter #%d may already be" -+ " reserved." -+ " For counter #%d, EvntSel 0x%lx has value: 0x%llx.\n"; -+static const char PMC_MSG[] = -+ KERN_INFO "oprofile: if oprofile doesn't collect data, then" -+ " try using a different performance counter on your platform" -+ " to monitor the desired event." -+ " Delete counter #%d from the desired event by editing the" -+ " /usr/share/oprofile/%s//events file." 
-+ " If the event cannot be monitored by any other counter," -+ " contact your hardware or BIOS vendor.\n"; -+ - static struct op_x86_model_spec *model; - static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); - static DEFINE_PER_CPU(unsigned long, saved_lvtpc); -@@ -466,6 +479,50 @@ static struct notifier_block oprofile_cp - .notifier_call = oprofile_cpu_notifier - }; - -+#define P4_CCCR_ENABLE (1 << 12) -+ -+/* check if the counter/evtsel is already enabled, say, by firmware */ -+static void nmi_is_counter_enabled(struct op_msrs * const msrs) -+{ -+ __u8 vendor = boot_cpu_data.x86_vendor; -+ __u8 family = boot_cpu_data.x86; -+ u64 val; -+ unsigned int i; -+ char *arch = "arch"; -+ -+ /* Fill in at least the "arch" value to help the user */ -+ if (vendor == X86_VENDOR_AMD) { -+ if (family == 6) -+ arch = "i386"; -+ else -+ arch = "x86-64"; -+ } else if (vendor == X86_VENDOR_INTEL) { -+ arch = "i386"; -+ } -+ -+ for (i = 0; i < model->num_controls; ++i) { -+ if (!counter_config[i].enabled) -+ continue; -+ rdmsrl(msrs->controls[i].addr, val); -+ -+ /* P4 is special. 
Other Intel, and all AMD CPUs -+ ** are consistent in using "bit 22" as "enable" -+ */ -+ if ((vendor == X86_VENDOR_INTEL) && (family == 0xf)) { -+ if (val & P4_CCCR_ENABLE) -+ goto err_rsvd; -+ } else if (val & ARCH_PERFMON_EVENTSEL_ENABLE) { -+ goto err_rsvd; -+ } -+ } -+ return; -+ -+err_rsvd: -+ printk(RSVD_MSG, i, i, msrs->controls[i].addr, val); -+ printk(PMC_MSG, i, arch); -+ return; -+} -+ - static int nmi_setup(void) - { - int err = 0; -@@ -483,6 +540,7 @@ static int nmi_setup(void) - if (err) - goto fail; - -+ nmi_is_counter_enabled(&per_cpu(cpu_msrs, 0)); - for_each_possible_cpu(cpu) { - if (!cpu) - continue; diff --git a/patches.fixes/parport-mutex b/patches.fixes/parport-mutex deleted file mode 100644 index 5b82478..0000000 --- a/patches.fixes/parport-mutex +++ /dev/null @@ -1,42 +0,0 @@ -From: salina@us.ibm.com -Subject: No lp_release_parport while write is going on -References: bnc#62947 - LTC11483 -Patch-mainline: not yet - -This patch was done by IBM a while back, but apparently never made it -into mainline. It fixes a problem in the lp driver that can cause oopses. - -Scenario: - process A: calls lp_write, which in turn calls parport_ieee1284_write_compat, - and that invokes parport_wait_peripheral - process B: meanwhile does an ioctl(LPGETSTATUS), which call lp_release_parport - when done. This function will set physport->cad = NULL. - process A: parport_wait_peripheral tries to dereference physport->cad and - dies - -The patch below simply protects that code with the port_mutex in order to -protect against simultaneous calls to lp_read/lp_write. - -Similar protection is probably required for ioctl(LPRESET). 
- -Signed-off-by: okir@suse.de - ---- - drivers/char/lp.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/drivers/char/lp.c -+++ b/drivers/char/lp.c -@@ -622,9 +622,12 @@ static int lp_do_ioctl(unsigned int mino - return -EFAULT; - break; - case LPGETSTATUS: -+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) -+ return -EINTR; - lp_claim_parport_or_block (&lp_table[minor]); - status = r_str(minor); - lp_release_parport (&lp_table[minor]); -+ mutex_unlock(&lp_table[minor].port_mutex); - - if (copy_to_user(argp, &status, sizeof(int))) - return -EFAULT; diff --git a/patches.fixes/proc-scsi-scsi-fix.diff b/patches.fixes/proc-scsi-scsi-fix.diff deleted file mode 100644 index 55b7d37..0000000 --- a/patches.fixes/proc-scsi-scsi-fix.diff +++ /dev/null @@ -1,110 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH] scsi: iterate over devices individually for /proc/scsi/scsi -References: 263731 -Patch-mainline: Probably never, hch wants to kill /proc/scsi/scsi anyway. - - On systems with very large numbers (> 1600 or so) of SCSI devices, - cat /proc/scsi/scsi ends up failing with -ENOMEM. This is due to - the show routine simply iterating over all of the devices with - bus_for_each_dev(), and trying to dump all of them into the buffer - at the same time. On my test system (using scsi_debug with 4064 devices), - the output ends up being ~ 632k, far more than kmalloc will typically allow. - - This patch defines its own seq_file opreations to iterate over the scsi - devices.The result is that each show() operation only dumps ~ 180 bytes - into the buffer at a time so we don't run out of memory. - - If the "Attached devices" header isn't required, we can dump the - sfile->private bit completely. 
- -Signed-off-by: Jeff Mahoney - ---- - - drivers/scsi/scsi_proc.c | 58 ++++++++++++++++++++++++++++++++++++++++++----- - 1 file changed, 52 insertions(+), 6 deletions(-) - ---- a/drivers/scsi/scsi_proc.c -+++ b/drivers/scsi/scsi_proc.c -@@ -386,13 +386,59 @@ static ssize_t proc_scsi_write(struct fi - * @s: output goes here - * @p: not used - */ --static int proc_scsi_show(struct seq_file *s, void *p) -+static int always_match(struct device *dev, void *data) - { -- seq_printf(s, "Attached devices:\n"); -- bus_for_each_dev(&scsi_bus_type, NULL, s, proc_print_scsidevice); -- return 0; -+ return 1; - } - -+static inline struct device *next_scsi_device(struct device *start) -+{ -+ struct device *next = bus_find_device(&scsi_bus_type, start, NULL, -+ always_match); -+ put_device(start); -+ return next; -+} -+ -+static void *scsi_seq_start(struct seq_file *sfile, loff_t *pos) -+{ -+ struct device *dev = NULL; -+ loff_t n = *pos; -+ -+ while ((dev = next_scsi_device(dev))) { -+ if (!n--) -+ break; -+ sfile->private++; -+ } -+ return dev; -+} -+ -+static void *scsi_seq_next(struct seq_file *sfile, void *v, loff_t *pos) -+{ -+ (*pos)++; -+ sfile->private++; -+ return next_scsi_device(v); -+} -+ -+static void scsi_seq_stop(struct seq_file *sfile, void *v) -+{ -+ put_device(v); -+} -+ -+static int scsi_seq_show(struct seq_file *sfile, void *dev) -+{ -+ if (!sfile->private) -+ seq_puts(sfile, "Attached devices:\n"); -+ -+ return proc_print_scsidevice(dev, sfile); -+} -+ -+static struct seq_operations scsi_seq_ops = { -+ .start = scsi_seq_start, -+ .next = scsi_seq_next, -+ .stop = scsi_seq_stop, -+ .show = scsi_seq_show -+}; -+ - /** - * proc_scsi_open - glue function - * @inode: not used -@@ -406,7 +452,7 @@ static int proc_scsi_open(struct inode * - * We don't really need this for the write case but it doesn't - * harm either. 
- */ -- return single_open(file, proc_scsi_show, NULL); -+ return seq_open(file, &scsi_seq_ops); - } - - static const struct file_operations proc_scsi_operations = { -@@ -415,7 +461,7 @@ static const struct file_operations proc - .read = seq_read, - .write = proc_scsi_write, - .llseek = seq_lseek, -- .release = single_release, -+ .release = seq_release, - }; - - /** diff --git a/patches.fixes/ptrace-getsiginfo b/patches.fixes/ptrace-getsiginfo deleted file mode 100644 index 92a953a..0000000 --- a/patches.fixes/ptrace-getsiginfo +++ /dev/null @@ -1,79 +0,0 @@ -From: Andreas Schwab -Subject: Add compat handler for PTRACE_GETSIGINFO -Patch-mainline: not yet - -Current versions of gdb require a working implementation of -PTRACE_GETSIGINFO for proper watchpoint support. Since struct siginfo -contains pointers it must be converted when passed to a 32-bit debugger. - -Signed-off-by: Andreas Schwab ---- - arch/powerpc/kernel/ppc32.h | 2 ++ - arch/powerpc/kernel/ptrace32.c | 27 +++++++++++++++++++++++++++ - 2 files changed, 29 insertions(+) - ---- a/arch/powerpc/kernel/ppc32.h -+++ b/arch/powerpc/kernel/ppc32.h -@@ -136,4 +136,6 @@ struct ucontext32 { - struct mcontext32 uc_mcontext; - }; - -+extern int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s); -+ - #endif /* _PPC64_PPC32_H */ ---- a/arch/powerpc/kernel/ptrace32.c -+++ b/arch/powerpc/kernel/ptrace32.c -@@ -28,12 +28,15 @@ - #include - #include - #include -+#include - - #include - #include - #include - #include - -+#include "ppc32.h" -+ - /* - * does not yet catch signals sent when the child dies. - * in exit.c or in signal.c. 
-@@ -69,6 +72,27 @@ static long compat_ptrace_old(struct tas - #define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i) - #define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0)) - -+static int compat_ptrace_getsiginfo(struct task_struct *child, compat_siginfo_t __user *data) -+{ -+ siginfo_t lastinfo; -+ int error = -ESRCH; -+ -+ read_lock(&tasklist_lock); -+ if (likely(child->sighand != NULL)) { -+ error = -EINVAL; -+ spin_lock_irq(&child->sighand->siglock); -+ if (likely(child->last_siginfo != NULL)) { -+ lastinfo = *child->last_siginfo; -+ error = 0; -+ } -+ spin_unlock_irq(&child->sighand->siglock); -+ } -+ read_unlock(&tasklist_lock); -+ if (!error) -+ return copy_siginfo_to_user32(data, &lastinfo); -+ return error; -+} -+ - long compat_arch_ptrace(struct task_struct *child, compat_long_t request, - compat_ulong_t caddr, compat_ulong_t cdata) - { -@@ -296,6 +320,9 @@ long compat_arch_ptrace(struct task_stru - 0, PT_REGS_COUNT * sizeof(compat_long_t), - compat_ptr(data)); - -+ case PTRACE_GETSIGINFO: -+ return compat_ptrace_getsiginfo(child, compat_ptr(data)); -+ - case PTRACE_GETFPREGS: - case PTRACE_SETFPREGS: - case PTRACE_GETVRREGS: diff --git a/patches.fixes/reiserfs-force-inode-evictions-before-umount-to-avoid-crash b/patches.fixes/reiserfs-force-inode-evictions-before-umount-to-avoid-crash deleted file mode 100644 index f12f9bf..0000000 --- a/patches.fixes/reiserfs-force-inode-evictions-before-umount-to-avoid-crash +++ /dev/null @@ -1,60 +0,0 @@ -From: Jeff Mahoney -Subject: reiserfs: Force inode evictions before umount to avoid crash -References: bnc#610598 bnc#680073 bnc#684112 -Patch-mainline: Submitted to reiserfs-devel Apr 6 2011 - - This patch fixes a crash in reiserfs_delete_xattrs during umount. - - When shrink_dcache_for_umount clears the dcache from - generic_shutdown_super, delayed evictions are forced to disk. 
If an - evicted inode has extended attributes associated with it, it will - need to walk the xattr tree to locate and remove them. - - But since shrink_dcache_for_umount will BUG if it encounters active - dentries, the xattr tree must be released before it's called or it will - crash during every umount. - - This patch forces the evictions to occur before generic_shutdown_super - by calling shrink_dcache_sb first. The additional evictions caused - by the removal of each associated xattr file and dir will be automatically - handled as they're added to the LRU list. - -Signed-off-by: Jeff Mahoney -Acked-by: Jeff Mahoney ---- - fs/reiserfs/super.c | 24 ++++++++++++++---------- - 1 file changed, 14 insertions(+), 10 deletions(-) - ---- a/fs/reiserfs/super.c -+++ b/fs/reiserfs/super.c -@@ -453,16 +453,20 @@ int remove_save_link(struct inode *inode - static void reiserfs_kill_sb(struct super_block *s) - { - if (REISERFS_SB(s)) { -- if (REISERFS_SB(s)->xattr_root) { -- d_invalidate(REISERFS_SB(s)->xattr_root); -- dput(REISERFS_SB(s)->xattr_root); -- REISERFS_SB(s)->xattr_root = NULL; -- } -- if (REISERFS_SB(s)->priv_root) { -- d_invalidate(REISERFS_SB(s)->priv_root); -- dput(REISERFS_SB(s)->priv_root); -- REISERFS_SB(s)->priv_root = NULL; -- } -+ /* -+ * Force any pending inode evictions to occur now. Any -+ * inodes to be removed that have extended attributes -+ * associated with them need to clean them up before -+ * we can release the extended attribute root dentries. -+ * shrink_dcache_for_umount will BUG if we don't release -+ * those before it's called so ->put_super is too late. 
-+ */ -+ shrink_dcache_sb(s); -+ -+ dput(REISERFS_SB(s)->xattr_root); -+ REISERFS_SB(s)->xattr_root = NULL; -+ dput(REISERFS_SB(s)->priv_root); -+ REISERFS_SB(s)->priv_root = NULL; - } - - kill_block_super(s); diff --git a/patches.fixes/reiserfs-remove-2-tb-file-size-limit b/patches.fixes/reiserfs-remove-2-tb-file-size-limit deleted file mode 100644 index 5f5bc6f..0000000 --- a/patches.fixes/reiserfs-remove-2-tb-file-size-limit +++ /dev/null @@ -1,66 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH] reiserfs: Remove 2 TB file size limit -References: bnc#592100 -Patch-mainline: Submitted 30 Mar 2010 - - In its early life, reiserfs had an evolving s_max_bytes. It started out - at 4 GB, then was raised to MAX_LFS_FILESIZE, then dropped to 2 TiB when - it was observed that struct stat only had a 32-bit st_blocks field. - - Since then, both the kernel and glibc have evolved as well and now both - support 64-bit st_blocks. Applications that can't deal with these ranges - are assumed to be "legacy" or "broken." File systems now routinely - support file sizes much larger than can be represented by 2^32 * 512. - - But we never revisited that limitation. ReiserFS has always been able to - support larger file sizes (up to 16 TiB, in fact), but the s_max_bytes - limitation has prevented that. - - This patch adds a max_file_offset helper to set s_max_bytes to a more - appropriate value. I noticed that XFS adjusts the limit based on the - CPU but I'd prefer to err on the side of compatibility and place the - limit at the smaller of the 32-bit MAX_LFS_FILESIZE and the maximum - supported by the file system. At a 4k block size, this is conveniently - also the advertised maximum file size of reiserfs. 
- - This bug is tracked at: https://bugzilla.novell.com/show_bug.cgi?id=592100 - -Signed-off-by: Jeff Mahoney -Acked-by: Jeff Mahoney ---- - fs/reiserfs/super.c | 17 +++++++++++++---- - 1 file changed, 13 insertions(+), 4 deletions(-) - ---- a/fs/reiserfs/super.c -+++ b/fs/reiserfs/super.c -@@ -1322,6 +1322,18 @@ out_err: - return err; - } - -+static inline loff_t -+reiserfs_max_file_offset(struct super_block *sb) -+{ -+ /* Limited by stat_data->sd_blocks, 2^32-1 blocks */ -+ loff_t fs_max = ((u64)sb->s_blocksize << 32) - sb->s_blocksize; -+ -+ /* Limited by 32-bit MAX_LFS_FILESIZE */ -+ loff_t page_cache_max = (((u64)PAGE_CACHE_SIZE << 31)-1); -+ -+ return min(fs_max, page_cache_max); -+} -+ - static int read_super_block(struct super_block *s, int offset) - { - struct buffer_head *bh; -@@ -1411,10 +1423,7 @@ static int read_super_block(struct super - s->dq_op = &reiserfs_quota_operations; - #endif - -- /* new format is limited by the 32 bit wide i_blocks field, want to -- ** be one full block below that. -- */ -- s->s_maxbytes = (512LL << 32) - s->s_blocksize; -+ s->s_maxbytes = reiserfs_max_file_offset(s); - return 0; - } - diff --git a/patches.fixes/remount-no-shrink-dcache b/patches.fixes/remount-no-shrink-dcache deleted file mode 100644 index 87d3925..0000000 --- a/patches.fixes/remount-no-shrink-dcache +++ /dev/null @@ -1,89 +0,0 @@ -From: Olaf Kirch -Subject: Do not call shrink_dcache_sb when remounting procfs etc -Patch-mainline: Not yet -References: 165672 -Patch-mainline: not yet - -Avoid calls to shrink_dcache_sb when mounting a file system that -uses get_sb_single. shrink_dcache_sb is costly. On large ia64 -systems, this will keep the dcache lock for > 60 seconds at -a stretch. 
- -Signed-off-by: Olaf Kirch - - fs/super.c | 36 +++++++++++++++++++++++------------- - 1 file changed, 23 insertions(+), 13 deletions(-) - ---- a/fs/super.c -+++ b/fs/super.c -@@ -521,16 +521,10 @@ rescan: - return NULL; - } - --/** -- * do_remount_sb - asks filesystem to change mount options. -- * @sb: superblock in question -- * @flags: numeric part of options -- * @data: the rest of options -- * @force: whether or not to force the change -- * -- * Alters the mount options of a mounted file system. -- */ --int do_remount_sb(struct super_block *sb, int flags, void *data, int force) -+#define REMOUNT_FORCE 1 -+#define REMOUNT_SHRINK_DCACHE 2 -+ -+static int __do_remount_sb(struct super_block *sb, int flags, void *data, int rflags) - { - int retval; - int remount_ro; -@@ -545,7 +539,8 @@ int do_remount_sb(struct super_block *sb - - if (flags & MS_RDONLY) - acct_auto_close(sb); -- shrink_dcache_sb(sb); -+ if (rflags & REMOUNT_SHRINK_DCACHE) -+ shrink_dcache_sb(sb); - sync_filesystem(sb); - - remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY); -@@ -553,7 +548,7 @@ int do_remount_sb(struct super_block *sb - /* If we are remounting RDONLY and current sb is read/write, - make sure there are no rw files opened */ - if (remount_ro) { -- if (force) -+ if (rflags & REMOUNT_FORCE) - mark_files_ro(sb); - else if (!fs_may_remount_ro(sb)) - return -EBUSY; -@@ -579,6 +574,21 @@ int do_remount_sb(struct super_block *sb - return 0; - } - -+/** -+ * do_remount_sb - asks filesystem to change mount options. -+ * @sb: superblock in question -+ * @flags: numeric part of options -+ * @data: the rest of options -+ * @force: whether or not to force the change -+ * -+ * Alters the mount options of a mounted file system. -+ */ -+int do_remount_sb(struct super_block *sb, int flags, void *data, int force) -+{ -+ return __do_remount_sb(sb, flags, data, -+ REMOUNT_SHRINK_DCACHE|(force? 
REMOUNT_FORCE : 0)); -+} -+ - static void do_emergency_remount(struct work_struct *work) - { - struct super_block *sb, *p = NULL; -@@ -888,7 +898,7 @@ int get_sb_single(struct file_system_typ - } - s->s_flags |= MS_ACTIVE; - } else { -- do_remount_sb(s, flags, data, 0); -+ __do_remount_sb(s, flags, data, 0); - } - return dget(s->s_root); - } diff --git a/patches.fixes/scsi-add-tgps-setting b/patches.fixes/scsi-add-tgps-setting deleted file mode 100644 index 5e16c4a..0000000 --- a/patches.fixes/scsi-add-tgps-setting +++ /dev/null @@ -1,325 +0,0 @@ -Subject: Add TGPS setting to scsi devices -From: Hannes Reinecke -Patch-mainline: Not yet - -Some multipath-capable storage arrays are capable of running -in compatible mode, ie supporting both the original vendor-specific -failover mode and the SPC-3 compliant ALUA mode. -This patch stores the TGPS setting in the sdev so that we can directly -match onto it and select the correct device handler automatically. -And we can save code in the ALUA device handler. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/device_handler/scsi_dh.c | 9 ++- - drivers/scsi/device_handler/scsi_dh_alua.c | 70 +++------------------------- - drivers/scsi/device_handler/scsi_dh_emc.c | 8 +-- - drivers/scsi/device_handler/scsi_dh_hp_sw.c | 10 ++-- - drivers/scsi/device_handler/scsi_dh_rdac.c | 58 +++++++++++------------ - drivers/scsi/scsi_scan.c | 1 - drivers/scsi/scsi_sysfs.c | 2 - include/scsi/scsi_device.h | 4 + - 8 files changed, 60 insertions(+), 102 deletions(-) - ---- a/drivers/scsi/device_handler/scsi_dh.c -+++ b/drivers/scsi/device_handler/scsi_dh.c -@@ -29,6 +29,7 @@ struct scsi_dh_devinfo_list { - struct list_head node; - char vendor[9]; - char model[17]; -+ char tgps; - struct scsi_device_handler *handler; - }; - -@@ -61,7 +62,8 @@ scsi_dh_cache_lookup(struct scsi_device - spin_lock(&list_lock); - list_for_each_entry(tmp, &scsi_dh_dev_list, node) { - if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) && -- !strncmp(sdev->model, tmp->model, strlen(tmp->model))) { -+ !strncmp(sdev->model, tmp->model, strlen(tmp->model)) && -+ (!tmp->tgps || (sdev->tgps & tmp->tgps) != 0)) { - found_dh = tmp->handler; - break; - } -@@ -80,7 +82,9 @@ static int scsi_dh_handler_lookup(struct - if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor, - strlen(scsi_dh->devlist[i].vendor)) && - !strncmp(sdev->model, scsi_dh->devlist[i].model, -- strlen(scsi_dh->devlist[i].model))) { -+ strlen(scsi_dh->devlist[i].model)) && -+ (!scsi_dh->devlist[i].tgps || -+ (sdev->tgps & scsi_dh->devlist[i].tgps) != 0)) { - found = 1; - break; - } -@@ -129,6 +133,7 @@ device_handler_match(struct scsi_device_ - strncpy(tmp->model, sdev->model, 16); - tmp->vendor[8] = '\0'; - tmp->model[16] = '\0'; -+ tmp->tgps = sdev->tgps; - tmp->handler = found_dh; - spin_lock(&list_lock); - list_add(&tmp->node, &scsi_dh_dev_list); ---- a/drivers/scsi/device_handler/scsi_dh_alua.c -+++ b/drivers/scsi/device_handler/scsi_dh_alua.c -@@ -125,43 +125,6 @@ static struct request 
*get_alua_req(stru - } - - /* -- * submit_std_inquiry - Issue a standard INQUIRY command -- * @sdev: sdev the command should be send to -- */ --static int submit_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h) --{ -- struct request *rq; -- int err = SCSI_DH_RES_TEMP_UNAVAIL; -- -- rq = get_alua_req(sdev, h->inq, ALUA_INQUIRY_SIZE, READ); -- if (!rq) -- goto done; -- -- /* Prepare the command. */ -- rq->cmd[0] = INQUIRY; -- rq->cmd[1] = 0; -- rq->cmd[2] = 0; -- rq->cmd[4] = ALUA_INQUIRY_SIZE; -- rq->cmd_len = COMMAND_SIZE(INQUIRY); -- -- rq->sense = h->sense; -- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); -- rq->sense_len = h->senselen = 0; -- -- err = blk_execute_rq(rq->q, NULL, rq, 1); -- if (err == -EIO) { -- sdev_printk(KERN_INFO, sdev, -- "%s: std inquiry failed with %x\n", -- ALUA_DH_NAME, rq->errors); -- h->senselen = rq->sense_len; -- err = SCSI_DH_IO; -- } -- blk_put_request(rq); --done: -- return err; --} -- --/* - * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command - * @sdev: sdev the command should be sent to - */ -@@ -333,23 +296,19 @@ static unsigned submit_stpg(struct alua_ - } - - /* -- * alua_std_inquiry - Evaluate standard INQUIRY command -+ * alua_check_tgps - Evaluate TGPS setting - * @sdev: device to be checked - * -- * Just extract the TPGS setting to find out if ALUA -+ * Just examine the TPGS setting of the device to find out if ALUA - * is supported. 
- */ --static int alua_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h) -+static int alua_check_tgps(struct scsi_device *sdev, struct alua_dh_data *h) - { -- int err; -- -- err = submit_std_inquiry(sdev, h); -- -- if (err != SCSI_DH_OK) -- return err; -+ int err = SCSI_DH_OK; - - /* Check TPGS setting */ -- h->tpgs = (h->inq[5] >> 4) & 0x3; -+ h->tpgs = sdev->tgps; -+ - switch (h->tpgs) { - case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT: - sdev_printk(KERN_INFO, sdev, -@@ -611,7 +570,7 @@ static int alua_initialize(struct scsi_d - { - int err; - -- err = alua_std_inquiry(sdev, h); -+ err = alua_check_tgps(sdev, h); - if (err != SCSI_DH_OK) - goto out; - -@@ -685,19 +644,8 @@ static int alua_prep_fn(struct scsi_devi - } - - static const struct scsi_dh_devlist alua_dev_list[] = { -- {"HP", "MSA VOLUME" }, -- {"HP", "HSV101" }, -- {"HP", "HSV111" }, -- {"HP", "HSV200" }, -- {"HP", "HSV210" }, -- {"HP", "HSV300" }, -- {"IBM", "2107900" }, -- {"IBM", "2145" }, -- {"Pillar", "Axiom" }, -- {"Intel", "Multi-Flex"}, -- {"NETAPP", "LUN"}, -- {"AIX", "NVDISK"}, -- {NULL, NULL} -+ {"", "", 3 }, -+ {NULL, NULL, 0} - }; - - static int alua_bus_attach(struct scsi_device *sdev); ---- a/drivers/scsi/device_handler/scsi_dh_emc.c -+++ b/drivers/scsi/device_handler/scsi_dh_emc.c -@@ -622,10 +622,10 @@ done: - } - - static const struct scsi_dh_devlist clariion_dev_list[] = { -- {"DGC", "RAID"}, -- {"DGC", "DISK"}, -- {"DGC", "VRAID"}, -- {NULL, NULL}, -+ {"DGC", "RAID", 0}, -+ {"DGC", "DISK", 0}, -+ {"DGC", "VRAID", 0}, -+ {NULL, NULL, 0}, - }; - - static int clariion_bus_attach(struct scsi_device *sdev); ---- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c -+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c -@@ -311,11 +311,11 @@ static int hp_sw_activate(struct scsi_de - } - - static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { -- {"COMPAQ", "MSA1000 VOLUME"}, -- {"COMPAQ", "HSV110"}, -- {"HP", "HSV100"}, -- {"DEC", "HSG80"}, -- {NULL, NULL}, -+ {"COMPAQ", "MSA1000 
VOLUME", 0}, -+ {"COMPAQ", "HSV110", 0}, -+ {"HP", "HSV100", 0}, -+ {"DEC", "HSG80", 0}, -+ {NULL, NULL, 0}, - }; - - static int hp_sw_bus_attach(struct scsi_device *sdev); ---- a/drivers/scsi/device_handler/scsi_dh_rdac.c -+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c -@@ -745,37 +745,37 @@ static int rdac_check_sense(struct scsi_ - } - - static const struct scsi_dh_devlist rdac_dev_list[] = { -- {"IBM", "1722"}, -- {"IBM", "1724"}, -- {"IBM", "1726"}, -- {"IBM", "1742"}, -- {"IBM", "1745"}, -- {"IBM", "1746"}, -- {"IBM", "1814"}, -- {"IBM", "1815"}, -- {"IBM", "1818"}, -- {"IBM", "3526"}, -- {"SGI", "TP9400"}, -- {"SGI", "TP9500"}, -- {"SGI", "IS"}, -- {"STK", "OPENstorage D280"}, -- {"SUN", "CSM200_R"}, -- {"SUN", "LCSM100_I"}, -- {"SUN", "LCSM100_S"}, -- {"SUN", "LCSM100_E"}, -- {"SUN", "LCSM100_F"}, -- {"DELL", "MD3000"}, -- {"DELL", "MD3000i"}, -- {"DELL", "MD32xx"}, -- {"DELL", "MD32xxi"}, -- {"DELL", "MD36xxi"}, -- {"LSI", "INF-01-00"}, -- {"ENGENIO", "INF-01-00"}, -- {"STK", "FLEXLINE 380"}, -- {"SUN", "CSM100_R_FC"}, -- {"SUN", "STK6580_6780"}, -- {"SUN", "SUN_6180"}, -- {NULL, NULL}, -+ {"IBM", "1722", 0}, -+ {"IBM", "1724", 0}, -+ {"IBM", "1726", 0}, -+ {"IBM", "1742", 0}, -+ {"IBM", "1745", 0}, -+ {"IBM", "1746", 0}, -+ {"IBM", "1814", 0}, -+ {"IBM", "1815", 0}, -+ {"IBM", "1818", 0}, -+ {"IBM", "3526", 0}, -+ {"SGI", "TP9400", 0}, -+ {"SGI", "TP9500", 0}, -+ {"SGI", "IS", 0}, -+ {"STK", "OPENstorage D280", 0}, -+ {"SUN", "CSM200_R", 0}, -+ {"SUN", "LCSM100_I", 0}, -+ {"SUN", "LCSM100_S", 0}, -+ {"SUN", "LCSM100_E", 0}, -+ {"SUN", "LCSM100_F", 0}, -+ {"DELL", "MD3000", 0}, -+ {"DELL", "MD3000i", 0}, -+ {"DELL", "MD32xx", 0}, -+ {"DELL", "MD32xxi", 0}, -+ {"DELL", "MD36xxi", 0}, -+ {"LSI", "INF-01-00", 0}, -+ {"ENGENIO", "INF-01-00", 0}, -+ {"STK", "FLEXLINE 380", 0}, -+ {"SUN", "CSM100_R_FC", 0}, -+ {"SUN", "STK6580_6780", 0}, -+ {"SUN", "SUN_6180", 0}, -+ {NULL, NULL, 0}, - }; - - static int rdac_bus_attach(struct scsi_device *sdev); ---- 
a/drivers/scsi/scsi_scan.c -+++ b/drivers/scsi/scsi_scan.c -@@ -838,6 +838,7 @@ static int scsi_add_lun(struct scsi_devi - sdev->inq_periph_qual = (inq_result[0] >> 5) & 7; - sdev->lockable = sdev->removable; - sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2); -+ sdev->tgps = (inq_result[5] >> 4) & 3; - - if (sdev->scsi_level >= SCSI_3 || - (sdev->inquiry_len > 56 && inq_result[56] & 0x04)) ---- a/drivers/scsi/scsi_sysfs.c -+++ b/drivers/scsi/scsi_sysfs.c -@@ -501,6 +501,7 @@ sdev_rd_attr (scsi_level, "%d\n"); - sdev_rd_attr (vendor, "%.8s\n"); - sdev_rd_attr (model, "%.16s\n"); - sdev_rd_attr (rev, "%.4s\n"); -+sdev_rd_attr (tgps, "%d\n"); - - /* - * TODO: can we make these symlinks to the block layer ones? -@@ -686,6 +687,7 @@ static struct attribute *scsi_sdev_attrs - &dev_attr_vendor.attr, - &dev_attr_model.attr, - &dev_attr_rev.attr, -+ &dev_attr_tgps.attr, - &dev_attr_rescan.attr, - &dev_attr_delete.attr, - &dev_attr_state.attr, ---- a/include/scsi/scsi_device.h -+++ b/include/scsi/scsi_device.h -@@ -99,7 +99,8 @@ struct scsi_device { - void *hostdata; /* available to low-level driver */ - char type; - char scsi_level; -- char inq_periph_qual; /* PQ from INQUIRY data */ -+ char inq_periph_qual; /* PQ from INQUIRY data */ -+ char tgps; /* Target port group support */ - unsigned char inquiry_len; /* valid bytes in 'inquiry' */ - unsigned char * inquiry; /* INQUIRY response data */ - const char * vendor; /* [back_compat] point into 'inquiry' ... 
*/ -@@ -176,6 +177,7 @@ struct scsi_device { - struct scsi_dh_devlist { - char *vendor; - char *model; -+ char tgps; - }; - - typedef void (*activate_complete)(void *, int); diff --git a/patches.fixes/scsi-aic94xx-world-writable-sysfs-update_bios-file b/patches.fixes/scsi-aic94xx-world-writable-sysfs-update_bios-file deleted file mode 100644 index a9033f3..0000000 --- a/patches.fixes/scsi-aic94xx-world-writable-sysfs-update_bios-file +++ /dev/null @@ -1,26 +0,0 @@ -From: Vasiliy Kulikov -Subject: scsi: aic94xx: world-writable sysfs update_bios file -Introduced-by: v2.6.25-rc1 -Patch-mainline: Submitted 4 Feb 2011 - -Don't allow everybody to load firmware. - -Signed-off-by: Vasiliy Kulikov -Acked-by: Jeff Mahoney ---- - Compile tested only. - drivers/scsi/aic94xx/aic94xx_init.c | 2 +- - 1 files changed, 1 insertions(+), 1 deletions(-) -diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c -index 3b7e83d..d5ff142 100644 ---- a/drivers/scsi/aic94xx/aic94xx_init.c -+++ b/drivers/scsi/aic94xx/aic94xx_init.c -@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev, - flash_error_table[i].reason); - } - --static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO, -+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR, - asd_show_update_bios, asd_store_update_bios); - - static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) diff --git a/patches.fixes/scsi-check-host-lookup-failure b/patches.fixes/scsi-check-host-lookup-failure deleted file mode 100644 index f4da1cd..0000000 --- a/patches.fixes/scsi-check-host-lookup-failure +++ /dev/null @@ -1,29 +0,0 @@ -From: Laurie Barry -Subject: Correct scsi_host_lookup return value -References: bnc#456532 -Patch-mainline: not yet - -In the scsi_generic_msg_handler routine it make a call to scsi_host_lookup and -checks the return value for NULL, but the scsi_host_lookup routine can return -an error when it fails instead of NULL. 
So when the scsi_host_lookup fails the -scsi_generic_msg_handler crashes the kernel with "BUG: unable to handle kernel -NULL pointer dereference at 00000000000000aa" - -Signed-off-by: Laurie Barry -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/scsi_netlink.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/scsi/scsi_netlink.c -+++ b/drivers/scsi/scsi_netlink.c -@@ -259,7 +259,7 @@ scsi_generic_msg_handler(struct sk_buff - - /* if successful, scsi_host_lookup takes a shost reference */ - shost = scsi_host_lookup(msg->host_no); -- if (!shost) { -+ if (IS_ERR(shost)) { - err = -ENODEV; - goto driver_exit; - } diff --git a/patches.fixes/scsi-dh-alua-retry-UA b/patches.fixes/scsi-dh-alua-retry-UA deleted file mode 100644 index 4d18eb4..0000000 --- a/patches.fixes/scsi-dh-alua-retry-UA +++ /dev/null @@ -1,53 +0,0 @@ -From: Hannes Reinecke -Subject: Retry ALUA device handler initialization on Unit Attention -Patch-mainline: not yet - -Whenever we receive a UNIT ATTENTION sense code we should just retry -the command. No point in checking the various sense codes here. - -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/device_handler/scsi_dh_alua.c | 31 +++-------------------------- - 1 file changed, 4 insertions(+), 27 deletions(-) - ---- a/drivers/scsi/device_handler/scsi_dh_alua.c -+++ b/drivers/scsi/device_handler/scsi_dh_alua.c -@@ -496,33 +496,10 @@ static int alua_check_sense(struct scsi_ - return SUCCESS; - break; - case UNIT_ATTENTION: -- if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) -- /* -- * Power On, Reset, or Bus Device Reset, just retry. 
-- */ -- return ADD_TO_MLQUEUE; -- if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) { -- /* -- * ALUA state changed -- */ -- return ADD_TO_MLQUEUE; -- } -- if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) { -- /* -- * Implicit ALUA state transition failed -- */ -- return ADD_TO_MLQUEUE; -- } -- if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) { -- /* -- * REPORTED_LUNS_DATA_HAS_CHANGED is reported -- * when switching controllers on targets like -- * Intel Multi-Flex. We can just retry. -- */ -- return ADD_TO_MLQUEUE; -- } -- -- break; -+ /* -+ * Just retry for UNIT_ATTENTION -+ */ -+ return ADD_TO_MLQUEUE; - } - - return SCSI_RETURN_NOT_HANDLED; diff --git a/patches.fixes/scsi-dh-alua-send-stpg b/patches.fixes/scsi-dh-alua-send-stpg deleted file mode 100644 index 6b06d08..0000000 --- a/patches.fixes/scsi-dh-alua-send-stpg +++ /dev/null @@ -1,33 +0,0 @@ -From: Hannes Reinecke -Subject: Always send STPG for explicit tgps mode -Patch-mainline: not yet - -When we are in explicit tgps mode we should always send an STPG -command to enable the active/optimized mode. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/device_handler/scsi_dh_alua.c | 11 +++++------ - 1 file changed, 5 insertions(+), 6 deletions(-) - ---- a/drivers/scsi/device_handler/scsi_dh_alua.c -+++ b/drivers/scsi/device_handler/scsi_dh_alua.c -@@ -602,13 +602,11 @@ static int alua_activate(struct scsi_dev - struct alua_dh_data *h = get_alua_data(sdev); - int err = SCSI_DH_OK; - -- if (h->group_id != -1) { -- err = alua_rtpg(sdev, h); -- if (err != SCSI_DH_OK) -- goto out; -- } -+ err = alua_rtpg(sdev, h); -+ if (err != SCSI_DH_OK) -+ goto out; - -- if (h->tpgs & TPGS_MODE_EXPLICIT && -+ if ((h->tpgs & TPGS_MODE_EXPLICIT) && - h->state != TPGS_STATE_OPTIMIZED && - h->state != TPGS_STATE_LBA_DEPENDENT) { - h->callback_fn = fn; diff --git a/patches.fixes/scsi-dh-queuedata-accessors b/patches.fixes/scsi-dh-queuedata-accessors deleted file mode 100644 index 86769a1..0000000 --- a/patches.fixes/scsi-dh-queuedata-accessors +++ /dev/null @@ -1,98 +0,0 @@ -From: Hannes Reinecke -Subject: Kernel bug triggered in multipath -References: bnc#486001 -Patch-mainline: not yet - -Starting multipath on a cciss device will cause a kernel -warning to be triggered. Problem is that we're using the -->queuedata field of the request_queue to derefence the -scsi device; however, for other (non-SCSI) devices this -points to a totally different structure. -So we should rather be using accessors here which make -sure we're only returning valid SCSI device structures. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/device_handler/scsi_dh.c | 10 +++++----- - drivers/scsi/scsi_lib.c | 11 +++++++++++ - include/scsi/scsi_device.h | 1 + - 3 files changed, 17 insertions(+), 5 deletions(-) - ---- a/drivers/scsi/device_handler/scsi_dh.c -+++ b/drivers/scsi/device_handler/scsi_dh.c -@@ -439,7 +439,7 @@ int scsi_dh_activate(struct request_queu - struct scsi_device_handler *scsi_dh = NULL; - - spin_lock_irqsave(q->queue_lock, flags); -- sdev = q->queuedata; -+ sdev = scsi_device_from_queue(q); - if (sdev && sdev->scsi_dh_data) - scsi_dh = sdev->scsi_dh_data->scsi_dh; - if (!scsi_dh || !get_device(&sdev->sdev_gendev) || -@@ -501,7 +501,7 @@ int scsi_dh_handler_exist(const char *na - EXPORT_SYMBOL_GPL(scsi_dh_handler_exist); - - /* -- * scsi_dh_handler_attach - Attach device handler -+ * scsi_dh_attach - Attach device handler - * @sdev - sdev the handler should be attached to - * @name - name of the handler to attach - */ -@@ -517,7 +517,7 @@ int scsi_dh_attach(struct request_queue - return -EINVAL; - - spin_lock_irqsave(q->queue_lock, flags); -- sdev = q->queuedata; -+ sdev = scsi_device_from_queue(q); - if (!sdev || !get_device(&sdev->sdev_gendev)) - err = -ENODEV; - spin_unlock_irqrestore(q->queue_lock, flags); -@@ -531,7 +531,7 @@ int scsi_dh_attach(struct request_queue - EXPORT_SYMBOL_GPL(scsi_dh_attach); - - /* -- * scsi_dh_handler_detach - Detach device handler -+ * scsi_dh_detach - Detach device handler - * @sdev - sdev the handler should be detached from - * - * This function will detach the device handler only -@@ -545,7 +545,7 @@ void scsi_dh_detach(struct request_queue - struct scsi_device_handler *scsi_dh = NULL; - - spin_lock_irqsave(q->queue_lock, flags); -- sdev = q->queuedata; -+ sdev = scsi_device_from_queue(q); - if (!sdev || !get_device(&sdev->sdev_gendev)) - sdev = NULL; - spin_unlock_irqrestore(q->queue_lock, flags); ---- a/drivers/scsi/scsi_lib.c -+++ b/drivers/scsi/scsi_lib.c -@@ -1587,6 +1587,17 @@ static 
void scsi_request_fn(struct reque - spin_lock_irq(q->queue_lock); - } - -+struct scsi_device *scsi_device_from_queue(struct request_queue *q) -+{ -+ struct scsi_device *sdev = NULL; -+ -+ if (q->request_fn == scsi_request_fn) -+ sdev = q->queuedata; -+ -+ return sdev; -+} -+EXPORT_SYMBOL_GPL(scsi_device_from_queue); -+ - u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) - { - struct device *host_dev; ---- a/include/scsi/scsi_device.h -+++ b/include/scsi/scsi_device.h -@@ -297,6 +297,7 @@ extern void starget_for_each_device(stru - extern void __starget_for_each_device(struct scsi_target *, void *, - void (*fn)(struct scsi_device *, - void *)); -+extern struct scsi_device *scsi_device_from_queue(struct request_queue *); - - /* only exposed to implement shost_for_each_device */ - extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *, diff --git a/patches.fixes/scsi-dh-rdac-add-stk b/patches.fixes/scsi-dh-rdac-add-stk deleted file mode 100644 index 59b7685..0000000 --- a/patches.fixes/scsi-dh-rdac-add-stk +++ /dev/null @@ -1,25 +0,0 @@ -From: Goldwyn Rodrigues -Subject: STK arrays missing from rdac devicehandler -References: bnc#503855 -Patch-mainline: not yet - -Some STK arrays are missing from the RDAC device handler, -causing multipath to malfunction. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/device_handler/scsi_dh_rdac.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/drivers/scsi/device_handler/scsi_dh_rdac.c -+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c -@@ -759,6 +759,8 @@ static const struct scsi_dh_devlist rdac - {"SGI", "TP9500", 0}, - {"SGI", "IS", 0}, - {"STK", "OPENstorage D280", 0}, -+ {"STK", "FLEXLINE 380", 0}, -+ {"SUN", "STK6580_6780", 0}, - {"SUN", "CSM200_R", 0}, - {"SUN", "LCSM100_I", 0}, - {"SUN", "LCSM100_S", 0}, diff --git a/patches.fixes/scsi-ibmvscsi-module_alias.patch b/patches.fixes/scsi-ibmvscsi-module_alias.patch deleted file mode 100644 index 741c391..0000000 --- a/patches.fixes/scsi-ibmvscsi-module_alias.patch +++ /dev/null @@ -1,39 +0,0 @@ -Subject: map scsi proc_name to module name -From: olh@suse.de -References: 459933 - LTC50724 -Patch-mainline: not yet - ---- - drivers/scsi/ibmvscsi/ibmvscsi.c | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - ---- a/drivers/scsi/ibmvscsi/ibmvscsi.c -+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c -@@ -108,6 +108,9 @@ static struct scsi_transport_template *i - - static struct ibmvscsi_ops *ibmvscsi_ops; - -+#define IBMVSCSI_PROC_NAME "ibmvscsi" -+/* The driver is named ibmvscsic, map ibmvscsi to module name */ -+MODULE_ALIAS(IBMVSCSI_PROC_NAME); - MODULE_DESCRIPTION("IBM Virtual SCSI"); - MODULE_AUTHOR("Dave Boutcher"); - MODULE_LICENSE("GPL"); -@@ -1802,7 +1805,7 @@ static struct device_attribute *ibmvscsi - static struct scsi_host_template driver_template = { - .module = THIS_MODULE, - .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION, -- .proc_name = "ibmvscsi", -+ .proc_name = IBMVSCSI_PROC_NAME, - .queuecommand = ibmvscsi_queuecommand, - .eh_abort_handler = ibmvscsi_eh_abort_handler, - .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, -@@ -2069,7 +2072,7 @@ static struct vio_driver ibmvscsi_driver - .remove = ibmvscsi_remove, - .get_desired_dma = ibmvscsi_get_desired_dma, - .driver = { -- .name = 
"ibmvscsi", -+ .name = IBMVSCSI_PROC_NAME, - .owner = THIS_MODULE, - .pm = &ibmvscsi_pm_ops, - } diff --git a/patches.fixes/scsi-ibmvscsi-show-config.patch b/patches.fixes/scsi-ibmvscsi-show-config.patch deleted file mode 100644 index ecf8574..0000000 --- a/patches.fixes/scsi-ibmvscsi-show-config.patch +++ /dev/null @@ -1,80 +0,0 @@ -Subject: /sys/class/scsi_host/hostX/config doesn't show any information -From: Linda Xie -References: 439970 - LTC49349 -Patch-mainline: not yet - -This patch changes the size of the buffer used for transfering config -data to 4K. It was tested against 2.6.19-rc2 tree. - -Signed-off-by: lxie@us.ibm.com -Signed-off-by: Olaf Hering - ---- - drivers/scsi/ibmvscsi/ibmvscsi.c | 14 ++++++++------ - 1 file changed, 8 insertions(+), 6 deletions(-) - ---- a/drivers/scsi/ibmvscsi/ibmvscsi.c -+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c -@@ -99,6 +99,8 @@ static int max_requests = IBMVSCSI_MAX_R - static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; - static int fast_fail = 1; - static int client_reserve = 1; -+/*host data buffer size*/ -+#define buff_size 4096 - - static struct scsi_transport_template *ibmvscsi_transport_template; - -@@ -1667,7 +1669,7 @@ static ssize_t show_host_srp_version(str - struct ibmvscsi_host_data *hostdata = shost_priv(shost); - int len; - -- len = snprintf(buf, PAGE_SIZE, "%s\n", -+ len = snprintf(buf, buff_size, "%s\n", - hostdata->madapter_info.srp_version); - return len; - } -@@ -1688,7 +1690,7 @@ static ssize_t show_host_partition_name( - struct ibmvscsi_host_data *hostdata = shost_priv(shost); - int len; - -- len = snprintf(buf, PAGE_SIZE, "%s\n", -+ len = snprintf(buf, buff_size, "%s\n", - hostdata->madapter_info.partition_name); - return len; - } -@@ -1709,7 +1711,7 @@ static ssize_t show_host_partition_numbe - struct ibmvscsi_host_data *hostdata = shost_priv(shost); - int len; - -- len = snprintf(buf, PAGE_SIZE, "%d\n", -+ len = snprintf(buf, buff_size, "%d\n", - hostdata->madapter_info.partition_number); - 
return len; - } -@@ -1729,7 +1731,7 @@ static ssize_t show_host_mad_version(str - struct ibmvscsi_host_data *hostdata = shost_priv(shost); - int len; - -- len = snprintf(buf, PAGE_SIZE, "%d\n", -+ len = snprintf(buf, buff_size, "%d\n", - hostdata->madapter_info.mad_version); - return len; - } -@@ -1749,7 +1751,7 @@ static ssize_t show_host_os_type(struct - struct ibmvscsi_host_data *hostdata = shost_priv(shost); - int len; - -- len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type); -+ len = snprintf(buf, buff_size, "%d\n", hostdata->madapter_info.os_type); - return len; - } - -@@ -1768,7 +1770,7 @@ static ssize_t show_host_config(struct d - struct ibmvscsi_host_data *hostdata = shost_priv(shost); - - /* returns null-terminated host config data */ -- if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0) -+ if (ibmvscsi_do_host_config(hostdata, buf, buff_size) == 0) - return strlen(buf); - else - return 0; diff --git a/patches.fixes/scsi-inquiry-too-short-ratelimit b/patches.fixes/scsi-inquiry-too-short-ratelimit deleted file mode 100644 index 4ca497a..0000000 --- a/patches.fixes/scsi-inquiry-too-short-ratelimit +++ /dev/null @@ -1,26 +0,0 @@ -From: Hannes Reinecke -Subject: INQUIRY result too short (5) message flood -References: bnc#432535 -Patch-mainline: not yet - -During installation with vioserver/vioclient lots of -scsi scan: INQUIRY result too short (5), using 36 -messages are printed. These really should be ratelimited. - -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/scsi_scan.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/scsi/scsi_scan.c -+++ b/drivers/scsi/scsi_scan.c -@@ -696,7 +696,7 @@ static int scsi_probe_lun(struct scsi_de - * and displaying garbage for the Vendor, Product, or Revision - * strings. 
- */ -- if (sdev->inquiry_len < 36) { -+ if (sdev->inquiry_len < 36 && printk_ratelimit()) { - printk(KERN_INFO "scsi scan: INQUIRY result too short (%d)," - " using 36\n", sdev->inquiry_len); - sdev->inquiry_len = 36; diff --git a/patches.fixes/scsi-retry-alua-transition-in-progress b/patches.fixes/scsi-retry-alua-transition-in-progress deleted file mode 100644 index f2c7a2f..0000000 --- a/patches.fixes/scsi-retry-alua-transition-in-progress +++ /dev/null @@ -1,33 +0,0 @@ -From: Rajashekhar M A -Subject: I/O errors for ALUA state transitions -References: bnc#491289 -Patch-mainline: not yet - -When a SLES11 host is configured with a few LUNs and IO is running, -injecting FC faults repeatedly leads to path recovery problems. -The LUNs have 4 paths each and 3 of them come back active after -say an FC fault which makes two of the paths go down, instead of -all 4. This happens after several iterations of continuous FC faults. - -Reason here is that we're returning an I/O error whenever we're -encountering sense code 06/04/0a (LOGICAL UNIT NOT ACCESSIBLE, -ASYMMETRIC ACCESS STATE TRANSITION) instead of retrying. - -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/scsi_error.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/scsi/scsi_error.c -+++ b/drivers/scsi/scsi_error.c -@@ -376,7 +376,8 @@ static int scsi_check_sense(struct scsi_ - * if the device is in the process of becoming ready, we - * should retry. 
- */ -- if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01)) -+ if ((sshdr.asc == 0x04) && -+ (sshdr.ascq == 0x01 || sshdr.ascq == 0x0a)) - return NEEDS_RETRY; - /* - * if the device is not started, we need to wake diff --git a/patches.fixes/scsi-scan-blist-update b/patches.fixes/scsi-scan-blist-update deleted file mode 100644 index ef81c38..0000000 --- a/patches.fixes/scsi-scan-blist-update +++ /dev/null @@ -1,26 +0,0 @@ -From: Kurt Garloff -Subject: Add BLIST_REPORTLUN2 to EMC SYMMETRIX -Patch-mainline: not yet -References: bnc#185164, bnc#191648, bnc#505578 - -All EMC SYMMETRIX support REPORT_LUNS, even if configured to report -SCSI-2 for whatever reason. - -Signed-off-by: Kurt Garloff -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/scsi_devinfo.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/scsi/scsi_devinfo.c -+++ b/drivers/scsi/scsi_devinfo.c -@@ -160,7 +160,7 @@ static struct { - {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */ - {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */ - {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, -- {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN}, -+ {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2}, - {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, - {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN}, - {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN}, diff --git a/patches.fixes/sd_liberal_28_sense_invalid.diff b/patches.fixes/sd_liberal_28_sense_invalid.diff deleted file mode 100644 index 17dfba4..0000000 --- a/patches.fixes/sd_liberal_28_sense_invalid.diff +++ /dev/null @@ -1,28 +0,0 @@ -From: Oliver Neukum -Subject: fix medium presence misdetection in usb storage device -References: bnc#362850 -Patch-mainline: not yet - -From reading the SCSI spec it seems that having the valid bit 0 (0x70 -checked in scsi_sense_valid) should does not invalidate the ASC or ASQ. 
-[See page 37 of spc4r02.pdf]. It should only invalidate the INFORMATION -field. Therefore remove the sense_valid check from the USB quirk. - -Signed-off-by: Brandon Philips - ---- - drivers/scsi/sd.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/scsi/sd.c -+++ b/drivers/scsi/sd.c -@@ -1390,8 +1390,7 @@ sd_spinup_disk(struct scsi_disk *sdkp) - * Yes, this sense key/ASC combination shouldn't - * occur here. It's characteristic of these devices. - */ -- } else if (sense_valid && -- sshdr.sense_key == UNIT_ATTENTION && -+ } else if (sshdr.sense_key == UNIT_ATTENTION && - sshdr.asc == 0x28) { - if (!spintime) { - spintime_expire = jiffies + 5 * HZ; diff --git a/patches.fixes/seccomp-disable-tsc-option b/patches.fixes/seccomp-disable-tsc-option deleted file mode 100644 index 3e4e6e1..0000000 --- a/patches.fixes/seccomp-disable-tsc-option +++ /dev/null @@ -1,56 +0,0 @@ -From: Andrea Arcangeli -Subject: [PATCH seccomp: make tsc disabling optional -Patch-mainline: unknown -References: 191123 - -Make the TSC disable purely paranoid feature optional, so by default seccomp -returns absolutely zerocost. - -Ported from 2.6.19 to 2.6.24-rc7 by Jeff Mahoney. -Addition of x86-64 by Jan Beulich. - -Signed-off-by: Andrea Arcangeli -Acked-by: Jeff Mahoney ---- - arch/x86/Kconfig | 11 +++++++++++ - arch/x86/kernel/process.c | 2 ++ - 2 files changed, 13 insertions(+) - ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -1482,6 +1482,17 @@ config SECCOMP - - If unsure, say Y. Only embedded should say N here. - -+config SECCOMP_DISABLE_TSC -+ bool "Disable the TSC for seccomp tasks" -+ depends on SECCOMP -+ help -+ This feature mathematically prevents covert channels -+ for tasks running under SECCOMP. This can generate -+ a minuscule overhead in the scheduler. -+ -+ If you care most about performance say N. Say Y only if you're -+ paranoid about covert channels. 
-+ - config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - ---help--- ---- a/arch/x86/kernel/process.c -+++ b/arch/x86/kernel/process.c -@@ -135,6 +135,7 @@ static void hard_disable_TSC(void) - - void disable_TSC(void) - { -+#ifdef CONFIG_SECCOMP_DISABLE_TSC - preempt_disable(); - if (!test_and_set_thread_flag(TIF_NOTSC)) - /* -@@ -143,6 +144,7 @@ void disable_TSC(void) - */ - hard_disable_TSC(); - preempt_enable(); -+#endif - } - - static void hard_enable_TSC(void) diff --git a/patches.fixes/tg3-fix-default-wol.patch b/patches.fixes/tg3-fix-default-wol.patch deleted file mode 100644 index b706c0c..0000000 --- a/patches.fixes/tg3-fix-default-wol.patch +++ /dev/null @@ -1,43 +0,0 @@ -From: Rafael J. Wysocki -Subject: net (tg3): Fix failure to enable WoL by default when possible -References: bnc#447371 -Patch-mainline: not yet - -tg3 is supposed to enable WoL by default on adapters which support -that, but it fails to do so unless the adapter's -/sys/devices/.../power/wakeup file contains 'enabled' during the -initialization of the adapter. Fix that by making tg3 update the -device's 'should_wakeup' bit automatically whenever WoL should be -enabled by default. - -Signed-off-by: Rafael J. 
Wysocki ---- - drivers/net/tg3.c | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - ---- a/drivers/net/tg3.c -+++ b/drivers/net/tg3.c -@@ -12048,8 +12048,10 @@ static void __devinit tg3_get_eeprom_hw_ - if (val & VCPU_CFGSHDW_ASPM_DBNC) - tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; - if ((val & VCPU_CFGSHDW_WOL_ENABLE) && -- (val & VCPU_CFGSHDW_WOL_MAGPKT)) -+ (val & VCPU_CFGSHDW_WOL_MAGPKT)) { - tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; -+ device_set_wakeup_enable(&tp->pdev->dev, true); -+ } - goto done; - } - -@@ -12182,8 +12184,10 @@ static void __devinit tg3_get_eeprom_hw_ - tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; - - if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && -- (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) -+ (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { - tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; -+ device_set_wakeup_enable(&tp->pdev->dev, true); -+ } - - if (cfg2 & (1 << 17)) - tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; diff --git a/patches.fixes/tulip-quad-NIC-ifdown b/patches.fixes/tulip-quad-NIC-ifdown deleted file mode 100644 index a310bee..0000000 --- a/patches.fixes/tulip-quad-NIC-ifdown +++ /dev/null @@ -1,27 +0,0 @@ -Subject: MCA when shutting down tulip quad-NIC -From: andrew.patterson@hp.com -References: SUSE39204 -Patch-mainline: not yet - -Shutting down the network causes an MCA because of an IO TLB error when -a DEC quad 10/100 card is in any slot. This problem was originally seen -on an HP rx4640. 
- -Acked-by: Olaf Kirch - - drivers/net/tulip/tulip_core.c | 4 ++++ - 1 file changed, 4 insertions(+) - ---- a/drivers/net/tulip/tulip_core.c -+++ b/drivers/net/tulip/tulip_core.c -@@ -1943,6 +1943,10 @@ static void __devexit tulip_remove_one ( - return; - - tp = netdev_priv(dev); -+ -+ /* shoot NIC in the head before deallocating descriptors */ -+ pci_disable_device(tp->pdev); -+ - unregister_netdev(dev); - pci_free_consistent (pdev, - sizeof (struct tulip_rx_desc) * RX_RING_SIZE + diff --git a/patches.fixes/ubifs-restrict-world-writable-debugfs-files b/patches.fixes/ubifs-restrict-world-writable-debugfs-files deleted file mode 100644 index e9c5ba1..0000000 --- a/patches.fixes/ubifs-restrict-world-writable-debugfs-files +++ /dev/null @@ -1,45 +0,0 @@ -From: Vasiliy Kulikov -Date: Fri, 4 Feb 2011 15:24:19 +0300 -Subject: UBIFS: restrict world-writable debugfs files -Patch-mainline: v2.6.39-rc1 -Git-commit: 8c559d30b4e59cf6994215ada1fe744928f494bf -Introduced-by: v2.6.29-rc1 -References: bnc#673934 - -Don't allow everybody to dump sensitive information about filesystems. 
- -Signed-off-by: Vasiliy Kulikov -Signed-off-by: Artem Bityutskiy -Acked-by: Jeff Mahoney ---- - fs/ubifs/debug.c | 6 +++--- - 1 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c -index 0bee4db..bcb1acb 100644 ---- a/fs/ubifs/debug.c -+++ b/fs/ubifs/debug.c -@@ -2813,19 +2813,19 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) - } - - fname = "dump_lprops"; -- dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); -+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); - if (IS_ERR(dent)) - goto out_remove; - d->dfs_dump_lprops = dent; - - fname = "dump_budg"; -- dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); -+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); - if (IS_ERR(dent)) - goto out_remove; - d->dfs_dump_budg = dent; - - fname = "dump_tnc"; -- dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); -+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); - if (IS_ERR(dent)) - goto out_remove; - d->dfs_dump_tnc = dent; - diff --git a/patches.fixes/xen-blkfront-connect-overflow.patch b/patches.fixes/xen-blkfront-connect-overflow.patch deleted file mode 100644 index 4ba1b2c..0000000 --- a/patches.fixes/xen-blkfront-connect-overflow.patch +++ /dev/null @@ -1,14 +0,0 @@ -barrier variable is int, not long. This overflow caused another variable -override: err (in PV code) and binfo (in xenlinux code). 
- ---- linux-2.6.38.3/drivers/xen/blkfront/blkfront.c.orig 2011-05-02 01:56:29.572723580 +0200 -+++ linux-2.6.38.3/drivers/xen/blkfront/blkfront.c 2011-05-02 12:44:20.512128661 +0200 -@@ -364,7 +364,7 @@ - } - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, -- "feature-barrier", "%lu", &barrier, -+ "feature-barrier", "%u", &barrier, - NULL); - /* - * If there's no "feature-barrier" defined, then it means diff --git a/patches.fixes/xen-disable-cdrom-dbgprints.diff b/patches.fixes/xen-disable-cdrom-dbgprints.diff deleted file mode 100644 index 14c87e4..0000000 --- a/patches.fixes/xen-disable-cdrom-dbgprints.diff +++ /dev/null @@ -1,18 +0,0 @@ ---- linux-2.6.38.3/drivers/xen/blkback/cdrom.c.orig 2011-07-24 18:44:14.795354823 +0200 -+++ linux-2.6.38.3/drivers/xen/blkback/cdrom.c 2011-07-24 18:45:46.278542130 +0200 -@@ -35,9 +35,15 @@ - #include "common.h" - - #undef DPRINTK -+ -+#undef DEBUG -+#ifdef DEBUG - #define DPRINTK(_f, _a...) \ - printk(KERN_DEBUG "(%s() file=%s, line=%d) " _f "\n", \ - __func__, __FILE__ , __LINE__ , ##_a ) -+#else -+#define DPRINTK(_f, _a...) -+#endif - - - #define MEDIA_PRESENT "media-present" diff --git a/patches.fixes/xencons_close_deadlock.patch b/patches.fixes/xencons_close_deadlock.patch deleted file mode 100644 index c62faee..0000000 --- a/patches.fixes/xencons_close_deadlock.patch +++ /dev/null @@ -1,15 +0,0 @@ ---- linux-2.6.38.3/drivers/xen/console/console.c.orig 2011-07-21 13:20:34.120001602 +0200 -+++ linux-2.6.38.3/drivers/xen/console/console.c 2011-07-21 13:36:14.848001956 +0200 -@@ -618,7 +618,12 @@ static void xencons_close(struct tty_str - if (DUMMY_TTY(tty)) - return; - -+ /* We are called under tty_lock(). As other functions in tty core -+ obtain locks in tty_mutex,tty_lock order, we must do the same to -+ avoid a deadlock. Thus we must first tty_unlock(), and then lock. 
*/ -+ tty_unlock(); - mutex_lock(&tty_mutex); -+ tty_lock(); - - if (tty->count != 1) { - mutex_unlock(&tty_mutex); diff --git a/patches.kernel.org/patch-2.6.38.1 b/patches.kernel.org/patch-2.6.38.1 deleted file mode 100644 index 8f8bae3..0000000 --- a/patches.kernel.org/patch-2.6.38.1 +++ /dev/null @@ -1,2788 +0,0 @@ -From: Jiri Slaby -Subject: Linux 2.6.38.1 -Patch-mainline: Linux 2.6.38.1 -References: bnc#558740 - -Signed-off-by: Jiri Slaby ---- -diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices -index 87da405..9edb75d 100644 ---- a/Documentation/i2c/instantiating-devices -+++ b/Documentation/i2c/instantiating-devices -@@ -100,7 +100,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev) - (...) - i2c_adap = i2c_get_adapter(2); - memset(&i2c_info, 0, sizeof(struct i2c_board_info)); -- strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE); -+ strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE); - isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, - normal_i2c, NULL); - i2c_put_adapter(i2c_adap); -diff --git a/Makefile b/Makefile -index d6592b6..167ef45 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,7 +1,7 @@ - VERSION = 2 - PATCHLEVEL = 6 - SUBLEVEL = 38 --EXTRAVERSION = -+EXTRAVERSION = .1 - NAME = Flesh-Eating Bats with Fangs - - # *DOCUMENTATION* -diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c -index 0ca90b8..556bbd4 100644 ---- a/arch/arm/mach-davinci/board-dm644x-evm.c -+++ b/arch/arm/mach-davinci/board-dm644x-evm.c -@@ -440,11 +440,6 @@ evm_u35_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *c) - gpio_request(gpio + 7, "nCF_SEL"); - gpio_direction_output(gpio + 7, 1); - -- /* irlml6401 switches over 1A, in under 8 msec; -- * now it can be managed by nDRV_VBUS ... 
-- */ -- davinci_setup_usb(1000, 8); -- - return 0; - } - -@@ -705,6 +700,9 @@ static __init void davinci_evm_init(void) - davinci_serial_init(&uart_config); - dm644x_init_asp(&dm644x_evm_snd_data); - -+ /* irlml6401 switches over 1A, in under 8 msec */ -+ davinci_setup_usb(1000, 8); -+ - soc_info->emac_pdata->phy_id = DM644X_EVM_PHY_ID; - /* Register the fixup for PHY on DaVinci */ - phy_register_fixup_for_uid(LXT971_PHY_ID, LXT971_PHY_MASK, -diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h -index d840f4a..5bb95a1 100644 ---- a/arch/microblaze/include/asm/uaccess.h -+++ b/arch/microblaze/include/asm/uaccess.h -@@ -120,16 +120,16 @@ static inline unsigned long __must_check __clear_user(void __user *to, - { - /* normal memset with two words to __ex_table */ - __asm__ __volatile__ ( \ -- "1: sb r0, %2, r0;" \ -+ "1: sb r0, %1, r0;" \ - " addik %0, %0, -1;" \ - " bneid %0, 1b;" \ -- " addik %2, %2, 1;" \ -+ " addik %1, %1, 1;" \ - "2: " \ - __EX_TABLE_SECTION \ - ".word 1b,2b;" \ - ".previous;" \ -- : "=r"(n) \ -- : "0"(n), "r"(to) -+ : "=r"(n), "=r"(to) \ -+ : "0"(n), "1"(to) - ); - return n; - } -diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c -index d7d94b8..3948f1d 100644 ---- a/arch/parisc/kernel/irq.c -+++ b/arch/parisc/kernel/irq.c -@@ -108,7 +108,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) - int cpu_dest; - - /* timer and ipi have to always be received on all CPUs */ -- if (CHECK_IRQ_PER_CPU(irq)) { -+ if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) { - /* Bad linux design decision. 
The mask has already - * been set; we must reset it */ - cpumask_setall(irq_desc[irq].affinity); -diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h -index 125fc1a..7626fa7 100644 ---- a/arch/powerpc/include/asm/reg.h -+++ b/arch/powerpc/include/asm/reg.h -@@ -880,6 +880,7 @@ - #define PV_970 0x0039 - #define PV_POWER5 0x003A - #define PV_POWER5p 0x003B -+#define PV_POWER7 0x003F - #define PV_970FX 0x003C - #define PV_630 0x0040 - #define PV_630p 0x0041 -diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c -index ab6f6be..97e0ae4 100644 ---- a/arch/powerpc/kernel/perf_event.c -+++ b/arch/powerpc/kernel/perf_event.c -@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) - return ip; - } - -+static bool pmc_overflow(unsigned long val) -+{ -+ if ((int)val < 0) -+ return true; -+ -+ /* -+ * Events on POWER7 can roll back if a speculative event doesn't -+ * eventually complete. Unfortunately in some rare cases they will -+ * raise a performance monitor exception. We need to catch this to -+ * ensure we reset the PMC. In all cases the PMC will be 256 or less -+ * cycles from overflow. -+ * -+ * We only do this if the first pass fails to find any overflowing -+ * PMCs because a user might set a period of less than 256 and we -+ * don't want to mistakenly reset them. 
-+ */ -+ if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) -+ return true; -+ -+ return false; -+} -+ - /* - * Performance monitor interrupt stuff - */ -@@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) - if (is_limited_pmc(i + 1)) - continue; - val = read_pmc(i + 1); -- if ((int)val < 0) -+ if (pmc_overflow(val)) - write_pmc(i + 1, 0); - } - } -diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h -index 94b979d..effff47 100644 ---- a/arch/x86/include/asm/pgtable-3level.h -+++ b/arch/x86/include/asm/pgtable-3level.h -@@ -69,8 +69,6 @@ static inline void native_pmd_clear(pmd_t *pmd) - - static inline void pud_clear(pud_t *pudp) - { -- unsigned long pgd; -- - set_pud(pudp, __pud(0)); - - /* -@@ -79,13 +77,10 @@ static inline void pud_clear(pud_t *pudp) - * section 8.1: in PAE mode we explicitly have to flush the - * TLB via cr3 if the top-level pgd is changed... - * -- * Make sure the pud entry we're updating is within the -- * current pgd to avoid unnecessary TLB flushes. -+ * Currently all places where pud_clear() is called either have -+ * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or -+ * pud_clear_bad()), so we don't need TLB flush here. - */ -- pgd = read_cr3(); -- if (__pa(pudp) >= pgd && __pa(pudp) < -- (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) -- write_cr3(pgd); - } - - #ifdef CONFIG_SMP -diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c -index 7038b95..4db3554 100644 ---- a/arch/x86/kernel/alternative.c -+++ b/arch/x86/kernel/alternative.c -@@ -620,7 +620,12 @@ static int __kprobes stop_machine_text_poke(void *data) - flush_icache_range((unsigned long)p->addr, - (unsigned long)p->addr + p->len); - } -- -+ /* -+ * Intel Archiecture Software Developer's Manual section 7.1.3 specifies -+ * that a core serializing instruction such as "cpuid" should be -+ * executed on _each_ core before the new instruction is made visible. 
-+ */ -+ sync_core(); - return 0; - } - -diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c -index 294f26d..0b5e2b5 100644 ---- a/arch/x86/kernel/e820.c -+++ b/arch/x86/kernel/e820.c -@@ -847,15 +847,21 @@ static int __init parse_memopt(char *p) - if (!p) - return -EINVAL; - --#ifdef CONFIG_X86_32 - if (!strcmp(p, "nopentium")) { -+#ifdef CONFIG_X86_32 - setup_clear_cpu_cap(X86_FEATURE_PSE); - return 0; -- } -+#else -+ printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); -+ return -EINVAL; - #endif -+ } - - userdef = 1; - mem_size = memparse(p, &p); -+ /* don't remove all of memory when handling "mem={invalid}" param */ -+ if (mem_size == 0) -+ return -EINVAL; - e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); - - return 0; -diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c -index 9efbdcc..3755ef4 100644 ---- a/arch/x86/kernel/early-quirks.c -+++ b/arch/x86/kernel/early-quirks.c -@@ -159,7 +159,12 @@ static void __init ati_bugs_contd(int num, int slot, int func) - if (rev >= 0x40) - acpi_fix_pin2_polarity = 1; - -- if (rev > 0x13) -+ /* -+ * SB600: revisions 0x11, 0x12, 0x13, 0x14, ... -+ * SB700: revisions 0x39, 0x3a, ... -+ * SB800: revisions 0x40, 0x41, ... -+ */ -+ if (rev >= 0x39) - return; - - if (acpi_use_timer_override) -diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S -index aed1ffb..bbd5c80 100644 ---- a/arch/x86/kernel/entry_64.S -+++ b/arch/x86/kernel/entry_64.S -@@ -1248,7 +1248,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) - decl PER_CPU_VAR(irq_count) - jmp error_exit - CFI_ENDPROC --END(do_hypervisor_callback) -+END(xen_do_hypervisor_callback) - - /* - * Hypervisor uses this for application faults while it executes. 
-diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c -index 0113d19..8573b83 100644 ---- a/arch/x86/mm/pgtable.c -+++ b/arch/x86/mm/pgtable.c -@@ -168,8 +168,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) - * section 8.1: in PAE mode we explicitly have to flush the - * TLB via cr3 if the top-level pgd is changed... - */ -- if (mm == current->active_mm) -- write_cr3(read_cr3()); -+ flush_tlb_mm(mm); - } - #else /* !CONFIG_X86_PAE */ - -diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c -index b8d96ce..34e08f6 100644 ---- a/drivers/ata/ahci.c -+++ b/drivers/ata/ahci.c -@@ -260,6 +260,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { - { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ - { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ - { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ -+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ - { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ - - /* JMicron 360/1/3/5/6, match class to avoid IDE function */ -@@ -383,6 +384,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { - .class = PCI_CLASS_STORAGE_SATA_AHCI, - .class_mask = 0xffffff, - .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ -+ { PCI_DEVICE(0x1b4b, 0x9125), -+ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ - - /* Promise */ - { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ -diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c -index 17a6378..e16850e 100644 ---- a/drivers/ata/libata-eh.c -+++ b/drivers/ata/libata-eh.c -@@ -1618,7 +1618,7 @@ static void ata_eh_analyze_serror(struct ata_link *link) - * host links. For disabled PMP links, only N bit is - * considered as X bit is left at 1 for link plugging. 
- */ -- if (link->lpm_policy != ATA_LPM_MAX_POWER) -+ if (link->lpm_policy > ATA_LPM_MAX_POWER) - hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ - else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) - hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; -diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c -index 85da4c4..2eee8e0 100644 ---- a/drivers/gpu/drm/drm_sysfs.c -+++ b/drivers/gpu/drm/drm_sysfs.c -@@ -158,8 +158,15 @@ static ssize_t status_show(struct device *device, - { - struct drm_connector *connector = to_drm_connector(device); - enum drm_connector_status status; -+ int ret; -+ -+ ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex); -+ if (ret) -+ return ret; - - status = connector->funcs->detect(connector, true); -+ mutex_unlock(&connector->dev->mode_config.mutex); -+ - return snprintf(buf, PAGE_SIZE, "%s\n", - drm_get_connector_status_name(status)); - } -diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c -index 8a9e08b..2347bc1 100644 ---- a/drivers/gpu/drm/i915/i915_irq.c -+++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -1377,7 +1377,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) - else - i915_enable_pipestat(dev_priv, pipe, - PIPE_VBLANK_INTERRUPT_ENABLE); -+ -+ /* maintain vblank delivery even in deep C-states */ -+ if (dev_priv->info->gen == 3) -+ I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -+ - return 0; - } - -@@ -1390,6 +1395,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); -+ if (dev_priv->info->gen == 3) -+ I915_WRITE(INSTPM, -+ INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); -+ - if (HAS_PCH_SPLIT(dev)) - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
- DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); -diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h -index 2abe240..12c547a 100644 ---- a/drivers/gpu/drm/i915/i915_reg.h -+++ b/drivers/gpu/drm/i915/i915_reg.h -@@ -405,9 +405,12 @@ - #define I915_ERROR_INSTRUCTION (1<<0) - #define INSTPM 0x020c0 - #define INSTPM_SELF_EN (1<<12) /* 915GM only */ -+#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts -+ will not assert AGPBUSY# and will only -+ be delivered when out of C3. */ - #define ACTHD 0x020c8 - #define FW_BLC 0x020d8 --#define FW_BLC2 0x020dc -+#define FW_BLC2 0x020dc - #define FW_BLC_SELF 0x020e0 /* 915+ only */ - #define FW_BLC_SELF_EN_MASK (1<<31) - #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ -diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c -index a4e5e53..4a5a73b 100644 ---- a/drivers/gpu/drm/radeon/atombios_crtc.c -+++ b/drivers/gpu/drm/radeon/atombios_crtc.c -@@ -61,8 +61,8 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, - args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); - args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); - } else if (a2 > a1) { -- args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); -- args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); -+ args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); -+ args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); - } - break; - case RMX_FULL: -diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig -index 2560f01..ae445b1 100644 ---- a/drivers/hid/Kconfig -+++ b/drivers/hid/Kconfig -@@ -68,9 +68,15 @@ config HID_A4TECH - ---help--- - Support for A4 tech X5 and WOP-35 / Trust 450L mice. 
- --config HID_ACRUX_FF -- tristate "ACRUX force feedback" -+config HID_ACRUX -+ tristate "ACRUX game controller support" - depends on USB_HID -+ ---help--- -+ Say Y here if you want to enable support for ACRUX game controllers. -+ -+config HID_ACRUX_FF -+ tristate "ACRUX force feedback support" -+ depends on HID_ACRUX - select INPUT_FF_MEMLESS - ---help--- - Say Y here if you want to enable force feedback support for ACRUX -@@ -319,10 +325,10 @@ config HID_NTRIG - Support for N-Trig touch screen. - - config HID_ORTEK -- tristate "Ortek WKB-2000 wireless keyboard and mouse trackpad" -+ tristate "Ortek PKB-1700/WKB-2000 wireless keyboard and mouse trackpad" - depends on USB_HID - ---help--- -- Support for Ortek WKB-2000 wireless keyboard + mouse trackpad. -+ Support for Ortek PKB-1700/WKB-2000 wireless keyboard + mouse trackpad. - - config HID_PANTHERLORD - tristate "Pantherlord/GreenAsia game controller" -diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile -index 6efc2a0..13e6248 100644 ---- a/drivers/hid/Makefile -+++ b/drivers/hid/Makefile -@@ -27,7 +27,7 @@ endif - - obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o - obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o --obj-$(CONFIG_HID_ACRUX_FF) += hid-axff.o -+obj-$(CONFIG_HID_ACRUX) += hid-axff.o - obj-$(CONFIG_HID_APPLE) += hid-apple.o - obj-$(CONFIG_HID_BELKIN) += hid-belkin.o - obj-$(CONFIG_HID_CANDO) += hid-cando.o -diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c -index 61aa712..b85744f 100644 ---- a/drivers/hid/hid-apple.c -+++ b/drivers/hid/hid-apple.c -@@ -481,6 +481,12 @@ static const struct hid_device_id apple_devices[] = { - .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), - .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), -+ .driver_data = APPLE_HAS_FN }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), -+ 
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), -+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), - .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), -diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c -index e5b961d..b455428 100644 ---- a/drivers/hid/hid-axff.c -+++ b/drivers/hid/hid-axff.c -@@ -33,6 +33,8 @@ - #include - - #include "hid-ids.h" -+ -+#ifdef CONFIG_HID_ACRUX_FF - #include "usbhid/usbhid.h" - - struct axff_device { -@@ -109,6 +111,12 @@ err_free_mem: - kfree(axff); - return error; - } -+#else -+static inline int axff_init(struct hid_device *hid) -+{ -+ return 0; -+} -+#endif - - static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id) - { -@@ -139,9 +147,25 @@ static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id) - error); - } - -+ /* -+ * We need to start polling device right away, otherwise -+ * it will go into a coma. 
-+ */ -+ error = hid_hw_open(hdev); -+ if (error) { -+ dev_err(&hdev->dev, "hw open failed\n"); -+ return error; -+ } -+ - return 0; - } - -+static void ax_remove(struct hid_device *hdev) -+{ -+ hid_hw_close(hdev); -+ hid_hw_stop(hdev); -+} -+ - static const struct hid_device_id ax_devices[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802), }, - { } -@@ -149,9 +173,10 @@ static const struct hid_device_id ax_devices[] = { - MODULE_DEVICE_TABLE(hid, ax_devices); - - static struct hid_driver ax_driver = { -- .name = "acrux", -- .id_table = ax_devices, -- .probe = ax_probe, -+ .name = "acrux", -+ .id_table = ax_devices, -+ .probe = ax_probe, -+ .remove = ax_remove, - }; - - static int __init ax_init(void) -diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c -index d678cf3..9477b2a 100644 ---- a/drivers/hid/hid-core.c -+++ b/drivers/hid/hid-core.c -@@ -1256,9 +1256,7 @@ static const struct hid_device_id hid_have_special_driver[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, - { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, - { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, --#if defined(CONFIG_HID_ACRUX_FF) || defined(CONFIG_HID_ACRUX_FF_MODULE) - { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, --#endif - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, -@@ -1302,6 +1300,9 @@ static const struct hid_device_id hid_have_special_driver[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 
USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, -@@ -1400,6 +1401,7 @@ static const struct hid_device_id hid_have_special_driver[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, - { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, - { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, - { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, - { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, - { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, -@@ -1801,6 +1803,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, - { } -diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h -index 92a0d61..090bf48 100644 ---- a/drivers/hid/hid-ids.h -+++ b/drivers/hid/hid-ids.h -@@ -103,6 +103,9 @@ - #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 
- #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 - #define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 -+#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 -+#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 -+#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 - #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 - #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a - #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b -@@ -466,6 +469,7 @@ - #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064 - - #define USB_VENDOR_ID_ORTEK 0x05a4 -+#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 - #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 - - #define USB_VENDOR_ID_PANJIT 0x134c -diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c -index 7f552bf..ebcc02a 100644 ---- a/drivers/hid/hid-input.c -+++ b/drivers/hid/hid-input.c -@@ -290,14 +290,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel - goto ignore; - } - -- if (field->report_type == HID_FEATURE_REPORT) { -- if (device->driver->feature_mapping) { -- device->driver->feature_mapping(device, hidinput, field, -- usage); -- } -- goto ignore; -- } -- - if (device->driver->input_mapping) { - int ret = device->driver->input_mapping(device, hidinput, field, - usage, &bit, &max); -@@ -835,6 +827,24 @@ static void hidinput_close(struct input_dev *dev) - hid_hw_close(hid); - } - -+static void report_features(struct hid_device *hid) -+{ -+ struct hid_driver *drv = hid->driver; -+ struct hid_report_enum *rep_enum; -+ struct hid_report *rep; -+ int i, j; -+ -+ if (!drv->feature_mapping) -+ return; -+ -+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; -+ list_for_each_entry(rep, &rep_enum->report_list, list) -+ for (i = 0; i < rep->maxfield; i++) -+ for (j = 0; j < rep->field[i]->maxusage; j++) -+ drv->feature_mapping(hid, rep->field[i], -+ rep->field[i]->usage + j); -+} -+ - /* - * Register the input device; print a message. 
- * Configure the input layer interface -@@ -863,7 +873,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) - return -1; - } - -- for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) { -+ report_features(hid); -+ -+ for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) { - if (k == HID_OUTPUT_REPORT && - hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS) - continue; -diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c -index 698e645..318cc40 100644 ---- a/drivers/hid/hid-magicmouse.c -+++ b/drivers/hid/hid-magicmouse.c -@@ -258,7 +258,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda - input_report_abs(input, ABS_MT_TRACKING_ID, id); - input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2); - input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2); -- input_report_abs(input, ABS_MT_ORIENTATION, orientation); -+ input_report_abs(input, ABS_MT_ORIENTATION, -orientation); - input_report_abs(input, ABS_MT_POSITION_X, x); - input_report_abs(input, ABS_MT_POSITION_Y, y); - -@@ -397,7 +397,7 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h - input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0); - input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0); - input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0); -- input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0); -+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0); - - /* Note: Touch Y position from the device is inverted relative - * to how pointer motion is reported (and relative to how USB -diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c -index 07d3183..2bbc954 100644 ---- a/drivers/hid/hid-multitouch.c -+++ b/drivers/hid/hid-multitouch.c -@@ -122,7 +122,7 @@ struct mt_class mt_classes[] = { - { } - }; - --static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi, -+static void mt_feature_mapping(struct 
hid_device *hdev, - struct hid_field *field, struct hid_usage *usage) - { - if (usage->hid == HID_DG_INPUTMODE) { -diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c -index e90edfc..ad6faa6 100644 ---- a/drivers/hid/hid-ortek.c -+++ b/drivers/hid/hid-ortek.c -@@ -1,5 +1,5 @@ - /* -- * HID driver for Ortek WKB-2000 (wireless keyboard + mouse trackpad). -+ * HID driver for Ortek PKB-1700/WKB-2000 (wireless keyboard + mouse trackpad). - * Fixes LogicalMaximum error in USB report description, see - * http://bugzilla.kernel.org/show_bug.cgi?id=14787 - * -@@ -30,6 +30,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, - } - - static const struct hid_device_id ortek_devices[] = { -+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, - { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, - { } - }; -diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c -index a610e78..38a41d2 100644 ---- a/drivers/hwmon/sht15.c -+++ b/drivers/hwmon/sht15.c -@@ -333,11 +333,11 @@ static inline int sht15_calc_humid(struct sht15_data *data) - - const int c1 = -4; - const int c2 = 40500; /* x 10 ^ -6 */ -- const int c3 = -2800; /* x10 ^ -9 */ -+ const int c3 = -28; /* x 10 ^ -7 */ - - RHlinear = c1*1000 - + c2 * data->val_humid/1000 -- + (data->val_humid * data->val_humid * c3)/1000000; -+ + (data->val_humid * data->val_humid * c3) / 10000; - return (temp - 25000) * (10000 + 80 * data->val_humid) - / 1000000 + RHlinear; - } -diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c -index 64e0903..1d9616b 100644 ---- a/drivers/infiniband/core/cm.c -+++ b/drivers/infiniband/core/cm.c -@@ -2989,6 +2989,7 @@ static int cm_sidr_req_handler(struct cm_work *work) - goto out; /* No match. 
*/ - } - atomic_inc(&cur_cm_id_priv->refcount); -+ atomic_inc(&cm_id_priv->refcount); - spin_unlock_irq(&cm.lock); - - cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; -diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c -index 6884da2..e450c5a 100644 ---- a/drivers/infiniband/core/cma.c -+++ b/drivers/infiniband/core/cma.c -@@ -1210,6 +1210,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) - cm_id->context = conn_id; - cm_id->cm_handler = cma_ib_handler; - -+ /* -+ * Protect against the user destroying conn_id from another thread -+ * until we're done accessing it. -+ */ -+ atomic_inc(&conn_id->refcount); - ret = conn_id->id.event_handler(&conn_id->id, &event); - if (!ret) { - /* -@@ -1222,8 +1227,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) - ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); - mutex_unlock(&lock); - mutex_unlock(&conn_id->handler_mutex); -+ cma_deref_id(conn_id); - goto out; - } -+ cma_deref_id(conn_id); - - /* Destroy the CM ID by returning a non-zero value. */ - conn_id->cm_id.ib = NULL; -@@ -1425,17 +1432,25 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, - event.param.conn.private_data_len = iw_event->private_data_len; - event.param.conn.initiator_depth = attr.max_qp_init_rd_atom; - event.param.conn.responder_resources = attr.max_qp_rd_atom; -+ -+ /* -+ * Protect against the user destroying conn_id from another thread -+ * until we're done accessing it. 
-+ */ -+ atomic_inc(&conn_id->refcount); - ret = conn_id->id.event_handler(&conn_id->id, &event); - if (ret) { - /* User wants to destroy the CM ID */ - conn_id->cm_id.iw = NULL; - cma_exch(conn_id, CMA_DESTROYING); - mutex_unlock(&conn_id->handler_mutex); -+ cma_deref_id(conn_id); - rdma_destroy_id(&conn_id->id); - goto out; - } - - mutex_unlock(&conn_id->handler_mutex); -+ cma_deref_id(conn_id); - - out: - if (dev) -diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c -index ee82851..3185314 100644 ---- a/drivers/input/mouse/bcm5974.c -+++ b/drivers/input/mouse/bcm5974.c -@@ -63,6 +63,10 @@ - #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 - #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 - #define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 -+/* Macbook8 (unibody, March 2011) */ -+#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 -+#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 -+#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 - - #define BCM5974_DEVICE(prod) { \ - .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ -@@ -96,6 +100,10 @@ static const struct usb_device_id bcm5974_table[] = { - BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI), - BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO), - BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), -+ /* MacbookPro8 */ -+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), -+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), -+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), - /* Terminating entry */ - {} - }; -@@ -274,6 +282,18 @@ static const struct bcm5974_config bcm5974_config_table[] = { - { DIM_X, DIM_X / SN_COORD, -4616, 5112 }, - { DIM_Y, DIM_Y / SN_COORD, -142, 5234 } - }, -+ { -+ USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI, -+ USB_DEVICE_ID_APPLE_WELLSPRING5_ISO, -+ USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, -+ HAS_INTEGRATED_BUTTON, -+ 0x84, sizeof(struct bt_data), -+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, -+ { DIM_PRESSURE, DIM_PRESSURE / 
SN_PRESSURE, 0, 300 }, -+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, -+ { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, -+ { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } -+ }, - {} - }; - -diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c -index ebc62ad..fbe1ea4 100644 ---- a/drivers/mmc/core/sdio.c -+++ b/drivers/mmc/core/sdio.c -@@ -395,6 +395,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, - if (err) - goto remove; - -+ /* -+ * Update oldcard with the new RCA received from the SDIO -+ * device -- we're doing this so that it's updated in the -+ * "card" struct when oldcard overwrites that later. -+ */ -+ if (oldcard) -+ oldcard->rca = card->rca; -+ - mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); - } - -diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c -index 0dc905b..f7e622c 100644 ---- a/drivers/mmc/host/sdhci-pci.c -+++ b/drivers/mmc/host/sdhci-pci.c -@@ -547,6 +547,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = { - }, - - { -+ .vendor = PCI_VENDOR_ID_RICOH, -+ .device = 0xe823, -+ .subvendor = PCI_ANY_ID, -+ .subdevice = PCI_ANY_ID, -+ .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, -+ }, -+ -+ { - .vendor = PCI_VENDOR_ID_ENE, - .device = PCI_DEVICE_ID_ENE_CB712_SD, - .subvendor = PCI_ANY_ID, -diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c -index 9f01e50..7c0a7c4 100644 ---- a/drivers/net/wireless/ath/ath9k/hw.c -+++ b/drivers/net/wireless/ath/ath9k/hw.c -@@ -495,6 +495,17 @@ static int __ath9k_hw_init(struct ath_hw *ah) - if (ah->hw_version.devid == AR5416_AR9100_DEVID) - ah->hw_version.macVersion = AR_SREV_VERSION_9100; - -+ /* -+ * Read back AR_WA into a permanent copy and set bits 14 and 17. -+ * We need to do this to avoid RMW of this register. We cannot -+ * read the reg when chip is asleep. 
-+ */ -+ ah->WARegVal = REG_READ(ah, AR_WA); -+ ah->WARegVal |= (AR_WA_D3_L1_DISABLE | -+ AR_WA_ASPM_TIMER_BASED_DISABLE); -+ -+ ath9k_hw_read_revisions(ah); -+ - if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { - ath_err(common, "Couldn't reset chip\n"); - return -EIO; -@@ -563,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah) - - ath9k_hw_init_mode_regs(ah); - -- /* -- * Read back AR_WA into a permanent copy and set bits 14 and 17. -- * We need to do this to avoid RMW of this register. We cannot -- * read the reg when chip is asleep. -- */ -- ah->WARegVal = REG_READ(ah, AR_WA); -- ah->WARegVal |= (AR_WA_D3_L1_DISABLE | -- AR_WA_ASPM_TIMER_BASED_DISABLE); - - if (ah->is_pciexpress) - ath9k_hw_configpcipowersave(ah, 0, 0); -@@ -1082,8 +1085,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) - return false; - } - -- ath9k_hw_read_revisions(ah); -- - return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); - } - -diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c -index b2497b8..3867a2e 100644 ---- a/drivers/net/wireless/ath/ath9k/recv.c -+++ b/drivers/net/wireless/ath/ath9k/recv.c -@@ -439,9 +439,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) - * mode interface or when in monitor mode. AP mode does not need this - * since it receives all in-BSS frames anyway. - */ -- if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && -- (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || -- (sc->sc_ah->is_monitoring)) -+ if (sc->sc_ah->is_monitoring) - rfilt |= ATH9K_RX_FILTER_PROM; - - if (sc->rx.rxfilter & FIF_CONTROL) -diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c -index 6b82cac..2bb5297 100644 ---- a/drivers/net/wireless/rtl818x/rtl8187/dev.c -+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c -@@ -871,23 +871,35 @@ static void rtl8187_work(struct work_struct *work) - /* The RTL8187 returns the retry count through register 0xFFFA. 
In - * addition, it appears to be a cumulative retry count, not the - * value for the current TX packet. When multiple TX entries are -- * queued, the retry count will be valid for the last one in the queue. -- * The "error" should not matter for purposes of rate setting. */ -+ * waiting in the queue, the retry count will be the total for all. -+ * The "error" may matter for purposes of rate setting, but there is -+ * no other choice with this hardware. -+ */ - struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, - work.work); - struct ieee80211_tx_info *info; - struct ieee80211_hw *dev = priv->dev; - static u16 retry; - u16 tmp; -+ u16 avg_retry; -+ int length; - - mutex_lock(&priv->conf_mutex); - tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA); -+ length = skb_queue_len(&priv->b_tx_status.queue); -+ if (unlikely(!length)) -+ length = 1; -+ if (unlikely(tmp < retry)) -+ tmp = retry; -+ avg_retry = (tmp - retry) / length; - while (skb_queue_len(&priv->b_tx_status.queue) > 0) { - struct sk_buff *old_skb; - - old_skb = skb_dequeue(&priv->b_tx_status.queue); - info = IEEE80211_SKB_CB(old_skb); -- info->status.rates[0].count = tmp - retry + 1; -+ info->status.rates[0].count = avg_retry + 1; -+ if (info->status.rates[0].count > RETRY_COUNT) -+ info->flags &= ~IEEE80211_TX_STAT_ACK; - ieee80211_tx_status_irqsafe(dev, old_skb); - } - retry = tmp; -@@ -933,8 +945,8 @@ static int rtl8187_start(struct ieee80211_hw *dev) - rtl818x_iowrite32(priv, &priv->map->TX_CONF, - RTL818X_TX_CONF_HW_SEQNUM | - RTL818X_TX_CONF_DISREQQSIZE | -- (7 << 8 /* short retry limit */) | -- (7 << 0 /* long retry limit */) | -+ (RETRY_COUNT << 8 /* short retry limit */) | -+ (RETRY_COUNT << 0 /* long retry limit */) | - (7 << 21 /* MAX TX DMA */)); - rtl8187_init_urbs(dev); - rtl8187b_init_status_urb(dev); -@@ -1378,6 +1390,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, - dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | - IEEE80211_HW_SIGNAL_DBM | - 
IEEE80211_HW_RX_INCLUDES_FCS; -+ /* Initialize rate-control variables */ -+ dev->max_rates = 1; -+ dev->max_rate_tries = RETRY_COUNT; - - eeprom.data = dev; - eeprom.register_read = rtl8187_eeprom_register_read; -diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h -index 0d7b142..f1cc907 100644 ---- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h -+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h -@@ -35,6 +35,8 @@ - #define RFKILL_MASK_8187_89_97 0x2 - #define RFKILL_MASK_8198 0x4 - -+#define RETRY_COUNT 7 -+ - struct rtl8187_rx_info { - struct urb *urb; - struct ieee80211_hw *dev; -diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c -index ea25e5b..c85438a 100644 ---- a/drivers/pci/pci-sysfs.c -+++ b/drivers/pci/pci-sysfs.c -@@ -1088,7 +1088,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) - attr->write = write_vpd_attr; - retval = sysfs_create_bin_file(&dev->dev.kobj, attr); - if (retval) { -- kfree(dev->vpd->attr); -+ kfree(attr); - return retval; - } - dev->vpd->attr = attr; -diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c -index 53a786f..bd80f63 100644 ---- a/drivers/pci/quirks.c -+++ b/drivers/pci/quirks.c -@@ -533,6 +533,17 @@ static void __devinit quirk_piix4_acpi(struct pci_dev *dev) - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi); - -+#define ICH_PMBASE 0x40 -+#define ICH_ACPI_CNTL 0x44 -+#define ICH4_ACPI_EN 0x10 -+#define ICH6_ACPI_EN 0x80 -+#define ICH4_GPIOBASE 0x58 -+#define ICH4_GPIO_CNTL 0x5c -+#define ICH4_GPIO_EN 0x10 -+#define ICH6_GPIOBASE 0x48 -+#define ICH6_GPIO_CNTL 0x4c -+#define ICH6_GPIO_EN 0x10 -+ - /* - * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at - * 0x40 (128 bytes of ACPI, GPIO & TCO registers) -@@ -541,12 +552,33 @@ 
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, qui - static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev) - { - u32 region; -+ u8 enable; - -- pci_read_config_dword(dev, 0x40, ®ion); -- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); -+ /* -+ * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict -+ * with low legacy (and fixed) ports. We don't know the decoding -+ * priority and can't tell whether the legacy device or the one created -+ * here is really at that address. This happens on boards with broken -+ * BIOSes. -+ */ -+ -+ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); -+ if (enable & ICH4_ACPI_EN) { -+ pci_read_config_dword(dev, ICH_PMBASE, ®ion); -+ region &= PCI_BASE_ADDRESS_IO_MASK; -+ if (region >= PCIBIOS_MIN_IO) -+ quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, -+ "ICH4 ACPI/GPIO/TCO"); -+ } - -- pci_read_config_dword(dev, 0x58, ®ion); -- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); -+ pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable); -+ if (enable & ICH4_GPIO_EN) { -+ pci_read_config_dword(dev, ICH4_GPIOBASE, ®ion); -+ region &= PCI_BASE_ADDRESS_IO_MASK; -+ if (region >= PCIBIOS_MIN_IO) -+ quirk_io_region(dev, region, 64, -+ PCI_BRIDGE_RESOURCES + 1, "ICH4 GPIO"); -+ } - } - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi); - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi); -@@ -562,12 +594,25 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, qui - static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev) - { - u32 region; -+ u8 enable; - -- pci_read_config_dword(dev, 0x40, ®ion); -- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); -+ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); -+ if (enable & ICH6_ACPI_EN) { -+ pci_read_config_dword(dev, ICH_PMBASE, ®ion); -+ 
region &= PCI_BASE_ADDRESS_IO_MASK; -+ if (region >= PCIBIOS_MIN_IO) -+ quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, -+ "ICH6 ACPI/GPIO/TCO"); -+ } - -- pci_read_config_dword(dev, 0x48, ®ion); -- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); -+ pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable); -+ if (enable & ICH4_GPIO_EN) { -+ pci_read_config_dword(dev, ICH6_GPIOBASE, ®ion); -+ region &= PCI_BASE_ADDRESS_IO_MASK; -+ if (region >= PCIBIOS_MIN_IO) -+ quirk_io_region(dev, region, 64, -+ PCI_BRIDGE_RESOURCES + 1, "ICH6 GPIO"); -+ } - } - - static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) -@@ -2618,58 +2663,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, - - #endif /* CONFIG_PCI_MSI */ - --#ifdef CONFIG_PCI_IOV -- --/* -- * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the -- * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the -- * old Flash Memory Space. 
-- */ --static void __devinit quirk_i82576_sriov(struct pci_dev *dev) --{ -- int pos, flags; -- u32 bar, start, size; -- -- if (PAGE_SIZE > 0x10000) -- return; -- -- flags = pci_resource_flags(dev, 0); -- if ((flags & PCI_BASE_ADDRESS_SPACE) != -- PCI_BASE_ADDRESS_SPACE_MEMORY || -- (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) != -- PCI_BASE_ADDRESS_MEM_TYPE_32) -- return; -- -- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); -- if (!pos) -- return; -- -- pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar); -- if (bar & PCI_BASE_ADDRESS_MEM_MASK) -- return; -- -- start = pci_resource_start(dev, 1); -- size = pci_resource_len(dev, 1); -- if (!start || size != 0x400000 || start & (size - 1)) -- return; -- -- pci_resource_flags(dev, 1) = 0; -- pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0); -- pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start); -- pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2); -- -- dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n"); --} --DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov); --DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov); --DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); --DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); --DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); --DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov); --DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov); -- --#endif /* CONFIG_PCI_IOV */ -- - /* Allow manual resource allocation for PCI hotplug bridges - * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. 
For - * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6), -diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c -index 6b72932..30f2b33 100644 ---- a/drivers/scsi/device_handler/scsi_dh_alua.c -+++ b/drivers/scsi/device_handler/scsi_dh_alua.c -@@ -285,7 +285,8 @@ static void stpg_endio(struct request *req, int error) - print_alua_state(h->state)); - } - done: -- blk_put_request(req); -+ req->end_io_data = NULL; -+ __blk_put_request(req->q, req); - if (h->callback_fn) { - h->callback_fn(h->callback_data, err); - h->callback_fn = h->callback_data = NULL; -diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c -index b47d7aa..e2fe165 100644 ---- a/drivers/staging/tidspbridge/rmgr/proc.c -+++ b/drivers/staging/tidspbridge/rmgr/proc.c -@@ -781,12 +781,14 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, - (u32)pmpu_addr, - ul_size, dir); - -+ mutex_lock(&proc_lock); -+ - /* find requested memory are in cached mapping information */ - map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); - if (!map_obj) { - pr_err("%s: find_containing_mapping failed\n", __func__); - status = -EFAULT; -- goto err_out; -+ goto no_map; - } - - if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { -@@ -795,6 +797,8 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, - status = -EFAULT; - } - -+no_map: -+ mutex_unlock(&proc_lock); - err_out: - - return status; -@@ -819,21 +823,24 @@ int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, - (u32)pmpu_addr, - ul_size, dir); - -+ mutex_lock(&proc_lock); -+ - /* find requested memory are in cached mapping information */ - map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); - if (!map_obj) { - pr_err("%s: find_containing_mapping failed\n", __func__); - status = -EFAULT; -- goto err_out; -+ goto no_map; - } - - if (memory_regain_ownership(map_obj, (u32) 
pmpu_addr, ul_size, dir)) { - pr_err("%s: InValid address parameters %p %x\n", - __func__, pmpu_addr, ul_size); - status = -EFAULT; -- goto err_out; - } - -+no_map: -+ mutex_unlock(&proc_lock); - err_out: - return status; - } -@@ -1726,9 +1733,8 @@ int proc_un_map(void *hprocessor, void *map_addr, - (p_proc_object->hbridge_context, va_align, size_align); - } - -- mutex_unlock(&proc_lock); - if (status) -- goto func_end; -+ goto unmap_failed; - - /* - * A successful unmap should be followed by removal of map_obj -@@ -1737,6 +1743,9 @@ int proc_un_map(void *hprocessor, void *map_addr, - */ - remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); - -+unmap_failed: -+ mutex_unlock(&proc_lock); -+ - func_end: - dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", - __func__, hprocessor, map_addr, status); -diff --git a/drivers/staging/winbond/core.h b/drivers/staging/winbond/core.h -index d7b3aca..6160b2f 100644 ---- a/drivers/staging/winbond/core.h -+++ b/drivers/staging/winbond/core.h -@@ -3,6 +3,7 @@ - - #include - #include -+#include - - #include "wbhal.h" - #include "mto.h" -diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c -index 366080b..7f19c8b 100644 ---- a/drivers/target/target_core_cdb.c -+++ b/drivers/target/target_core_cdb.c -@@ -667,7 +667,13 @@ target_emulate_readcapacity(struct se_cmd *cmd) - { - struct se_device *dev = SE_DEV(cmd); - unsigned char *buf = cmd->t_task->t_task_buf; -- u32 blocks = dev->transport->get_blocks(dev); -+ unsigned long long blocks_long = dev->transport->get_blocks(dev); -+ u32 blocks; -+ -+ if (blocks_long >= 0x00000000ffffffff) -+ blocks = 0xffffffff; -+ else -+ blocks = (u32)blocks_long; - - buf[0] = (blocks >> 24) & 0xff; - buf[1] = (blocks >> 16) & 0xff; -diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c -index 3975df6..b3b881b 100644 ---- a/drivers/tty/serial/8250.c -+++ b/drivers/tty/serial/8250.c -@@ -954,6 +954,23 @@ static int broken_efr(struct 
uart_8250_port *up) - return 0; - } - -+static inline int ns16550a_goto_highspeed(struct uart_8250_port *up) -+{ -+ unsigned char status; -+ -+ status = serial_in(up, 0x04); /* EXCR2 */ -+#define PRESL(x) ((x) & 0x30) -+ if (PRESL(status) == 0x10) { -+ /* already in high speed mode */ -+ return 0; -+ } else { -+ status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ -+ status |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ -+ serial_outp(up, 0x04, status); -+ } -+ return 1; -+} -+ - /* - * We know that the chip has FIFOs. Does it have an EFR? The - * EFR is located in the same register position as the IIR and -@@ -1025,12 +1042,8 @@ static void autoconfig_16550a(struct uart_8250_port *up) - quot = serial_dl_read(up); - quot <<= 3; - -- status1 = serial_in(up, 0x04); /* EXCR2 */ -- status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ -- status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ -- serial_outp(up, 0x04, status1); -- -- serial_dl_write(up, quot); -+ if (ns16550a_goto_highspeed(up)) -+ serial_dl_write(up, quot); - - serial_outp(up, UART_LCR, 0); - -@@ -3025,17 +3038,13 @@ void serial8250_resume_port(int line) - struct uart_8250_port *up = &serial8250_ports[line]; - - if (up->capabilities & UART_NATSEMI) { -- unsigned char tmp; -- - /* Ensure it's still in high speed mode */ - serial_outp(up, UART_LCR, 0xE0); - -- tmp = serial_in(up, 0x04); /* EXCR2 */ -- tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ -- tmp |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ -- serial_outp(up, 0x04, tmp); -+ ns16550a_goto_highspeed(up); - - serial_outp(up, UART_LCR, 0); -+ up->port.uartclk = 921600*16; - } - uart_resume_port(&serial8250_reg, &up->port); - } -diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c -index b62857b..37e13c3 100644 ---- a/drivers/tty/serial/mrst_max3110.c -+++ b/drivers/tty/serial/mrst_max3110.c -@@ -51,7 +51,7 @@ - struct uart_max3110 { - struct uart_port port; - struct spi_device *spi; -- 
char name[24]; -+ char name[SPI_NAME_SIZE]; - - wait_queue_head_t wq; - struct task_struct *main_thread; -diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c -index f71e8e3..d370885 100644 ---- a/drivers/usb/core/hcd-pci.c -+++ b/drivers/usb/core/hcd-pci.c -@@ -363,8 +363,7 @@ static int check_root_hub_suspended(struct device *dev) - struct pci_dev *pci_dev = to_pci_dev(dev); - struct usb_hcd *hcd = pci_get_drvdata(pci_dev); - -- if (!(hcd->state == HC_STATE_SUSPENDED || -- hcd->state == HC_STATE_HALT)) { -+ if (HCD_RH_RUNNING(hcd)) { - dev_warn(dev, "Root hub is not suspended\n"); - return -EBUSY; - } -@@ -386,7 +385,7 @@ static int suspend_common(struct device *dev, bool do_wakeup) - if (retval) - return retval; - -- if (hcd->driver->pci_suspend) { -+ if (hcd->driver->pci_suspend && !HCD_DEAD(hcd)) { - /* Optimization: Don't suspend if a root-hub wakeup is - * pending and it would cause the HCD to wake up anyway. - */ -@@ -427,7 +426,7 @@ static int resume_common(struct device *dev, int event) - struct usb_hcd *hcd = pci_get_drvdata(pci_dev); - int retval; - -- if (hcd->state != HC_STATE_SUSPENDED) { -+ if (HCD_RH_RUNNING(hcd)) { - dev_dbg(dev, "can't resume, not suspended!\n"); - return 0; - } -@@ -442,7 +441,7 @@ static int resume_common(struct device *dev, int event) - - clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); - -- if (hcd->driver->pci_resume) { -+ if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) { - if (event != PM_EVENT_AUTO_RESUME) - wait_for_companions(pci_dev, hcd); - -@@ -475,10 +474,10 @@ static int hcd_pci_suspend_noirq(struct device *dev) - - pci_save_state(pci_dev); - -- /* If the root hub is HALTed rather than SUSPENDed, -+ /* If the root hub is dead rather than suspended, - * disallow remote wakeup. 
- */ -- if (hcd->state == HC_STATE_HALT) -+ if (HCD_DEAD(hcd)) - device_set_wakeup_enable(dev, 0); - dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev)); - -diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c -index e935f71..c34a935 100644 ---- a/drivers/usb/core/hcd.c -+++ b/drivers/usb/core/hcd.c -@@ -983,7 +983,7 @@ static int register_root_hub(struct usb_hcd *hcd) - spin_unlock_irq (&hcd_root_hub_lock); - - /* Did the HC die before the root hub was registered? */ -- if (hcd->state == HC_STATE_HALT) -+ if (HCD_DEAD(hcd) || hcd->state == HC_STATE_HALT) - usb_hc_died (hcd); /* This time clean up */ - } - -@@ -1089,13 +1089,10 @@ int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb) - * Check the host controller's state and add the URB to the - * endpoint's queue. - */ -- switch (hcd->state) { -- case HC_STATE_RUNNING: -- case HC_STATE_RESUMING: -+ if (HCD_RH_RUNNING(hcd)) { - urb->unlinked = 0; - list_add_tail(&urb->urb_list, &urb->ep->urb_list); -- break; -- default: -+ } else { - rc = -ESHUTDOWN; - goto done; - } -@@ -1913,7 +1910,7 @@ int usb_hcd_get_frame_number (struct usb_device *udev) - { - struct usb_hcd *hcd = bus_to_hcd(udev->bus); - -- if (!HC_IS_RUNNING (hcd->state)) -+ if (!HCD_RH_RUNNING(hcd)) - return -ESHUTDOWN; - return hcd->driver->get_frame_number (hcd); - } -@@ -1930,9 +1927,15 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) - - dev_dbg(&rhdev->dev, "bus %s%s\n", - (msg.event & PM_EVENT_AUTO ? 
"auto-" : ""), "suspend"); -+ if (HCD_DEAD(hcd)) { -+ dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend"); -+ return 0; -+ } -+ - if (!hcd->driver->bus_suspend) { - status = -ENOENT; - } else { -+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); - hcd->state = HC_STATE_QUIESCING; - status = hcd->driver->bus_suspend(hcd); - } -@@ -1940,7 +1943,12 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) - usb_set_device_state(rhdev, USB_STATE_SUSPENDED); - hcd->state = HC_STATE_SUSPENDED; - } else { -- hcd->state = old_state; -+ spin_lock_irq(&hcd_root_hub_lock); -+ if (!HCD_DEAD(hcd)) { -+ set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); -+ hcd->state = old_state; -+ } -+ spin_unlock_irq(&hcd_root_hub_lock); - dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", - "suspend", status); - } -@@ -1955,9 +1963,13 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) - - dev_dbg(&rhdev->dev, "usb %s%s\n", - (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); -+ if (HCD_DEAD(hcd)) { -+ dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume"); -+ return 0; -+ } - if (!hcd->driver->bus_resume) - return -ENOENT; -- if (hcd->state == HC_STATE_RUNNING) -+ if (HCD_RH_RUNNING(hcd)) - return 0; - - hcd->state = HC_STATE_RESUMING; -@@ -1966,10 +1978,15 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) - if (status == 0) { - /* TRSMRCY = 10 msec */ - msleep(10); -- usb_set_device_state(rhdev, rhdev->actconfig -- ? USB_STATE_CONFIGURED -- : USB_STATE_ADDRESS); -- hcd->state = HC_STATE_RUNNING; -+ spin_lock_irq(&hcd_root_hub_lock); -+ if (!HCD_DEAD(hcd)) { -+ usb_set_device_state(rhdev, rhdev->actconfig -+ ? 
USB_STATE_CONFIGURED -+ : USB_STATE_ADDRESS); -+ set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); -+ hcd->state = HC_STATE_RUNNING; -+ } -+ spin_unlock_irq(&hcd_root_hub_lock); - } else { - hcd->state = old_state; - dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", -@@ -2080,7 +2097,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) - */ - local_irq_save(flags); - -- if (unlikely(hcd->state == HC_STATE_HALT || !HCD_HW_ACCESSIBLE(hcd))) { -+ if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) { - rc = IRQ_NONE; - } else if (hcd->driver->irq(hcd) == IRQ_NONE) { - rc = IRQ_NONE; -@@ -2114,6 +2131,8 @@ void usb_hc_died (struct usb_hcd *hcd) - dev_err (hcd->self.controller, "HC died; cleaning up\n"); - - spin_lock_irqsave (&hcd_root_hub_lock, flags); -+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); -+ set_bit(HCD_FLAG_DEAD, &hcd->flags); - if (hcd->rh_registered) { - clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); - -@@ -2256,6 +2275,12 @@ int usb_add_hcd(struct usb_hcd *hcd, - */ - device_init_wakeup(&rhdev->dev, 1); - -+ /* HCD_FLAG_RH_RUNNING doesn't matter until the root hub is -+ * registered. But since the controller can die at any time, -+ * let's initialize the flag before touching the hardware. -+ */ -+ set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); -+ - /* "reset" is misnamed; its role is now one-time init. the controller - * should already have been reset (and boot firmware kicked off etc). 
- */ -@@ -2323,6 +2348,7 @@ int usb_add_hcd(struct usb_hcd *hcd, - return retval; - - error_create_attr_group: -+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); - if (HC_IS_RUNNING(hcd->state)) - hcd->state = HC_STATE_QUIESCING; - spin_lock_irq(&hcd_root_hub_lock); -@@ -2375,6 +2401,7 @@ void usb_remove_hcd(struct usb_hcd *hcd) - usb_get_dev(rhdev); - sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group); - -+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); - if (HC_IS_RUNNING (hcd->state)) - hcd->state = HC_STATE_QUIESCING; - -diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c -index c14fc08..ae334b0 100644 ---- a/drivers/usb/core/urb.c -+++ b/drivers/usb/core/urb.c -@@ -366,7 +366,16 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) - if (xfertype == USB_ENDPOINT_XFER_ISOC) { - int n, len; - -- /* FIXME SuperSpeed isoc endpoints have up to 16 bursts */ -+ /* SuperSpeed isoc endpoints have up to 16 bursts of up to -+ * 3 packets each -+ */ -+ if (dev->speed == USB_SPEED_SUPER) { -+ int burst = 1 + ep->ss_ep_comp.bMaxBurst; -+ int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes); -+ max *= burst; -+ max *= mult; -+ } -+ - /* "high bandwidth" mode, 1-3 packets/uframe? 
*/ - if (dev->speed == USB_SPEED_HIGH) { - int mult = 1 + ((max >> 11) & 0x03); -diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c -index 8a515f0..72ae77c 100644 ---- a/drivers/usb/host/ehci-hub.c -+++ b/drivers/usb/host/ehci-hub.c -@@ -106,6 +106,27 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci) - ehci->owned_ports = 0; - } - -+static int ehci_port_change(struct ehci_hcd *ehci) -+{ -+ int i = HCS_N_PORTS(ehci->hcs_params); -+ -+ /* First check if the controller indicates a change event */ -+ -+ if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD) -+ return 1; -+ -+ /* -+ * Not all controllers appear to update this while going from D3 to D0, -+ * so check the individual port status registers as well -+ */ -+ -+ while (i--) -+ if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC) -+ return 1; -+ -+ return 0; -+} -+ - static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, - bool suspending, bool do_wakeup) - { -@@ -173,7 +194,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, - } - - /* Does the root hub have a port wakeup pending? 
*/ -- if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)) -+ if (!suspending && ehci_port_change(ehci)) - usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); - - spin_unlock_irqrestore(&ehci->lock, flags); -diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c -index bdba8c5..c470cc8 100644 ---- a/drivers/usb/host/isp1760-hcd.c -+++ b/drivers/usb/host/isp1760-hcd.c -@@ -33,6 +33,7 @@ struct isp1760_hcd { - struct inter_packet_info atl_ints[32]; - struct inter_packet_info int_ints[32]; - struct memory_chunk memory_pool[BLOCKS]; -+ u32 atl_queued; - - /* periodic schedule support */ - #define DEFAULT_I_TDPS 1024 -@@ -850,6 +851,11 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, - skip_map &= ~queue_entry; - isp1760_writel(skip_map, hcd->regs + HC_ATL_PTD_SKIPMAP_REG); - -+ priv->atl_queued++; -+ if (priv->atl_queued == 2) -+ isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, -+ hcd->regs + HC_INTERRUPT_ENABLE); -+ - buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG); - buffstatus |= ATL_BUFFER; - isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG); -@@ -992,6 +998,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd) - u32 dw3; - - status = 0; -+ priv->atl_queued--; - - queue_entry = __ffs(done_map); - done_map &= ~(1 << queue_entry); -@@ -1054,11 +1061,6 @@ static void do_atl_int(struct usb_hcd *usb_hcd) - * device is not able to send data fast enough. - * This happens mostly on slower hardware. - */ -- printk(KERN_NOTICE "Reloading ptd %p/%p... 
qh %p read: " -- "%d of %zu done: %08x cur: %08x\n", qtd, -- urb, qh, PTD_XFERRED_LENGTH(dw3), -- qtd->length, done_map, -- (1 << queue_entry)); - - /* RL counter = ERR counter */ - dw3 &= ~(0xf << 19); -@@ -1086,6 +1088,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd) - priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs + - atl_regs, sizeof(ptd)); - -+ priv->atl_queued++; -+ if (priv->atl_queued == 2) -+ isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, -+ usb_hcd->regs + HC_INTERRUPT_ENABLE); -+ - buffstatus = isp1760_readl(usb_hcd->regs + - HC_BUFFER_STATUS_REG); - buffstatus |= ATL_BUFFER; -@@ -1191,6 +1198,9 @@ static void do_atl_int(struct usb_hcd *usb_hcd) - skip_map = isp1760_readl(usb_hcd->regs + - HC_ATL_PTD_SKIPMAP_REG); - } -+ if (priv->atl_queued <= 1) -+ isp1760_writel(INTERRUPT_ENABLE_MASK, -+ usb_hcd->regs + HC_INTERRUPT_ENABLE); - } - - static void do_intl_int(struct usb_hcd *usb_hcd) -@@ -1770,7 +1780,7 @@ static irqreturn_t isp1760_irq(struct usb_hcd *usb_hcd) - goto leave; - - isp1760_writel(imask, usb_hcd->regs + HC_INTERRUPT_REG); -- if (imask & HC_ATL_INT) -+ if (imask & (HC_ATL_INT | HC_SOT_INT)) - do_atl_int(usb_hcd); - - if (imask & HC_INTL_INT) -diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h -index 6931ef5..612bce5 100644 ---- a/drivers/usb/host/isp1760-hcd.h -+++ b/drivers/usb/host/isp1760-hcd.h -@@ -69,6 +69,7 @@ void deinit_kmem_cache(void); - - #define HC_INTERRUPT_ENABLE 0x314 - #define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT | HC_EOT_INT) -+#define INTERRUPT_ENABLE_SOT_MASK (HC_INTL_INT | HC_SOT_INT | HC_EOT_INT) - - #define HC_ISO_INT (1 << 9) - #define HC_ATL_INT (1 << 8) -diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c -index 3289bf4..d3f0406 100644 ---- a/drivers/usb/host/xhci-ring.c -+++ b/drivers/usb/host/xhci-ring.c -@@ -500,15 +500,26 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, - state->new_cycle_state = ~(state->new_cycle_state) & 0x1; - next_trb(xhci, 
ep_ring, &state->new_deq_seg, &state->new_deq_ptr); - -+ /* -+ * If there is only one segment in a ring, find_trb_seg()'s while loop -+ * will not run, and it will return before it has a chance to see if it -+ * needs to toggle the cycle bit. It can't tell if the stalled transfer -+ * ended just before the link TRB on a one-segment ring, or if the TD -+ * wrapped around the top of the ring, because it doesn't have the TD in -+ * question. Look for the one-segment case where stalled TRB's address -+ * is greater than the new dequeue pointer address. -+ */ -+ if (ep_ring->first_seg == ep_ring->first_seg->next && -+ state->new_deq_ptr < dev->eps[ep_index].stopped_trb) -+ state->new_cycle_state ^= 0x1; -+ xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state); -+ - /* Don't update the ring cycle state for the producer (us). */ - xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", - state->new_deq_seg); - addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); - xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", - (unsigned long long) addr); -- xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); -- ep_ring->dequeue = state->new_deq_ptr; -- ep_ring->deq_seg = state->new_deq_seg; - } - - static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, -@@ -951,9 +962,26 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, - } else { - xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", - ep_ctx->deq); -+ if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, -+ dev->eps[ep_index].queued_deq_ptr) == -+ (ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) { -+ /* Update the ring's dequeue segment and dequeue pointer -+ * to reflect the new position. 
-+ */ -+ ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg; -+ ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr; -+ } else { -+ xhci_warn(xhci, "Mismatch between completed Set TR Deq " -+ "Ptr command & xHCI internal state.\n"); -+ xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", -+ dev->eps[ep_index].queued_deq_seg, -+ dev->eps[ep_index].queued_deq_ptr); -+ } - } - - dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; -+ dev->eps[ep_index].queued_deq_seg = NULL; -+ dev->eps[ep_index].queued_deq_ptr = NULL; - /* Restart any rings with pending URBs */ - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); - } -@@ -3229,6 +3257,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, - u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); - u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); - u32 type = TRB_TYPE(TRB_SET_DEQ); -+ struct xhci_virt_ep *ep; - - addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); - if (addr == 0) { -@@ -3237,6 +3266,14 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, - deq_seg, deq_ptr); - return 0; - } -+ ep = &xhci->devs[slot_id]->eps[ep_index]; -+ if ((ep->ep_state & SET_DEQ_PENDING)) { -+ xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); -+ xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); -+ return 0; -+ } -+ ep->queued_deq_seg = deq_seg; -+ ep->queued_deq_ptr = deq_ptr; - return queue_command(xhci, lower_32_bits(addr) | cycle_state, - upper_32_bits(addr), trb_stream_id, - trb_slot_id | trb_ep_index | type, false); -diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h -index 7f127df..62bc1bc 100644 ---- a/drivers/usb/host/xhci.h -+++ b/drivers/usb/host/xhci.h -@@ -644,6 +644,9 @@ struct xhci_ep_ctx { - #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) - #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) - -+/* deq bitmasks */ -+#define EP_CTX_CYCLE_MASK (1 << 0) -+ - - /** - * struct xhci_input_control_context -@@ -746,6 +749,12 @@ struct xhci_virt_ep { - struct timer_list 
stop_cmd_timer; - int stop_cmds_pending; - struct xhci_hcd *xhci; -+ /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue -+ * command. We'll need to update the ring's dequeue segment and dequeue -+ * pointer after the command completes. -+ */ -+ struct xhci_segment *queued_deq_seg; -+ union xhci_trb *queued_deq_ptr; - /* - * Sometimes the xHC can not process isochronous endpoint ring quickly - * enough, and it will miss some isoc tds on the ring and generate -diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c -index 7b8815d..14ac87e 100644 ---- a/drivers/usb/serial/ch341.c -+++ b/drivers/usb/serial/ch341.c -@@ -75,6 +75,7 @@ static int debug; - static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x4348, 0x5523) }, - { USB_DEVICE(0x1a86, 0x7523) }, -+ { USB_DEVICE(0x1a86, 0x5523) }, - { }, - }; - MODULE_DEVICE_TABLE(usb, id_table); -diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c -index bd5bd85..b382d9a 100644 ---- a/drivers/usb/serial/kobil_sct.c -+++ b/drivers/usb/serial/kobil_sct.c -@@ -372,7 +372,7 @@ static void kobil_read_int_callback(struct urb *urb) - } - - tty = tty_port_tty_get(&port->port); -- if (urb->actual_length) { -+ if (tty && urb->actual_length) { - - /* BEGIN DEBUG */ - /* -diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c -index 5f46838..75c7f45 100644 ---- a/drivers/usb/serial/option.c -+++ b/drivers/usb/serial/option.c -@@ -652,7 +652,8 @@ static const struct usb_device_id option_ids[] = { - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, -+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, -+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist 
}, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) }, -diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c -index 546a521..2ff90a9 100644 ---- a/drivers/usb/serial/usb-serial.c -+++ b/drivers/usb/serial/usb-serial.c -@@ -911,9 +911,8 @@ int usb_serial_probe(struct usb_interface *interface, - dev_err(&interface->dev, "No free urbs available\n"); - goto probe_error; - } -- buffer_size = serial->type->bulk_in_size; -- if (!buffer_size) -- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); -+ buffer_size = max_t(int, serial->type->bulk_in_size, -+ le16_to_cpu(endpoint->wMaxPacketSize)); - port->bulk_in_size = buffer_size; - port->bulk_in_endpointAddress = endpoint->bEndpointAddress; - port->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL); -diff --git a/fs/dcache.c b/fs/dcache.c -index 611ffe9..a39fe47 100644 ---- a/fs/dcache.c -+++ b/fs/dcache.c -@@ -296,8 +296,12 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) - __releases(parent->d_lock) - __releases(dentry->d_inode->i_lock) - { -- dentry->d_parent = NULL; - list_del(&dentry->d_u.d_child); -+ /* -+ * Inform try_to_ascend() that we are no longer attached to the -+ * dentry tree -+ */ -+ dentry->d_flags |= DCACHE_DISCONNECTED; - if (parent) - spin_unlock(&parent->d_lock); - dentry_iput(dentry); -@@ -1012,6 +1016,35 @@ void shrink_dcache_for_umount(struct super_block *sb) - } - - /* -+ * This tries to ascend one level of parenthood, but -+ * we can race with renaming, so we need to re-check -+ * the parenthood after dropping the lock and check -+ * that the sequence number still matches. 
-+ */ -+static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq) -+{ -+ struct dentry *new = old->d_parent; -+ -+ rcu_read_lock(); -+ spin_unlock(&old->d_lock); -+ spin_lock(&new->d_lock); -+ -+ /* -+ * might go back up the wrong parent if we have had a rename -+ * or deletion -+ */ -+ if (new != old->d_parent || -+ (old->d_flags & DCACHE_DISCONNECTED) || -+ (!locked && read_seqretry(&rename_lock, seq))) { -+ spin_unlock(&new->d_lock); -+ new = NULL; -+ } -+ rcu_read_unlock(); -+ return new; -+} -+ -+ -+/* - * Search for at least 1 mount point in the dentry's subdirs. - * We descend to the next level whenever the d_subdirs - * list is non-empty and continue searching. -@@ -1066,24 +1099,10 @@ resume: - * All done at this level ... ascend and resume the search. - */ - if (this_parent != parent) { -- struct dentry *tmp; -- struct dentry *child; -- -- tmp = this_parent->d_parent; -- rcu_read_lock(); -- spin_unlock(&this_parent->d_lock); -- child = this_parent; -- this_parent = tmp; -- spin_lock(&this_parent->d_lock); -- /* might go back up the wrong parent if we have had a rename -- * or deletion */ -- if (this_parent != child->d_parent || -- (!locked && read_seqretry(&rename_lock, seq))) { -- spin_unlock(&this_parent->d_lock); -- rcu_read_unlock(); -+ struct dentry *child = this_parent; -+ this_parent = try_to_ascend(this_parent, locked, seq); -+ if (!this_parent) - goto rename_retry; -- } -- rcu_read_unlock(); - next = child->d_u.d_child.next; - goto resume; - } -@@ -1181,24 +1200,10 @@ resume: - * All done at this level ... ascend and resume the search. 
- */ - if (this_parent != parent) { -- struct dentry *tmp; -- struct dentry *child; -- -- tmp = this_parent->d_parent; -- rcu_read_lock(); -- spin_unlock(&this_parent->d_lock); -- child = this_parent; -- this_parent = tmp; -- spin_lock(&this_parent->d_lock); -- /* might go back up the wrong parent if we have had a rename -- * or deletion */ -- if (this_parent != child->d_parent || -- (!locked && read_seqretry(&rename_lock, seq))) { -- spin_unlock(&this_parent->d_lock); -- rcu_read_unlock(); -+ struct dentry *child = this_parent; -+ this_parent = try_to_ascend(this_parent, locked, seq); -+ if (!this_parent) - goto rename_retry; -- } -- rcu_read_unlock(); - next = child->d_u.d_child.next; - goto resume; - } -@@ -2942,28 +2947,14 @@ resume: - spin_unlock(&dentry->d_lock); - } - if (this_parent != root) { -- struct dentry *tmp; -- struct dentry *child; -- -- tmp = this_parent->d_parent; -+ struct dentry *child = this_parent; - if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { - this_parent->d_flags |= DCACHE_GENOCIDE; - this_parent->d_count--; - } -- rcu_read_lock(); -- spin_unlock(&this_parent->d_lock); -- child = this_parent; -- this_parent = tmp; -- spin_lock(&this_parent->d_lock); -- /* might go back up the wrong parent if we have had a rename -- * or deletion */ -- if (this_parent != child->d_parent || -- (!locked && read_seqretry(&rename_lock, seq))) { -- spin_unlock(&this_parent->d_lock); -- rcu_read_unlock(); -+ this_parent = try_to_ascend(this_parent, locked, seq); -+ if (!this_parent) - goto rename_retry; -- } -- rcu_read_unlock(); - next = child->d_u.d_child.next; - goto resume; - } -diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c -index b27ba71..75c968e 100644 ---- a/fs/ext3/namei.c -+++ b/fs/ext3/namei.c -@@ -1540,8 +1540,8 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, - goto cleanup; - node2 = (struct dx_node *)(bh2->b_data); - entries2 = node2->entries; -+ memset(&node2->fake, 0, sizeof(struct fake_dirent)); - node2->fake.rec_len 
= ext3_rec_len_to_disk(sb->s_blocksize); -- node2->fake.inode = 0; - BUFFER_TRACE(frame->bh, "get_write_access"); - err = ext3_journal_get_write_access(handle, frame->bh); - if (err) -diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c -index be03a0b..764b86a 100644 ---- a/fs/partitions/osf.c -+++ b/fs/partitions/osf.c -@@ -10,7 +10,7 @@ - #include "check.h" - #include "osf.h" - --#define MAX_OSF_PARTITIONS 8 -+#define MAX_OSF_PARTITIONS 18 - - int osf_partition(struct parsed_partitions *state) - { -diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h -index dcd6a7c..ca29e03 100644 ---- a/include/linux/ftrace.h -+++ b/include/linux/ftrace.h -@@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void); - - extern void ftrace_graph_init_task(struct task_struct *t); - extern void ftrace_graph_exit_task(struct task_struct *t); -+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); - - static inline int task_curr_ret_stack(struct task_struct *t) - { -@@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void) - - static inline void ftrace_graph_init_task(struct task_struct *t) { } - static inline void ftrace_graph_exit_task(struct task_struct *t) { } -+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } - - static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, - trace_func_graph_ent_t entryfunc) -diff --git a/include/linux/hid.h b/include/linux/hid.h -index d91c25e..fc5faf6 100644 ---- a/include/linux/hid.h -+++ b/include/linux/hid.h -@@ -638,7 +638,7 @@ struct hid_driver { - struct hid_input *hidinput, struct hid_field *field, - struct hid_usage *usage, unsigned long **bit, int *max); - void (*feature_mapping)(struct hid_device *hdev, -- struct hid_input *hidinput, struct hid_field *field, -+ struct hid_field *field, - struct hid_usage *usage); - #ifdef CONFIG_PM - int (*suspend)(struct hid_device *hdev, pm_message_t message); -diff --git a/include/linux/usb/ch9.h 
b/include/linux/usb/ch9.h -index ab46194..76d896c 100644 ---- a/include/linux/usb/ch9.h -+++ b/include/linux/usb/ch9.h -@@ -585,6 +585,8 @@ struct usb_ss_ep_comp_descriptor { - #define USB_DT_SS_EP_COMP_SIZE 6 - /* Bits 4:0 of bmAttributes if this is a bulk endpoint */ - #define USB_SS_MAX_STREAMS(p) (1 << (p & 0x1f)) -+/* Bits 1:0 of bmAttributes if this is an isoc endpoint */ -+#define USB_SS_MULT(p) (1 + ((p) & 0x3)) - - /*-------------------------------------------------------------------------*/ - -diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h -index a854fe8..f21f599 100644 ---- a/include/linux/usb/hcd.h -+++ b/include/linux/usb/hcd.h -@@ -99,6 +99,8 @@ struct usb_hcd { - #define HCD_FLAG_POLL_RH 2 /* poll for rh status? */ - #define HCD_FLAG_POLL_PENDING 3 /* status has changed? */ - #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ -+#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ -+#define HCD_FLAG_DEAD 6 /* controller has died? */ - - /* The flags can be tested using these macros; they are likely to - * be slightly faster than test_bit(). -@@ -108,6 +110,8 @@ struct usb_hcd { - #define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH)) - #define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING)) - #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) -+#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) -+#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) - - /* Flags that get set only during HCD registration or removal. */ - unsigned rh_registered:1;/* is root hub registered? 
*/ -diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h -index c904913..45f3b9d 100644 ---- a/include/linux/usb/serial.h -+++ b/include/linux/usb/serial.h -@@ -191,7 +191,8 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data) - * @id_table: pointer to a list of usb_device_id structures that define all - * of the devices this structure can support. - * @num_ports: the number of different ports this device will have. -- * @bulk_in_size: bytes to allocate for bulk-in buffer (0 = end-point size) -+ * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer -+ * (0 = end-point size) - * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size) - * @calc_num_ports: pointer to a function to determine how many ports this - * device has dynamically. It will be called after the probe() -diff --git a/kernel/perf_event.c b/kernel/perf_event.c -index 656222f..b22a2ef 100644 ---- a/kernel/perf_event.c -+++ b/kernel/perf_event.c -@@ -4567,7 +4567,7 @@ static int perf_exclude_event(struct perf_event *event, - struct pt_regs *regs) - { - if (event->hw.state & PERF_HES_STOPPED) -- return 0; -+ return 1; - - if (regs) { - if (event->attr.exclude_user && user_mode(regs)) -@@ -4923,6 +4923,8 @@ static int perf_tp_event_match(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) - { -+ if (event->hw.state & PERF_HES_STOPPED) -+ return 0; - /* - * All tracepoints are from kernel-space. 
- */ -diff --git a/kernel/sched.c b/kernel/sched.c -index 42eab5a..c164920c 100644 ---- a/kernel/sched.c -+++ b/kernel/sched.c -@@ -5572,7 +5572,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) - * The idle tasks have their own, simple scheduling class: - */ - idle->sched_class = &idle_sched_class; -- ftrace_graph_init_task(idle); -+ ftrace_graph_init_idle_task(idle, cpu); - } - - /* -diff --git a/kernel/smp.c b/kernel/smp.c -index 9910744..9545489 100644 ---- a/kernel/smp.c -+++ b/kernel/smp.c -@@ -450,7 +450,7 @@ void smp_call_function_many(const struct cpumask *mask, - { - struct call_function_data *data; - unsigned long flags; -- int cpu, next_cpu, this_cpu = smp_processor_id(); -+ int refs, cpu, next_cpu, this_cpu = smp_processor_id(); - - /* - * Can deadlock when called with interrupts disabled. -@@ -461,7 +461,7 @@ void smp_call_function_many(const struct cpumask *mask, - WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() - && !oops_in_progress && !early_boot_irqs_disabled); - -- /* So, what's a CPU they want? Ignoring this one. */ -+ /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */ - cpu = cpumask_first_and(mask, cpu_online_mask); - if (cpu == this_cpu) - cpu = cpumask_next_and(cpu, mask, cpu_online_mask); -@@ -483,22 +483,49 @@ void smp_call_function_many(const struct cpumask *mask, - - data = &__get_cpu_var(cfd_data); - csd_lock(&data->csd); -+ -+ /* This BUG_ON verifies our reuse assertions and can be removed */ - BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask)); - -+ /* -+ * The global call function queue list add and delete are protected -+ * by a lock, but the list is traversed without any lock, relying -+ * on the rcu list add and delete to allow safe concurrent traversal. -+ * We reuse the call function data without waiting for any grace -+ * period after some other cpu removes it from the global queue. -+ * This means a cpu might find our data block as it is being -+ * filled out. 
-+ * -+ * We hold off the interrupt handler on the other cpu by -+ * ordering our writes to the cpu mask vs our setting of the -+ * refs counter. We assert only the cpu owning the data block -+ * will set a bit in cpumask, and each bit will only be cleared -+ * by the subject cpu. Each cpu must first find its bit is -+ * set and then check that refs is set indicating the element is -+ * ready to be processed, otherwise it must skip the entry. -+ * -+ * On the previous iteration refs was set to 0 by another cpu. -+ * To avoid the use of transitivity, set the counter to 0 here -+ * so the wmb will pair with the rmb in the interrupt handler. -+ */ -+ atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */ -+ - data->csd.func = func; - data->csd.info = info; -- cpumask_and(data->cpumask, mask, cpu_online_mask); -- cpumask_clear_cpu(this_cpu, data->cpumask); - -- /* -- * To ensure the interrupt handler gets an complete view -- * we order the cpumask and refs writes and order the read -- * of them in the interrupt handler. In addition we may -- * only clear our own cpu bit from the mask. -- */ -+ /* Ensure 0 refs is visible before mask. 
Also orders func and info */ - smp_wmb(); - -- atomic_set(&data->refs, cpumask_weight(data->cpumask)); -+ /* We rely on the "and" being processed before the store */ -+ cpumask_and(data->cpumask, mask, cpu_online_mask); -+ cpumask_clear_cpu(this_cpu, data->cpumask); -+ refs = cpumask_weight(data->cpumask); -+ -+ /* Some callers race with other cpus changing the passed mask */ -+ if (unlikely(!refs)) { -+ csd_unlock(&data->csd); -+ return; -+ } - - raw_spin_lock_irqsave(&call_function.lock, flags); - /* -@@ -507,6 +534,12 @@ void smp_call_function_many(const struct cpumask *mask, - * will not miss any other list entries: - */ - list_add_rcu(&data->csd.list, &call_function.queue); -+ /* -+ * We rely on the wmb() in list_add_rcu to complete our writes -+ * to the cpumask before this write to refs, which indicates -+ * data is on the list and is ready to be processed. -+ */ -+ atomic_set(&data->refs, refs); - raw_spin_unlock_irqrestore(&call_function.lock, flags); - - /* -diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c -index f3dadae..888b611 100644 ---- a/kernel/trace/ftrace.c -+++ b/kernel/trace/ftrace.c -@@ -3328,7 +3328,7 @@ static int start_graph_tracing(void) - /* The cpu_boot init_task->ret_stack will never be freed */ - for_each_online_cpu(cpu) { - if (!idle_task(cpu)->ret_stack) -- ftrace_graph_init_task(idle_task(cpu)); -+ ftrace_graph_init_idle_task(idle_task(cpu), cpu); - } - - do { -@@ -3418,6 +3418,49 @@ void unregister_ftrace_graph(void) - mutex_unlock(&ftrace_lock); - } - -+static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); -+ -+static void -+graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) -+{ -+ atomic_set(&t->tracing_graph_pause, 0); -+ atomic_set(&t->trace_overrun, 0); -+ t->ftrace_timestamp = 0; -+ /* make curr_ret_stack visable before we add the ret_stack */ -+ smp_wmb(); -+ t->ret_stack = ret_stack; -+} -+ -+/* -+ * Allocate a return stack for the idle task. 
May be the first -+ * time through, or it may be done by CPU hotplug online. -+ */ -+void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) -+{ -+ t->curr_ret_stack = -1; -+ /* -+ * The idle task has no parent, it either has its own -+ * stack or no stack at all. -+ */ -+ if (t->ret_stack) -+ WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); -+ -+ if (ftrace_graph_active) { -+ struct ftrace_ret_stack *ret_stack; -+ -+ ret_stack = per_cpu(idle_ret_stack, cpu); -+ if (!ret_stack) { -+ ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH -+ * sizeof(struct ftrace_ret_stack), -+ GFP_KERNEL); -+ if (!ret_stack) -+ return; -+ per_cpu(idle_ret_stack, cpu) = ret_stack; -+ } -+ graph_init_task(t, ret_stack); -+ } -+} -+ - /* Allocate a return stack for newly created task */ - void ftrace_graph_init_task(struct task_struct *t) - { -@@ -3433,12 +3476,7 @@ void ftrace_graph_init_task(struct task_struct *t) - GFP_KERNEL); - if (!ret_stack) - return; -- atomic_set(&t->tracing_graph_pause, 0); -- atomic_set(&t->trace_overrun, 0); -- t->ftrace_timestamp = 0; -- /* make curr_ret_stack visable before we add the ret_stack */ -- smp_wmb(); -- t->ret_stack = ret_stack; -+ graph_init_task(t, ret_stack); - } - } - -diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c -index 57d344c..35d046b 100644 ---- a/net/sunrpc/clnt.c -+++ b/net/sunrpc/clnt.c -@@ -436,7 +436,9 @@ void rpc_killall_tasks(struct rpc_clnt *clnt) - if (!(rovr->tk_flags & RPC_TASK_KILLED)) { - rovr->tk_flags |= RPC_TASK_KILLED; - rpc_exit(rovr, -EIO); -- rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr); -+ if (RPC_IS_QUEUED(rovr)) -+ rpc_wake_up_queued_task(rovr->tk_waitqueue, -+ rovr); - } - } - spin_unlock(&clnt->cl_lock); -diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c -index 59e5994..17c3e3a 100644 ---- a/net/sunrpc/sched.c -+++ b/net/sunrpc/sched.c -@@ -637,14 +637,12 @@ static void __rpc_execute(struct rpc_task *task) - save_callback = task->tk_callback; - task->tk_callback = NULL; - save_callback(task); -- } 
-- -- /* -- * Perform the next FSM step. -- * tk_action may be NULL when the task has been killed -- * by someone else. -- */ -- if (!RPC_IS_QUEUED(task)) { -+ } else { -+ /* -+ * Perform the next FSM step. -+ * tk_action may be NULL when the task has been killed -+ * by someone else. -+ */ - if (task->tk_action == NULL) - break; - task->tk_action(task); -diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c -index 9d32f18..cb09f1f 100644 ---- a/security/tomoyo/file.c -+++ b/security/tomoyo/file.c -@@ -927,7 +927,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, - struct path *path, const int flag) - { - const u8 acc_mode = ACC_MODE(flag); -- int error = -ENOMEM; -+ int error = 0; - struct tomoyo_path_info buf; - struct tomoyo_request_info r; - int idx; -@@ -938,9 +938,6 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, - buf.name = NULL; - r.mode = TOMOYO_CONFIG_DISABLED; - idx = tomoyo_read_lock(); -- if (!tomoyo_get_realpath(&buf, path)) -- goto out; -- error = 0; - /* - * If the filename is specified by "deny_rewrite" keyword, - * we need to check "allow_rewrite" permission when the filename is not -diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c -index 12b44b0..a0da775 100644 ---- a/sound/drivers/aloop.c -+++ b/sound/drivers/aloop.c -@@ -482,8 +482,9 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) - cable->streams[SNDRV_PCM_STREAM_CAPTURE]; - unsigned long delta_play = 0, delta_capt = 0; - unsigned int running; -+ unsigned long flags; - -- spin_lock(&cable->lock); -+ spin_lock_irqsave(&cable->lock, flags); - running = cable->running ^ cable->pause; - if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) { - delta_play = jiffies - dpcm_play->last_jiffies; -@@ -495,10 +496,8 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) - dpcm_capt->last_jiffies += delta_capt; - } - -- if (delta_play == 0 && delta_capt == 0) { -- spin_unlock(&cable->lock); -- return 
running; -- } -+ if (delta_play == 0 && delta_capt == 0) -+ goto unlock; - - if (delta_play > delta_capt) { - loopback_bytepos_update(dpcm_play, delta_play - delta_capt, -@@ -510,14 +509,14 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) - delta_capt = delta_play; - } - -- if (delta_play == 0 && delta_capt == 0) { -- spin_unlock(&cable->lock); -- return running; -- } -+ if (delta_play == 0 && delta_capt == 0) -+ goto unlock; -+ - /* note delta_capt == delta_play at this moment */ - loopback_bytepos_update(dpcm_capt, delta_capt, BYTEPOS_UPDATE_COPY); - loopback_bytepos_update(dpcm_play, delta_play, BYTEPOS_UPDATE_POSONLY); -- spin_unlock(&cable->lock); -+ unlock: -+ spin_unlock_irqrestore(&cable->lock, flags); - return running; - } - -diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c -index 22dbd91..448dd01 100644 ---- a/sound/pci/asihpi/hpioctl.c -+++ b/sound/pci/asihpi/hpioctl.c -@@ -155,6 +155,11 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) - goto out; - } - -+ if (hm->h.adapter_index >= HPI_MAX_ADAPTERS) { -+ err = -EINVAL; -+ goto out; -+ } -+ - pa = &adapters[hm->h.adapter_index]; - hr->h.size = 0; - if (hm->h.object == HPI_OBJ_SUBSYSTEM) { -diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c -index 1bff80c..b932154 100644 ---- a/sound/pci/ctxfi/ctatc.c -+++ b/sound/pci/ctxfi/ctatc.c -@@ -869,7 +869,7 @@ spdif_passthru_playback_setup(struct ct_atc *atc, struct ct_atc_pcm *apcm) - mutex_lock(&atc->atc_mutex); - dao->ops->get_spos(dao, &status); - if (((status >> 24) & IEC958_AES3_CON_FS) != iec958_con_fs) { -- status &= ((~IEC958_AES3_CON_FS) << 24); -+ status &= ~(IEC958_AES3_CON_FS << 24); - status |= (iec958_con_fs << 24); - dao->ops->set_spos(dao, status); - dao->ops->commit_write(dao); -diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c -index af56eb9..47d9ea9 100644 ---- a/sound/pci/ctxfi/ctdaio.c -+++ b/sound/pci/ctxfi/ctdaio.c -@@ -176,6 +176,7 @@ 
static int dao_set_left_input(struct dao *dao, struct rsc *input) - if (!entry) - return -ENOMEM; - -+ dao->ops->clear_left_input(dao); - /* Program master and conjugate resources */ - input->ops->master(input); - daio->rscl.ops->master(&daio->rscl); -@@ -204,6 +205,7 @@ static int dao_set_right_input(struct dao *dao, struct rsc *input) - if (!entry) - return -ENOMEM; - -+ dao->ops->clear_right_input(dao); - /* Program master and conjugate resources */ - input->ops->master(input); - daio->rscr.ops->master(&daio->rscr); -diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c -index 15c1e72..c3519ff 100644 ---- a/sound/pci/ctxfi/ctmixer.c -+++ b/sound/pci/ctxfi/ctmixer.c -@@ -566,19 +566,6 @@ static int ct_spdif_get_mask(struct snd_kcontrol *kcontrol, - return 0; - } - --static int ct_spdif_default_get(struct snd_kcontrol *kcontrol, -- struct snd_ctl_elem_value *ucontrol) --{ -- unsigned int status = SNDRV_PCM_DEFAULT_CON_SPDIF; -- -- ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; -- ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; -- ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; -- ucontrol->value.iec958.status[3] = (status >> 24) & 0xff; -- -- return 0; --} -- - static int ct_spdif_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) - { -@@ -586,6 +573,10 @@ static int ct_spdif_get(struct snd_kcontrol *kcontrol, - unsigned int status; - - atc->spdif_out_get_status(atc, &status); -+ -+ if (status == 0) -+ status = SNDRV_PCM_DEFAULT_CON_SPDIF; -+ - ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; - ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; - ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; -@@ -629,7 +620,7 @@ static struct snd_kcontrol_new iec958_default_ctl = { - .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), - .count = 1, - .info = ct_spdif_info, -- .get = ct_spdif_default_get, -+ .get = ct_spdif_get, - .put = ct_spdif_put, - .private_value = MIXER_IEC958_DEFAULT 
- }; -diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c -index 4261bb8..acd2099 100644 ---- a/sound/pci/hda/patch_realtek.c -+++ b/sound/pci/hda/patch_realtek.c -@@ -394,6 +394,7 @@ struct alc_spec { - /* other flags */ - unsigned int no_analog :1; /* digital I/O only */ - unsigned int dual_adc_switch:1; /* switch ADCs (for ALC275) */ -+ unsigned int single_input_src:1; - int init_amp; - int codec_variant; /* flag for other variants */ - -@@ -3919,6 +3920,8 @@ static struct hda_amp_list alc880_lg_loopbacks[] = { - * Common callbacks - */ - -+static void alc_init_special_input_src(struct hda_codec *codec); -+ - static int alc_init(struct hda_codec *codec) - { - struct alc_spec *spec = codec->spec; -@@ -3929,6 +3932,7 @@ static int alc_init(struct hda_codec *codec) - - for (i = 0; i < spec->num_init_verbs; i++) - snd_hda_sequence_write(codec, spec->init_verbs[i]); -+ alc_init_special_input_src(codec); - - if (spec->init_hook) - spec->init_hook(codec); -@@ -5151,7 +5155,9 @@ static const char *alc_get_line_out_pfx(const struct auto_pin_cfg *cfg, - - switch (cfg->line_out_type) { - case AUTO_PIN_SPEAKER_OUT: -- return "Speaker"; -+ if (cfg->line_outs == 1) -+ return "Speaker"; -+ break; - case AUTO_PIN_HP_OUT: - return "Headphone"; - default: -@@ -5205,16 +5211,19 @@ static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec, - return err; - } else { - const char *name = pfx; -- if (!name) -+ int index = i; -+ if (!name) { - name = chname[i]; -+ index = 0; -+ } - err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, -- name, i, -+ name, index, - HDA_COMPOSE_AMP_VAL(nid, 3, 0, - HDA_OUTPUT)); - if (err < 0) - return err; - err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, -- name, i, -+ name, index, - HDA_COMPOSE_AMP_VAL(nid, 3, 2, - HDA_INPUT)); - if (err < 0) -@@ -5585,6 +5594,7 @@ static void fixup_single_adc(struct hda_codec *codec) - spec->capsrc_nids += i; - spec->adc_nids += i; - spec->num_adc_nids = 1; -+ spec->single_input_src = 1; - 
} - } - -@@ -5596,6 +5606,16 @@ static void fixup_dual_adc_switch(struct hda_codec *codec) - init_capsrc_for_pin(codec, spec->int_mic.pin); - } - -+/* initialize some special cases for input sources */ -+static void alc_init_special_input_src(struct hda_codec *codec) -+{ -+ struct alc_spec *spec = codec->spec; -+ if (spec->dual_adc_switch) -+ fixup_dual_adc_switch(codec); -+ else if (spec->single_input_src) -+ init_capsrc_for_pin(codec, spec->autocfg.inputs[0].pin); -+} -+ - static void set_capture_mixer(struct hda_codec *codec) - { - struct alc_spec *spec = codec->spec; -@@ -5611,7 +5631,7 @@ static void set_capture_mixer(struct hda_codec *codec) - int mux = 0; - int num_adcs = spec->num_adc_nids; - if (spec->dual_adc_switch) -- fixup_dual_adc_switch(codec); -+ num_adcs = 1; - else if (spec->auto_mic) - fixup_automic_adc(codec); - else if (spec->input_mux) { -@@ -5620,8 +5640,6 @@ static void set_capture_mixer(struct hda_codec *codec) - else if (spec->input_mux->num_items == 1) - fixup_single_adc(codec); - } -- if (spec->dual_adc_switch) -- num_adcs = 1; - spec->cap_mixer = caps[mux][num_adcs - 1]; - } - } -@@ -10748,6 +10766,7 @@ static struct alc_config_preset alc882_presets[] = { - */ - enum { - PINFIX_ABIT_AW9D_MAX, -+ PINFIX_LENOVO_Y530, - PINFIX_PB_M5210, - PINFIX_ACER_ASPIRE_7736, - }; -@@ -10762,6 +10781,14 @@ static const struct alc_fixup alc882_fixups[] = { - { } - } - }, -+ [PINFIX_LENOVO_Y530] = { -+ .type = ALC_FIXUP_PINS, -+ .v.pins = (const struct alc_pincfg[]) { -+ { 0x15, 0x99130112 }, /* rear int speakers */ -+ { 0x16, 0x99130111 }, /* subwoofer */ -+ { } -+ } -+ }, - [PINFIX_PB_M5210] = { - .type = ALC_FIXUP_VERBS, - .v.verbs = (const struct hda_verb[]) { -@@ -10777,6 +10804,7 @@ static const struct alc_fixup alc882_fixups[] = { - - static struct snd_pci_quirk alc882_fixup_tbl[] = { - SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", PINFIX_PB_M5210), -+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530), - SND_PCI_QUIRK(0x147b, 
0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), - SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", PINFIX_ACER_ASPIRE_7736), - {} -@@ -10829,23 +10857,28 @@ static void alc882_auto_init_hp_out(struct hda_codec *codec) - hda_nid_t pin, dac; - int i; - -- for (i = 0; i < ARRAY_SIZE(spec->autocfg.hp_pins); i++) { -- pin = spec->autocfg.hp_pins[i]; -- if (!pin) -- break; -- dac = spec->multiout.hp_nid; -- if (!dac) -- dac = spec->multiout.dac_nids[0]; /* to front */ -- alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, dac); -+ if (spec->autocfg.line_out_type != AUTO_PIN_HP_OUT) { -+ for (i = 0; i < ARRAY_SIZE(spec->autocfg.hp_pins); i++) { -+ pin = spec->autocfg.hp_pins[i]; -+ if (!pin) -+ break; -+ dac = spec->multiout.hp_nid; -+ if (!dac) -+ dac = spec->multiout.dac_nids[0]; /* to front */ -+ alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, dac); -+ } - } -- for (i = 0; i < ARRAY_SIZE(spec->autocfg.speaker_pins); i++) { -- pin = spec->autocfg.speaker_pins[i]; -- if (!pin) -- break; -- dac = spec->multiout.extra_out_nid[0]; -- if (!dac) -- dac = spec->multiout.dac_nids[0]; /* to front */ -- alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac); -+ -+ if (spec->autocfg.line_out_type != AUTO_PIN_SPEAKER_OUT) { -+ for (i = 0; i < ARRAY_SIZE(spec->autocfg.speaker_pins); i++) { -+ pin = spec->autocfg.speaker_pins[i]; -+ if (!pin) -+ break; -+ dac = spec->multiout.extra_out_nid[0]; -+ if (!dac) -+ dac = spec->multiout.dac_nids[0]; /* to front */ -+ alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac); -+ } - } - } - -diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c -index bd7b123..052062d 100644 ---- a/sound/pci/hda/patch_sigmatel.c -+++ b/sound/pci/hda/patch_sigmatel.c -@@ -757,7 +757,7 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e - struct sigmatel_spec *spec = codec->spec; - unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); - const struct hda_input_mux *imux = 
spec->input_mux; -- unsigned int idx, prev_idx; -+ unsigned int idx, prev_idx, didx; - - idx = ucontrol->value.enumerated.item[0]; - if (idx >= imux->num_items) -@@ -769,7 +769,8 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e - snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0, - AC_VERB_SET_CONNECT_SEL, - imux->items[idx].index); -- if (prev_idx >= spec->num_analog_muxes) { -+ if (prev_idx >= spec->num_analog_muxes && -+ spec->mux_nids[adc_idx] != spec->dmux_nids[adc_idx]) { - imux = spec->dinput_mux; - /* 0 = analog */ - snd_hda_codec_write_cache(codec, -@@ -779,9 +780,13 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e - } - } else { - imux = spec->dinput_mux; -+ /* first dimux item is hardcoded to select analog imux, -+ * so lets skip it -+ */ -+ didx = idx - spec->num_analog_muxes + 1; - snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0, - AC_VERB_SET_CONNECT_SEL, -- imux->items[idx - 1].index); -+ imux->items[didx].index); - } - spec->cur_mux[adc_idx] = idx; - return 1; -diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c -index 3ceaef6..838a0d5 100644 ---- a/sound/soc/pxa/z2.c -+++ b/sound/soc/pxa/z2.c -@@ -147,7 +147,7 @@ static int z2_wm8750_init(struct snd_soc_pcm_runtime *rtd) - snd_soc_dapm_disable_pin(dapm, "LINPUT3"); - snd_soc_dapm_disable_pin(dapm, "RINPUT3"); - snd_soc_dapm_disable_pin(dapm, "OUT3"); -- snd_soc_dapm_disable_pin(dapm, "MONO"); -+ snd_soc_dapm_disable_pin(dapm, "MONO1"); - - /* Add z2 specific widgets */ - snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets, -diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN -index 97d7656..26d4d3f 100755 ---- a/tools/perf/util/PERF-VERSION-GEN -+++ b/tools/perf/util/PERF-VERSION-GEN -@@ -23,10 +23,10 @@ if test -d ../../.git -o -f ../../.git && - then - VN=$(echo "$VN" | sed -e 's/-/./g'); - else -- eval `grep '^VERSION\s*=' ../../Makefile|tr -d ' '` -- eval `grep 
'^PATCHLEVEL\s*=' ../../Makefile|tr -d ' '` -- eval `grep '^SUBLEVEL\s*=' ../../Makefile|tr -d ' '` -- eval `grep '^EXTRAVERSION\s*=' ../../Makefile|tr -d ' '` -+ eval $(grep '^VERSION[[:space:]]*=' ../../Makefile|tr -d ' ') -+ eval $(grep '^PATCHLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ') -+ eval $(grep '^SUBLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ') -+ eval $(grep '^EXTRAVERSION[[:space:]]*=' ../../Makefile|tr -d ' ') - - VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}" - fi diff --git a/patches.kernel.org/patch-2.6.38.1-2 b/patches.kernel.org/patch-2.6.38.1-2 deleted file mode 100644 index 4f91a4c..0000000 --- a/patches.kernel.org/patch-2.6.38.1-2 +++ /dev/null @@ -1,2543 +0,0 @@ -From: Jiri Slaby -Subject: Linux 2.6.38.2 -Patch-mainline: Linux 2.6.38.2 - -Signed-off-by: Jiri Slaby ---- -diff --git a/Makefile b/Makefile -index 167ef45..6c15525 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,7 +1,7 @@ - VERSION = 2 - PATCHLEVEL = 6 - SUBLEVEL = 38 --EXTRAVERSION = .1 -+EXTRAVERSION = .2 - NAME = Flesh-Eating Bats with Fangs - - # *DOCUMENTATION* -diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c -index f62bb4c..7c3fb07 100644 ---- a/arch/arm/mach-s3c2440/mach-mini2440.c -+++ b/arch/arm/mach-s3c2440/mach-mini2440.c -@@ -506,6 +506,11 @@ static struct i2c_board_info mini2440_i2c_devs[] __initdata = { - }, - }; - -+static struct platform_device uda1340_codec = { -+ .name = "uda134x-codec", -+ .id = -1, -+}; -+ - static struct platform_device *mini2440_devices[] __initdata = { - &s3c_device_ohci, - &s3c_device_wdt, -@@ -521,7 +526,9 @@ static struct platform_device *mini2440_devices[] __initdata = { - &s3c_device_nand, - &s3c_device_sdi, - &s3c_device_iis, -+ &uda1340_codec, - &mini2440_audio, -+ &samsung_asoc_dma, - }; - - static void __init mini2440_map_io(void) -diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c -index 90a15d2..2130ca6 100644 ---- a/arch/sh/kernel/ptrace_32.c -+++ 
b/arch/sh/kernel/ptrace_32.c -@@ -101,6 +101,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr) - - attr = bp->attr; - attr.bp_addr = addr; -+ /* reenable breakpoint */ -+ attr.disabled = false; - err = modify_user_hw_breakpoint(bp, &attr); - if (unlikely(err)) - return err; -@@ -392,6 +394,9 @@ long arch_ptrace(struct task_struct *child, long request, - tmp = 0; - } else { - unsigned long index; -+ ret = init_fpu(child); -+ if (ret) -+ break; - index = addr - offsetof(struct user, fpu); - tmp = ((unsigned long *)child->thread.xstate) - [index >> 2]; -@@ -423,6 +428,9 @@ long arch_ptrace(struct task_struct *child, long request, - else if (addr >= offsetof(struct user, fpu) && - addr < offsetof(struct user, u_fpvalid)) { - unsigned long index; -+ ret = init_fpu(child); -+ if (ret) -+ break; - index = addr - offsetof(struct user, fpu); - set_stopped_child_used_math(child); - ((unsigned long *)child->thread.xstate) -diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c -index 4436eac..c8f9764 100644 ---- a/arch/sh/kernel/ptrace_64.c -+++ b/arch/sh/kernel/ptrace_64.c -@@ -403,6 +403,9 @@ long arch_ptrace(struct task_struct *child, long request, - else if ((addr >= offsetof(struct user, fpu)) && - (addr < offsetof(struct user, u_fpvalid))) { - unsigned long index; -+ ret = init_fpu(child); -+ if (ret) -+ break; - index = addr - offsetof(struct user, fpu); - tmp = get_fpu_long(child, index); - } else if (addr == offsetof(struct user, u_fpvalid)) { -@@ -442,6 +445,9 @@ long arch_ptrace(struct task_struct *child, long request, - else if ((addr >= offsetof(struct user, fpu)) && - (addr < offsetof(struct user, u_fpvalid))) { - unsigned long index; -+ ret = init_fpu(child); -+ if (ret) -+ break; - index = addr - offsetof(struct user, fpu); - ret = put_fpu_long(child, index, data); - } -diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S -index c8b4efa..9ca3b0e 100644 ---- a/arch/x86/kernel/entry_32.S -+++ 
b/arch/x86/kernel/entry_32.S -@@ -1413,7 +1413,7 @@ ENTRY(async_page_fault) - CFI_ADJUST_CFA_OFFSET 4 - jmp error_code - CFI_ENDPROC --END(apf_page_fault) -+END(async_page_fault) - #endif - - /* -diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c -index 2d2673c..5655c22 100644 ---- a/arch/x86/kernel/head64.c -+++ b/arch/x86/kernel/head64.c -@@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data) - /* Make NULL pointers segfault */ - zap_identity_mappings(); - -- /* Cleanup the over mapped high alias */ -- cleanup_highmap(); -- - max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; - - for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { -diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c -index d3cfe26..e543fe9 100644 ---- a/arch/x86/kernel/setup.c -+++ b/arch/x86/kernel/setup.c -@@ -297,6 +297,9 @@ static void __init init_gbpages(void) - static inline void init_gbpages(void) - { - } -+static void __init cleanup_highmap(void) -+{ -+} - #endif - - static void __init reserve_brk(void) -@@ -922,6 +925,8 @@ void __init setup_arch(char **cmdline_p) - */ - reserve_brk(); - -+ cleanup_highmap(); -+ - memblock.current_limit = get_max_mapped(); - memblock_x86_fill(); - -diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c -index 947f42a..f13ff3a 100644 ---- a/arch/x86/mm/init.c -+++ b/arch/x86/mm/init.c -@@ -279,25 +279,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, - load_cr3(swapper_pg_dir); - #endif - --#ifdef CONFIG_X86_64 -- if (!after_bootmem && !start) { -- pud_t *pud; -- pmd_t *pmd; -- -- mmu_cr4_features = read_cr4(); -- -- /* -- * _brk_end cannot change anymore, but it and _end may be -- * located on different 2M pages. cleanup_highmap(), however, -- * can only consider _end when it runs, so destroy any -- * mappings beyond _brk_end here. 
-- */ -- pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); -- pmd = pmd_offset(pud, _brk_end - 1); -- while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) -- pmd_clear(pmd); -- } --#endif - __flush_tlb_all(); - - if (!after_bootmem && e820_table_end > e820_table_start) -diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c -index c14a542..68f9921 100644 ---- a/arch/x86/mm/init_64.c -+++ b/arch/x86/mm/init_64.c -@@ -51,6 +51,7 @@ - #include - #include - #include -+#include - - static int __init parse_direct_gbpages_off(char *arg) - { -@@ -293,18 +294,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) - * to the compile time generated pmds. This results in invalid pmds up - * to the point where we hit the physaddr 0 mapping. - * -- * We limit the mappings to the region from _text to _end. _end is -- * rounded up to the 2MB boundary. This catches the invalid pmds as -+ * We limit the mappings to the region from _text to _brk_end. _brk_end -+ * is rounded up to the 2MB boundary. 
This catches the invalid pmds as - * well, as they are located before _text: - */ - void __init cleanup_highmap(void) - { - unsigned long vaddr = __START_KERNEL_map; -- unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1; -+ unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); -+ unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; - pmd_t *pmd = level2_kernel_pgt; -- pmd_t *last_pmd = pmd + PTRS_PER_PMD; - -- for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) { -+ for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { - if (pmd_none(*pmd)) - continue; - if (vaddr < (unsigned long) _text || vaddr > end) -diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c -index f608942..6020562 100644 ---- a/arch/x86/xen/mmu.c -+++ b/arch/x86/xen/mmu.c -@@ -1651,9 +1651,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) - for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { - pte_t pte; - -- if (pfn > max_pfn_mapped) -- max_pfn_mapped = pfn; -- - if (!pte_none(pte_page[pteidx])) - continue; - -@@ -1711,6 +1708,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, - pud_t *l3; - pmd_t *l2; - -+ /* max_pfn_mapped is the last pfn mapped in the initial memory -+ * mappings. Considering that on Xen after the kernel mappings we -+ * have the mappings of some pages that don't exist in pfn space, we -+ * set max_pfn_mapped to the last real pfn mapped. 
*/ -+ max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); -+ - /* Zap identity mapping */ - init_level4_pgt[0] = __pgd(0); - -@@ -1815,9 +1818,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, - initial_kernel_pmd = - extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); - -- max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + -- xen_start_info->nr_pt_frames * PAGE_SIZE + -- 512*1024); -+ max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); - - kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); - memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); -diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c -index 69ad529..ea5ac2d 100644 ---- a/drivers/firmware/dcdbas.c -+++ b/drivers/firmware/dcdbas.c -@@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd) - } - - /* generate SMI */ -+ /* inb to force posted write through and make SMI happen now */ - asm volatile ( -- "outb %b0,%w1" -+ "outb %b0,%w1\n" -+ "inb %w1" - : /* no output args */ - : "a" (smi_cmd->command_code), - "d" (smi_cmd->command_address), -diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c -index 654faa8..6a5371b 100644 ---- a/drivers/gpu/drm/drm_crtc.c -+++ b/drivers/gpu/drm/drm_crtc.c -@@ -1073,6 +1073,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data, - uint32_t __user *encoder_id; - struct drm_mode_group *mode_group; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - - /* -@@ -1244,6 +1247,9 @@ int drm_mode_getcrtc(struct drm_device *dev, - struct drm_mode_object *obj; - int ret = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - - obj = drm_mode_object_find(dev, crtc_resp->crtc_id, -@@ -1312,6 +1318,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, - uint64_t __user *prop_values; - uint32_t __user *encoder_ptr; - -+ if 
(!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); - - DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); -@@ -1431,6 +1440,9 @@ int drm_mode_getencoder(struct drm_device *dev, void *data, - struct drm_encoder *encoder; - int ret = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - obj = drm_mode_object_find(dev, enc_resp->encoder_id, - DRM_MODE_OBJECT_ENCODER); -@@ -1486,6 +1498,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, - int ret = 0; - int i; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - obj = drm_mode_object_find(dev, crtc_req->crtc_id, - DRM_MODE_OBJECT_CRTC); -@@ -1603,6 +1618,9 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, - struct drm_crtc *crtc; - int ret = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - if (!req->flags) { - DRM_ERROR("no operation set\n"); - return -EINVAL; -@@ -1667,6 +1685,9 @@ int drm_mode_addfb(struct drm_device *dev, - struct drm_framebuffer *fb; - int ret = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - if ((config->min_width > r->width) || (r->width > config->max_width)) { - DRM_ERROR("mode new framebuffer width not within limits\n"); - return -EINVAL; -@@ -1724,6 +1745,9 @@ int drm_mode_rmfb(struct drm_device *dev, - int ret = 0; - int found = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); - /* TODO check that we realy get a framebuffer back. 
*/ -@@ -1780,6 +1804,9 @@ int drm_mode_getfb(struct drm_device *dev, - struct drm_framebuffer *fb; - int ret = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); - if (!obj) { -@@ -1813,6 +1840,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, - int num_clips; - int ret = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); - if (!obj) { -@@ -1996,6 +2026,9 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev, - struct drm_mode_modeinfo *umode = &mode_cmd->mode; - int ret = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - - obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); -@@ -2042,6 +2075,9 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev, - struct drm_mode_modeinfo *umode = &mode_cmd->mode; - int ret = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - - obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); -@@ -2211,6 +2247,9 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, - uint64_t __user *values_ptr; - uint32_t __user *blob_length_ptr; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); - if (!obj) { -@@ -2333,6 +2372,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, - int ret = 0; - void *blob_ptr; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); - if (!obj) { -@@ -2393,6 +2435,9 @@ 
int drm_mode_connector_property_set_ioctl(struct drm_device *dev, - int ret = -EINVAL; - int i; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - - obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); -@@ -2509,6 +2554,9 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, - int size; - int ret = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); - if (!obj) { -@@ -2560,6 +2608,9 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev, - int size; - int ret = 0; - -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ return -EINVAL; -+ - mutex_lock(&dev->mode_config.mutex); - obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); - if (!obj) { -diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c -index ea1c4b0..c3c78ee 100644 ---- a/drivers/gpu/drm/drm_gem.c -+++ b/drivers/gpu/drm/drm_gem.c -@@ -498,11 +498,12 @@ EXPORT_SYMBOL(drm_gem_vm_open); - void drm_gem_vm_close(struct vm_area_struct *vma) - { - struct drm_gem_object *obj = vma->vm_private_data; -+ struct drm_device *dev = obj->dev; - -- mutex_lock(&obj->dev->struct_mutex); -+ mutex_lock(&dev->struct_mutex); - drm_vm_close_locked(vma); - drm_gem_object_unreference(obj); -- mutex_unlock(&obj->dev->struct_mutex); -+ mutex_unlock(&dev->struct_mutex); - } - EXPORT_SYMBOL(drm_gem_vm_close); - -diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c -index 36e66cc..729c95a 100644 ---- a/drivers/gpu/drm/i915/i915_gem.c -+++ b/drivers/gpu/drm/i915/i915_gem.c -@@ -1749,8 +1749,10 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) - return; - - spin_lock(&file_priv->mm.lock); -- list_del(&request->client_list); -- request->file_priv = NULL; -+ if (request->file_priv) { -+ 
list_del(&request->client_list); -+ request->file_priv = NULL; -+ } - spin_unlock(&file_priv->mm.lock); - } - -diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c -index 50ab161..ded73a6 100644 ---- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c -+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c -@@ -388,6 +388,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, - uint32_t __iomem *reloc_entry; - void __iomem *reloc_page; - -+ /* We can't wait for rendering with pagefaults disabled */ -+ if (obj->active && in_atomic()) -+ return -EFAULT; -+ - ret = i915_gem_object_set_to_gtt_domain(obj, 1); - if (ret) - return ret; -@@ -461,15 +465,24 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, - struct list_head *objects) - { - struct drm_i915_gem_object *obj; -- int ret; -- -+ int ret = 0; -+ -+ /* This is the fast path and we cannot handle a pagefault whilst -+ * holding the struct mutex lest the user pass in the relocations -+ * contained within a mmaped bo. For in such a case we, the page -+ * fault handler would call i915_gem_fault() and we would try to -+ * acquire the struct mutex again. Obviously this is bad and so -+ * lockdep complains vehemently. 
-+ */ -+ pagefault_disable(); - list_for_each_entry(obj, objects, exec_list) { - ret = i915_gem_execbuffer_relocate_object(obj, eb); - if (ret) -- return ret; -+ break; - } -+ pagefault_enable(); - -- return 0; -+ return ret; - } - - static int -diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c -index 4a5a73b..e967cc8 100644 ---- a/drivers/gpu/drm/radeon/atombios_crtc.c -+++ b/drivers/gpu/drm/radeon/atombios_crtc.c -@@ -957,7 +957,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode - /* adjust pixel clock as needed */ - adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); - -- if (ASIC_IS_AVIVO(rdev)) -+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) -+ /* TV seems to prefer the legacy algo on some boards */ -+ radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, -+ &ref_div, &post_div); -+ else if (ASIC_IS_AVIVO(rdev)) - radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, - &ref_div, &post_div); - else -diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c -index cf7c8d5..cf602e2 100644 ---- a/drivers/gpu/drm/radeon/radeon_combios.c -+++ b/drivers/gpu/drm/radeon/radeon_combios.c -@@ -448,7 +448,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, - - bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) - { -- int edid_info; -+ int edid_info, size; - struct edid *edid; - unsigned char *raw; - edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); -@@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) - return false; - - raw = rdev->bios + edid_info; -- edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL); -+ size = EDID_LENGTH * (raw[0x7e] + 1); -+ edid = kmalloc(size, GFP_KERNEL); - if (edid == NULL) - return false; - -- memcpy((unsigned char *)edid, raw, 
EDID_LENGTH * (raw[0x7e] + 1)); -+ memcpy((unsigned char *)edid, raw, size); - - if (!drm_edid_is_valid(edid)) { - kfree(edid); -@@ -468,6 +469,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) - } - - rdev->mode_info.bios_hardcoded_edid = edid; -+ rdev->mode_info.bios_hardcoded_edid_size = size; - return true; - } - -@@ -475,8 +477,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) - struct edid * - radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) - { -- if (rdev->mode_info.bios_hardcoded_edid) -- return rdev->mode_info.bios_hardcoded_edid; -+ struct edid *edid; -+ -+ if (rdev->mode_info.bios_hardcoded_edid) { -+ edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); -+ if (edid) { -+ memcpy((unsigned char *)edid, -+ (unsigned char *)rdev->mode_info.bios_hardcoded_edid, -+ rdev->mode_info.bios_hardcoded_edid_size); -+ return edid; -+ } -+ } - return NULL; - } - -diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c -index 22b7e3d..d83338b 100644 ---- a/drivers/gpu/drm/radeon/radeon_connectors.c -+++ b/drivers/gpu/drm/radeon/radeon_connectors.c -@@ -629,6 +629,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector, - static enum drm_connector_status - radeon_vga_detect(struct drm_connector *connector, bool force) - { -+ struct drm_device *dev = connector->dev; -+ struct radeon_device *rdev = dev->dev_private; - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct drm_encoder *encoder; - struct drm_encoder_helper_funcs *encoder_funcs; -@@ -679,6 +681,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force) - - if (ret == connector_status_connected) - ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); -+ -+ /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the -+ * vbios to deal with KVMs. 
If we have one and are not able to detect a monitor -+ * by other means, assume the CRT is connected and use that EDID. -+ */ -+ if ((!rdev->is_atom_bios) && -+ (ret == connector_status_disconnected) && -+ rdev->mode_info.bios_hardcoded_edid_size) { -+ ret = connector_status_connected; -+ } -+ - radeon_connector_update_scratch_regs(connector, ret); - return ret; - } -@@ -790,6 +803,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) - static enum drm_connector_status - radeon_dvi_detect(struct drm_connector *connector, bool force) - { -+ struct drm_device *dev = connector->dev; -+ struct radeon_device *rdev = dev->dev_private; - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct drm_encoder *encoder = NULL; - struct drm_encoder_helper_funcs *encoder_funcs; -@@ -829,8 +844,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) - * you don't really know what's connected to which port as both are digital. - */ - if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { -- struct drm_device *dev = connector->dev; -- struct radeon_device *rdev = dev->dev_private; - struct drm_connector *list_connector; - struct radeon_connector *list_radeon_connector; - list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { -@@ -895,6 +908,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) - ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); - } - -+ /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the -+ * vbios to deal with KVMs. If we have one and are not able to detect a monitor -+ * by other means, assume the DFP is connected and use that EDID. In most -+ * cases the DVI port is actually a virtual KVM port connected to the service -+ * processor. 
-+ */ -+ if ((!rdev->is_atom_bios) && -+ (ret == connector_status_disconnected) && -+ rdev->mode_info.bios_hardcoded_edid_size) { -+ radeon_connector->use_digital = true; -+ ret = connector_status_connected; -+ } -+ - out: - /* updated in get modes as well since we need to know if it's analog or digital */ - radeon_connector_update_scratch_regs(connector, ret); -diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h -index a670caa..8c134db 100644 ---- a/drivers/gpu/drm/radeon/radeon_mode.h -+++ b/drivers/gpu/drm/radeon/radeon_mode.h -@@ -239,6 +239,7 @@ struct radeon_mode_info { - struct drm_property *underscan_vborder_property; - /* hardcoded DFP edid from BIOS */ - struct edid *bios_hardcoded_edid; -+ int bios_hardcoded_edid_size; - - /* pointer to fbdev info structure */ - struct radeon_fbdev *rfbdev; -diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c -index 7f85a86..53e6273 100644 ---- a/drivers/input/xen-kbdfront.c -+++ b/drivers/input/xen-kbdfront.c -@@ -110,7 +110,7 @@ static irqreturn_t input_handler(int rq, void *dev_id) - static int __devinit xenkbd_probe(struct xenbus_device *dev, - const struct xenbus_device_id *id) - { -- int ret, i; -+ int ret, i, abs; - struct xenkbd_info *info; - struct input_dev *kbd, *ptr; - -@@ -128,6 +128,11 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, - if (!info->page) - goto error_nomem; - -+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0) -+ abs = 0; -+ if (abs) -+ xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1"); -+ - /* keyboard */ - kbd = input_allocate_device(); - if (!kbd) -@@ -137,11 +142,12 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, - kbd->id.bustype = BUS_PCI; - kbd->id.vendor = 0x5853; - kbd->id.product = 0xffff; -- kbd->evbit[0] = BIT(EV_KEY); -+ -+ __set_bit(EV_KEY, kbd->evbit); - for (i = KEY_ESC; i < KEY_UNKNOWN; i++) -- set_bit(i, kbd->keybit); -+ __set_bit(i, 
kbd->keybit); - for (i = KEY_OK; i < KEY_MAX; i++) -- set_bit(i, kbd->keybit); -+ __set_bit(i, kbd->keybit); - - ret = input_register_device(kbd); - if (ret) { -@@ -160,12 +166,20 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, - ptr->id.bustype = BUS_PCI; - ptr->id.vendor = 0x5853; - ptr->id.product = 0xfffe; -- ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS); -+ -+ if (abs) { -+ __set_bit(EV_ABS, ptr->evbit); -+ input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); -+ input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); -+ } else { -+ input_set_capability(ptr, EV_REL, REL_X); -+ input_set_capability(ptr, EV_REL, REL_Y); -+ } -+ input_set_capability(ptr, EV_REL, REL_WHEEL); -+ -+ __set_bit(EV_KEY, ptr->evbit); - for (i = BTN_LEFT; i <= BTN_TASK; i++) -- set_bit(i, ptr->keybit); -- ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL); -- input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); -- input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); -+ __set_bit(i, ptr->keybit); - - ret = input_register_device(ptr); - if (ret) { -@@ -272,7 +286,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, - enum xenbus_state backend_state) - { - struct xenkbd_info *info = dev_get_drvdata(&dev->dev); -- int ret, val; -+ int val; - - switch (backend_state) { - case XenbusStateInitialising: -@@ -285,16 +299,6 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, - - case XenbusStateInitWait: - InitWait: -- ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend, -- "feature-abs-pointer", "%d", &val); -- if (ret < 0) -- val = 0; -- if (val) { -- ret = xenbus_printf(XBT_NIL, info->xbdev->nodename, -- "request-abs-pointer", "1"); -- if (ret) -- pr_warning("can't request abs-pointer\n"); -- } - xenbus_switch_state(dev, XenbusStateConnected); - break; - -diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c -index a1e9dfb..6459b8c 100644 ---- a/drivers/media/video/uvc/uvc_driver.c -+++ 
b/drivers/media/video/uvc/uvc_driver.c -@@ -1264,6 +1264,14 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain, - - break; - -+ case UVC_OTT_VENDOR_SPECIFIC: -+ case UVC_OTT_DISPLAY: -+ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: -+ if (uvc_trace_param & UVC_TRACE_PROBE) -+ printk(" OT %d", entity->id); -+ -+ break; -+ - case UVC_TT_STREAMING: - if (UVC_ENTITY_IS_ITERM(entity)) { - if (uvc_trace_param & UVC_TRACE_PROBE) -diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c -index 5673d67..545c029 100644 ---- a/drivers/media/video/uvc/uvc_video.c -+++ b/drivers/media/video/uvc/uvc_video.c -@@ -89,15 +89,19 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit, - static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, - struct uvc_streaming_control *ctrl) - { -- struct uvc_format *format; -+ struct uvc_format *format = NULL; - struct uvc_frame *frame = NULL; - unsigned int i; - -- if (ctrl->bFormatIndex <= 0 || -- ctrl->bFormatIndex > stream->nformats) -- return; -+ for (i = 0; i < stream->nformats; ++i) { -+ if (stream->format[i].index == ctrl->bFormatIndex) { -+ format = &stream->format[i]; -+ break; -+ } -+ } - -- format = &stream->format[ctrl->bFormatIndex - 1]; -+ if (format == NULL) -+ return; - - for (i = 0; i < format->nframes; ++i) { - if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) { -diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c -index cb23aa2..e610cfe 100644 ---- a/drivers/pci/hotplug/acpiphp_glue.c -+++ b/drivers/pci/hotplug/acpiphp_glue.c -@@ -212,6 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) - - pdev = pci_get_slot(pbus, PCI_DEVFN(device, function)); - if (pdev) { -+ pdev->current_state = PCI_D0; - slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); - pci_dev_put(pdev); - } -diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c -index 4ab49d4..30bb8d0 100644 ---- a/drivers/usb/class/cdc-acm.c -+++ 
b/drivers/usb/class/cdc-acm.c -@@ -297,6 +297,8 @@ static void acm_ctrl_irq(struct urb *urb) - if (!ACM_READY(acm)) - goto exit; - -+ usb_mark_last_busy(acm->dev); -+ - data = (unsigned char *)(dr + 1); - switch (dr->bNotificationType) { - case USB_CDC_NOTIFY_NETWORK_CONNECTION: -@@ -336,7 +338,6 @@ static void acm_ctrl_irq(struct urb *urb) - break; - } - exit: -- usb_mark_last_busy(acm->dev); - retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval) - dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with " -@@ -533,6 +534,8 @@ static void acm_softint(struct work_struct *work) - if (!ACM_READY(acm)) - return; - tty = tty_port_tty_get(&acm->port); -+ if (!tty) -+ return; - tty_wakeup(tty); - tty_kref_put(tty); - } -@@ -646,8 +649,10 @@ static void acm_port_down(struct acm *acm) - usb_kill_urb(acm->ctrlurb); - for (i = 0; i < ACM_NW; i++) - usb_kill_urb(acm->wb[i].urb); -+ tasklet_disable(&acm->urb_task); - for (i = 0; i < nr; i++) - usb_kill_urb(acm->ru[i].urb); -+ tasklet_enable(&acm->urb_task); - acm->control->needs_remote_wakeup = 0; - usb_autopm_put_interface(acm->control); - } -diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c -index 47085e5..a97c018 100644 ---- a/drivers/usb/class/cdc-wdm.c -+++ b/drivers/usb/class/cdc-wdm.c -@@ -281,7 +281,7 @@ static void cleanup(struct wdm_device *desc) - desc->sbuf, - desc->validity->transfer_dma); - usb_free_coherent(interface_to_usbdev(desc->intf), -- desc->wMaxCommand, -+ desc->bMaxPacketSize0, - desc->inbuf, - desc->response->transfer_dma); - kfree(desc->orq); -diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c -index a7131ad..37518df 100644 ---- a/drivers/usb/core/devio.c -+++ b/drivers/usb/core/devio.c -@@ -802,7 +802,7 @@ static int proc_control(struct dev_state *ps, void __user *arg) - tbuf, ctrl.wLength, tmo); - usb_lock_device(dev); - snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, -- tbuf, i); -+ tbuf, max(i, 0)); - if ((i > 0) && ctrl.wLength) { - if 
(copy_to_user(ctrl.data, tbuf, i)) { - free_page((unsigned long)tbuf); -diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c -index 233c288..5add8b5 100644 ---- a/drivers/usb/host/ehci-q.c -+++ b/drivers/usb/host/ehci-q.c -@@ -315,7 +315,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) - int stopped; - unsigned count = 0; - u8 state; -- const __le32 halt = HALT_BIT(ehci); - struct ehci_qh_hw *hw = qh->hw; - - if (unlikely (list_empty (&qh->qtd_list))) -@@ -422,7 +421,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) - && !(qtd->hw_alt_next - & EHCI_LIST_END(ehci))) { - stopped = 1; -- goto halt; - } - - /* stop scanning when we reach qtds the hc is using */ -@@ -456,16 +454,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) - */ - ehci_clear_tt_buffer(ehci, qh, urb, token); - } -- -- /* force halt for unlinked or blocked qh, so we'll -- * patch the qh later and so that completions can't -- * activate it while we "know" it's stopped. 
-- */ -- if ((halt & hw->hw_token) == 0) { --halt: -- hw->hw_token |= halt; -- wmb (); -- } - } - - /* unless we already know the urb's status, collect qtd status -diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c -index f7a2057..8b1d94a 100644 ---- a/drivers/usb/misc/uss720.c -+++ b/drivers/usb/misc/uss720.c -@@ -177,12 +177,11 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p - spin_lock_irqsave(&priv->asynclock, flags); - list_add_tail(&rq->asynclist, &priv->asynclist); - spin_unlock_irqrestore(&priv->asynclock, flags); -+ kref_get(&rq->ref_count); - ret = usb_submit_urb(rq->urb, mem_flags); -- if (!ret) { -- kref_get(&rq->ref_count); -+ if (!ret) - return rq; -- } -- kref_put(&rq->ref_count, destroy_async); -+ destroy_async(&rq->ref_count); - err("submit_async_request submit_urb failed with %d", ret); - return NULL; - } -diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c -index 9d49d1c..52312e8 100644 ---- a/drivers/usb/musb/blackfin.c -+++ b/drivers/usb/musb/blackfin.c -@@ -322,7 +322,7 @@ static void bfin_musb_try_idle(struct musb *musb, unsigned long timeout) - mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); - } - --static int bfin_musb_get_vbus_status(struct musb *musb) -+static int bfin_musb_vbus_status(struct musb *musb) - { - return 0; - } -@@ -540,7 +540,7 @@ static struct dev_pm_ops bfin_pm_ops = { - .resume = bfin_resume, - }; - --#define DEV_PM_OPS &bfin_pm_op, -+#define DEV_PM_OPS &bfin_pm_ops - #else - #define DEV_PM_OPS NULL - #endif -@@ -548,7 +548,7 @@ static struct dev_pm_ops bfin_pm_ops = { - static struct platform_driver bfin_driver = { - .remove = __exit_p(bfin_remove), - .driver = { -- .name = "musb-bfin", -+ .name = "musb-blackfin", - .pm = DEV_PM_OPS, - }, - }; -diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c -index 0056a41..15e8e1a 100644 ---- a/drivers/video/console/tileblit.c -+++ b/drivers/video/console/tileblit.c -@@ -83,7 
+83,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) - { - struct fb_tilecursor cursor; -- int use_sw = (vc->vc_cursor_type & 0x01); -+ int use_sw = (vc->vc_cursor_type & 0x10); - - cursor.sx = vc->vc_x; - cursor.sy = vc->vc_y; -diff --git a/fs/aio.c b/fs/aio.c -index 26869cd..88f0ed5 100644 ---- a/fs/aio.c -+++ b/fs/aio.c -@@ -520,7 +520,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) - ctx->reqs_active--; - - if (unlikely(!ctx->reqs_active && ctx->dead)) -- wake_up(&ctx->wait); -+ wake_up_all(&ctx->wait); - } - - static void aio_fput_routine(struct work_struct *data) -@@ -1229,7 +1229,7 @@ static void io_destroy(struct kioctx *ioctx) - * by other CPUs at this point. Right now, we rely on the - * locking done by the above calls to ensure this consistency. - */ -- wake_up(&ioctx->wait); -+ wake_up_all(&ioctx->wait); - put_ioctx(ioctx); /* once for the lookup */ - } - -diff --git a/fs/dcache.c b/fs/dcache.c -index a39fe47..1baddc1 100644 ---- a/fs/dcache.c -+++ b/fs/dcache.c -@@ -1612,10 +1612,13 @@ struct dentry *d_obtain_alias(struct inode *inode) - __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first); - spin_unlock(&tmp->d_lock); - spin_unlock(&inode->i_lock); -+ security_d_instantiate(tmp, inode); - - return tmp; - - out_iput: -+ if (res && !IS_ERR(res)) -+ security_d_instantiate(res, inode); - iput(inode); - return res; - } -diff --git a/fs/ext3/super.c b/fs/ext3/super.c -index 85c8cc8..0d62f29 100644 ---- a/fs/ext3/super.c -+++ b/fs/ext3/super.c -@@ -1464,6 +1464,13 @@ static void ext3_orphan_cleanup (struct super_block * sb, - return; - } - -+ /* Check if feature set allows readwrite operations */ -+ if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) { -+ ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " -+ "unknown ROCOMPAT features"); -+ return; -+ } -+ - if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { - if 
(es->s_last_orphan) - jbd_debug(1, "Errors on filesystem, " -diff --git a/fs/ext4/super.c b/fs/ext4/super.c -index f6a318f..4381efe 100644 ---- a/fs/ext4/super.c -+++ b/fs/ext4/super.c -@@ -75,6 +75,7 @@ static void ext4_write_super(struct super_block *sb); - static int ext4_freeze(struct super_block *sb); - static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data); -+static int ext4_feature_set_ok(struct super_block *sb, int readonly); - static void ext4_destroy_lazyinit_thread(void); - static void ext4_unregister_li_request(struct super_block *sb); - static void ext4_clear_request_list(void); -@@ -2120,6 +2121,13 @@ static void ext4_orphan_cleanup(struct super_block *sb, - return; - } - -+ /* Check if feature set would not allow a r/w mount */ -+ if (!ext4_feature_set_ok(sb, 0)) { -+ ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " -+ "unknown ROCOMPAT features"); -+ return; -+ } -+ - if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { - if (es->s_last_orphan) - jbd_debug(1, "Errors on filesystem, " -diff --git a/fs/namespace.c b/fs/namespace.c -index d1edf26..445534b 100644 ---- a/fs/namespace.c -+++ b/fs/namespace.c -@@ -2469,9 +2469,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, - error = user_path_dir(new_root, &new); - if (error) - goto out0; -- error = -EINVAL; -- if (!check_mnt(new.mnt)) -- goto out1; - - error = user_path_dir(put_old, &old); - if (error) -@@ -2491,7 +2488,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, - IS_MNT_SHARED(new.mnt->mnt_parent) || - IS_MNT_SHARED(root.mnt->mnt_parent)) - goto out2; -- if (!check_mnt(root.mnt)) -+ if (!check_mnt(root.mnt) || !check_mnt(new.mnt)) - goto out2; - error = -ENOENT; - if (cant_mount(old.dentry)) -@@ -2515,19 +2512,19 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, - goto out2; /* not attached */ - /* make sure we can reach put_old from new_root */ - tmp = old.mnt; -- 
br_write_lock(vfsmount_lock); - if (tmp != new.mnt) { - for (;;) { - if (tmp->mnt_parent == tmp) -- goto out3; /* already mounted on put_old */ -+ goto out2; /* already mounted on put_old */ - if (tmp->mnt_parent == new.mnt) - break; - tmp = tmp->mnt_parent; - } - if (!is_subdir(tmp->mnt_mountpoint, new.dentry)) -- goto out3; -+ goto out2; - } else if (!is_subdir(old.dentry, new.dentry)) -- goto out3; -+ goto out2; -+ br_write_lock(vfsmount_lock); - detach_mnt(new.mnt, &parent_path); - detach_mnt(root.mnt, &root_parent); - /* mount old root on put_old */ -@@ -2550,9 +2547,6 @@ out1: - path_put(&new); - out0: - return error; --out3: -- br_write_unlock(vfsmount_lock); -- goto out2; - } - - static void __init init_mount_tree(void) -diff --git a/fs/nfs/write.c b/fs/nfs/write.c -index 42b92d7..b5fcbf7 100644 ---- a/fs/nfs/write.c -+++ b/fs/nfs/write.c -@@ -1214,13 +1214,17 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) - #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) - static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait) - { -+ int ret; -+ - if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags)) - return 1; -- if (may_wait && !out_of_line_wait_on_bit_lock(&nfsi->flags, -- NFS_INO_COMMIT, nfs_wait_bit_killable, -- TASK_KILLABLE)) -- return 1; -- return 0; -+ if (!may_wait) -+ return 0; -+ ret = out_of_line_wait_on_bit_lock(&nfsi->flags, -+ NFS_INO_COMMIT, -+ nfs_wait_bit_killable, -+ TASK_KILLABLE); -+ return (ret < 0) ? 
ret : 1; - } - - static void nfs_commit_clear_lock(struct nfs_inode *nfsi) -@@ -1396,9 +1400,10 @@ int nfs_commit_inode(struct inode *inode, int how) - { - LIST_HEAD(head); - int may_wait = how & FLUSH_SYNC; -- int res = 0; -+ int res; - -- if (!nfs_commit_set_lock(NFS_I(inode), may_wait)) -+ res = nfs_commit_set_lock(NFS_I(inode), may_wait); -+ if (res <= 0) - goto out_mark_dirty; - spin_lock(&inode->i_lock); - res = nfs_scan_commit(inode, &head, 0, 0); -@@ -1407,12 +1412,14 @@ int nfs_commit_inode(struct inode *inode, int how) - int error = nfs_commit_list(inode, &head, how); - if (error < 0) - return error; -- if (may_wait) -- wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT, -- nfs_wait_bit_killable, -- TASK_KILLABLE); -- else -+ if (!may_wait) - goto out_mark_dirty; -+ error = wait_on_bit(&NFS_I(inode)->flags, -+ NFS_INO_COMMIT, -+ nfs_wait_bit_killable, -+ TASK_KILLABLE); -+ if (error < 0) -+ return error; - } else - nfs_commit_clear_lock(NFS_I(inode)); - return res; -diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c -index db52546..5fcb139 100644 ---- a/fs/nfsd/nfs4proc.c -+++ b/fs/nfsd/nfs4proc.c -@@ -984,8 +984,8 @@ typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *, - void *); - enum nfsd4_op_flags { - ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */ -- ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */ -- ALLOWED_AS_FIRST_OP = 3 << 0, /* ops reqired first in compound */ -+ ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */ -+ ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */ - }; - - struct nfsd4_operation { -diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c -index 7b566ec..f0e448a 100644 ---- a/fs/nfsd/nfs4state.c -+++ b/fs/nfsd/nfs4state.c -@@ -316,64 +316,6 @@ static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE]; - static struct list_head client_lru; - static struct list_head close_lru; - --static void unhash_generic_stateid(struct nfs4_stateid *stp) 
--{ -- list_del(&stp->st_hash); -- list_del(&stp->st_perfile); -- list_del(&stp->st_perstateowner); --} -- --static void free_generic_stateid(struct nfs4_stateid *stp) --{ -- put_nfs4_file(stp->st_file); -- kmem_cache_free(stateid_slab, stp); --} -- --static void release_lock_stateid(struct nfs4_stateid *stp) --{ -- struct file *file; -- -- unhash_generic_stateid(stp); -- file = find_any_file(stp->st_file); -- if (file) -- locks_remove_posix(file, (fl_owner_t)stp->st_stateowner); -- free_generic_stateid(stp); --} -- --static void unhash_lockowner(struct nfs4_stateowner *sop) --{ -- struct nfs4_stateid *stp; -- -- list_del(&sop->so_idhash); -- list_del(&sop->so_strhash); -- list_del(&sop->so_perstateid); -- while (!list_empty(&sop->so_stateids)) { -- stp = list_first_entry(&sop->so_stateids, -- struct nfs4_stateid, st_perstateowner); -- release_lock_stateid(stp); -- } --} -- --static void release_lockowner(struct nfs4_stateowner *sop) --{ -- unhash_lockowner(sop); -- nfs4_put_stateowner(sop); --} -- --static void --release_stateid_lockowners(struct nfs4_stateid *open_stp) --{ -- struct nfs4_stateowner *lock_sop; -- -- while (!list_empty(&open_stp->st_lockowners)) { -- lock_sop = list_entry(open_stp->st_lockowners.next, -- struct nfs4_stateowner, so_perstateid); -- /* list_del(&open_stp->st_lockowners); */ -- BUG_ON(lock_sop->so_is_open_owner); -- release_lockowner(lock_sop); -- } --} -- - /* - * We store the NONE, READ, WRITE, and BOTH bits separately in the - * st_{access,deny}_bmap field of the stateid, in order to track not -@@ -446,13 +388,71 @@ static int nfs4_access_bmap_to_omode(struct nfs4_stateid *stp) - return nfs4_access_to_omode(access); - } - --static void release_open_stateid(struct nfs4_stateid *stp) -+static void unhash_generic_stateid(struct nfs4_stateid *stp) -+{ -+ list_del(&stp->st_hash); -+ list_del(&stp->st_perfile); -+ list_del(&stp->st_perstateowner); -+} -+ -+static void free_generic_stateid(struct nfs4_stateid *stp) - { - int oflag = 
nfs4_access_bmap_to_omode(stp); - -+ nfs4_file_put_access(stp->st_file, oflag); -+ put_nfs4_file(stp->st_file); -+ kmem_cache_free(stateid_slab, stp); -+} -+ -+static void release_lock_stateid(struct nfs4_stateid *stp) -+{ -+ struct file *file; -+ -+ unhash_generic_stateid(stp); -+ file = find_any_file(stp->st_file); -+ if (file) -+ locks_remove_posix(file, (fl_owner_t)stp->st_stateowner); -+ free_generic_stateid(stp); -+} -+ -+static void unhash_lockowner(struct nfs4_stateowner *sop) -+{ -+ struct nfs4_stateid *stp; -+ -+ list_del(&sop->so_idhash); -+ list_del(&sop->so_strhash); -+ list_del(&sop->so_perstateid); -+ while (!list_empty(&sop->so_stateids)) { -+ stp = list_first_entry(&sop->so_stateids, -+ struct nfs4_stateid, st_perstateowner); -+ release_lock_stateid(stp); -+ } -+} -+ -+static void release_lockowner(struct nfs4_stateowner *sop) -+{ -+ unhash_lockowner(sop); -+ nfs4_put_stateowner(sop); -+} -+ -+static void -+release_stateid_lockowners(struct nfs4_stateid *open_stp) -+{ -+ struct nfs4_stateowner *lock_sop; -+ -+ while (!list_empty(&open_stp->st_lockowners)) { -+ lock_sop = list_entry(open_stp->st_lockowners.next, -+ struct nfs4_stateowner, so_perstateid); -+ /* list_del(&open_stp->st_lockowners); */ -+ BUG_ON(lock_sop->so_is_open_owner); -+ release_lockowner(lock_sop); -+ } -+} -+ -+static void release_open_stateid(struct nfs4_stateid *stp) -+{ - unhash_generic_stateid(stp); - release_stateid_lockowners(stp); -- nfs4_file_put_access(stp->st_file, oflag); - free_generic_stateid(stp); - } - -@@ -3735,6 +3735,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc - stp->st_stateid.si_stateownerid = sop->so_id; - stp->st_stateid.si_fileid = fp->fi_id; - stp->st_stateid.si_generation = 0; -+ stp->st_access_bmap = 0; - stp->st_deny_bmap = open_stp->st_deny_bmap; - stp->st_openstp = open_stp; - -@@ -3749,6 +3750,17 @@ check_lock_length(u64 offset, u64 length) - LOFF_OVERFLOW(offset, length))); - } - -+static void 
get_lock_access(struct nfs4_stateid *lock_stp, u32 access) -+{ -+ struct nfs4_file *fp = lock_stp->st_file; -+ int oflag = nfs4_access_to_omode(access); -+ -+ if (test_bit(access, &lock_stp->st_access_bmap)) -+ return; -+ nfs4_file_get_access(fp, oflag); -+ __set_bit(access, &lock_stp->st_access_bmap); -+} -+ - /* - * LOCK operation - */ -@@ -3765,7 +3777,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, - struct file_lock conflock; - __be32 status = 0; - unsigned int strhashval; -- unsigned int cmd; - int err; - - dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", -@@ -3847,22 +3858,18 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, - switch (lock->lk_type) { - case NFS4_READ_LT: - case NFS4_READW_LT: -- if (find_readable_file(lock_stp->st_file)) { -- nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ); -- filp = find_readable_file(lock_stp->st_file); -- } -+ filp = find_readable_file(lock_stp->st_file); -+ if (filp) -+ get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); - file_lock.fl_type = F_RDLCK; -- cmd = F_SETLK; -- break; -+ break; - case NFS4_WRITE_LT: - case NFS4_WRITEW_LT: -- if (find_writeable_file(lock_stp->st_file)) { -- nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE); -- filp = find_writeable_file(lock_stp->st_file); -- } -+ filp = find_writeable_file(lock_stp->st_file); -+ if (filp) -+ get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); - file_lock.fl_type = F_WRLCK; -- cmd = F_SETLK; -- break; -+ break; - default: - status = nfserr_inval; - goto out; -@@ -3886,7 +3893,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, - * Note: locks.c uses the BKL to protect the inode's lock list. - */ - -- err = vfs_lock_file(filp, cmd, &file_lock, &conflock); -+ err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock); - switch (-err) { - case 0: /* success! 
*/ - update_stateid(&lock_stp->st_stateid); -diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c -index 615f0a9..c6766af 100644 ---- a/fs/nfsd/nfs4xdr.c -+++ b/fs/nfsd/nfs4xdr.c -@@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, - - u32 dummy; - char *machine_name; -- int i, j; -+ int i; - int nr_secflavs; - - READ_BUF(16); -@@ -1215,8 +1215,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, - READ_BUF(4); - READ32(dummy); - READ_BUF(dummy * 4); -- for (j = 0; j < dummy; ++j) -- READ32(dummy); - break; - case RPC_AUTH_GSS: - dprintk("RPC_AUTH_GSS callback secflavor " -@@ -1232,7 +1230,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, - READ_BUF(4); - READ32(dummy); - READ_BUF(dummy); -- p += XDR_QUADLEN(dummy); - break; - default: - dprintk("Illegal callback secflavor\n"); -diff --git a/fs/proc/array.c b/fs/proc/array.c -index 7c99c1c..5e4f776 100644 ---- a/fs/proc/array.c -+++ b/fs/proc/array.c -@@ -489,8 +489,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, - vsize, - mm ? get_mm_rss(mm) : 0, - rsslim, -- mm ? mm->start_code : 0, -- mm ? mm->end_code : 0, -+ mm ? (permitted ? mm->start_code : 1) : 0, -+ mm ? (permitted ? mm->end_code : 1) : 0, - (permitted && mm) ? 
mm->start_stack : 0, - esp, - eip, -diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c -index 60b9148..f269ee6 100644 ---- a/fs/proc/task_mmu.c -+++ b/fs/proc/task_mmu.c -@@ -249,8 +249,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) - const char *name = arch_vma_name(vma); - if (!name) { - if (mm) { -- if (vma->vm_start <= mm->start_brk && -- vma->vm_end >= mm->brk) { -+ if (vma->vm_start <= mm->brk && -+ vma->vm_end >= mm->start_brk) { - name = "[heap]"; - } else if (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack) { -diff --git a/fs/super.c b/fs/super.c -index 7e9dd4c..0d89e93 100644 ---- a/fs/super.c -+++ b/fs/super.c -@@ -71,6 +71,7 @@ static struct super_block *alloc_super(struct file_system_type *type) - #else - INIT_LIST_HEAD(&s->s_files); - #endif -+ s->s_bdi = &default_backing_dev_info; - INIT_LIST_HEAD(&s->s_instances); - INIT_HLIST_BL_HEAD(&s->s_anon); - INIT_LIST_HEAD(&s->s_inodes); -@@ -1003,6 +1004,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void - } - BUG_ON(!mnt->mnt_sb); - WARN_ON(!mnt->mnt_sb->s_bdi); -+ WARN_ON(mnt->mnt_sb->s_bdi == &default_backing_dev_info); - mnt->mnt_sb->s_flags |= MS_BORN; - - error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata); -diff --git a/fs/sync.c b/fs/sync.c -index ba76b96..412dc89 100644 ---- a/fs/sync.c -+++ b/fs/sync.c -@@ -33,7 +33,7 @@ static int __sync_filesystem(struct super_block *sb, int wait) - * This should be safe, as we require bdi backing to actually - * write out data in the first place - */ -- if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info) -+ if (sb->s_bdi == &noop_backing_dev_info) - return 0; - - if (sb->s_qcop && sb->s_qcop->quota_sync) -@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(sync_filesystem); - - static void sync_one_sb(struct super_block *sb, void *arg) - { -- if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi) -+ if (!(sb->s_flags & MS_RDONLY)) - __sync_filesystem(sb, *(int *)arg); - } - /* -diff --git 
a/include/linux/compaction.h b/include/linux/compaction.h -index dfa2ed4..cc9f7a4 100644 ---- a/include/linux/compaction.h -+++ b/include/linux/compaction.h -@@ -11,9 +11,6 @@ - /* The full zone was compacted */ - #define COMPACT_COMPLETE 3 - --#define COMPACT_MODE_DIRECT_RECLAIM 0 --#define COMPACT_MODE_KSWAPD 1 -- - #ifdef CONFIG_COMPACTION - extern int sysctl_compact_memory; - extern int sysctl_compaction_handler(struct ctl_table *table, int write, -@@ -28,8 +25,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist, - bool sync); - extern unsigned long compaction_suitable(struct zone *zone, int order); - extern unsigned long compact_zone_order(struct zone *zone, int order, -- gfp_t gfp_mask, bool sync, -- int compact_mode); -+ gfp_t gfp_mask, bool sync); - - /* Do not skip compaction more than 64 times */ - #define COMPACT_MAX_DEFER_SHIFT 6 -@@ -74,8 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order) - } - - static inline unsigned long compact_zone_order(struct zone *zone, int order, -- gfp_t gfp_mask, bool sync, -- int compact_mode) -+ gfp_t gfp_mask, bool sync) - { - return COMPACT_CONTINUE; - } -diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h -index 1908929..a3c1874 100644 ---- a/include/linux/ethtool.h -+++ b/include/linux/ethtool.h -@@ -13,6 +13,9 @@ - #ifndef _LINUX_ETHTOOL_H - #define _LINUX_ETHTOOL_H - -+#ifdef __KERNEL__ -+#include -+#endif - #include - #include - -@@ -449,6 +452,37 @@ struct ethtool_rxnfc { - __u32 rule_locs[0]; - }; - -+#ifdef __KERNEL__ -+#ifdef CONFIG_COMPAT -+ -+struct compat_ethtool_rx_flow_spec { -+ u32 flow_type; -+ union { -+ struct ethtool_tcpip4_spec tcp_ip4_spec; -+ struct ethtool_tcpip4_spec udp_ip4_spec; -+ struct ethtool_tcpip4_spec sctp_ip4_spec; -+ struct ethtool_ah_espip4_spec ah_ip4_spec; -+ struct ethtool_ah_espip4_spec esp_ip4_spec; -+ struct ethtool_usrip4_spec usr_ip4_spec; -+ struct ethhdr ether_spec; -+ u8 hdata[72]; -+ } h_u, m_u; -+ 
compat_u64 ring_cookie; -+ u32 location; -+}; -+ -+struct compat_ethtool_rxnfc { -+ u32 cmd; -+ u32 flow_type; -+ compat_u64 data; -+ struct compat_ethtool_rx_flow_spec fs; -+ u32 rule_cnt; -+ u32 rule_locs[0]; -+}; -+ -+#endif /* CONFIG_COMPAT */ -+#endif /* __KERNEL__ */ -+ - /** - * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection - * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR -diff --git a/include/linux/mm.h b/include/linux/mm.h -index f6385fc..c67adb4 100644 ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -402,16 +402,23 @@ static inline void init_page_count(struct page *page) - /* - * PageBuddy() indicate that the page is free and in the buddy system - * (see mm/page_alloc.c). -+ * -+ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to -+ * -2 so that an underflow of the page_mapcount() won't be mistaken -+ * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very -+ * efficiently by most CPU architectures. 
- */ -+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) -+ - static inline int PageBuddy(struct page *page) - { -- return atomic_read(&page->_mapcount) == -2; -+ return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; - } - - static inline void __SetPageBuddy(struct page *page) - { - VM_BUG_ON(atomic_read(&page->_mapcount) != -1); -- atomic_set(&page->_mapcount, -2); -+ atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); - } - - static inline void __ClearPageBuddy(struct page *page) -diff --git a/kernel/cgroup.c b/kernel/cgroup.c -index b24d702..bcc7336 100644 ---- a/kernel/cgroup.c -+++ b/kernel/cgroup.c -@@ -1813,10 +1813,8 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) - - /* Update the css_set linked lists if we're using them */ - write_lock(&css_set_lock); -- if (!list_empty(&tsk->cg_list)) { -- list_del(&tsk->cg_list); -- list_add(&tsk->cg_list, &newcg->tasks); -- } -+ if (!list_empty(&tsk->cg_list)) -+ list_move(&tsk->cg_list, &newcg->tasks); - write_unlock(&css_set_lock); - - for_each_subsys(root, ss) { -@@ -3655,12 +3653,12 @@ again: - spin_lock(&release_list_lock); - set_bit(CGRP_REMOVED, &cgrp->flags); - if (!list_empty(&cgrp->release_list)) -- list_del(&cgrp->release_list); -+ list_del_init(&cgrp->release_list); - spin_unlock(&release_list_lock); - - cgroup_lock_hierarchy(cgrp->root); - /* delete this cgroup from parent->children */ -- list_del(&cgrp->sibling); -+ list_del_init(&cgrp->sibling); - cgroup_unlock_hierarchy(cgrp->root); - - d = dget(cgrp->dentry); -@@ -3879,7 +3877,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss) - subsys[ss->subsys_id] = NULL; - - /* remove subsystem from rootnode's list of subsystems */ -- list_del(&ss->sibling); -+ list_del_init(&ss->sibling); - - /* - * disentangle the css from all css_sets attached to the dummytop. 
as -@@ -4253,7 +4251,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) - if (!list_empty(&tsk->cg_list)) { - write_lock(&css_set_lock); - if (!list_empty(&tsk->cg_list)) -- list_del(&tsk->cg_list); -+ list_del_init(&tsk->cg_list); - write_unlock(&css_set_lock); - } - -diff --git a/kernel/perf_event.c b/kernel/perf_event.c -index b22a2ef..ad02fea 100644 ---- a/kernel/perf_event.c -+++ b/kernel/perf_event.c -@@ -6115,17 +6115,20 @@ __perf_event_exit_task(struct perf_event *child_event, - struct perf_event_context *child_ctx, - struct task_struct *child) - { -- struct perf_event *parent_event; -+ if (child_event->parent) { -+ raw_spin_lock_irq(&child_ctx->lock); -+ perf_group_detach(child_event); -+ raw_spin_unlock_irq(&child_ctx->lock); -+ } - - perf_event_remove_from_context(child_event); - -- parent_event = child_event->parent; - /* -- * It can happen that parent exits first, and has events -+ * It can happen that the parent exits first, and has events - * that are still around due to the child reference. These -- * events need to be zapped - but otherwise linger. -+ * events need to be zapped. - */ -- if (parent_event) { -+ if (child_event->parent) { - sync_child_event(child_event, child); - free_event(child_event); - } -diff --git a/kernel/signal.c b/kernel/signal.c -index 4e3cff1..3175186 100644 ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -2421,9 +2421,13 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, - return -EFAULT; - - /* Not even root can pretend to send signals from the kernel. -- Nor can they impersonate a kill(), which adds source info. */ -- if (info.si_code >= 0) -+ * Nor can they impersonate a kill()/tgkill(), which adds source info. -+ */ -+ if (info.si_code != SI_QUEUE) { -+ /* We used to allow any < 0 si_code */ -+ WARN_ON_ONCE(info.si_code < 0); - return -EPERM; -+ } - info.si_signo = sig; - - /* POSIX.1b doesn't mention process groups. 
*/ -@@ -2437,9 +2441,13 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) - return -EINVAL; - - /* Not even root can pretend to send signals from the kernel. -- Nor can they impersonate a kill(), which adds source info. */ -- if (info->si_code >= 0) -+ * Nor can they impersonate a kill()/tgkill(), which adds source info. -+ */ -+ if (info->si_code != SI_QUEUE) { -+ /* We used to allow any < 0 si_code */ -+ WARN_ON_ONCE(info->si_code < 0); - return -EPERM; -+ } - info->si_signo = sig; - - return do_send_specific(tgid, pid, sig, info); -diff --git a/kernel/sysctl.c b/kernel/sysctl.c -index 4eed0af..443fd20 100644 ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -169,6 +169,11 @@ static int proc_taint(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); - #endif - -+#ifdef CONFIG_PRINTK -+static int proc_dmesg_restrict(struct ctl_table *table, int write, -+ void __user *buffer, size_t *lenp, loff_t *ppos); -+#endif -+ - #ifdef CONFIG_MAGIC_SYSRQ - /* Note: sysrq code uses it's own private copy */ - static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE; -@@ -713,7 +718,7 @@ static struct ctl_table kern_table[] = { - .data = &kptr_restrict, - .maxlen = sizeof(int), - .mode = 0644, -- .proc_handler = proc_dointvec_minmax, -+ .proc_handler = proc_dmesg_restrict, - .extra1 = &zero, - .extra2 = &two, - }, -@@ -2397,6 +2402,17 @@ static int proc_taint(struct ctl_table *table, int write, - return err; - } - -+#ifdef CONFIG_PRINTK -+static int proc_dmesg_restrict(struct ctl_table *table, int write, -+ void __user *buffer, size_t *lenp, loff_t *ppos) -+{ -+ if (write && !capable(CAP_SYS_ADMIN)) -+ return -EPERM; -+ -+ return proc_dointvec_minmax(table, write, buffer, lenp, ppos); -+} -+#endif -+ - struct do_proc_dointvec_minmax_conv_param { - int *min; - int *max; -diff --git a/mm/backing-dev.c b/mm/backing-dev.c -index 027100d..8e4ed88 100644 ---- a/mm/backing-dev.c -+++ b/mm/backing-dev.c -@@ -604,7 +604,7 @@ static 
void bdi_prune_sb(struct backing_dev_info *bdi) - spin_lock(&sb_lock); - list_for_each_entry(sb, &super_blocks, s_list) { - if (sb->s_bdi == bdi) -- sb->s_bdi = NULL; -+ sb->s_bdi = &default_backing_dev_info; - } - spin_unlock(&sb_lock); - } -diff --git a/mm/compaction.c b/mm/compaction.c -index 8be430b8..dcb058b 100644 ---- a/mm/compaction.c -+++ b/mm/compaction.c -@@ -42,8 +42,6 @@ struct compact_control { - unsigned int order; /* order a direct compactor needs */ - int migratetype; /* MOVABLE, RECLAIMABLE etc */ - struct zone *zone; -- -- int compact_mode; - }; - - static unsigned long release_freepages(struct list_head *freelist) -@@ -397,10 +395,7 @@ static int compact_finished(struct zone *zone, - return COMPACT_COMPLETE; - - /* Compaction run is not finished if the watermark is not met */ -- if (cc->compact_mode != COMPACT_MODE_KSWAPD) -- watermark = low_wmark_pages(zone); -- else -- watermark = high_wmark_pages(zone); -+ watermark = low_wmark_pages(zone); - watermark += (1 << cc->order); - - if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) -@@ -413,15 +408,6 @@ static int compact_finished(struct zone *zone, - if (cc->order == -1) - return COMPACT_CONTINUE; - -- /* -- * Generating only one page of the right order is not enough -- * for kswapd, we must continue until we're above the high -- * watermark as a pool for high order GFP_ATOMIC allocations -- * too. -- */ -- if (cc->compact_mode == COMPACT_MODE_KSWAPD) -- return COMPACT_CONTINUE; -- - /* Direct compactor: Is a suitable page free? 
*/ - for (order = cc->order; order < MAX_ORDER; order++) { - /* Job done if page is free of the right migratetype */ -@@ -543,8 +529,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) - - unsigned long compact_zone_order(struct zone *zone, - int order, gfp_t gfp_mask, -- bool sync, -- int compact_mode) -+ bool sync) - { - struct compact_control cc = { - .nr_freepages = 0, -@@ -553,7 +538,6 @@ unsigned long compact_zone_order(struct zone *zone, - .migratetype = allocflags_to_migratetype(gfp_mask), - .zone = zone, - .sync = sync, -- .compact_mode = compact_mode, - }; - INIT_LIST_HEAD(&cc.freepages); - INIT_LIST_HEAD(&cc.migratepages); -@@ -599,8 +583,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, - nodemask) { - int status; - -- status = compact_zone_order(zone, order, gfp_mask, sync, -- COMPACT_MODE_DIRECT_RECLAIM); -+ status = compact_zone_order(zone, order, gfp_mask, sync); - rc = max(status, rc); - - /* If a normal allocation would succeed, stop compacting */ -@@ -631,7 +614,6 @@ static int compact_node(int nid) - .nr_freepages = 0, - .nr_migratepages = 0, - .order = -1, -- .compact_mode = COMPACT_MODE_DIRECT_RECLAIM, - }; - - zone = &pgdat->node_zones[zoneid]; -diff --git a/mm/oom_kill.c b/mm/oom_kill.c -index 7dcca55..33b5861 100644 ---- a/mm/oom_kill.c -+++ b/mm/oom_kill.c -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - - int sysctl_panic_on_oom; - int sysctl_oom_kill_allocating_task; -@@ -292,13 +293,15 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, - unsigned long totalpages, struct mem_cgroup *mem, - const nodemask_t *nodemask) - { -- struct task_struct *p; -+ struct task_struct *g, *p; - struct task_struct *chosen = NULL; - *ppoints = 0; - -- for_each_process(p) { -+ do_each_thread(g, p) { - unsigned int points; - -+ if (!p->mm) -+ continue; - if (oom_unkillable_task(p, mem, nodemask)) - continue; - -@@ -314,22 +317,29 @@ static struct task_struct 
*select_bad_process(unsigned int *ppoints, - if (test_tsk_thread_flag(p, TIF_MEMDIE)) - return ERR_PTR(-1UL); - -- /* -- * This is in the process of releasing memory so wait for it -- * to finish before killing some other task by mistake. -- * -- * However, if p is the current task, we allow the 'kill' to -- * go ahead if it is exiting: this will simply set TIF_MEMDIE, -- * which will allow it to gain access to memory reserves in -- * the process of exiting and releasing its resources. -- * Otherwise we could get an easy OOM deadlock. -- */ -- if (thread_group_empty(p) && (p->flags & PF_EXITING) && p->mm) { -- if (p != current) -- return ERR_PTR(-1UL); -- -- chosen = p; -- *ppoints = 1000; -+ if (p->flags & PF_EXITING) { -+ /* -+ * If p is the current task and is in the process of -+ * releasing memory, we allow the "kill" to set -+ * TIF_MEMDIE, which will allow it to gain access to -+ * memory reserves. Otherwise, it may stall forever. -+ * -+ * The loop isn't broken here, however, in case other -+ * threads are found to have already been oom killed. -+ */ -+ if (p == current) { -+ chosen = p; -+ *ppoints = 1000; -+ } else { -+ /* -+ * If this task is not being ptraced on exit, -+ * then wait for it to finish before killing -+ * some other task unnecessarily. 
-+ */ -+ if (!(task_ptrace(p->group_leader) & -+ PT_TRACE_EXIT)) -+ return ERR_PTR(-1UL); -+ } - } - - points = oom_badness(p, mem, nodemask, totalpages); -@@ -337,7 +347,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, - chosen = p; - *ppoints = points; - } -- } -+ } while_each_thread(g, p); - - return chosen; - } -@@ -491,6 +501,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, - list_for_each_entry(child, &t->children, sibling) { - unsigned int child_points; - -+ if (child->mm == p->mm) -+ continue; - /* - * oom_badness() returns 0 if the thread is unkillable - */ -diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index cdef1d4..2828037 100644 ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -286,7 +286,7 @@ static void bad_page(struct page *page) - - /* Don't complain about poisoned pages */ - if (PageHWPoison(page)) { -- __ClearPageBuddy(page); -+ reset_page_mapcount(page); /* remove PageBuddy */ - return; - } - -@@ -317,7 +317,7 @@ static void bad_page(struct page *page) - dump_stack(); - out: - /* Leave bad fields for debug, except PageBuddy could make trouble */ -- __ClearPageBuddy(page); -+ reset_page_mapcount(page); /* remove PageBuddy */ - add_taint(TAINT_BAD_PAGE); - } - -diff --git a/mm/shmem.c b/mm/shmem.c -index 5ee67c99..5ac23d5 100644 ---- a/mm/shmem.c -+++ b/mm/shmem.c -@@ -2791,5 +2791,6 @@ int shmem_zero_setup(struct vm_area_struct *vma) - fput(vma->vm_file); - vma->vm_file = file; - vma->vm_ops = &shmem_vm_ops; -+ vma->vm_flags |= VM_CAN_NONLINEAR; - return 0; - } -diff --git a/mm/slab.c b/mm/slab.c -index 37961d1f..4c6e2e3 100644 ---- a/mm/slab.c -+++ b/mm/slab.c -@@ -2288,8 +2288,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, - if (ralign < align) { - ralign = align; - } -- /* disable debug if not aligning with REDZONE_ALIGN */ -- if (ralign & (__alignof__(unsigned long long) - 1)) -+ /* disable debug if necessary */ -+ if (ralign > __alignof__(unsigned long long)) 
- flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); - /* - * 4) Store it. -@@ -2315,8 +2315,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, - */ - if (flags & SLAB_RED_ZONE) { - /* add space for red zone words */ -- cachep->obj_offset += align; -- size += align + sizeof(unsigned long long); -+ cachep->obj_offset += sizeof(unsigned long long); -+ size += 2 * sizeof(unsigned long long); - } - if (flags & SLAB_STORE_USER) { - /* user store requires one word storage behind the end of -diff --git a/mm/swapfile.c b/mm/swapfile.c -index 0341c57..6d6d28c 100644 ---- a/mm/swapfile.c -+++ b/mm/swapfile.c -@@ -2149,8 +2149,13 @@ bad_swap_2: - p->flags = 0; - spin_unlock(&swap_lock); - vfree(swap_map); -- if (swap_file) -+ if (swap_file) { -+ if (did_down) { -+ mutex_unlock(&inode->i_mutex); -+ did_down = 0; -+ } - filp_close(swap_file, NULL); -+ } - out: - if (page && !IS_ERR(page)) { - kunmap(page); -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 6771ea7..3b4a41d 100644 ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -2397,7 +2397,6 @@ loop_again: - * cause too much scanning of the lower zones. 
- */ - for (i = 0; i <= end_zone; i++) { -- int compaction; - struct zone *zone = pgdat->node_zones + i; - int nr_slab; - -@@ -2428,24 +2427,9 @@ loop_again: - sc.nr_reclaimed += reclaim_state->reclaimed_slab; - total_scanned += sc.nr_scanned; - -- compaction = 0; -- if (order && -- zone_watermark_ok(zone, 0, -- high_wmark_pages(zone), -- end_zone, 0) && -- !zone_watermark_ok(zone, order, -- high_wmark_pages(zone), -- end_zone, 0)) { -- compact_zone_order(zone, -- order, -- sc.gfp_mask, false, -- COMPACT_MODE_KSWAPD); -- compaction = 1; -- } -- - if (zone->all_unreclaimable) - continue; -- if (!compaction && nr_slab == 0 && -+ if (nr_slab == 0 && - !zone_reclaimable(zone)) - zone->all_unreclaimable = 1; - /* -diff --git a/net/socket.c b/net/socket.c -index ac2219f..29c7df0 100644 ---- a/net/socket.c -+++ b/net/socket.c -@@ -2583,23 +2583,123 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) - - static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) - { -+ struct compat_ethtool_rxnfc __user *compat_rxnfc; -+ bool convert_in = false, convert_out = false; -+ size_t buf_size = ALIGN(sizeof(struct ifreq), 8); -+ struct ethtool_rxnfc __user *rxnfc; - struct ifreq __user *ifr; -+ u32 rule_cnt = 0, actual_rule_cnt; -+ u32 ethcmd; - u32 data; -- void __user *datap; -+ int ret; -+ -+ if (get_user(data, &ifr32->ifr_ifru.ifru_data)) -+ return -EFAULT; - -- ifr = compat_alloc_user_space(sizeof(*ifr)); -+ compat_rxnfc = compat_ptr(data); - -- if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) -+ if (get_user(ethcmd, &compat_rxnfc->cmd)) - return -EFAULT; - -- if (get_user(data, &ifr32->ifr_ifru.ifru_data)) -+ /* Most ethtool structures are defined without padding. -+ * Unfortunately struct ethtool_rxnfc is an exception. 
-+ */ -+ switch (ethcmd) { -+ default: -+ break; -+ case ETHTOOL_GRXCLSRLALL: -+ /* Buffer size is variable */ -+ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) -+ return -EFAULT; -+ if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) -+ return -ENOMEM; -+ buf_size += rule_cnt * sizeof(u32); -+ /* fall through */ -+ case ETHTOOL_GRXRINGS: -+ case ETHTOOL_GRXCLSRLCNT: -+ case ETHTOOL_GRXCLSRULE: -+ convert_out = true; -+ /* fall through */ -+ case ETHTOOL_SRXCLSRLDEL: -+ case ETHTOOL_SRXCLSRLINS: -+ buf_size += sizeof(struct ethtool_rxnfc); -+ convert_in = true; -+ break; -+ } -+ -+ ifr = compat_alloc_user_space(buf_size); -+ rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); -+ -+ if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) - return -EFAULT; - -- datap = compat_ptr(data); -- if (put_user(datap, &ifr->ifr_ifru.ifru_data)) -+ if (put_user(convert_in ? rxnfc : compat_ptr(data), -+ &ifr->ifr_ifru.ifru_data)) - return -EFAULT; - -- return dev_ioctl(net, SIOCETHTOOL, ifr); -+ if (convert_in) { -+ /* We expect there to be holes between fs.m_u and -+ * fs.ring_cookie and at the end of fs, but nowhere else. 
-+ */ -+ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) + -+ sizeof(compat_rxnfc->fs.m_u) != -+ offsetof(struct ethtool_rxnfc, fs.m_u) + -+ sizeof(rxnfc->fs.m_u)); -+ BUILD_BUG_ON( -+ offsetof(struct compat_ethtool_rxnfc, fs.location) - -+ offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != -+ offsetof(struct ethtool_rxnfc, fs.location) - -+ offsetof(struct ethtool_rxnfc, fs.ring_cookie)); -+ -+ if (copy_in_user(rxnfc, compat_rxnfc, -+ (void *)(&rxnfc->fs.m_u + 1) - -+ (void *)rxnfc) || -+ copy_in_user(&rxnfc->fs.ring_cookie, -+ &compat_rxnfc->fs.ring_cookie, -+ (void *)(&rxnfc->fs.location + 1) - -+ (void *)&rxnfc->fs.ring_cookie) || -+ copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, -+ sizeof(rxnfc->rule_cnt))) -+ return -EFAULT; -+ } -+ -+ ret = dev_ioctl(net, SIOCETHTOOL, ifr); -+ if (ret) -+ return ret; -+ -+ if (convert_out) { -+ if (copy_in_user(compat_rxnfc, rxnfc, -+ (const void *)(&rxnfc->fs.m_u + 1) - -+ (const void *)rxnfc) || -+ copy_in_user(&compat_rxnfc->fs.ring_cookie, -+ &rxnfc->fs.ring_cookie, -+ (const void *)(&rxnfc->fs.location + 1) - -+ (const void *)&rxnfc->fs.ring_cookie) || -+ copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, -+ sizeof(rxnfc->rule_cnt))) -+ return -EFAULT; -+ -+ if (ethcmd == ETHTOOL_GRXCLSRLALL) { -+ /* As an optimisation, we only copy the actual -+ * number of rules that the underlying -+ * function returned. Since Mallory might -+ * change the rule count in user memory, we -+ * check that it is less than the rule count -+ * originally given (as the user buffer size), -+ * which has been range-checked. 
-+ */ -+ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) -+ return -EFAULT; -+ if (actual_rule_cnt < rule_cnt) -+ rule_cnt = actual_rule_cnt; -+ if (copy_in_user(&compat_rxnfc->rule_locs[0], -+ &rxnfc->rule_locs[0], -+ rule_cnt * sizeof(u32))) -+ return -EFAULT; -+ } -+ } -+ -+ return 0; - } - - static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) -diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c -index be96d42..1e336a0 100644 ---- a/net/sunrpc/xprtsock.c -+++ b/net/sunrpc/xprtsock.c -@@ -710,6 +710,8 @@ static void xs_reset_transport(struct sock_xprt *transport) - if (sk == NULL) - return; - -+ transport->srcport = 0; -+ - write_lock_bh(&sk->sk_callback_lock); - transport->inet = NULL; - transport->sock = NULL; -diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c -index acd2099..c2eb6a7 100644 ---- a/sound/pci/hda/patch_realtek.c -+++ b/sound/pci/hda/patch_realtek.c -@@ -16085,9 +16085,12 @@ static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec, - return err; - } else { - const char *name = pfx; -- if (!name) -+ int index = i; -+ if (!name) { - name = chname[i]; -- err = __alc861_create_out_sw(codec, name, nid, i, 3); -+ index = 0; -+ } -+ err = __alc861_create_out_sw(codec, name, nid, index, 3); - if (err < 0) - return err; - } -@@ -17238,16 +17241,19 @@ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec, - return err; - } else { - const char *name = pfx; -- if (!name) -+ int index = i; -+ if (!name) { - name = chname[i]; -+ index = 0; -+ } - err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, -- name, i, -+ name, index, - HDA_COMPOSE_AMP_VAL(nid_v, 3, 0, - HDA_OUTPUT)); - if (err < 0) - return err; - err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, -- name, i, -+ name, index, - HDA_COMPOSE_AMP_VAL(nid_s, 3, 2, - HDA_INPUT)); - if (err < 0) -@@ -19296,12 +19302,15 @@ static int alc662_auto_create_multi_out_ctls(struct hda_codec *codec, - return err; - } else { - const 
char *name = pfx; -- if (!name) -+ int index = i; -+ if (!name) { - name = chname[i]; -- err = __alc662_add_vol_ctl(spec, name, nid, i, 3); -+ index = 0; -+ } -+ err = __alc662_add_vol_ctl(spec, name, nid, index, 3); - if (err < 0) - return err; -- err = __alc662_add_sw_ctl(spec, name, mix, i, 3); -+ err = __alc662_add_sw_ctl(spec, name, mix, index, 3); - if (err < 0) - return err; - } -diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c -index 052062d..8566119 100644 ---- a/sound/pci/hda/patch_sigmatel.c -+++ b/sound/pci/hda/patch_sigmatel.c -@@ -94,6 +94,7 @@ enum { - STAC_92HD83XXX_REF, - STAC_92HD83XXX_PWR_REF, - STAC_DELL_S14, -+ STAC_DELL_E5520M, - STAC_92HD83XXX_HP, - STAC_HP_DV7_4000, - STAC_92HD83XXX_MODELS -@@ -1657,6 +1658,13 @@ static unsigned int dell_s14_pin_configs[10] = { - 0x40f000f0, 0x40f000f0, - }; - -+/* Switch int mic from 0x20 to 0x11 */ -+static unsigned int dell_e5520m_pin_configs[10] = { -+ 0x04a11020, 0x0421101f, 0x400000f0, 0x90170110, -+ 0x23011050, 0x23a1102e, 0x400000f3, 0xd5a30130, -+ 0x400000f0, 0x40f000f0, -+}; -+ - static unsigned int hp_dv7_4000_pin_configs[10] = { - 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110, - 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140, -@@ -1667,6 +1675,7 @@ static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = { - [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, - [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs, - [STAC_DELL_S14] = dell_s14_pin_configs, -+ [STAC_DELL_E5520M] = dell_e5520m_pin_configs, - [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs, - }; - -@@ -1675,6 +1684,7 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = { - [STAC_92HD83XXX_REF] = "ref", - [STAC_92HD83XXX_PWR_REF] = "mic-ref", - [STAC_DELL_S14] = "dell-s14", -+ [STAC_DELL_E5520M] = "dell-e5520m", - [STAC_92HD83XXX_HP] = "hp", - [STAC_HP_DV7_4000] = "hp-dv7-4000", - }; -@@ -1687,6 +1697,14 @@ static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { - "DFI LanParty", 
STAC_92HD83XXX_REF), - SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba, - "unknown Dell", STAC_DELL_S14), -+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049a, -+ "Dell E5520", STAC_DELL_E5520M), -+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049b, -+ "Dell E5420", STAC_DELL_E5520M), -+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04eb, -+ "Dell E5420m", STAC_DELL_E5520M), -+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04ec, -+ "Dell E5520m", STAC_DELL_E5520M), - SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600, - "HP", STAC_92HD83XXX_HP), - {} /* terminator */ -diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c -index 63b0054..acc4579 100644 ---- a/sound/pci/hda/patch_via.c -+++ b/sound/pci/hda/patch_via.c -@@ -159,6 +159,7 @@ struct via_spec { - #endif - }; - -+static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec); - static struct via_spec * via_new_spec(struct hda_codec *codec) - { - struct via_spec *spec; -@@ -169,6 +170,10 @@ static struct via_spec * via_new_spec(struct hda_codec *codec) - - codec->spec = spec; - spec->codec = codec; -+ spec->codec_type = get_codec_type(codec); -+ /* VT1708BCE & VT1708S are almost same */ -+ if (spec->codec_type == VT1708BCE) -+ spec->codec_type = VT1708S; - return spec; - } - -@@ -1101,6 +1106,7 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol, - struct hda_codec *codec = snd_kcontrol_chip(kcontrol); - struct via_spec *spec = codec->spec; - unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); -+ int ret; - - if (!spec->mux_nids[adc_idx]) - return -EINVAL; -@@ -1109,12 +1115,14 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol, - AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0) - snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0, - AC_VERB_SET_POWER_STATE, AC_PWRST_D0); -- /* update jack power state */ -- set_jack_power_state(codec); - -- return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol, -+ ret = snd_hda_input_mux_put(codec, spec->input_mux, ucontrol, - spec->mux_nids[adc_idx], - 
&spec->cur_mux[adc_idx]); -+ /* update jack power state */ -+ set_jack_power_state(codec); -+ -+ return ret; - } - - static int via_independent_hp_info(struct snd_kcontrol *kcontrol, -@@ -1188,8 +1196,16 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol, - /* Get Independent Mode index of headphone pin widget */ - spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel - ? 1 : 0; -- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, pinsel); -+ if (spec->codec_type == VT1718S) -+ snd_hda_codec_write(codec, nid, 0, -+ AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0); -+ else -+ snd_hda_codec_write(codec, nid, 0, -+ AC_VERB_SET_CONNECT_SEL, pinsel); - -+ if (spec->codec_type == VT1812) -+ snd_hda_codec_write(codec, 0x35, 0, -+ AC_VERB_SET_CONNECT_SEL, pinsel); - if (spec->multiout.hp_nid && spec->multiout.hp_nid - != spec->multiout.dac_nids[HDA_FRONT]) - snd_hda_codec_setup_stream(codec, spec->multiout.hp_nid, -@@ -1208,6 +1224,8 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol, - activate_ctl(codec, "Headphone Playback Switch", - spec->hp_independent_mode); - } -+ /* update jack power state */ -+ set_jack_power_state(codec); - return 0; - } - -@@ -1248,9 +1266,12 @@ static int via_hp_build(struct hda_codec *codec) - break; - } - -- nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS); -- if (nums <= 1) -- return 0; -+ if (spec->codec_type != VT1708) { -+ nums = snd_hda_get_connections(codec, nid, -+ conn, HDA_MAX_CONNECTIONS); -+ if (nums <= 1) -+ return 0; -+ } - - knew = via_clone_control(spec, &via_hp_mixer[0]); - if (knew == NULL) -@@ -1310,6 +1331,11 @@ static void mute_aa_path(struct hda_codec *codec, int mute) - start_idx = 2; - end_idx = 4; - break; -+ case VT1718S: -+ nid_mixer = 0x21; -+ start_idx = 1; -+ end_idx = 3; -+ break; - default: - return; - } -@@ -2185,10 +2211,6 @@ static int via_init(struct hda_codec *codec) - for (i = 0; i < spec->num_iverbs; i++) - 
snd_hda_sequence_write(codec, spec->init_verbs[i]); - -- spec->codec_type = get_codec_type(codec); -- if (spec->codec_type == VT1708BCE) -- spec->codec_type = VT1708S; /* VT1708BCE & VT1708S are almost -- same */ - /* Lydia Add for EAPD enable */ - if (!spec->dig_in_nid) { /* No Digital In connection */ - if (spec->dig_in_pin) { -@@ -2438,7 +2460,14 @@ static int vt_auto_create_analog_input_ctls(struct hda_codec *codec, - else - type_idx = 0; - label = hda_get_autocfg_input_label(codec, cfg, i); -- err = via_new_analog_input(spec, label, type_idx, idx, cap_nid); -+ if (spec->codec_type == VT1708S || -+ spec->codec_type == VT1702 || -+ spec->codec_type == VT1716S) -+ err = via_new_analog_input(spec, label, type_idx, -+ idx+1, cap_nid); -+ else -+ err = via_new_analog_input(spec, label, type_idx, -+ idx, cap_nid); - if (err < 0) - return err; - snd_hda_add_imux_item(imux, label, idx, NULL); -diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c -index e76847a..48ffd40 100644 ---- a/sound/soc/codecs/uda134x.c -+++ b/sound/soc/codecs/uda134x.c -@@ -486,7 +486,8 @@ static struct snd_soc_dai_driver uda134x_dai = { - static int uda134x_soc_probe(struct snd_soc_codec *codec) - { - struct uda134x_priv *uda134x; -- struct uda134x_platform_data *pd = dev_get_drvdata(codec->card->dev); -+ struct uda134x_platform_data *pd = codec->card->dev->platform_data; -+ - int ret; - - printk(KERN_INFO "UDA134X SoC Audio Codec\n"); -diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c -index 2c09e93..86f1dc4 100644 ---- a/sound/soc/samsung/s3c24xx_uda134x.c -+++ b/sound/soc/samsung/s3c24xx_uda134x.c -@@ -226,7 +226,7 @@ static struct snd_soc_ops s3c24xx_uda134x_ops = { - static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = { - .name = "UDA134X", - .stream_name = "UDA134X", -- .codec_name = "uda134x-hifi", -+ .codec_name = "uda134x-codec", - .codec_dai_name = "uda134x-hifi", - .cpu_dai_name = "s3c24xx-iis", - .ops = &s3c24xx_uda134x_ops, 
-@@ -321,6 +321,7 @@ static int s3c24xx_uda134x_probe(struct platform_device *pdev) - - platform_set_drvdata(s3c24xx_uda134x_snd_device, - &snd_soc_s3c24xx_uda134x); -+ platform_device_add_data(s3c24xx_uda134x_snd_device, &s3c24xx_uda134x, sizeof(s3c24xx_uda134x)); - ret = platform_device_add(s3c24xx_uda134x_snd_device); - if (ret) { - printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n"); diff --git a/patches.kernel.org/patch-2.6.38.2-3 b/patches.kernel.org/patch-2.6.38.2-3 deleted file mode 100644 index 10d4ea6..0000000 --- a/patches.kernel.org/patch-2.6.38.2-3 +++ /dev/null @@ -1,3905 +0,0 @@ -From: Greg Kroah-Hartman -Subject: Linux 2.6.38.3 -Patch-mainline: Linux 2.6.38.3 - - -Signed-off-by: Greg Kroah-Hartman - -diff --git a/Makefile b/Makefile -index 6c15525..e47e39e 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,7 +1,7 @@ - VERSION = 2 - PATCHLEVEL = 6 - SUBLEVEL = 38 --EXTRAVERSION = .2 -+EXTRAVERSION = .3 - NAME = Flesh-Eating Bats with Fangs - - # *DOCUMENTATION* -diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c -index 09d31db..aa92696 100644 ---- a/arch/powerpc/kernel/time.c -+++ b/arch/powerpc/kernel/time.c -@@ -356,7 +356,7 @@ void account_system_vtime(struct task_struct *tsk) - } - get_paca()->user_time_scaled += user_scaled; - -- if (in_irq() || idle_task(smp_processor_id()) != tsk) { -+ if (in_interrupt() || idle_task(smp_processor_id()) != tsk) { - account_system_time(tsk, 0, delta, sys_scaled); - if (stolen) - account_steal_time(stolen); -diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S -index 8fe2a49..4292df7 100644 ---- a/arch/x86/crypto/aesni-intel_asm.S -+++ b/arch/x86/crypto/aesni-intel_asm.S -@@ -1612,6 +1612,7 @@ _zero_cipher_left_encrypt: - movdqa SHUF_MASK(%rip), %xmm10 - PSHUFB_XMM %xmm10, %xmm0 - -+ - ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) - sub $16, %r11 - add %r13, %r11 -@@ -1634,7 +1635,9 @@ _zero_cipher_left_encrypt: - # GHASH computation for the last <16 byte 
block - sub %r13, %r11 - add $16, %r11 -- PSHUFB_XMM %xmm10, %xmm1 -+ -+ movdqa SHUF_MASK(%rip), %xmm10 -+ PSHUFB_XMM %xmm10, %xmm0 - - # shuffle xmm0 back to output as ciphertext - -diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c -index e1e60c7..b375b2a 100644 ---- a/arch/x86/crypto/aesni-intel_glue.c -+++ b/arch/x86/crypto/aesni-intel_glue.c -@@ -828,9 +828,15 @@ static int rfc4106_init(struct crypto_tfm *tfm) - struct cryptd_aead *cryptd_tfm; - struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) - PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); -+ struct crypto_aead *cryptd_child; -+ struct aesni_rfc4106_gcm_ctx *child_ctx; - cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0); - if (IS_ERR(cryptd_tfm)) - return PTR_ERR(cryptd_tfm); -+ -+ cryptd_child = cryptd_aead_child(cryptd_tfm); -+ child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child); -+ memcpy(child_ctx, ctx, sizeof(*ctx)); - ctx->cryptd_tfm = cryptd_tfm; - tfm->crt_aead.reqsize = sizeof(struct aead_request) - + crypto_aead_reqsize(&cryptd_tfm->base); -@@ -925,6 +931,9 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, - int ret = 0; - struct crypto_tfm *tfm = crypto_aead_tfm(parent); - struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); -+ struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); -+ struct aesni_rfc4106_gcm_ctx *child_ctx = -+ aesni_rfc4106_gcm_ctx_get(cryptd_child); - u8 *new_key_mem = NULL; - - if (key_len < 4) { -@@ -968,6 +977,7 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, - goto exit; - } - ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); -+ memcpy(child_ctx, ctx, sizeof(*ctx)); - exit: - kfree(new_key_mem); - return ret; -@@ -999,7 +1009,6 @@ static int rfc4106_encrypt(struct aead_request *req) - int ret; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aesni_rfc4106_gcm_ctx *ctx = 
aesni_rfc4106_gcm_ctx_get(tfm); -- struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); - - if (!irq_fpu_usable()) { - struct aead_request *cryptd_req = -@@ -1008,6 +1017,7 @@ static int rfc4106_encrypt(struct aead_request *req) - aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); - return crypto_aead_encrypt(cryptd_req); - } else { -+ struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); - kernel_fpu_begin(); - ret = cryptd_child->base.crt_aead.encrypt(req); - kernel_fpu_end(); -@@ -1020,7 +1030,6 @@ static int rfc4106_decrypt(struct aead_request *req) - int ret; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); -- struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); - - if (!irq_fpu_usable()) { - struct aead_request *cryptd_req = -@@ -1029,6 +1038,7 @@ static int rfc4106_decrypt(struct aead_request *req) - aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); - return crypto_aead_decrypt(cryptd_req); - } else { -+ struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); - kernel_fpu_begin(); - ret = cryptd_child->base.crt_aead.decrypt(req); - kernel_fpu_end(); -diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c -index bebabec..151787e 100644 ---- a/arch/x86/kernel/cpu/mtrr/main.c -+++ b/arch/x86/kernel/cpu/mtrr/main.c -@@ -292,14 +292,24 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ - - /* - * HACK! -- * We use this same function to initialize the mtrrs on boot. -- * The state of the boot cpu's mtrrs has been saved, and we want -- * to replicate across all the APs. -- * If we're doing that @reg is set to something special... -+ * -+ * We use this same function to initialize the mtrrs during boot, -+ * resume, runtime cpu online and on an explicit request to set a -+ * specific MTRR. 
-+ * -+ * During boot or suspend, the state of the boot cpu's mtrrs has been -+ * saved, and we want to replicate that across all the cpus that come -+ * online (either at the end of boot or resume or during a runtime cpu -+ * online). If we're doing that, @reg is set to something special and on -+ * this cpu we still do mtrr_if->set_all(). During boot/resume, this -+ * is unnecessary if at this point we are still on the cpu that started -+ * the boot/resume sequence. But there is no guarantee that we are still -+ * on the same cpu. So we do mtrr_if->set_all() on this cpu aswell to be -+ * sure that we are in sync with everyone else. - */ - if (reg != ~0U) - mtrr_if->set(reg, base, size, type); -- else if (!mtrr_aps_delayed_init) -+ else - mtrr_if->set_all(); - - /* Wait for the others */ -diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c -index 5655c22..2d2673c 100644 ---- a/arch/x86/kernel/head64.c -+++ b/arch/x86/kernel/head64.c -@@ -77,6 +77,9 @@ void __init x86_64_start_kernel(char * real_mode_data) - /* Make NULL pointers segfault */ - zap_identity_mappings(); - -+ /* Cleanup the over mapped high alias */ -+ cleanup_highmap(); -+ - max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; - - for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { -diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c -index e543fe9..d3cfe26 100644 ---- a/arch/x86/kernel/setup.c -+++ b/arch/x86/kernel/setup.c -@@ -297,9 +297,6 @@ static void __init init_gbpages(void) - static inline void init_gbpages(void) - { - } --static void __init cleanup_highmap(void) --{ --} - #endif - - static void __init reserve_brk(void) -@@ -925,8 +922,6 @@ void __init setup_arch(char **cmdline_p) - */ - reserve_brk(); - -- cleanup_highmap(); -- - memblock.current_limit = get_max_mapped(); - memblock_x86_fill(); - -diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c -index f13ff3a..947f42a 100644 ---- a/arch/x86/mm/init.c -+++ b/arch/x86/mm/init.c -@@ -279,6 +279,25 @@ unsigned long 
__init_refok init_memory_mapping(unsigned long start, - load_cr3(swapper_pg_dir); - #endif - -+#ifdef CONFIG_X86_64 -+ if (!after_bootmem && !start) { -+ pud_t *pud; -+ pmd_t *pmd; -+ -+ mmu_cr4_features = read_cr4(); -+ -+ /* -+ * _brk_end cannot change anymore, but it and _end may be -+ * located on different 2M pages. cleanup_highmap(), however, -+ * can only consider _end when it runs, so destroy any -+ * mappings beyond _brk_end here. -+ */ -+ pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); -+ pmd = pmd_offset(pud, _brk_end - 1); -+ while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) -+ pmd_clear(pmd); -+ } -+#endif - __flush_tlb_all(); - - if (!after_bootmem && e820_table_end > e820_table_start) -diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c -index 68f9921..c14a542 100644 ---- a/arch/x86/mm/init_64.c -+++ b/arch/x86/mm/init_64.c -@@ -51,7 +51,6 @@ - #include - #include - #include --#include - - static int __init parse_direct_gbpages_off(char *arg) - { -@@ -294,18 +293,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) - * to the compile time generated pmds. This results in invalid pmds up - * to the point where we hit the physaddr 0 mapping. - * -- * We limit the mappings to the region from _text to _brk_end. _brk_end -- * is rounded up to the 2MB boundary. This catches the invalid pmds as -+ * We limit the mappings to the region from _text to _end. _end is -+ * rounded up to the 2MB boundary. 
This catches the invalid pmds as - * well, as they are located before _text: - */ - void __init cleanup_highmap(void) - { - unsigned long vaddr = __START_KERNEL_map; -- unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); -- unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; -+ unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1; - pmd_t *pmd = level2_kernel_pgt; -+ pmd_t *last_pmd = pmd + PTRS_PER_PMD; - -- for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { -+ for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) { - if (pmd_none(*pmd)) - continue; - if (vaddr < (unsigned long) _text || vaddr > end) -diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c -index 8524939..c7358dd 100644 ---- a/drivers/acpi/pci_root.c -+++ b/drivers/acpi/pci_root.c -@@ -564,7 +564,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) - /* Indicate support for various _OSC capabilities. */ - if (pci_ext_cfg_avail(root->bus->self)) - flags |= OSC_EXT_PCI_CONFIG_SUPPORT; -- if (pcie_aspm_enabled()) -+ if (pcie_aspm_support_enabled()) - flags |= OSC_ACTIVE_STATE_PWR_SUPPORT | - OSC_CLOCK_PWR_CAPABILITY_SUPPORT; - if (pci_msi_enabled()) -diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c -index 25ef1a4..b836e11 100644 ---- a/drivers/atm/solos-pci.c -+++ b/drivers/atm/solos-pci.c -@@ -165,7 +165,6 @@ static uint32_t fpga_tx(struct solos_card *); - static irqreturn_t solos_irq(int irq, void *dev_id); - static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci); - static int list_vccs(int vci); --static void release_vccs(struct atm_dev *dev); - static int atm_init(struct solos_card *, struct device *); - static void atm_remove(struct solos_card *); - static int send_command(struct solos_card *card, int dev, const char *buf, size_t size); -@@ -384,7 +383,6 @@ static int process_status(struct solos_card *card, int port, struct sk_buff *skb - /* Anything but 'Showtime' is down 
*/ - if (strcmp(state_str, "Showtime")) { - atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_LOST); -- release_vccs(card->atmdev[port]); - dev_info(&card->dev->dev, "Port %d: %s\n", port, state_str); - return 0; - } -@@ -697,7 +695,7 @@ void solos_bh(unsigned long card_arg) - size); - } - if (atmdebug) { -- dev_info(&card->dev->dev, "Received: device %d\n", port); -+ dev_info(&card->dev->dev, "Received: port %d\n", port); - dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", - size, le16_to_cpu(header->vpi), - le16_to_cpu(header->vci)); -@@ -830,28 +828,6 @@ static int list_vccs(int vci) - return num_found; - } - --static void release_vccs(struct atm_dev *dev) --{ -- int i; -- -- write_lock_irq(&vcc_sklist_lock); -- for (i = 0; i < VCC_HTABLE_SIZE; i++) { -- struct hlist_head *head = &vcc_hash[i]; -- struct hlist_node *node, *tmp; -- struct sock *s; -- struct atm_vcc *vcc; -- -- sk_for_each_safe(s, node, tmp, head) { -- vcc = atm_sk(s); -- if (vcc->dev == dev) { -- vcc_release_async(vcc, -EPIPE); -- sk_del_node_init(s); -- } -- } -- } -- write_unlock_irq(&vcc_sklist_lock); --} -- - - static int popen(struct atm_vcc *vcc) - { -@@ -1018,8 +994,15 @@ static uint32_t fpga_tx(struct solos_card *card) - - /* Clean up and free oldskb now it's gone */ - if (atmdebug) { -+ struct pkt_hdr *header = (void *)oldskb->data; -+ int size = le16_to_cpu(header->size); -+ -+ skb_pull(oldskb, sizeof(*header)); - dev_info(&card->dev->dev, "Transmitted: port %d\n", - port); -+ dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", -+ size, le16_to_cpu(header->vpi), -+ le16_to_cpu(header->vci)); - print_buffer(oldskb); - } - -@@ -1262,7 +1245,7 @@ static int atm_init(struct solos_card *card, struct device *parent) - card->atmdev[i]->ci_range.vci_bits = 16; - card->atmdev[i]->dev_data = card; - card->atmdev[i]->phy_data = (void *)(unsigned long)i; -- atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_UNKNOWN); -+ atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND); - - skb 
= alloc_skb(sizeof(*header), GFP_ATOMIC); - if (!skb) { -diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h -index 579f749..554bbd9 100644 ---- a/drivers/block/cciss.h -+++ b/drivers/block/cciss.h -@@ -222,6 +222,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) - h->ctlr, c->busaddr); - #endif /* CCISS_DEBUG */ - writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); -+ readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); - h->commands_outstanding++; - if ( h->commands_outstanding > h->max_outstanding) - h->max_outstanding = h->commands_outstanding; -diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c -index 700a384..f44ca40 100644 ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -71,6 +71,9 @@ static struct usb_device_id btusb_table[] = { - /* Apple MacBookAir3,1, MacBookAir3,2 */ - { USB_DEVICE(0x05ac, 0x821b) }, - -+ /* Apple MacBookPro8,2 */ -+ { USB_DEVICE(0x05ac, 0x821a) }, -+ - /* AVM BlueFRITZ! USB v2.0 */ - { USB_DEVICE(0x057c, 0x3800) }, - -diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c -index 1f46f1c..7beb0e2 100644 ---- a/drivers/char/tpm/tpm.c -+++ b/drivers/char/tpm/tpm.c -@@ -980,7 +980,7 @@ int tpm_open(struct inode *inode, struct file *file) - return -EBUSY; - } - -- chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); -+ chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); - if (chip->data_buffer == NULL) { - clear_bit(0, &chip->is_open); - put_device(chip->dev); -diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c -index 23e0355..7e0e660 100644 ---- a/drivers/edac/amd64_edac.c -+++ b/drivers/edac/amd64_edac.c -@@ -2765,7 +2765,7 @@ static int __init amd64_edac_init(void) - mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); - ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); - if (!(mcis && ecc_stngs)) -- goto err_ret; -+ goto err_free; - - msrs = msrs_alloc(); - if (!msrs) -diff --git 
a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c -index 02d5c41..99768d9 100644 ---- a/drivers/gpu/drm/radeon/radeon_atombios.c -+++ b/drivers/gpu/drm/radeon/radeon_atombios.c -@@ -675,7 +675,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) - ATOM_ENCODER_CAP_RECORD *cap_record; - u16 caps = 0; - -- while (record->ucRecordType > 0 && -+ while (record->ucRecordSize > 0 && -+ record->ucRecordType > 0 && - record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { - switch (record->ucRecordType) { - case ATOM_ENCODER_CAP_RECORD_TYPE: -@@ -720,7 +721,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) - break; - } - -- while (record->ucRecordType > 0 && -+ while (record->ucRecordSize > 0 && -+ record->ucRecordType > 0 && - record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { - switch (record->ucRecordType) { - case ATOM_I2C_RECORD_TYPE: -@@ -782,10 +784,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) - ATOM_HPD_INT_RECORD *hpd_record; - ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; - -- while (record->ucRecordType > 0 -- && record-> -- ucRecordType <= -- ATOM_MAX_OBJECT_RECORD_NUMBER) { -+ while (record->ucRecordSize > 0 && -+ record->ucRecordType > 0 && -+ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { - switch (record->ucRecordType) { - case ATOM_I2C_RECORD_TYPE: - i2c_record = -diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c -index 318cc40..418c399 100644 ---- a/drivers/hid/hid-magicmouse.c -+++ b/drivers/hid/hid-magicmouse.c -@@ -418,6 +418,8 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h - input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, - 2565, 4, 0); - } -+ -+ input_set_events_per_packet(input, 60); - } - - if (report_undeciphered) { -diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c -index aa186cf..e06e045 100644 ---- 
a/drivers/input/mouse/synaptics.c -+++ b/drivers/input/mouse/synaptics.c -@@ -836,8 +836,8 @@ static const struct dmi_system_id __initconst toshiba_dmi_table[] = { - }, - - }, -- { } - #endif -+ { } - }; - - static bool broken_olpc_ec; -@@ -851,8 +851,8 @@ static const struct dmi_system_id __initconst olpc_dmi_table[] = { - DMI_MATCH(DMI_PRODUCT_NAME, "XO"), - }, - }, -- { } - #endif -+ { } - }; - - void __init synaptics_module_init(void) -diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c -index 80a3ae3..c0cff64 100644 ---- a/drivers/leds/leds-lp5521.c -+++ b/drivers/leds/leds-lp5521.c -@@ -534,7 +534,7 @@ static ssize_t lp5521_selftest(struct device *dev, - } - - /* led class device attributes */ --static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); -+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current); - static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); - - static struct attribute *lp5521_led_attributes[] = { -@@ -548,15 +548,15 @@ static struct attribute_group lp5521_led_attribute_group = { - }; - - /* device attributes */ --static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, -+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR, - show_engine1_mode, store_engine1_mode); --static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, -+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR, - show_engine2_mode, store_engine2_mode); --static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, -+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR, - show_engine3_mode, store_engine3_mode); --static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); --static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); --static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); -+static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load); -+static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load); -+static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, 
store_engine3_load); - static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL); - - static struct attribute *lp5521_attributes[] = { -diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c -index d0c4068..e19fed2 100644 ---- a/drivers/leds/leds-lp5523.c -+++ b/drivers/leds/leds-lp5523.c -@@ -713,7 +713,7 @@ static ssize_t store_current(struct device *dev, - } - - /* led class device attributes */ --static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); -+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current); - static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); - - static struct attribute *lp5523_led_attributes[] = { -@@ -727,21 +727,21 @@ static struct attribute_group lp5523_led_attribute_group = { - }; - - /* device attributes */ --static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, -+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR, - show_engine1_mode, store_engine1_mode); --static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, -+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR, - show_engine2_mode, store_engine2_mode); --static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, -+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR, - show_engine3_mode, store_engine3_mode); --static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO, -+static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUSR, - show_engine1_leds, store_engine1_leds); --static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO, -+static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUSR, - show_engine2_leds, store_engine2_leds); --static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO, -+static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUSR, - show_engine3_leds, store_engine3_leds); --static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); --static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); --static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); -+static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, 
store_engine1_load); -+static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load); -+static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load); - static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL); - - static struct attribute *lp5523_attributes[] = { -diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig -index ecdffa6..c090246 100644 ---- a/drivers/media/radio/Kconfig -+++ b/drivers/media/radio/Kconfig -@@ -441,6 +441,7 @@ config RADIO_TIMBERDALE - config RADIO_WL1273 - tristate "Texas Instruments WL1273 I2C FM Radio" - depends on I2C && VIDEO_V4L2 -+ select MFD_CORE - select MFD_WL1273_CORE - select FW_LOADER - ---help--- -diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c -index df33a1d..a794ae6 100644 ---- a/drivers/media/video/tlg2300/pd-video.c -+++ b/drivers/media/video/tlg2300/pd-video.c -@@ -764,10 +764,8 @@ static int pd_vidioc_s_fmt(struct poseidon *pd, struct v4l2_pix_format *pix) - } - ret |= send_set_req(pd, VIDEO_ROSOLU_SEL, - vid_resol, &cmd_status); -- if (ret || cmd_status) { -- mutex_unlock(&pd->lock); -+ if (ret || cmd_status) - return -EBUSY; -- } - - pix_def->pixelformat = pix->pixelformat; /* save it */ - pix->height = (context->tvnormid & V4L2_STD_525_60) ? 
480 : 576; -diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c -index 4193af5..1707d22 100644 ---- a/drivers/mfd/ab3100-core.c -+++ b/drivers/mfd/ab3100-core.c -@@ -613,7 +613,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100) - ab3100_get_priv.ab3100 = ab3100; - ab3100_get_priv.mode = false; - ab3100_get_reg_file = debugfs_create_file("get_reg", -- S_IWUGO, ab3100_dir, &ab3100_get_priv, -+ S_IWUSR, ab3100_dir, &ab3100_get_priv, - &ab3100_get_set_reg_fops); - if (!ab3100_get_reg_file) { - err = -ENOMEM; -@@ -623,7 +623,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100) - ab3100_set_priv.ab3100 = ab3100; - ab3100_set_priv.mode = true; - ab3100_set_reg_file = debugfs_create_file("set_reg", -- S_IWUGO, ab3100_dir, &ab3100_set_priv, -+ S_IWUSR, ab3100_dir, &ab3100_set_priv, - &ab3100_get_set_reg_fops); - if (!ab3100_set_reg_file) { - err = -ENOMEM; -diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c -index 5fbca34..681984d 100644 ---- a/drivers/mfd/ab3550-core.c -+++ b/drivers/mfd/ab3550-core.c -@@ -1053,17 +1053,17 @@ static inline void ab3550_setup_debugfs(struct ab3550 *ab) - goto exit_destroy_dir; - - ab3550_bank_file = debugfs_create_file("register-bank", -- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops); -+ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_bank_fops); - if (!ab3550_bank_file) - goto exit_destroy_reg; - - ab3550_address_file = debugfs_create_file("register-address", -- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops); -+ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_address_fops); - if (!ab3550_address_file) - goto exit_destroy_bank; - - ab3550_val_file = debugfs_create_file("register-value", -- (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops); -+ (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_val_fops); - if (!ab3550_val_file) - goto exit_destroy_address; - -diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c -index 3c1541a..64748e4 100644 ---- 
a/drivers/mfd/ab8500-debugfs.c -+++ b/drivers/mfd/ab8500-debugfs.c -@@ -585,18 +585,18 @@ static int __devinit ab8500_debug_probe(struct platform_device *plf) - goto exit_destroy_dir; - - ab8500_bank_file = debugfs_create_file("register-bank", -- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops); -+ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops); - if (!ab8500_bank_file) - goto exit_destroy_reg; - - ab8500_address_file = debugfs_create_file("register-address", -- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, -+ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, - &ab8500_address_fops); - if (!ab8500_address_file) - goto exit_destroy_bank; - - ab8500_val_file = debugfs_create_file("register-value", -- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops); -+ (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops); - if (!ab8500_val_file) - goto exit_destroy_address; - -diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c -index 46b3439..16d7179 100644 ---- a/drivers/misc/ep93xx_pwm.c -+++ b/drivers/misc/ep93xx_pwm.c -@@ -249,11 +249,11 @@ static ssize_t ep93xx_pwm_set_invert(struct device *dev, - - static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL); - static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL); --static DEVICE_ATTR(freq, S_IWUGO | S_IRUGO, -+static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO, - ep93xx_pwm_get_freq, ep93xx_pwm_set_freq); --static DEVICE_ATTR(duty_percent, S_IWUGO | S_IRUGO, -+static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO, - ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent); --static DEVICE_ATTR(invert, S_IWUGO | S_IRUGO, -+static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO, - ep93xx_pwm_get_invert, ep93xx_pwm_set_invert); - - static struct attribute *ep93xx_pwm_attrs[] = { -diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c -index ea5cfe2..24386a8 100644 ---- a/drivers/net/myri10ge/myri10ge.c -+++ b/drivers/net/myri10ge/myri10ge.c 
-@@ -3645,6 +3645,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp) - dma_free_coherent(&pdev->dev, bytes, - ss->fw_stats, ss->fw_stats_bus); - ss->fw_stats = NULL; -+ netif_napi_del(&ss->napi); - } - } - kfree(mgp->ss); -diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c -index 587498e..3de98cb 100644 ---- a/drivers/net/netxen/netxen_nic_ethtool.c -+++ b/drivers/net/netxen/netxen_nic_ethtool.c -@@ -901,7 +901,7 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data) - struct netxen_adapter *adapter = netdev_priv(netdev); - int hw_lro; - -- if (data & ~ETH_FLAG_LRO) -+ if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) - return -EINVAL; - - if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) -diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c -index 4c14510..45b2755 100644 ---- a/drivers/net/qlcnic/qlcnic_ethtool.c -+++ b/drivers/net/qlcnic/qlcnic_ethtool.c -@@ -1003,7 +1003,7 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data) - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int hw_lro; - -- if (data & ~ETH_FLAG_LRO) -+ if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) - return -EINVAL; - - if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) -diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c -index 39c17ce..0cdff2b 100644 ---- a/drivers/net/s2io.c -+++ b/drivers/net/s2io.c -@@ -6726,7 +6726,7 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data) - int rc = 0; - int changed = 0; - -- if (data & ~ETH_FLAG_LRO) -+ if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO)) - return -EINVAL; - - if (data & ETH_FLAG_LRO) { -diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c -index 81254be..51f2ef1 100644 ---- a/drivers/net/vmxnet3/vmxnet3_ethtool.c -+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c -@@ -304,8 +304,8 @@ vmxnet3_set_flags(struct net_device *netdev, u32 
data) - u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; - unsigned long flags; - -- if (data & ~ETH_FLAG_LRO) -- return -EOPNOTSUPP; -+ if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) -+ return -EINVAL; - - if (lro_requested ^ lro_present) { - /* toggle the LRO feature*/ -diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c -index 1dd3a21..c5eb034 100644 ---- a/drivers/net/vxge/vxge-ethtool.c -+++ b/drivers/net/vxge/vxge-ethtool.c -@@ -1117,8 +1117,8 @@ static int vxge_set_flags(struct net_device *dev, u32 data) - struct vxgedev *vdev = netdev_priv(dev); - enum vxge_hw_status status; - -- if (data & ~ETH_FLAG_RXHASH) -- return -EOPNOTSUPP; -+ if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH)) -+ return -EINVAL; - - if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) - return 0; -diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c -index a09d15f..0848e09 100644 ---- a/drivers/net/wireless/ath/ath9k/main.c -+++ b/drivers/net/wireless/ath/ath9k/main.c -@@ -1063,6 +1063,8 @@ static int ath9k_start(struct ieee80211_hw *hw) - "Starting driver with initial channel: %d MHz\n", - curchan->center_freq); - -+ ath9k_ps_wakeup(sc); -+ - mutex_lock(&sc->mutex); - - if (ath9k_wiphy_started(sc)) { -@@ -1179,6 +1181,8 @@ static int ath9k_start(struct ieee80211_hw *hw) - mutex_unlock: - mutex_unlock(&sc->mutex); - -+ ath9k_ps_restore(sc); -+ - return r; - } - -diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c -index 07b7804..5c9d83b 100644 ---- a/drivers/net/wireless/ath/ath9k/xmit.c -+++ b/drivers/net/wireless/ath/ath9k/xmit.c -@@ -1699,8 +1699,8 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, - u8 tidno; - - spin_lock_bh(&txctl->txq->axq_lock); -- -- if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) { -+ if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && -+ ieee80211_is_data_qos(hdr->frame_control)) { - 
tidno = ieee80211_get_qos_ctl(hdr)[0] & - IEEE80211_QOS_CTL_TID_MASK; - tid = ATH_AN_2_TID(txctl->an, tidno); -diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c -index 3d5566e..ff0f5ba 100644 ---- a/drivers/net/wireless/b43/dma.c -+++ b/drivers/net/wireless/b43/dma.c -@@ -1536,7 +1536,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) - dmaaddr = meta->dmaaddr; - goto drop_recycle_buffer; - } -- if (unlikely(len > ring->rx_buffersize)) { -+ if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) { - /* The data did not fit into one descriptor buffer - * and is split over multiple buffers. - * This should never happen, as we try to allocate buffers -diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h -index a01c210..e8a80a1 100644 ---- a/drivers/net/wireless/b43/dma.h -+++ b/drivers/net/wireless/b43/dma.h -@@ -163,7 +163,7 @@ struct b43_dmadesc_generic { - /* DMA engine tuning knobs */ - #define B43_TXRING_SLOTS 256 - #define B43_RXRING_SLOTS 64 --#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN -+#define B43_DMA0_RX_BUFFERSIZE (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN) - - /* Pointer poison */ - #define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM)) -diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h -index 9e6f313..c0cd307 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h -+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h -@@ -241,7 +241,7 @@ struct iwl_eeprom_enhanced_txpwr { - - /* 6x00 Specific */ - #define EEPROM_6000_TX_POWER_VERSION (4) --#define EEPROM_6000_EEPROM_VERSION (0x434) -+#define EEPROM_6000_EEPROM_VERSION (0x423) - - /* 6x50 Specific */ - #define EEPROM_6050_TX_POWER_VERSION (4) -diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c -index 9b344a9..e183587 100644 ---- a/drivers/net/wireless/p54/p54usb.c -+++ b/drivers/net/wireless/p54/p54usb.c -@@ -56,6 +56,7 @@ static struct 
usb_device_id p54u_table[] __devinitdata = { - {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */ - {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ - {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ -+ {USB_DEVICE(0x0bf8, 0x1007)}, /* Fujitsu E-5400 USB */ - {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ - {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ - {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ -@@ -68,6 +69,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { - {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ - {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ - {USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */ -+ {USB_DEVICE(0x2001, 0x3762)}, /* Conceptronic C54U */ - {USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */ - {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ - -diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c -index 54917a2..e2a528d 100644 ---- a/drivers/net/wireless/rt2x00/rt2800lib.c -+++ b/drivers/net/wireless/rt2x00/rt2800lib.c -@@ -2810,10 +2810,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) - - rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); -- rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); -- rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); -- rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); - rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); - - /* Wait for DMA, ignore error */ -@@ -2823,9 +2820,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) - rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 0); - rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, 0); - rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); -- -- rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); -- rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); - } - 
EXPORT_SYMBOL_GPL(rt2800_disable_radio); - -diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c -index 3b3f1e4..37a38b5 100644 ---- a/drivers/net/wireless/rt2x00/rt2800pci.c -+++ b/drivers/net/wireless/rt2x00/rt2800pci.c -@@ -475,39 +475,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) - - static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) - { -- u32 reg; -- -- rt2800_disable_radio(rt2x00dev); -- -- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280); -- -- rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, ®); -- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX0, 1); -- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX1, 1); -- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX2, 1); -- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX3, 1); -- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX4, 1); -- rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX5, 1); -- rt2x00_set_field32(®, WPDMA_RST_IDX_DRX_IDX0, 1); -- rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); -- -- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); -- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); -+ if (rt2x00_is_soc(rt2x00dev)) { -+ rt2800_disable_radio(rt2x00dev); -+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); -+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); -+ } - } - - static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, - enum dev_state state) - { -- /* -- * Always put the device to sleep (even when we intend to wakeup!) -- * if the device is booting and wasn't asleep it will return -- * failure when attempting to wakeup. 
-- */ -- rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2); -- - if (state == STATE_AWAKE) { -- rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0); -+ rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02); - rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP); -+ } else if (state == STATE_SLEEP) { -+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff); -+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff); -+ rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01); - } - - return 0; -diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c -index 9597a03..2b77a29 100644 ---- a/drivers/net/wireless/rt2x00/rt2x00dev.c -+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c -@@ -1031,8 +1031,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) - * Stop all work. - */ - cancel_work_sync(&rt2x00dev->intf_work); -- cancel_work_sync(&rt2x00dev->rxdone_work); -- cancel_work_sync(&rt2x00dev->txdone_work); -+ if (rt2x00_is_usb(rt2x00dev)) { -+ cancel_work_sync(&rt2x00dev->rxdone_work); -+ cancel_work_sync(&rt2x00dev->txdone_work); -+ } - - /* - * Free the tx status fifo. 
-diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c -index e64403b..6ec06a4 100644 ---- a/drivers/net/wireless/wl12xx/testmode.c -+++ b/drivers/net/wireless/wl12xx/testmode.c -@@ -204,7 +204,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[]) - - kfree(wl->nvs); - -- wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); -+ if (len != sizeof(struct wl1271_nvs_file)) -+ return -EINVAL; -+ -+ wl->nvs = kzalloc(len, GFP_KERNEL); - if (!wl->nvs) { - wl1271_error("could not allocate memory for the nvs file"); - ret = -ENOMEM; -diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c -index 3188cd9..bbdb4fd 100644 ---- a/drivers/pci/pcie/aspm.c -+++ b/drivers/pci/pcie/aspm.c -@@ -69,6 +69,7 @@ struct pcie_link_state { - }; - - static int aspm_disabled, aspm_force, aspm_clear_state; -+static bool aspm_support_enabled = true; - static DEFINE_MUTEX(aspm_lock); - static LIST_HEAD(link_list); - -@@ -896,6 +897,7 @@ static int __init pcie_aspm_disable(char *str) - { - if (!strcmp(str, "off")) { - aspm_disabled = 1; -+ aspm_support_enabled = false; - printk(KERN_INFO "PCIe ASPM is disabled\n"); - } else if (!strcmp(str, "force")) { - aspm_force = 1; -@@ -930,3 +932,8 @@ int pcie_aspm_enabled(void) - } - EXPORT_SYMBOL(pcie_aspm_enabled); - -+bool pcie_aspm_support_enabled(void) -+{ -+ return aspm_support_enabled; -+} -+EXPORT_SYMBOL(pcie_aspm_support_enabled); -diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c -index 38b34a7..fa54ba7 100644 ---- a/drivers/platform/x86/acer-wmi.c -+++ b/drivers/platform/x86/acer-wmi.c -@@ -222,6 +222,7 @@ struct acer_debug { - static struct rfkill *wireless_rfkill; - static struct rfkill *bluetooth_rfkill; - static struct rfkill *threeg_rfkill; -+static bool rfkill_inited; - - /* Each low-level interface must define at least some of the following */ - struct wmi_interface { -@@ -1161,9 +1162,13 @@ static int acer_rfkill_set(void *data, 
bool blocked) - { - acpi_status status; - u32 cap = (unsigned long)data; -- status = set_u32(!blocked, cap); -- if (ACPI_FAILURE(status)) -- return -ENODEV; -+ -+ if (rfkill_inited) { -+ status = set_u32(!blocked, cap); -+ if (ACPI_FAILURE(status)) -+ return -ENODEV; -+ } -+ - return 0; - } - -@@ -1187,14 +1192,16 @@ static struct rfkill *acer_rfkill_register(struct device *dev, - return ERR_PTR(-ENOMEM); - - status = get_device_status(&state, cap); -- if (ACPI_SUCCESS(status)) -- rfkill_init_sw_state(rfkill_dev, !state); - - err = rfkill_register(rfkill_dev); - if (err) { - rfkill_destroy(rfkill_dev); - return ERR_PTR(err); - } -+ -+ if (ACPI_SUCCESS(status)) -+ rfkill_set_sw_state(rfkill_dev, !state); -+ - return rfkill_dev; - } - -@@ -1229,6 +1236,8 @@ static int acer_rfkill_init(struct device *dev) - } - } - -+ rfkill_inited = true; -+ - schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); - - return 0; -diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c -index 37268e9..afeb546 100644 ---- a/drivers/rtc/rtc-ds1511.c -+++ b/drivers/rtc/rtc-ds1511.c -@@ -485,7 +485,7 @@ ds1511_nvram_write(struct file *filp, struct kobject *kobj, - static struct bin_attribute ds1511_nvram_attr = { - .attr = { - .name = "nvram", -- .mode = S_IRUGO | S_IWUGO, -+ .mode = S_IRUGO | S_IWUSR, - }, - .size = DS1511_RAM_MAX, - .read = ds1511_nvram_read, -diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c -index f905ecb..01543d2 100644 ---- a/drivers/scsi/scsi_transport_iscsi.c -+++ b/drivers/scsi/scsi_transport_iscsi.c -@@ -1847,7 +1847,7 @@ store_priv_session_##field(struct device *dev, \ - #define iscsi_priv_session_rw_attr(field, format) \ - iscsi_priv_session_attr_show(field, format) \ - iscsi_priv_session_attr_store(field) \ --static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO, \ -+static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ - show_priv_session_##field, \ - store_priv_session_##field) - 
iscsi_priv_session_rw_attr(recovery_tmo, "%d"); -diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c -index 7f5a6a8..3b00e90 100644 ---- a/drivers/scsi/ses.c -+++ b/drivers/scsi/ses.c -@@ -390,9 +390,9 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, - len = (desc_ptr[2] << 8) + desc_ptr[3]; - /* skip past overall descriptor */ - desc_ptr += len + 4; -- if (ses_dev->page10) -- addl_desc_ptr = ses_dev->page10 + 8; - } -+ if (ses_dev->page10) -+ addl_desc_ptr = ses_dev->page10 + 8; - type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; - components = 0; - for (i = 0; i < types; i++, type_ptr += 4) { -diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c -index 45a627d..09e596a 100644 ---- a/drivers/staging/hv/channel.c -+++ b/drivers/staging/hv/channel.c -@@ -76,14 +76,14 @@ static void vmbus_setevent(struct vmbus_channel *channel) - - if (channel->offermsg.monitor_allocated) { - /* Each u32 represents 32 channels */ -- set_bit(channel->offermsg.child_relid & 31, -+ sync_set_bit(channel->offermsg.child_relid & 31, - (unsigned long *) gVmbusConnection.SendInterruptPage + - (channel->offermsg.child_relid >> 5)); - - monitorpage = gVmbusConnection.MonitorPages; - monitorpage++; /* Get the child to parent monitor page */ - -- set_bit(channel->monitor_bit, -+ sync_set_bit(channel->monitor_bit, - (unsigned long *)&monitorpage->trigger_group - [channel->monitor_grp].pending); - -@@ -99,7 +99,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel) - - if (Channel->offermsg.monitor_allocated) { - /* Each u32 represents 32 channels */ -- clear_bit(Channel->offermsg.child_relid & 31, -+ sync_clear_bit(Channel->offermsg.child_relid & 31, - (unsigned long *)gVmbusConnection.SendInterruptPage + - (Channel->offermsg.child_relid >> 5)); - -@@ -107,7 +107,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel) - (struct hv_monitor_page *)gVmbusConnection.MonitorPages; - monitorPage++; /* Get the child to parent 
monitor page */ - -- clear_bit(Channel->monitor_bit, -+ sync_clear_bit(Channel->monitor_bit, - (unsigned long *)&monitorPage->trigger_group - [Channel->monitor_grp].Pending); - } -diff --git a/drivers/staging/hv/connection.c b/drivers/staging/hv/connection.c -index c2e298f..0739eb7 100644 ---- a/drivers/staging/hv/connection.c -+++ b/drivers/staging/hv/connection.c -@@ -281,7 +281,7 @@ void VmbusOnEvents(void) - for (dword = 0; dword < maxdword; dword++) { - if (recvInterruptPage[dword]) { - for (bit = 0; bit < 32; bit++) { -- if (test_and_clear_bit(bit, (unsigned long *)&recvInterruptPage[dword])) { -+ if (sync_test_and_clear_bit(bit, (unsigned long *)&recvInterruptPage[dword])) { - relid = (dword << 5) + bit; - DPRINT_DBG(VMBUS, "event detected for relid - %d", relid); - -@@ -320,7 +320,7 @@ int VmbusPostMessage(void *buffer, size_t bufferLen) - int VmbusSetEvent(u32 childRelId) - { - /* Each u32 represents 32 channels */ -- set_bit(childRelId & 31, -+ sync_set_bit(childRelId & 31, - (unsigned long *)gVmbusConnection.SendInterruptPage + - (childRelId >> 5)); - -diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c -index b41c964..f433add 100644 ---- a/drivers/staging/hv/netvsc_drv.c -+++ b/drivers/staging/hv/netvsc_drv.c -@@ -46,6 +46,7 @@ struct net_device_context { - /* point back to our device context */ - struct vm_device *device_ctx; - unsigned long avail; -+ struct work_struct work; - }; - - struct netvsc_driver_context { -@@ -225,6 +226,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj, - unsigned int status) - { - struct vm_device *device_ctx = to_vm_device(device_obj); -+ struct net_device_context *ndev_ctx; - struct net_device *net = dev_get_drvdata(&device_ctx->device); - - if (!net) { -@@ -237,6 +239,8 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj, - netif_carrier_on(net); - netif_wake_queue(net); - netif_notify_peers(net); -+ ndev_ctx = netdev_priv(net); -+ 
schedule_work(&ndev_ctx->work); - } else { - netif_carrier_off(net); - netif_stop_queue(net); -@@ -336,6 +340,25 @@ static const struct net_device_ops device_ops = { - .ndo_set_mac_address = eth_mac_addr, - }; - -+/* -+ * Send GARP packet to network peers after migrations. -+ * After Quick Migration, the network is not immediately operational in the -+ * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add -+ * another netif_notify_peers() into a scheduled work, otherwise GARP packet -+ * will not be sent after quick migration, and cause network disconnection. -+ */ -+static void netvsc_send_garp(struct work_struct *w) -+{ -+ struct net_device_context *ndev_ctx; -+ struct net_device *net; -+ -+ msleep(20); -+ ndev_ctx = container_of(w, struct net_device_context, work); -+ net = dev_get_drvdata(&ndev_ctx->device_ctx->device); -+ netif_notify_peers(net); -+} -+ -+ - static int netvsc_probe(struct device *device) - { - struct driver_context *driver_ctx = -@@ -364,6 +387,7 @@ static int netvsc_probe(struct device *device) - net_device_ctx->device_ctx = device_ctx; - net_device_ctx->avail = ring_size; - dev_set_drvdata(device, net); -+ INIT_WORK(&net_device_ctx->work, netvsc_send_garp); - - /* Notify the netvsc driver of the new device */ - ret = net_drv_obj->base.OnDeviceAdd(device_obj, &device_info); -diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c -index 84fdb64..87e6cf2 100644 ---- a/drivers/staging/hv/vmbus_drv.c -+++ b/drivers/staging/hv/vmbus_drv.c -@@ -291,7 +291,7 @@ static int vmbus_on_isr(struct hv_driver *drv) - event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT; - - /* Since we are a child, we only need to check bit 0 */ -- if (test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) { -+ if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) { - DPRINT_DBG(VMBUS, "received event %d", event->flags32[0]); - ret |= 0x2; - } -diff --git a/drivers/staging/hv/vmbus_private.h 
b/drivers/staging/hv/vmbus_private.h -index 07f6d22..c75b2d7 100644 ---- a/drivers/staging/hv/vmbus_private.h -+++ b/drivers/staging/hv/vmbus_private.h -@@ -31,6 +31,7 @@ - #include "channel_mgmt.h" - #include "ring_buffer.h" - #include -+#include - - - /* -diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h -index 6ff33e1..90e90f0 100644 ---- a/drivers/staging/iio/imu/adis16400.h -+++ b/drivers/staging/iio/imu/adis16400.h -@@ -17,7 +17,8 @@ - #ifndef SPI_ADIS16400_H_ - #define SPI_ADIS16400_H_ - --#define ADIS16400_STARTUP_DELAY 220 /* ms */ -+#define ADIS16400_STARTUP_DELAY 290 /* ms */ -+#define ADIS16400_MTEST_DELAY 90 /* ms */ - - #define ADIS16400_READ_REG(a) a - #define ADIS16400_WRITE_REG(a) ((a) | 0x80) -diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c -index cfb108a..2107edb 100644 ---- a/drivers/staging/iio/imu/adis16400_core.c -+++ b/drivers/staging/iio/imu/adis16400_core.c -@@ -93,7 +93,6 @@ static int adis16400_spi_write_reg_16(struct device *dev, - .tx_buf = st->tx + 2, - .bits_per_word = 8, - .len = 2, -- .cs_change = 1, - }, - }; - -@@ -137,7 +136,6 @@ static int adis16400_spi_read_reg_16(struct device *dev, - .rx_buf = st->rx, - .bits_per_word = 8, - .len = 2, -- .cs_change = 1, - }, - }; - -@@ -375,7 +373,7 @@ static int adis16400_self_test(struct device *dev) - dev_err(dev, "problem starting self test"); - goto err_ret; - } -- -+ msleep(ADIS16400_MTEST_DELAY); - adis16400_check_status(dev); - - err_ret: -@@ -497,12 +495,12 @@ err_ret: - _reg) - - static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_X, ADIS16400_XGYRO_OFF); --static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Y, ADIS16400_XGYRO_OFF); --static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Z, ADIS16400_XGYRO_OFF); -+static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Y, ADIS16400_YGYRO_OFF); -+static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Z, ADIS16400_ZGYRO_OFF); - - static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_X, ADIS16400_XACCL_OFF); --static 
ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Y, ADIS16400_XACCL_OFF); --static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Z, ADIS16400_XACCL_OFF); -+static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Y, ADIS16400_YACCL_OFF); -+static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Z, ADIS16400_ZACCL_OFF); - - - static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16400_read_14bit_signed, -diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c -index 33293fb..da28cb4 100644 ---- a/drivers/staging/iio/imu/adis16400_ring.c -+++ b/drivers/staging/iio/imu/adis16400_ring.c -@@ -122,12 +122,10 @@ static int adis16400_spi_read_burst(struct device *dev, u8 *rx) - .tx_buf = st->tx, - .bits_per_word = 8, - .len = 2, -- .cs_change = 0, - }, { - .rx_buf = rx, - .bits_per_word = 8, - .len = 24, -- .cs_change = 1, - }, - }; - -@@ -162,9 +160,10 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s) - work_trigger_to_ring); - struct iio_ring_buffer *ring = st->indio_dev->ring; - -- int i = 0; -+ int i = 0, j; - s16 *data; - size_t datasize = ring->access.get_bytes_per_datum(ring); -+ unsigned long mask = ring->scan_mask; - - data = kmalloc(datasize , GFP_KERNEL); - if (data == NULL) { -@@ -174,9 +173,12 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s) - - if (ring->scan_count) - if (adis16400_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0) -- for (; i < ring->scan_count; i++) -+ for (; i < ring->scan_count; i++) { -+ j = __ffs(mask); -+ mask &= ~(1 << j); - data[i] = be16_to_cpup( -- (__be16 *)&(st->rx[i*2])); -+ (__be16 *)&(st->rx[j*2])); -+ } - - /* Guaranteed to be aligned with 8 byte boundary */ - if (ring->scan_timestamp) -diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c -index ae6ac82..8e60332 100644 ---- a/drivers/staging/usbip/stub_rx.c -+++ b/drivers/staging/usbip/stub_rx.c -@@ -170,33 +170,23 @@ static int tweak_set_configuration_cmd(struct urb *urb) - - static int tweak_reset_device_cmd(struct urb 
*urb) - { -- struct usb_ctrlrequest *req; -- __u16 value; -- __u16 index; -- int ret; -- -- req = (struct usb_ctrlrequest *) urb->setup_packet; -- value = le16_to_cpu(req->wValue); -- index = le16_to_cpu(req->wIndex); -- -- usbip_uinfo("reset_device (port %d) to %s\n", index, -- dev_name(&urb->dev->dev)); -+ struct stub_priv *priv = (struct stub_priv *) urb->context; -+ struct stub_device *sdev = priv->sdev; - -- /* all interfaces should be owned by usbip driver, so just reset it. */ -- ret = usb_lock_device_for_reset(urb->dev, NULL); -- if (ret < 0) { -- dev_err(&urb->dev->dev, "lock for reset\n"); -- return ret; -- } -- -- /* try to reset the device */ -- ret = usb_reset_device(urb->dev); -- if (ret < 0) -- dev_err(&urb->dev->dev, "device reset\n"); -+ usbip_uinfo("reset_device %s\n", dev_name(&urb->dev->dev)); - -- usb_unlock_device(urb->dev); -- -- return ret; -+ /* -+ * usb_lock_device_for_reset caused a deadlock: it causes the driver -+ * to unbind. In the shutdown the rx thread is signalled to shut down -+ * but this thread is pending in the usb_lock_device_for_reset. -+ * -+ * Instead queue the reset. -+ * -+ * Unfortunatly an existing usbip connection will be dropped due to -+ * driver unbinding. 
-+ */ -+ usb_queue_reset_device(sdev->interface); -+ return 0; - } - - /* -diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/staging/usbip/stub_tx.c -index d7136e2..b7a493c 100644 ---- a/drivers/staging/usbip/stub_tx.c -+++ b/drivers/staging/usbip/stub_tx.c -@@ -169,7 +169,6 @@ static int stub_send_ret_submit(struct stub_device *sdev) - struct stub_priv *priv, *tmp; - - struct msghdr msg; -- struct kvec iov[3]; - size_t txsize; - - size_t total_size = 0; -@@ -179,28 +178,73 @@ static int stub_send_ret_submit(struct stub_device *sdev) - struct urb *urb = priv->urb; - struct usbip_header pdu_header; - void *iso_buffer = NULL; -+ struct kvec *iov = NULL; -+ int iovnum = 0; - - txsize = 0; - memset(&pdu_header, 0, sizeof(pdu_header)); - memset(&msg, 0, sizeof(msg)); -- memset(&iov, 0, sizeof(iov)); - -- usbip_dbg_stub_tx("setup txdata urb %p\n", urb); -+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) -+ iovnum = 2 + urb->number_of_packets; -+ else -+ iovnum = 2; -+ -+ iov = kzalloc(iovnum * sizeof(struct kvec), GFP_KERNEL); - -+ if (!iov) { -+ usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC); -+ return -1; -+ } -+ -+ iovnum = 0; - - /* 1. setup usbip_header */ - setup_ret_submit_pdu(&pdu_header, urb); -+ usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", -+ pdu_header.base.seqnum, urb); -+ /*usbip_dump_header(pdu_header);*/ - usbip_header_correct_endian(&pdu_header, 1); - -- iov[0].iov_base = &pdu_header; -- iov[0].iov_len = sizeof(pdu_header); -+ iov[iovnum].iov_base = &pdu_header; -+ iov[iovnum].iov_len = sizeof(pdu_header); -+ iovnum++; - txsize += sizeof(pdu_header); - - /* 2. 
setup transfer buffer */ -- if (usb_pipein(urb->pipe) && urb->actual_length > 0) { -- iov[1].iov_base = urb->transfer_buffer; -- iov[1].iov_len = urb->actual_length; -+ if (usb_pipein(urb->pipe) && -+ usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS && -+ urb->actual_length > 0) { -+ iov[iovnum].iov_base = urb->transfer_buffer; -+ iov[iovnum].iov_len = urb->actual_length; -+ iovnum++; - txsize += urb->actual_length; -+ } else if (usb_pipein(urb->pipe) && -+ usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { -+ /* -+ * For isochronous packets: actual length is the sum of -+ * the actual length of the individual, packets, but as -+ * the packet offsets are not changed there will be -+ * padding between the packets. To optimally use the -+ * bandwidth the padding is not transmitted. -+ */ -+ -+ int i; -+ for (i = 0; i < urb->number_of_packets; i++) { -+ iov[iovnum].iov_base = urb->transfer_buffer + urb->iso_frame_desc[i].offset; -+ iov[iovnum].iov_len = urb->iso_frame_desc[i].actual_length; -+ iovnum++; -+ txsize += urb->iso_frame_desc[i].actual_length; -+ } -+ -+ if (txsize != sizeof(pdu_header) + urb->actual_length) { -+ dev_err(&sdev->interface->dev, -+ "actual length of urb (%d) does not match iso packet sizes (%d)\n", -+ urb->actual_length, txsize-sizeof(pdu_header)); -+ kfree(iov); -+ usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); -+ return -1; -+ } - } - - /* 3. 
setup iso_packet_descriptor */ -@@ -211,32 +255,34 @@ static int stub_send_ret_submit(struct stub_device *sdev) - if (!iso_buffer) { - usbip_event_add(&sdev->ud, - SDEV_EVENT_ERROR_MALLOC); -+ kfree(iov); - return -1; - } - -- iov[2].iov_base = iso_buffer; -- iov[2].iov_len = len; -+ iov[iovnum].iov_base = iso_buffer; -+ iov[iovnum].iov_len = len; - txsize += len; -+ iovnum++; - } - -- ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov, -- 3, txsize); -+ ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, -+ iov, iovnum, txsize); - if (ret != txsize) { - dev_err(&sdev->interface->dev, - "sendmsg failed!, retval %d for %zd\n", - ret, txsize); -+ kfree(iov); - kfree(iso_buffer); - usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); - return -1; - } - -+ kfree(iov); - kfree(iso_buffer); -- usbip_dbg_stub_tx("send txdata\n"); - - total_size += txsize; - } - -- - spin_lock_irqsave(&sdev->priv_lock, flags); - - list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) { -diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c -index 210ef16..2108ca1 100644 ---- a/drivers/staging/usbip/usbip_common.c -+++ b/drivers/staging/usbip/usbip_common.c -@@ -334,10 +334,11 @@ void usbip_dump_header(struct usbip_header *pdu) - usbip_udbg("CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum); - break; - case USBIP_RET_SUBMIT: -- usbip_udbg("RET_SUBMIT: st %d al %u sf %d ec %d\n", -+ usbip_udbg("RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n", - pdu->u.ret_submit.status, - pdu->u.ret_submit.actual_length, - pdu->u.ret_submit.start_frame, -+ pdu->u.ret_submit.number_of_packets, - pdu->u.ret_submit.error_count); - case USBIP_RET_UNLINK: - usbip_udbg("RET_UNLINK: status %d\n", pdu->u.ret_unlink.status); -@@ -625,6 +626,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb, - rpdu->status = urb->status; - rpdu->actual_length = urb->actual_length; - rpdu->start_frame = urb->start_frame; -+ rpdu->number_of_packets = urb->number_of_packets; - 
rpdu->error_count = urb->error_count; - } else { - /* vhci_rx.c */ -@@ -632,6 +634,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb, - urb->status = rpdu->status; - urb->actual_length = rpdu->actual_length; - urb->start_frame = rpdu->start_frame; -+ urb->number_of_packets = rpdu->number_of_packets; - urb->error_count = rpdu->error_count; - } - } -@@ -700,11 +703,13 @@ static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu, - cpu_to_be32s(&pdu->status); - cpu_to_be32s(&pdu->actual_length); - cpu_to_be32s(&pdu->start_frame); -+ cpu_to_be32s(&pdu->number_of_packets); - cpu_to_be32s(&pdu->error_count); - } else { - be32_to_cpus(&pdu->status); - be32_to_cpus(&pdu->actual_length); - be32_to_cpus(&pdu->start_frame); -+ cpu_to_be32s(&pdu->number_of_packets); - be32_to_cpus(&pdu->error_count); - } - } -@@ -830,6 +835,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb) - int size = np * sizeof(*iso); - int i; - int ret; -+ int total_length = 0; - - if (!usb_pipeisoc(urb->pipe)) - return 0; -@@ -859,19 +865,75 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb) - return -EPIPE; - } - -+ - for (i = 0; i < np; i++) { - iso = buff + (i * sizeof(*iso)); - - usbip_iso_pakcet_correct_endian(iso, 0); - usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0); -+ total_length += urb->iso_frame_desc[i].actual_length; - } - - kfree(buff); - -+ if (total_length != urb->actual_length) { -+ dev_err(&urb->dev->dev, -+ "total length of iso packets (%d) not equal to actual length of buffer (%d)\n", -+ total_length, urb->actual_length); -+ -+ if (ud->side == USBIP_STUB) -+ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); -+ else -+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); -+ -+ return -EPIPE; -+ } -+ - return ret; - } - EXPORT_SYMBOL_GPL(usbip_recv_iso); - -+/* -+ * This functions restores the padding which was removed for optimizing -+ * the bandwidth during transfer over tcp/ip -+ * -+ * buffer and iso packets need to be stored 
and be in propeper endian in urb -+ * before calling this function -+ */ -+int usbip_pad_iso(struct usbip_device *ud, struct urb *urb) -+{ -+ int np = urb->number_of_packets; -+ int i; -+ int ret; -+ int actualoffset = urb->actual_length; -+ -+ if (!usb_pipeisoc(urb->pipe)) -+ return 0; -+ -+ /* if no packets or length of data is 0, then nothing to unpack */ -+ if (np == 0 || urb->actual_length == 0) -+ return 0; -+ -+ /* -+ * if actual_length is transfer_buffer_length then no padding is -+ * present. -+ */ -+ if (urb->actual_length == urb->transfer_buffer_length) -+ return 0; -+ -+ /* -+ * loop over all packets from last to first (to prevent overwritting -+ * memory when padding) and move them into the proper place -+ */ -+ for (i = np-1; i > 0; i--) { -+ actualoffset -= urb->iso_frame_desc[i].actual_length; -+ memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset, -+ urb->transfer_buffer + actualoffset, -+ urb->iso_frame_desc[i].actual_length); -+ } -+ return ret; -+} -+EXPORT_SYMBOL_GPL(usbip_pad_iso); - - /* some members of urb must be substituted before. */ - int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) -diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h -index d280e23..baa4c09 100644 ---- a/drivers/staging/usbip/usbip_common.h -+++ b/drivers/staging/usbip/usbip_common.h -@@ -393,6 +393,8 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send); - int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb); - /* some members of urb must be substituted before. */ - int usbip_recv_iso(struct usbip_device *ud, struct urb *urb); -+/* some members of urb must be substituted before. 
*/ -+int usbip_pad_iso(struct usbip_device *ud, struct urb *urb); - void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen); - - -diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c -index bf69914..109002a 100644 ---- a/drivers/staging/usbip/vhci_rx.c -+++ b/drivers/staging/usbip/vhci_rx.c -@@ -99,6 +99,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, - if (usbip_recv_iso(ud, urb) < 0) - return; - -+ /* restore the padding in iso packets */ -+ if (usbip_pad_iso(ud, urb) < 0) -+ return; - - if (usbip_dbg_flag_vhci_rx) - usbip_dump_urb(urb); -diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c -index 596ba60..51b5551 100644 ---- a/drivers/watchdog/davinci_wdt.c -+++ b/drivers/watchdog/davinci_wdt.c -@@ -202,7 +202,6 @@ static struct miscdevice davinci_wdt_miscdev = { - static int __devinit davinci_wdt_probe(struct platform_device *pdev) - { - int ret = 0, size; -- struct resource *res; - struct device *dev = &pdev->dev; - - wdt_clk = clk_get(dev, NULL); -@@ -216,31 +215,31 @@ static int __devinit davinci_wdt_probe(struct platform_device *pdev) - - dev_info(dev, "heartbeat %d sec\n", heartbeat); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- if (res == NULL) { -+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (wdt_mem == NULL) { - dev_err(dev, "failed to get memory region resource\n"); - return -ENOENT; - } - -- size = resource_size(res); -- wdt_mem = request_mem_region(res->start, size, pdev->name); -- -- if (wdt_mem == NULL) { -+ size = resource_size(wdt_mem); -+ if (!request_mem_region(wdt_mem->start, size, pdev->name)) { - dev_err(dev, "failed to get memory region\n"); - return -ENOENT; - } - -- wdt_base = ioremap(res->start, size); -+ wdt_base = ioremap(wdt_mem->start, size); - if (!wdt_base) { - dev_err(dev, "failed to map memory region\n"); -+ release_mem_region(wdt_mem->start, size); -+ wdt_mem = NULL; - return -ENOMEM; - } - - ret = 
misc_register(&davinci_wdt_miscdev); - if (ret < 0) { - dev_err(dev, "cannot register misc device\n"); -- release_resource(wdt_mem); -- kfree(wdt_mem); -+ release_mem_region(wdt_mem->start, size); -+ wdt_mem = NULL; - } else { - set_bit(WDT_DEVICE_INITED, &wdt_status); - } -@@ -253,8 +252,7 @@ static int __devexit davinci_wdt_remove(struct platform_device *pdev) - { - misc_deregister(&davinci_wdt_miscdev); - if (wdt_mem) { -- release_resource(wdt_mem); -- kfree(wdt_mem); -+ release_mem_region(wdt_mem->start, resource_size(wdt_mem)); - wdt_mem = NULL; - } - -diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c -index 3053ff0..1fe9bc5 100644 ---- a/drivers/watchdog/max63xx_wdt.c -+++ b/drivers/watchdog/max63xx_wdt.c -@@ -270,7 +270,6 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) - { - int ret = 0; - int size; -- struct resource *res; - struct device *dev = &pdev->dev; - struct max63xx_timeout *table; - -@@ -294,21 +293,19 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) - - max63xx_pdev = pdev; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- if (res == NULL) { -+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (wdt_mem == NULL) { - dev_err(dev, "failed to get memory region resource\n"); - return -ENOENT; - } - -- size = resource_size(res); -- wdt_mem = request_mem_region(res->start, size, pdev->name); -- -- if (wdt_mem == NULL) { -+ size = resource_size(wdt_mem); -+ if (!request_mem_region(wdt_mem->start, size, pdev->name)) { - dev_err(dev, "failed to get memory region\n"); - return -ENOENT; - } - -- wdt_base = ioremap(res->start, size); -+ wdt_base = ioremap(wdt_mem->start, size); - if (!wdt_base) { - dev_err(dev, "failed to map memory region\n"); - ret = -ENOMEM; -@@ -326,8 +323,8 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) - out_unmap: - iounmap(wdt_base); - out_request: -- release_resource(wdt_mem); -- kfree(wdt_mem); -+ 
release_mem_region(wdt_mem->start, size); -+ wdt_mem = NULL; - - return ret; - } -@@ -336,8 +333,7 @@ static int __devexit max63xx_wdt_remove(struct platform_device *pdev) - { - misc_deregister(&max63xx_wdt_miscdev); - if (wdt_mem) { -- release_resource(wdt_mem); -- kfree(wdt_mem); -+ release_mem_region(wdt_mem->start, resource_size(wdt_mem)); - wdt_mem = NULL; - } - -diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c -index bf5b97c..8c8c7d5 100644 ---- a/drivers/watchdog/pnx4008_wdt.c -+++ b/drivers/watchdog/pnx4008_wdt.c -@@ -254,7 +254,6 @@ static struct miscdevice pnx4008_wdt_miscdev = { - static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) - { - int ret = 0, size; -- struct resource *res; - - if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) - heartbeat = DEFAULT_HEARTBEAT; -@@ -262,42 +261,42 @@ static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) - printk(KERN_INFO MODULE_NAME - "PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- if (res == NULL) { -+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (wdt_mem == NULL) { - printk(KERN_INFO MODULE_NAME - "failed to get memory region resouce\n"); - return -ENOENT; - } - -- size = resource_size(res); -- wdt_mem = request_mem_region(res->start, size, pdev->name); -+ size = resource_size(wdt_mem); - -- if (wdt_mem == NULL) { -+ if (!request_mem_region(wdt_mem->start, size, pdev->name)) { - printk(KERN_INFO MODULE_NAME "failed to get memory region\n"); - return -ENOENT; - } -- wdt_base = (void __iomem *)IO_ADDRESS(res->start); -+ wdt_base = (void __iomem *)IO_ADDRESS(wdt_mem->start); - - wdt_clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(wdt_clk)) { - ret = PTR_ERR(wdt_clk); -- release_resource(wdt_mem); -- kfree(wdt_mem); -+ release_mem_region(wdt_mem->start, size); -+ wdt_mem = NULL; - goto out; - } - - ret = clk_enable(wdt_clk); - if (ret) { -- release_resource(wdt_mem); -- 
kfree(wdt_mem); -+ release_mem_region(wdt_mem->start, size); -+ wdt_mem = NULL; -+ clk_put(wdt_clk); - goto out; - } - - ret = misc_register(&pnx4008_wdt_miscdev); - if (ret < 0) { - printk(KERN_ERR MODULE_NAME "cannot register misc device\n"); -- release_resource(wdt_mem); -- kfree(wdt_mem); -+ release_mem_region(wdt_mem->start, size); -+ wdt_mem = NULL; - clk_disable(wdt_clk); - clk_put(wdt_clk); - } else { -@@ -320,8 +319,7 @@ static int __devexit pnx4008_wdt_remove(struct platform_device *pdev) - clk_put(wdt_clk); - - if (wdt_mem) { -- release_resource(wdt_mem); -- kfree(wdt_mem); -+ release_mem_region(wdt_mem->start, resource_size(wdt_mem)); - wdt_mem = NULL; - } - return 0; -diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c -index ae53662..8303c57 100644 ---- a/drivers/watchdog/s3c2410_wdt.c -+++ b/drivers/watchdog/s3c2410_wdt.c -@@ -402,7 +402,6 @@ static inline void s3c2410wdt_cpufreq_deregister(void) - - static int __devinit s3c2410wdt_probe(struct platform_device *pdev) - { -- struct resource *res; - struct device *dev; - unsigned int wtcon; - int started = 0; -@@ -416,20 +415,19 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) - - /* get the memory region for the watchdog timer */ - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- if (res == NULL) { -+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (wdt_mem == NULL) { - dev_err(dev, "no memory resource specified\n"); - return -ENOENT; - } - -- size = resource_size(res); -- wdt_mem = request_mem_region(res->start, size, pdev->name); -- if (wdt_mem == NULL) { -+ size = resource_size(wdt_mem); -+ if (!request_mem_region(wdt_mem->start, size, pdev->name)) { - dev_err(dev, "failed to get memory region\n"); - return -EBUSY; - } - -- wdt_base = ioremap(res->start, size); -+ wdt_base = ioremap(wdt_mem->start, size); - if (wdt_base == NULL) { - dev_err(dev, "failed to ioremap() region\n"); - ret = -EINVAL; -@@ -524,8 +522,8 @@ static int 
__devinit s3c2410wdt_probe(struct platform_device *pdev) - iounmap(wdt_base); - - err_req: -- release_resource(wdt_mem); -- kfree(wdt_mem); -+ release_mem_region(wdt_mem->start, size); -+ wdt_mem = NULL; - - return ret; - } -@@ -545,8 +543,7 @@ static int __devexit s3c2410wdt_remove(struct platform_device *dev) - - iounmap(wdt_base); - -- release_resource(wdt_mem); -- kfree(wdt_mem); -+ release_mem_region(wdt_mem->start, resource_size(wdt_mem)); - wdt_mem = NULL; - return 0; - } -diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c -index 8083728..c7ea4be 100644 ---- a/drivers/watchdog/sp5100_tco.c -+++ b/drivers/watchdog/sp5100_tco.c -@@ -42,6 +42,7 @@ - #define PFX TCO_MODULE_NAME ": " - - /* internal variables */ -+static u32 tcobase_phys; - static void __iomem *tcobase; - static unsigned int pm_iobase; - static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ -@@ -305,10 +306,18 @@ static unsigned char __devinit sp5100_tco_setupdevice(void) - /* Low three bits of BASE0 are reserved. 
*/ - val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8); - -+ if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE, -+ "SP5100 TCO")) { -+ printk(KERN_ERR PFX "mmio address 0x%04x already in use\n", -+ val); -+ goto unreg_region; -+ } -+ tcobase_phys = val; -+ - tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE); - if (tcobase == 0) { - printk(KERN_ERR PFX "failed to get tcobase address\n"); -- goto unreg_region; -+ goto unreg_mem_region; - } - - /* Enable watchdog decode bit */ -@@ -346,7 +355,8 @@ static unsigned char __devinit sp5100_tco_setupdevice(void) - /* Done */ - return 1; - -- iounmap(tcobase); -+unreg_mem_region: -+ release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); - unreg_region: - release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); - exit: -@@ -401,6 +411,7 @@ static int __devinit sp5100_tco_init(struct platform_device *dev) - - exit: - iounmap(tcobase); -+ release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); - release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); - return ret; - } -@@ -414,6 +425,7 @@ static void __devexit sp5100_tco_cleanup(void) - /* Deregister */ - misc_deregister(&sp5100_tco_miscdev); - iounmap(tcobase); -+ release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); - release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); - } - -diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h -index 7f78cc7..bd64b41 100644 ---- a/fs/btrfs/ctree.h -+++ b/fs/btrfs/ctree.h -@@ -1284,6 +1284,8 @@ struct btrfs_root { - #define BTRFS_INODE_NOATIME (1 << 9) - #define BTRFS_INODE_DIRSYNC (1 << 10) - -+#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) -+ - /* some macros to generate set/get funcs for the struct fields. 
This - * assumes there is a lefoo_to_cpu for every type, so lets make a simple - * one for u8: -@@ -2355,6 +2357,8 @@ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); - int btrfs_find_orphan_roots(struct btrfs_root *tree_root); - int btrfs_set_root_node(struct btrfs_root_item *item, - struct extent_buffer *node); -+void btrfs_check_and_init_root_item(struct btrfs_root_item *item); -+ - /* dir-item.c */ - int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, - struct btrfs_root *root, const char *name, -diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c -index e1aa8d6..edd9efa 100644 ---- a/fs/btrfs/disk-io.c -+++ b/fs/btrfs/disk-io.c -@@ -1184,8 +1184,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, - root->commit_root = btrfs_root_node(root); - BUG_ON(!root->node); - out: -- if (location->objectid != BTRFS_TREE_LOG_OBJECTID) -+ if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { - root->ref_cows = 1; -+ btrfs_check_and_init_root_item(&root->root_item); -+ } - - return root; - } -diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c -index 5fdb2ab..2ff51e6 100644 ---- a/fs/btrfs/ioctl.c -+++ b/fs/btrfs/ioctl.c -@@ -294,6 +294,10 @@ static noinline int create_subvol(struct btrfs_root *root, - inode_item->nbytes = cpu_to_le64(root->leafsize); - inode_item->mode = cpu_to_le32(S_IFDIR | 0755); - -+ root_item.flags = 0; -+ root_item.byte_limit = 0; -+ inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT); -+ - btrfs_set_root_bytenr(&root_item, leaf->start); - btrfs_set_root_generation(&root_item, trans->transid); - btrfs_set_root_level(&root_item, 0); -diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c -index 6a1086e..3e45c32 100644 ---- a/fs/btrfs/root-tree.c -+++ b/fs/btrfs/root-tree.c -@@ -471,3 +471,21 @@ again: - btrfs_free_path(path); - return 0; - } -+ -+/* -+ * Old btrfs forgets to init root_item->flags and root_item->byte_limit -+ * for subvolumes. 
To work around this problem, we steal a bit from -+ * root_item->inode_item->flags, and use it to indicate if those fields -+ * have been properly initialized. -+ */ -+void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item) -+{ -+ u64 inode_flags = le64_to_cpu(root_item->inode.flags); -+ -+ if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) { -+ inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT; -+ root_item->inode.flags = cpu_to_le64(inode_flags); -+ root_item->flags = 0; -+ root_item->byte_limit = 0; -+ } -+} -diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c -index 3d73c8d..f3d6681 100644 ---- a/fs/btrfs/transaction.c -+++ b/fs/btrfs/transaction.c -@@ -970,6 +970,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, - record_root_in_trans(trans, root); - btrfs_set_root_last_snapshot(&root->root_item, trans->transid); - memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); -+ btrfs_check_and_init_root_item(new_root_item); - - root_flags = btrfs_root_flags(new_root_item); - if (pending->readonly) -diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c -index c1436cf..4feb78c 100644 ---- a/fs/ecryptfs/keystore.c -+++ b/fs/ecryptfs/keystore.c -@@ -1563,6 +1563,7 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, - printk(KERN_ERR "Could not find key with description: [%s]\n", - sig); - rc = process_request_key_err(PTR_ERR(*auth_tok_key)); -+ (*auth_tok_key) = NULL; - goto out; - } - (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key); -diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c -index cc64fca..eb9d967 100644 ---- a/fs/ecryptfs/mmap.c -+++ b/fs/ecryptfs/mmap.c -@@ -374,6 +374,11 @@ static int ecryptfs_write_begin(struct file *file, - && (pos != 0)) - zero_user(page, 0, PAGE_CACHE_SIZE); - out: -+ if (unlikely(rc)) { -+ unlock_page(page); -+ page_cache_release(page); -+ *pagep = NULL; -+ } - return rc; - } - -diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c -index 
9f7f9e4..fee51db 100644 ---- a/fs/ext4/inode.c -+++ b/fs/ext4/inode.c -@@ -5460,13 +5460,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, - /* if nrblocks are contiguous */ - if (chunk) { - /* -- * With N contiguous data blocks, it need at most -- * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks -- * 2 dindirect blocks -- * 1 tindirect block -+ * With N contiguous data blocks, we need at most -+ * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, -+ * 2 dindirect blocks, and 1 tindirect block - */ -- indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); -- return indirects + 3; -+ return DIV_ROUND_UP(nrblocks, -+ EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; - } - /* - * if nrblocks are not contiguous, worse case, each block touch -diff --git a/fs/ext4/super.c b/fs/ext4/super.c -index 4381efe..243deb0 100644 ---- a/fs/ext4/super.c -+++ b/fs/ext4/super.c -@@ -2978,6 +2978,12 @@ static int ext4_register_li_request(struct super_block *sb, - mutex_unlock(&ext4_li_info->li_list_mtx); - - sbi->s_li_request = elr; -+ /* -+ * set elr to NULL here since it has been inserted to -+ * the request_list and the removal and free of it is -+ * handled by ext4_clear_request_list from now on. -+ */ -+ elr = NULL; - - if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { - ret = ext4_run_lazyinit_thread(); -diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c -index 0c6d816..7c831a2 100644 ---- a/fs/nfsd/lockd.c -+++ b/fs/nfsd/lockd.c -@@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp) - exp_readlock(); - nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp); - fh_put(&fh); -- rqstp->rq_client = NULL; - exp_readunlock(); - /* We return nlm error codes as nlm doesn't know - * about nfsd, but nfsd does know about nlm.. 
-diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c -index f0e448a..96aaaa4 100644 ---- a/fs/nfsd/nfs4state.c -+++ b/fs/nfsd/nfs4state.c -@@ -397,10 +397,13 @@ static void unhash_generic_stateid(struct nfs4_stateid *stp) - - static void free_generic_stateid(struct nfs4_stateid *stp) - { -- int oflag = nfs4_access_bmap_to_omode(stp); -+ int oflag; - -- nfs4_file_put_access(stp->st_file, oflag); -- put_nfs4_file(stp->st_file); -+ if (stp->st_access_bmap) { -+ oflag = nfs4_access_bmap_to_omode(stp); -+ nfs4_file_put_access(stp->st_file, oflag); -+ put_nfs4_file(stp->st_file); -+ } - kmem_cache_free(stateid_slab, stp); - } - -diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c -index 2f560c9..f49e628 100644 ---- a/fs/nilfs2/file.c -+++ b/fs/nilfs2/file.c -@@ -72,10 +72,9 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) - /* - * check to see if the page is mapped already (no holes) - */ -- if (PageMappedToDisk(page)) { -- unlock_page(page); -+ if (PageMappedToDisk(page)) - goto mapped; -- } -+ - if (page_has_buffers(page)) { - struct buffer_head *bh, *head; - int fully_mapped = 1; -@@ -90,7 +89,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) - - if (fully_mapped) { - SetPageMappedToDisk(page); -- unlock_page(page); - goto mapped; - } - } -@@ -105,16 +103,17 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) - return VM_FAULT_SIGBUS; - - ret = block_page_mkwrite(vma, vmf, nilfs_get_block); -- if (unlikely(ret)) { -+ if (ret != VM_FAULT_LOCKED) { - nilfs_transaction_abort(inode->i_sb); - return ret; - } -+ nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits)); - nilfs_transaction_commit(inode->i_sb); - - mapped: - SetPageChecked(page); - wait_on_page_writeback(page); -- return 0; -+ return VM_FAULT_LOCKED; - } - - static const struct vm_operations_struct nilfs_file_vm_ops = { -diff --git a/fs/notify/inotify/inotify_fsnotify.c 
b/fs/notify/inotify/inotify_fsnotify.c -index a91b69a..0348d0c 100644 ---- a/fs/notify/inotify/inotify_fsnotify.c -+++ b/fs/notify/inotify/inotify_fsnotify.c -@@ -198,6 +198,7 @@ static void inotify_free_group_priv(struct fsnotify_group *group) - idr_for_each(&group->inotify_data.idr, idr_callback, group); - idr_remove_all(&group->inotify_data.idr); - idr_destroy(&group->inotify_data.idr); -+ atomic_dec(&group->inotify_data.user->inotify_devs); - free_uid(group->inotify_data.user); - } - -diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c -index 4cd5d5d..aec9b4a 100644 ---- a/fs/notify/inotify/inotify_user.c -+++ b/fs/notify/inotify/inotify_user.c -@@ -290,7 +290,6 @@ static int inotify_fasync(int fd, struct file *file, int on) - static int inotify_release(struct inode *ignored, struct file *file) - { - struct fsnotify_group *group = file->private_data; -- struct user_struct *user = group->inotify_data.user; - - pr_debug("%s: group=%p\n", __func__, group); - -@@ -299,8 +298,6 @@ static int inotify_release(struct inode *ignored, struct file *file) - /* free this group, matching get was inotify_init->fsnotify_obtain_group */ - fsnotify_put_group(group); - -- atomic_dec(&user->inotify_devs); -- - return 0; - } - -@@ -697,7 +694,7 @@ retry: - return ret; - } - --static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events) -+static struct fsnotify_group *inotify_new_group(unsigned int max_events) - { - struct fsnotify_group *group; - -@@ -710,8 +707,14 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign - spin_lock_init(&group->inotify_data.idr_lock); - idr_init(&group->inotify_data.idr); - group->inotify_data.last_wd = 0; -- group->inotify_data.user = user; - group->inotify_data.fa = NULL; -+ group->inotify_data.user = get_current_user(); -+ -+ if (atomic_inc_return(&group->inotify_data.user->inotify_devs) > -+ inotify_max_user_instances) { -+ fsnotify_put_group(group); 
-+ return ERR_PTR(-EMFILE); -+ } - - return group; - } -@@ -721,7 +724,6 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign - SYSCALL_DEFINE1(inotify_init1, int, flags) - { - struct fsnotify_group *group; -- struct user_struct *user; - int ret; - - /* Check the IN_* constants for consistency. */ -@@ -731,31 +733,16 @@ SYSCALL_DEFINE1(inotify_init1, int, flags) - if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) - return -EINVAL; - -- user = get_current_user(); -- if (unlikely(atomic_read(&user->inotify_devs) >= -- inotify_max_user_instances)) { -- ret = -EMFILE; -- goto out_free_uid; -- } -- - /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ -- group = inotify_new_group(user, inotify_max_queued_events); -- if (IS_ERR(group)) { -- ret = PTR_ERR(group); -- goto out_free_uid; -- } -- -- atomic_inc(&user->inotify_devs); -+ group = inotify_new_group(inotify_max_queued_events); -+ if (IS_ERR(group)) -+ return PTR_ERR(group); - - ret = anon_inode_getfd("inotify", &inotify_fops, group, - O_RDONLY | flags); -- if (ret >= 0) -- return ret; -+ if (ret < 0) -+ fsnotify_put_group(group); - -- fsnotify_put_group(group); -- atomic_dec(&user->inotify_devs); --out_free_uid: -- free_uid(user); - return ret; - } - -diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c -index 1fbb0e2..bbba782 100644 ---- a/fs/ocfs2/aops.c -+++ b/fs/ocfs2/aops.c -@@ -1026,6 +1026,12 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, - ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, - &cluster_start, &cluster_end); - -+ /* treat the write as new if the a hole/lseek spanned across -+ * the page boundary. 
-+ */ -+ new = new | ((i_size_read(inode) <= page_offset(page)) && -+ (page_offset(page) <= user_pos)); -+ - if (page == wc->w_target_page) { - map_from = user_pos & (PAGE_CACHE_SIZE - 1); - map_to = map_from + user_len; -diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c -index a2a622e..b59ee61 100644 ---- a/fs/quota/dquot.c -+++ b/fs/quota/dquot.c -@@ -442,7 +442,7 @@ EXPORT_SYMBOL(dquot_acquire); - */ - int dquot_commit(struct dquot *dquot) - { -- int ret = 0, ret2 = 0; -+ int ret = 0; - struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - - mutex_lock(&dqopt->dqio_mutex); -@@ -454,15 +454,10 @@ int dquot_commit(struct dquot *dquot) - spin_unlock(&dq_list_lock); - /* Inactive dquot can be only if there was error during read/init - * => we have better not writing it */ -- if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { -+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) - ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); -- if (info_dirty(&dqopt->info[dquot->dq_type])) { -- ret2 = dqopt->ops[dquot->dq_type]->write_file_info( -- dquot->dq_sb, dquot->dq_type); -- } -- if (ret >= 0) -- ret = ret2; -- } -+ else -+ ret = -EIO; - out_sem: - mutex_unlock(&dqopt->dqio_mutex); - return ret; -diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c -index 0dc340a..3f79cd1 100644 ---- a/fs/squashfs/dir.c -+++ b/fs/squashfs/dir.c -@@ -172,6 +172,11 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) - length += sizeof(dirh); - - dir_count = le32_to_cpu(dirh.count) + 1; -+ -+ /* dir_count should never be larger than 256 */ -+ if (dir_count > 256) -+ goto failed_read; -+ - while (dir_count--) { - /* - * Read directory entry. 
-@@ -183,6 +188,10 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) - - size = le16_to_cpu(dire->size) + 1; - -+ /* size should never be larger than SQUASHFS_NAME_LEN */ -+ if (size > SQUASHFS_NAME_LEN) -+ goto failed_read; -+ - err = squashfs_read_metadata(inode->i_sb, dire->name, - &block, &offset, size); - if (err < 0) -diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c -index 7a9464d..5d922a6 100644 ---- a/fs/squashfs/namei.c -+++ b/fs/squashfs/namei.c -@@ -176,6 +176,11 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, - length += sizeof(dirh); - - dir_count = le32_to_cpu(dirh.count) + 1; -+ -+ /* dir_count should never be larger than 256 */ -+ if (dir_count > 256) -+ goto data_error; -+ - while (dir_count--) { - /* - * Read directory entry. -@@ -187,6 +192,10 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, - - size = le16_to_cpu(dire->size) + 1; - -+ /* size should never be larger than SQUASHFS_NAME_LEN */ -+ if (size > SQUASHFS_NAME_LEN) -+ goto data_error; -+ - err = squashfs_read_metadata(dir->i_sb, dire->name, - &block, &offset, size); - if (err < 0) -@@ -228,6 +237,9 @@ exit_lookup: - d_add(dentry, inode); - return ERR_PTR(0); - -+data_error: -+ err = -EIO; -+ - read_failure: - ERROR("Unable to read directory block [%llx:%x]\n", - squashfs_i(dir)->start + msblk->directory_table, -diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c -index 4661ae2..04ae9a5 100644 ---- a/fs/squashfs/zlib_wrapper.c -+++ b/fs/squashfs/zlib_wrapper.c -@@ -26,6 +26,7 @@ - #include - #include - #include -+#include - - #include "squashfs_fs.h" - #include "squashfs_fs_sb.h" -@@ -37,8 +38,7 @@ static void *zlib_init(struct squashfs_sb_info *dummy) - z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL); - if (stream == NULL) - goto failed; -- stream->workspace = kmalloc(zlib_inflate_workspacesize(), -- GFP_KERNEL); -+ stream->workspace = 
vmalloc(zlib_inflate_workspacesize()); - if (stream->workspace == NULL) - goto failed; - -@@ -56,7 +56,7 @@ static void zlib_free(void *strm) - z_stream *stream = strm; - - if (stream) -- kfree(stream->workspace); -+ vfree(stream->workspace); - kfree(stream); - } - -diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c -index 02429d8..32bcb2c 100644 ---- a/fs/ubifs/commit.c -+++ b/fs/ubifs/commit.c -@@ -521,7 +521,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) - size_t sz; - - if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX)) -- goto out; -+ return 0; - - INIT_LIST_HEAD(&list); - -diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c -index 0bee4db..5b9e985 100644 ---- a/fs/ubifs/debug.c -+++ b/fs/ubifs/debug.c -@@ -961,11 +961,39 @@ void dbg_dump_index(struct ubifs_info *c) - void dbg_save_space_info(struct ubifs_info *c) - { - struct ubifs_debug_info *d = c->dbg; -- -- ubifs_get_lp_stats(c, &d->saved_lst); -+ int freeable_cnt; - - spin_lock(&c->space_lock); -+ memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats)); -+ -+ /* -+ * We use a dirty hack here and zero out @c->freeable_cnt, because it -+ * affects the free space calculations, and UBIFS might not know about -+ * all freeable eraseblocks. Indeed, we know about freeable eraseblocks -+ * only when we read their lprops, and we do this only lazily, upon the -+ * need. So at any given point of time @c->freeable_cnt might be not -+ * exactly accurate. -+ * -+ * Just one example about the issue we hit when we did not zero -+ * @c->freeable_cnt. -+ * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the -+ * amount of free space in @d->saved_free -+ * 2. We re-mount R/W, which makes UBIFS to read the "lsave" -+ * information from flash, where we cache LEBs from various -+ * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()' -+ * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()' -+ * -> 'ubifs_get_pnode()' -> 'update_cats()' -+ * -> 'ubifs_add_to_cat()'). 
-+ * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt -+ * becomes %1. -+ * 4. We calculate the amount of free space when the re-mount is -+ * finished in 'dbg_check_space_info()' and it does not match -+ * @d->saved_free. -+ */ -+ freeable_cnt = c->freeable_cnt; -+ c->freeable_cnt = 0; - d->saved_free = ubifs_get_free_space_nolock(c); -+ c->freeable_cnt = freeable_cnt; - spin_unlock(&c->space_lock); - } - -@@ -982,12 +1010,15 @@ int dbg_check_space_info(struct ubifs_info *c) - { - struct ubifs_debug_info *d = c->dbg; - struct ubifs_lp_stats lst; -- long long avail, free; -+ long long free; -+ int freeable_cnt; - - spin_lock(&c->space_lock); -- avail = ubifs_calc_available(c, c->min_idx_lebs); -+ freeable_cnt = c->freeable_cnt; -+ c->freeable_cnt = 0; -+ free = ubifs_get_free_space_nolock(c); -+ c->freeable_cnt = freeable_cnt; - spin_unlock(&c->space_lock); -- free = ubifs_get_free_space(c); - - if (free != d->saved_free) { - ubifs_err("free space changed from %lld to %lld", -diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c -index 72775d3..ef5155e 100644 ---- a/fs/ubifs/lpt.c -+++ b/fs/ubifs/lpt.c -@@ -1270,10 +1270,9 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) - lnum = branch->lnum; - offs = branch->offs; - pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS); -- if (!pnode) { -- err = -ENOMEM; -- goto out; -- } -+ if (!pnode) -+ return -ENOMEM; -+ - if (lnum == 0) { - /* - * This pnode was not written which just means that the LEB -diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c -index 9731898..ad485b6 100644 ---- a/fs/xfs/linux-2.6/xfs_super.c -+++ b/fs/xfs/linux-2.6/xfs_super.c -@@ -1551,10 +1551,14 @@ xfs_fs_fill_super( - if (error) - goto out_free_sb; - -- error = xfs_mountfs(mp); -- if (error) -- goto out_filestream_unmount; -- -+ /* -+ * we must configure the block size in the superblock before we run the -+ * full mount process as the mount process can lookup and cache inodes. 
-+ * For the same reason we must also initialise the syncd and register -+ * the inode cache shrinker so that inodes can be reclaimed during -+ * operations like a quotacheck that iterate all inodes in the -+ * filesystem. -+ */ - sb->s_magic = XFS_SB_MAGIC; - sb->s_blocksize = mp->m_sb.sb_blocksize; - sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; -@@ -1562,6 +1566,16 @@ xfs_fs_fill_super( - sb->s_time_gran = 1; - set_posix_acl_flag(sb); - -+ error = xfs_syncd_init(mp); -+ if (error) -+ goto out_filestream_unmount; -+ -+ xfs_inode_shrinker_register(mp); -+ -+ error = xfs_mountfs(mp); -+ if (error) -+ goto out_syncd_stop; -+ - root = igrab(VFS_I(mp->m_rootip)); - if (!root) { - error = ENOENT; -@@ -1577,14 +1591,11 @@ xfs_fs_fill_super( - goto fail_vnrele; - } - -- error = xfs_syncd_init(mp); -- if (error) -- goto fail_vnrele; -- -- xfs_inode_shrinker_register(mp); -- - return 0; - -+ out_syncd_stop: -+ xfs_inode_shrinker_unregister(mp); -+ xfs_syncd_stop(mp); - out_filestream_unmount: - xfs_filestream_unmount(mp); - out_free_sb: -@@ -1608,6 +1619,9 @@ xfs_fs_fill_super( - } - - fail_unmount: -+ xfs_inode_shrinker_unregister(mp); -+ xfs_syncd_stop(mp); -+ - /* - * Blow away any referenced inode in the filestreams cache. 
- * This can and will cause log traffic as inodes go inactive -diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h -index 5ff1194..6724bf3 100644 ---- a/include/drm/drm_pciids.h -+++ b/include/drm/drm_pciids.h -@@ -458,6 +458,8 @@ - {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ -+ {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ -+ {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0, 0, 0} - - #define r128_PCI_IDS \ -diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h -index 475f8c4..381f4ce 100644 ---- a/include/linux/atmdev.h -+++ b/include/linux/atmdev.h -@@ -443,6 +443,7 @@ void atm_dev_signal_change(struct atm_dev *dev, char signal); - - void vcc_insert_socket(struct sock *sk); - -+void atm_dev_release_vccs(struct atm_dev *dev); - - /* - * This is approximately the algorithm used by alloc_skb. 
-diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h -index a3c1874..a04b6ce 100644 ---- a/include/linux/ethtool.h -+++ b/include/linux/ethtool.h -@@ -591,6 +591,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data); - u32 ethtool_op_get_flags(struct net_device *dev); - int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); - void ethtool_ntuple_flush(struct net_device *dev); -+bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); - - /** - * ðtool_ops - Alter and report network device settings -diff --git a/include/linux/pci.h b/include/linux/pci.h -index 559d028..6002bca 100644 ---- a/include/linux/pci.h -+++ b/include/linux/pci.h -@@ -1002,12 +1002,11 @@ extern bool pcie_ports_auto; - #endif - - #ifndef CONFIG_PCIEASPM --static inline int pcie_aspm_enabled(void) --{ -- return 0; --} -+static inline int pcie_aspm_enabled(void) { return 0; } -+static inline bool pcie_aspm_support_enabled(void) { return false; } - #else - extern int pcie_aspm_enabled(void); -+extern bool pcie_aspm_support_enabled(void); - #endif - - #ifdef CONFIG_PCIEAER -diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h -index 85867dc..bfd36ff 100644 ---- a/include/scsi/scsi_device.h -+++ b/include/scsi/scsi_device.h -@@ -461,7 +461,7 @@ static inline int scsi_device_qas(struct scsi_device *sdev) - } - static inline int scsi_device_enclosure(struct scsi_device *sdev) - { -- return sdev->inquiry[6] & (1<<6); -+ return sdev->inquiry ? 
(sdev->inquiry[6] & (1<<6)) : 1; - } - - static inline int scsi_device_protection(struct scsi_device *sdev) -diff --git a/include/sound/pcm.h b/include/sound/pcm.h -index e731f8d..ec26781 100644 ---- a/include/sound/pcm.h -+++ b/include/sound/pcm.h -@@ -1030,9 +1030,7 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s - #define snd_pcm_lib_mmap_iomem NULL - #endif - --int snd_pcm_lib_mmap_noncached(struct snd_pcm_substream *substream, -- struct vm_area_struct *area); --#define snd_pcm_lib_mmap_vmalloc snd_pcm_lib_mmap_noncached -+#define snd_pcm_lib_mmap_vmalloc NULL - - static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max) - { -diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h -index 8031769..60f94fb 100644 ---- a/include/sound/soc-dapm.h -+++ b/include/sound/soc-dapm.h -@@ -45,25 +45,25 @@ - /* platform domain */ - #define SND_SOC_DAPM_INPUT(wname) \ - { .id = snd_soc_dapm_input, .name = wname, .kcontrols = NULL, \ -- .num_kcontrols = 0} -+ .num_kcontrols = 0, .reg = SND_SOC_NOPM } - #define SND_SOC_DAPM_OUTPUT(wname) \ - { .id = snd_soc_dapm_output, .name = wname, .kcontrols = NULL, \ -- .num_kcontrols = 0} -+ .num_kcontrols = 0, .reg = SND_SOC_NOPM } - #define SND_SOC_DAPM_MIC(wname, wevent) \ - { .id = snd_soc_dapm_mic, .name = wname, .kcontrols = NULL, \ -- .num_kcontrols = 0, .event = wevent, \ -+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ - .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD} - #define SND_SOC_DAPM_HP(wname, wevent) \ - { .id = snd_soc_dapm_hp, .name = wname, .kcontrols = NULL, \ -- .num_kcontrols = 0, .event = wevent, \ -+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ - .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} - #define SND_SOC_DAPM_SPK(wname, wevent) \ - { .id = snd_soc_dapm_spk, .name = wname, .kcontrols = NULL, \ -- .num_kcontrols = 0, .event = wevent, \ -+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ - 
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} - #define SND_SOC_DAPM_LINE(wname, wevent) \ - { .id = snd_soc_dapm_line, .name = wname, .kcontrols = NULL, \ -- .num_kcontrols = 0, .event = wevent, \ -+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ - .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} - - /* path domain */ -@@ -177,11 +177,11 @@ - /* events that are pre and post DAPM */ - #define SND_SOC_DAPM_PRE(wname, wevent) \ - { .id = snd_soc_dapm_pre, .name = wname, .kcontrols = NULL, \ -- .num_kcontrols = 0, .event = wevent, \ -+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ - .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD} - #define SND_SOC_DAPM_POST(wname, wevent) \ - { .id = snd_soc_dapm_post, .name = wname, .kcontrols = NULL, \ -- .num_kcontrols = 0, .event = wevent, \ -+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ - .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD} - - /* stream domain */ -diff --git a/kernel/perf_event.c b/kernel/perf_event.c -index ad02fea..b2536bd 100644 ---- a/kernel/perf_event.c -+++ b/kernel/perf_event.c -@@ -62,7 +62,8 @@ static struct srcu_struct pmus_srcu; - */ - int sysctl_perf_event_paranoid __read_mostly = 1; - --int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ -+/* Minimum for 512 kiB + 1 user control page */ -+int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ - - /* - * max perf event sample rate -@@ -5916,6 +5917,11 @@ SYSCALL_DEFINE5(perf_event_open, - goto err_alloc; - } - -+ if (task) { -+ put_task_struct(task); -+ task = NULL; -+ } -+ - /* - * Look up the group leader (we will attach this event to it): - */ -diff --git a/kernel/signal.c b/kernel/signal.c -index 3175186..bf11d269 100644 ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -2423,7 +2423,7 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, - /* Not even root can pretend to send signals from 
the kernel. - * Nor can they impersonate a kill()/tgkill(), which adds source info. - */ -- if (info.si_code != SI_QUEUE) { -+ if (info.si_code >= 0 || info.si_code == SI_TKILL) { - /* We used to allow any < 0 si_code */ - WARN_ON_ONCE(info.si_code < 0); - return -EPERM; -@@ -2443,7 +2443,7 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) - /* Not even root can pretend to send signals from the kernel. - * Nor can they impersonate a kill()/tgkill(), which adds source info. - */ -- if (info->si_code != SI_QUEUE) { -+ if (info->si_code >= 0 || info->si_code == SI_TKILL) { - /* We used to allow any < 0 si_code */ - WARN_ON_ONCE(info->si_code < 0); - return -EPERM; -diff --git a/mm/mremap.c b/mm/mremap.c -index 1de98d4..a7c1f9f 100644 ---- a/mm/mremap.c -+++ b/mm/mremap.c -@@ -277,9 +277,16 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, - if (old_len > vma->vm_end - addr) - goto Efault; - -- if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) { -- if (new_len > old_len) -+ /* Need to be careful about a growing mapping */ -+ if (new_len > old_len) { -+ unsigned long pgoff; -+ -+ if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) - goto Efault; -+ pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; -+ pgoff += vma->vm_pgoff; -+ if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) -+ goto Einval; - } - - if (vma->vm_flags & VM_LOCKED) { -diff --git a/net/atm/common.c b/net/atm/common.c -index 1b9c52a..22b963d 100644 ---- a/net/atm/common.c -+++ b/net/atm/common.c -@@ -252,6 +252,7 @@ void atm_dev_release_vccs(struct atm_dev *dev) - } - write_unlock_irq(&vcc_sklist_lock); - } -+EXPORT_SYMBOL(atm_dev_release_vccs); - - static int adjust_tp(struct atm_trafprm *tp, unsigned char aal) - { -diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c -index 2862f53..d935da7 100644 ---- a/net/bluetooth/bnep/sock.c -+++ b/net/bluetooth/bnep/sock.c -@@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long 
- sockfd_put(nsock); - return -EBADFD; - } -+ ca.device[sizeof(ca.device)-1] = 0; - - err = bnep_add_connection(&ca, nsock); - if (!err) { -diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c -index 960c6d1..926ed39 100644 ---- a/net/bluetooth/sco.c -+++ b/net/bluetooth/sco.c -@@ -703,6 +703,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user - break; - } - -+ memset(&cinfo, 0, sizeof(cinfo)); - cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; - memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); - -diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c -index 16df053..47acf4a 100644 ---- a/net/bridge/netfilter/ebtables.c -+++ b/net/bridge/netfilter/ebtables.c -@@ -1107,6 +1107,8 @@ static int do_replace(struct net *net, const void __user *user, - if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) - return -ENOMEM; - -+ tmp.name[sizeof(tmp.name) - 1] = 0; -+ - countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; - newinfo = vmalloc(sizeof(*newinfo) + countersize); - if (!newinfo) -diff --git a/net/core/ethtool.c b/net/core/ethtool.c -index ff23029..6c7c610 100644 ---- a/net/core/ethtool.c -+++ b/net/core/ethtool.c -@@ -146,9 +146,24 @@ u32 ethtool_op_get_flags(struct net_device *dev) - } - EXPORT_SYMBOL(ethtool_op_get_flags); - -+/* Check if device can enable (or disable) particular feature coded in "data" -+ * argument. Flags "supported" describe features that can be toggled by device. -+ * If feature can not be toggled, it state (enabled or disabled) must match -+ * hardcoded device features state, otherwise flags are marked as invalid. 
-+ */ -+bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported) -+{ -+ u32 features = dev->features & flags_dup_features; -+ /* "data" can contain only flags_dup_features bits, -+ * see __ethtool_set_flags */ -+ -+ return (features & ~supported) != (data & ~supported); -+} -+EXPORT_SYMBOL(ethtool_invalid_flags); -+ - int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) - { -- if (data & ~supported) -+ if (ethtool_invalid_flags(dev, data, supported)) - return -EINVAL; - - dev->features = ((dev->features & ~flags_dup_features) | -diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c -index 0c28263..116d3fd 100644 ---- a/net/econet/af_econet.c -+++ b/net/econet/af_econet.c -@@ -435,10 +435,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, - udpdest.sin_addr.s_addr = htonl(network | addr.station); - } - -+ memset(&ah, 0, sizeof(ah)); - ah.port = port; - ah.cb = cb & 0x7f; - ah.code = 2; /* magic */ -- ah.pad = 0; - - /* tack our header on the front of the iovec */ - size = sizeof(struct aunhdr); -diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c -index e855fff..6d79aa1 100644 ---- a/net/ipv4/netfilter/arp_tables.c -+++ b/net/ipv4/netfilter/arp_tables.c -@@ -1065,6 +1065,7 @@ static int do_replace(struct net *net, const void __user *user, - /* overflow check */ - if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) - return -ENOMEM; -+ tmp.name[sizeof(tmp.name)-1] = 0; - - newinfo = xt_alloc_table_info(tmp.size); - if (!newinfo) -@@ -1486,6 +1487,7 @@ static int compat_do_replace(struct net *net, void __user *user, - return -ENOMEM; - if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) - return -ENOMEM; -+ tmp.name[sizeof(tmp.name)-1] = 0; - - newinfo = xt_alloc_table_info(tmp.size); - if (!newinfo) -@@ -1738,6 +1740,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len - ret = -EFAULT; - break; - } -+ 
rev.name[sizeof(rev.name)-1] = 0; - - try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, - rev.revision, 1, &ret), -diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c -index 652efea..92fb4c5 100644 ---- a/net/ipv4/netfilter/ip_tables.c -+++ b/net/ipv4/netfilter/ip_tables.c -@@ -387,7 +387,7 @@ ipt_do_table(struct sk_buff *skb, - verdict = (unsigned)(-v) - 1; - break; - } -- if (*stackptr == 0) { -+ if (*stackptr <= origptr) { - e = get_entry(table_base, - private->underflow[hook]); - pr_debug("Underflow (this is normal) " -@@ -427,10 +427,10 @@ ipt_do_table(struct sk_buff *skb, - /* Verdict */ - break; - } while (!acpar.hotdrop); -- xt_info_rdunlock_bh(); - pr_debug("Exiting %s; resetting sp from %u to %u\n", - __func__, *stackptr, origptr); - *stackptr = origptr; -+ xt_info_rdunlock_bh(); - #ifdef DEBUG_ALLOW_ALL - return NF_ACCEPT; - #else -@@ -1261,6 +1261,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) - /* overflow check */ - if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) - return -ENOMEM; -+ tmp.name[sizeof(tmp.name)-1] = 0; - - newinfo = xt_alloc_table_info(tmp.size); - if (!newinfo) -@@ -1805,6 +1806,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) - return -ENOMEM; - if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) - return -ENOMEM; -+ tmp.name[sizeof(tmp.name)-1] = 0; - - newinfo = xt_alloc_table_info(tmp.size); - if (!newinfo) -@@ -2034,6 +2036,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) - ret = -EFAULT; - break; - } -+ rev.name[sizeof(rev.name)-1] = 0; - - if (cmd == IPT_SO_GET_REVISION_TARGET) - target = 1; -diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c -index 1e26a48..af7dec6 100644 ---- a/net/ipv4/netfilter/ipt_CLUSTERIP.c -+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c -@@ -669,8 +669,11 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user 
*input, - char buffer[PROC_WRITELEN+1]; - unsigned long nodenum; - -- if (copy_from_user(buffer, input, PROC_WRITELEN)) -+ if (size > PROC_WRITELEN) -+ return -EIO; -+ if (copy_from_user(buffer, input, size)) - return -EFAULT; -+ buffer[size] = 0; - - if (*buffer == '+') { - nodenum = simple_strtoul(buffer+1, NULL, 10); -diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c -index 7d227c6..eadafbf 100644 ---- a/net/ipv6/netfilter/ip6_tables.c -+++ b/net/ipv6/netfilter/ip6_tables.c -@@ -410,7 +410,7 @@ ip6t_do_table(struct sk_buff *skb, - verdict = (unsigned)(-v) - 1; - break; - } -- if (*stackptr == 0) -+ if (*stackptr <= origptr) - e = get_entry(table_base, - private->underflow[hook]); - else -@@ -441,8 +441,8 @@ ip6t_do_table(struct sk_buff *skb, - break; - } while (!acpar.hotdrop); - -- xt_info_rdunlock_bh(); - *stackptr = origptr; -+ xt_info_rdunlock_bh(); - - #ifdef DEBUG_ALLOW_ALL - return NF_ACCEPT; -@@ -1274,6 +1274,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) - /* overflow check */ - if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) - return -ENOMEM; -+ tmp.name[sizeof(tmp.name)-1] = 0; - - newinfo = xt_alloc_table_info(tmp.size); - if (!newinfo) -@@ -1820,6 +1821,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) - return -ENOMEM; - if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) - return -ENOMEM; -+ tmp.name[sizeof(tmp.name)-1] = 0; - - newinfo = xt_alloc_table_info(tmp.size); - if (!newinfo) -@@ -2049,6 +2051,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) - ret = -EFAULT; - break; - } -+ rev.name[sizeof(rev.name)-1] = 0; - - if (cmd == IP6T_SO_GET_REVISION_TARGET) - target = 1; -diff --git a/net/irda/iriap.c b/net/irda/iriap.c -index 5b743bd..3647753 100644 ---- a/net/irda/iriap.c -+++ b/net/irda/iriap.c -@@ -656,10 +656,16 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, - n = 1; - - name_len = 
fp[n++]; -+ -+ IRDA_ASSERT(name_len < IAS_MAX_CLASSNAME + 1, return;); -+ - memcpy(name, fp+n, name_len); n+=name_len; - name[name_len] = '\0'; - - attr_len = fp[n++]; -+ -+ IRDA_ASSERT(attr_len < IAS_MAX_ATTRIBNAME + 1, return;); -+ - memcpy(attr, fp+n, attr_len); n+=attr_len; - attr[attr_len] = '\0'; - -diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c -index 7c567b8..2bb2beb 100644 ---- a/net/irda/irnet/irnet_ppp.c -+++ b/net/irda/irnet/irnet_ppp.c -@@ -105,6 +105,9 @@ irnet_ctrl_write(irnet_socket * ap, - while(isspace(start[length - 1])) - length--; - -+ DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5, -+ -EINVAL, CTRL_ERROR, "Invalid nickname.\n"); -+ - /* Copy the name for later reuse */ - memcpy(ap->rname, start + 5, length - 5); - ap->rname[length - 5] = '\0'; -diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c -index 165a451..cac35ff 100644 ---- a/net/mac80211/rc80211_minstrel_ht.c -+++ b/net/mac80211/rc80211_minstrel_ht.c -@@ -639,18 +639,14 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, - struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; - struct ieee80211_local *local = hw_to_local(mp->hw); - u16 sta_cap = sta->ht_cap.cap; -+ int n_supported = 0; - int ack_dur; - int stbc; - int i; - - /* fall back to the old minstrel for legacy stations */ -- if (!sta->ht_cap.ht_supported) { -- msp->is_ht = false; -- memset(&msp->legacy, 0, sizeof(msp->legacy)); -- msp->legacy.r = msp->ratelist; -- msp->legacy.sample_table = msp->sample_table; -- return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); -- } -+ if (!sta->ht_cap.ht_supported) -+ goto use_legacy; - - BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != - MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS); -@@ -705,7 +701,22 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, - - mi->groups[i].supported = - mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; -+ -+ if 
(mi->groups[i].supported) -+ n_supported++; - } -+ -+ if (!n_supported) -+ goto use_legacy; -+ -+ return; -+ -+use_legacy: -+ msp->is_ht = false; -+ memset(&msp->legacy, 0, sizeof(msp->legacy)); -+ msp->legacy.r = msp->ratelist; -+ msp->legacy.sample_table = msp->sample_table; -+ return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); - } - - static void -diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c -index c426504..604216e 100644 ---- a/net/mac80211/sta_info.c -+++ b/net/mac80211/sta_info.c -@@ -243,6 +243,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, - memcpy(sta->sta.addr, addr, ETH_ALEN); - sta->local = local; - sta->sdata = sdata; -+ sta->last_rx = jiffies; - - ewma_init(&sta->avg_signal, 1024, 8); - -diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c -index 8678823..bcd5ed6 100644 ---- a/net/netfilter/nf_conntrack_h323_asn1.c -+++ b/net/netfilter/nf_conntrack_h323_asn1.c -@@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f, - CHECK_BOUND(bs, 2); - count = *bs->cur++; - count <<= 8; -- count = *bs->cur++; -+ count += *bs->cur++; - break; - case SEMI: - BYTE_ALIGN(bs); -diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c -index 1734abb..174d51c 100644 ---- a/net/rose/rose_subr.c -+++ b/net/rose/rose_subr.c -@@ -290,10 +290,15 @@ static int rose_parse_national(unsigned char *p, struct rose_facilities_struct * - facilities->source_ndigis = 0; - facilities->dest_ndigis = 0; - for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { -- if (pt[6] & AX25_HBIT) -+ if (pt[6] & AX25_HBIT) { -+ if (facilities->dest_ndigis >= ROSE_MAX_DIGIS) -+ return -1; - memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN); -- else -+ } else { -+ if (facilities->source_ndigis >= ROSE_MAX_DIGIS) -+ return -1; - memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN); -+ } - } - } - p 
+= l + 2; -@@ -333,6 +338,11 @@ static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *fac - - case 0xC0: - l = p[1]; -+ -+ /* Prevent overflows*/ -+ if (l < 10 || l > 20) -+ return -1; -+ - if (*p == FAC_CCITT_DEST_NSAP) { - memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN); - memcpy(callsign, p + 12, l - 10); -@@ -373,12 +383,16 @@ int rose_parse_facilities(unsigned char *p, - switch (*p) { - case FAC_NATIONAL: /* National */ - len = rose_parse_national(p + 1, facilities, facilities_len - 1); -+ if (len < 0) -+ return 0; - facilities_len -= len + 1; - p += len + 1; - break; - - case FAC_CCITT: /* CCITT */ - len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); -+ if (len < 0) -+ return 0; - facilities_len -= len + 1; - p += len + 1; - break; -diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c -index f375dec..778e5df 100644 ---- a/net/sunrpc/auth_gss/gss_krb5_mech.c -+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c -@@ -427,7 +427,7 @@ static int - context_derive_keys_rc4(struct krb5_ctx *ctx) - { - struct crypto_hash *hmac; -- static const char sigkeyconstant[] = "signaturekey"; -+ char sigkeyconstant[] = "signaturekey"; - int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ - struct hash_desc desc; - struct scatterlist sg[1]; -diff --git a/sound/core/init.c b/sound/core/init.c -index 3e65da2..a0080aa 100644 ---- a/sound/core/init.c -+++ b/sound/core/init.c -@@ -848,6 +848,7 @@ int snd_card_file_add(struct snd_card *card, struct file *file) - return -ENOMEM; - mfile->file = file; - mfile->disconnected_f_op = NULL; -+ INIT_LIST_HEAD(&mfile->shutdown_list); - spin_lock(&card->files_lock); - if (card->shutdown) { - spin_unlock(&card->files_lock); -@@ -883,6 +884,9 @@ int snd_card_file_remove(struct snd_card *card, struct file *file) - list_for_each_entry(mfile, &card->files_list, list) { - if (mfile->file == file) { - list_del(&mfile->list); -+ spin_lock(&shutdown_lock); -+ 
list_del(&mfile->shutdown_list); -+ spin_unlock(&shutdown_lock); - if (mfile->disconnected_f_op) - fops_put(mfile->disconnected_f_op); - found = mfile; -diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c -index a82e3756..64449cb 100644 ---- a/sound/core/pcm_lib.c -+++ b/sound/core/pcm_lib.c -@@ -375,6 +375,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, - } - - if (runtime->no_period_wakeup) { -+ snd_pcm_sframes_t xrun_threshold; - /* - * Without regular period interrupts, we have to check - * the elapsed time to detect xruns. -@@ -383,7 +384,8 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, - if (jdelta < runtime->hw_ptr_buffer_jiffies / 2) - goto no_delta_check; - hdelta = jdelta - delta * HZ / runtime->rate; -- while (hdelta > runtime->hw_ptr_buffer_jiffies / 2 + 1) { -+ xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1; -+ while (hdelta > xrun_threshold) { - delta += runtime->buffer_size; - hw_base += runtime->buffer_size; - if (hw_base >= runtime->boundary) -diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c -index 4be45e7..6848dd9 100644 ---- a/sound/core/pcm_native.c -+++ b/sound/core/pcm_native.c -@@ -3201,15 +3201,6 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, - EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); - #endif /* SNDRV_PCM_INFO_MMAP */ - --/* mmap callback with pgprot_noncached */ --int snd_pcm_lib_mmap_noncached(struct snd_pcm_substream *substream, -- struct vm_area_struct *area) --{ -- area->vm_page_prot = pgprot_noncached(area->vm_page_prot); -- return snd_pcm_default_mmap(substream, area); --} --EXPORT_SYMBOL(snd_pcm_lib_mmap_noncached); -- - /* - * mmap DMA buffer - */ -diff --git a/sound/oss/dev_table.h b/sound/oss/dev_table.h -index b7617be..0199a31 100644 ---- a/sound/oss/dev_table.h -+++ b/sound/oss/dev_table.h -@@ -271,7 +271,7 @@ struct synth_operations - void (*reset) (int dev); - void (*hw_control) (int dev, unsigned char *event); - int 
(*load_patch) (int dev, int format, const char __user *addr, -- int offs, int count, int pmgr_flag); -+ int count, int pmgr_flag); - void (*aftertouch) (int dev, int voice, int pressure); - void (*controller) (int dev, int voice, int ctrl_num, int value); - void (*panning) (int dev, int voice, int value); -diff --git a/sound/oss/midi_synth.c b/sound/oss/midi_synth.c -index 3c09374..2292c23 100644 ---- a/sound/oss/midi_synth.c -+++ b/sound/oss/midi_synth.c -@@ -476,7 +476,7 @@ EXPORT_SYMBOL(midi_synth_hw_control); - - int - midi_synth_load_patch(int dev, int format, const char __user *addr, -- int offs, int count, int pmgr_flag) -+ int count, int pmgr_flag) - { - int orig_dev = synth_devs[dev]->midi_dev; - -@@ -491,33 +491,29 @@ midi_synth_load_patch(int dev, int format, const char __user *addr, - if (!prefix_cmd(orig_dev, 0xf0)) - return 0; - -+ /* Invalid patch format */ - if (format != SYSEX_PATCH) -- { --/* printk("MIDI Error: Invalid patch format (key) 0x%x\n", format);*/ - return -EINVAL; -- } -+ -+ /* Patch header too short */ - if (count < hdr_size) -- { --/* printk("MIDI Error: Patch header too short\n");*/ - return -EINVAL; -- } -+ - count -= hdr_size; - - /* -- * Copy the header from user space but ignore the first bytes which have -- * been transferred already. 
-+ * Copy the header from user space - */ - -- if(copy_from_user(&((char *) &sysex)[offs], &(addr)[offs], hdr_size - offs)) -+ if (copy_from_user(&sysex, addr, hdr_size)) - return -EFAULT; -- -- if (count < sysex.len) -- { --/* printk(KERN_WARNING "MIDI Warning: Sysex record too short (%d<%d)\n", count, (int) sysex.len);*/ -+ -+ /* Sysex record too short */ -+ if ((unsigned)count < (unsigned)sysex.len) - sysex.len = count; -- } -- left = sysex.len; -- src_offs = 0; -+ -+ left = sysex.len; -+ src_offs = 0; - - for (i = 0; i < left && !signal_pending(current); i++) - { -diff --git a/sound/oss/midi_synth.h b/sound/oss/midi_synth.h -index 6bc9d00..b64ddd6 100644 ---- a/sound/oss/midi_synth.h -+++ b/sound/oss/midi_synth.h -@@ -8,7 +8,7 @@ int midi_synth_open (int dev, int mode); - void midi_synth_close (int dev); - void midi_synth_hw_control (int dev, unsigned char *event); - int midi_synth_load_patch (int dev, int format, const char __user * addr, -- int offs, int count, int pmgr_flag); -+ int count, int pmgr_flag); - void midi_synth_panning (int dev, int channel, int pressure); - void midi_synth_aftertouch (int dev, int channel, int pressure); - void midi_synth_controller (int dev, int channel, int ctrl_num, int value); -diff --git a/sound/oss/opl3.c b/sound/oss/opl3.c -index 938c48c..407cd67 100644 ---- a/sound/oss/opl3.c -+++ b/sound/oss/opl3.c -@@ -820,7 +820,7 @@ static void opl3_hw_control(int dev, unsigned char *event) - } - - static int opl3_load_patch(int dev, int format, const char __user *addr, -- int offs, int count, int pmgr_flag) -+ int count, int pmgr_flag) - { - struct sbi_instrument ins; - -@@ -830,11 +830,7 @@ static int opl3_load_patch(int dev, int format, const char __user *addr, - return -EINVAL; - } - -- /* -- * What the fuck is going on here? We leave junk in the beginning -- * of ins and then check the field pretty close to that beginning? 
-- */ -- if(copy_from_user(&((char *) &ins)[offs], addr + offs, sizeof(ins) - offs)) -+ if (copy_from_user(&ins, addr, sizeof(ins))) - return -EFAULT; - - if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR) -@@ -849,6 +845,10 @@ static int opl3_load_patch(int dev, int format, const char __user *addr, - - static void opl3_panning(int dev, int voice, int value) - { -+ -+ if (voice < 0 || voice >= devc->nr_voice) -+ return; -+ - devc->voc[voice].panning = value; - } - -@@ -1066,8 +1066,15 @@ static int opl3_alloc_voice(int dev, int chn, int note, struct voice_alloc_info - - static void opl3_setup_voice(int dev, int voice, int chn) - { -- struct channel_info *info = -- &synth_devs[dev]->chn_info[chn]; -+ struct channel_info *info; -+ -+ if (voice < 0 || voice >= devc->nr_voice) -+ return; -+ -+ if (chn < 0 || chn > 15) -+ return; -+ -+ info = &synth_devs[dev]->chn_info[chn]; - - opl3_set_instr(dev, voice, info->pgm_num); - -diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c -index 5ea1098..30bcfe4 100644 ---- a/sound/oss/sequencer.c -+++ b/sound/oss/sequencer.c -@@ -241,7 +241,7 @@ int sequencer_write(int dev, struct file *file, const char __user *buf, int coun - return -ENXIO; - - fmt = (*(short *) &event_rec[0]) & 0xffff; -- err = synth_devs[dev]->load_patch(dev, fmt, buf, p + 4, c, 0); -+ err = synth_devs[dev]->load_patch(dev, fmt, buf + p, c, 0); - if (err < 0) - return err; - -diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c -index 537cfba..863eafe 100644 ---- a/sound/pci/ens1370.c -+++ b/sound/pci/ens1370.c -@@ -229,6 +229,7 @@ MODULE_PARM_DESC(lineio, "Line In to Rear Out (0 = auto, 1 = force)."); - #define ES_REG_1371_CODEC 0x14 /* W/R: Codec Read/Write register address */ - #define ES_1371_CODEC_RDY (1<<31) /* codec ready */ - #define ES_1371_CODEC_WIP (1<<30) /* codec register access in progress */ -+#define EV_1938_CODEC_MAGIC (1<<26) - #define ES_1371_CODEC_PIRD (1<<23) /* codec read/write select register */ - #define ES_1371_CODEC_WRITE(a,d) 
((((a)&0x7f)<<16)|(((d)&0xffff)<<0)) - #define ES_1371_CODEC_READS(a) ((((a)&0x7f)<<16)|ES_1371_CODEC_PIRD) -@@ -603,12 +604,18 @@ static void snd_es1370_codec_write(struct snd_ak4531 *ak4531, - - #ifdef CHIP1371 - -+static inline bool is_ev1938(struct ensoniq *ensoniq) -+{ -+ return ensoniq->pci->device == 0x8938; -+} -+ - static void snd_es1371_codec_write(struct snd_ac97 *ac97, - unsigned short reg, unsigned short val) - { - struct ensoniq *ensoniq = ac97->private_data; -- unsigned int t, x; -+ unsigned int t, x, flag; - -+ flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0; - mutex_lock(&ensoniq->src_mutex); - for (t = 0; t < POLL_COUNT; t++) { - if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP)) { -@@ -630,7 +637,8 @@ static void snd_es1371_codec_write(struct snd_ac97 *ac97, - 0x00010000) - break; - } -- outl(ES_1371_CODEC_WRITE(reg, val), ES_REG(ensoniq, 1371_CODEC)); -+ outl(ES_1371_CODEC_WRITE(reg, val) | flag, -+ ES_REG(ensoniq, 1371_CODEC)); - /* restore SRC reg */ - snd_es1371_wait_src_ready(ensoniq); - outl(x, ES_REG(ensoniq, 1371_SMPRATE)); -@@ -647,8 +655,9 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, - unsigned short reg) - { - struct ensoniq *ensoniq = ac97->private_data; -- unsigned int t, x, fail = 0; -+ unsigned int t, x, flag, fail = 0; - -+ flag = is_ev1938(ensoniq) ? 
EV_1938_CODEC_MAGIC : 0; - __again: - mutex_lock(&ensoniq->src_mutex); - for (t = 0; t < POLL_COUNT; t++) { -@@ -671,7 +680,8 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, - 0x00010000) - break; - } -- outl(ES_1371_CODEC_READS(reg), ES_REG(ensoniq, 1371_CODEC)); -+ outl(ES_1371_CODEC_READS(reg) | flag, -+ ES_REG(ensoniq, 1371_CODEC)); - /* restore SRC reg */ - snd_es1371_wait_src_ready(ensoniq); - outl(x, ES_REG(ensoniq, 1371_SMPRATE)); -@@ -683,6 +693,11 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, - /* now wait for the stinkin' data (RDY) */ - for (t = 0; t < POLL_COUNT; t++) { - if ((x = inl(ES_REG(ensoniq, 1371_CODEC))) & ES_1371_CODEC_RDY) { -+ if (is_ev1938(ensoniq)) { -+ for (t = 0; t < 100; t++) -+ inl(ES_REG(ensoniq, CONTROL)); -+ x = inl(ES_REG(ensoniq, 1371_CODEC)); -+ } - mutex_unlock(&ensoniq->src_mutex); - return ES_1371_CODEC_READ(x); - } -diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c -index 8dabab7..7aee900 100644 ---- a/sound/pci/hda/patch_analog.c -+++ b/sound/pci/hda/patch_analog.c -@@ -4353,6 +4353,84 @@ static int ad1984a_thinkpad_init(struct hda_codec *codec) - } - - /* -+ * Precision R5500 -+ * 0x12 - HP/line-out -+ * 0x13 - speaker (mono) -+ * 0x15 - mic-in -+ */ -+ -+static struct hda_verb ad1984a_precision_verbs[] = { -+ /* Unmute main output path */ -+ {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */ -+ {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE + 0x1f}, /* 0dB */ -+ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5) + 0x17}, /* 0dB */ -+ /* Analog mixer; mute as default */ -+ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, -+ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, -+ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, -+ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, -+ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, -+ /* Select mic as input */ -+ {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1}, -+ {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, 
AMP_OUT_UNMUTE + 0x27}, /* 0dB */ -+ /* Configure as mic */ -+ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, -+ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */ -+ /* HP unmute */ -+ {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, -+ /* turn on EAPD */ -+ {0x13, AC_VERB_SET_EAPD_BTLENABLE, 0x02}, -+ /* unsolicited event for pin-sense */ -+ {0x12, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT}, -+ { } /* end */ -+}; -+ -+static struct snd_kcontrol_new ad1984a_precision_mixers[] = { -+ HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT), -+ HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT), -+ HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT), -+ HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT), -+ HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x01, HDA_INPUT), -+ HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT), -+ HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT), -+ HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT), -+ HDA_CODEC_VOLUME("Speaker Playback Volume", 0x13, 0x0, HDA_OUTPUT), -+ HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), -+ HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), -+ { } /* end */ -+}; -+ -+ -+/* mute internal speaker if HP is plugged */ -+static void ad1984a_precision_automute(struct hda_codec *codec) -+{ -+ unsigned int present; -+ -+ present = snd_hda_jack_detect(codec, 0x12); -+ snd_hda_codec_amp_stereo(codec, 0x13, HDA_OUTPUT, 0, -+ HDA_AMP_MUTE, present ? 
HDA_AMP_MUTE : 0); -+} -+ -+ -+/* unsolicited event for HP jack sensing */ -+static void ad1984a_precision_unsol_event(struct hda_codec *codec, -+ unsigned int res) -+{ -+ if ((res >> 26) != AD1884A_HP_EVENT) -+ return; -+ ad1984a_precision_automute(codec); -+} -+ -+/* initialize jack-sensing, too */ -+static int ad1984a_precision_init(struct hda_codec *codec) -+{ -+ ad198x_init(codec); -+ ad1984a_precision_automute(codec); -+ return 0; -+} -+ -+ -+/* - * HP Touchsmart - * port-A (0x11) - front hp-out - * port-B (0x14) - unused -@@ -4481,6 +4559,7 @@ enum { - AD1884A_MOBILE, - AD1884A_THINKPAD, - AD1984A_TOUCHSMART, -+ AD1984A_PRECISION, - AD1884A_MODELS - }; - -@@ -4490,9 +4569,11 @@ static const char * const ad1884a_models[AD1884A_MODELS] = { - [AD1884A_MOBILE] = "mobile", - [AD1884A_THINKPAD] = "thinkpad", - [AD1984A_TOUCHSMART] = "touchsmart", -+ [AD1984A_PRECISION] = "precision", - }; - - static struct snd_pci_quirk ad1884a_cfg_tbl[] = { -+ SND_PCI_QUIRK(0x1028, 0x04ac, "Precision R5500", AD1984A_PRECISION), - SND_PCI_QUIRK(0x103c, 0x3030, "HP", AD1884A_MOBILE), - SND_PCI_QUIRK(0x103c, 0x3037, "HP 2230s", AD1884A_LAPTOP), - SND_PCI_QUIRK(0x103c, 0x3056, "HP", AD1884A_MOBILE), -@@ -4586,6 +4667,14 @@ static int patch_ad1884a(struct hda_codec *codec) - codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event; - codec->patch_ops.init = ad1984a_thinkpad_init; - break; -+ case AD1984A_PRECISION: -+ spec->mixers[0] = ad1984a_precision_mixers; -+ spec->init_verbs[spec->num_init_verbs++] = -+ ad1984a_precision_verbs; -+ spec->multiout.dig_out_nid = 0; -+ codec->patch_ops.unsol_event = ad1984a_precision_unsol_event; -+ codec->patch_ops.init = ad1984a_precision_init; -+ break; - case AD1984A_TOUCHSMART: - spec->mixers[0] = ad1984a_touchsmart_mixers; - spec->init_verbs[0] = ad1984a_touchsmart_verbs; -diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c -index 4d5004e..e33d69e 100644 ---- a/sound/pci/hda/patch_conexant.c -+++ 
b/sound/pci/hda/patch_conexant.c -@@ -3130,6 +3130,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { - SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), - SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), - SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), -+ SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), -+ SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), - SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), - SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */ - {} -diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c -index ec0fa2d..520f94a 100644 ---- a/sound/pci/hda/patch_hdmi.c -+++ b/sound/pci/hda/patch_hdmi.c -@@ -1276,6 +1276,39 @@ static int simple_playback_pcm_prepare(struct hda_pcm_stream *hinfo, - stream_tag, format, substream); - } - -+static void nvhdmi_8ch_7x_set_info_frame_parameters(struct hda_codec *codec, -+ int channels) -+{ -+ unsigned int chanmask; -+ int chan = channels ? (channels - 1) : 1; -+ -+ switch (channels) { -+ default: -+ case 0: -+ case 2: -+ chanmask = 0x00; -+ break; -+ case 4: -+ chanmask = 0x08; -+ break; -+ case 6: -+ chanmask = 0x0b; -+ break; -+ case 8: -+ chanmask = 0x13; -+ break; -+ } -+ -+ /* Set the audio infoframe channel allocation and checksum fields. The -+ * channel count is computed implicitly by the hardware. 
*/ -+ snd_hda_codec_write(codec, 0x1, 0, -+ Nv_VERB_SET_Channel_Allocation, chanmask); -+ -+ snd_hda_codec_write(codec, 0x1, 0, -+ Nv_VERB_SET_Info_Frame_Checksum, -+ (0x71 - chan - chanmask)); -+} -+ - static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo, - struct hda_codec *codec, - struct snd_pcm_substream *substream) -@@ -1294,6 +1327,10 @@ static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo, - AC_VERB_SET_STREAM_FORMAT, 0); - } - -+ /* The audio hardware sends a channel count of 0x7 (8ch) when all the -+ * streams are disabled. */ -+ nvhdmi_8ch_7x_set_info_frame_parameters(codec, 8); -+ - return snd_hda_multi_out_dig_close(codec, &spec->multiout); - } - -@@ -1304,37 +1341,16 @@ static int nvhdmi_8ch_7x_pcm_prepare(struct hda_pcm_stream *hinfo, - struct snd_pcm_substream *substream) - { - int chs; -- unsigned int dataDCC1, dataDCC2, chan, chanmask, channel_id; -+ unsigned int dataDCC1, dataDCC2, channel_id; - int i; - - mutex_lock(&codec->spdif_mutex); - - chs = substream->runtime->channels; -- chan = chs ? 
(chs - 1) : 1; - -- switch (chs) { -- default: -- case 0: -- case 2: -- chanmask = 0x00; -- break; -- case 4: -- chanmask = 0x08; -- break; -- case 6: -- chanmask = 0x0b; -- break; -- case 8: -- chanmask = 0x13; -- break; -- } - dataDCC1 = AC_DIG1_ENABLE | AC_DIG1_COPYRIGHT; - dataDCC2 = 0x2; - -- /* set the Audio InforFrame Channel Allocation */ -- snd_hda_codec_write(codec, 0x1, 0, -- Nv_VERB_SET_Channel_Allocation, chanmask); -- - /* turn off SPDIF once; otherwise the IEC958 bits won't be updated */ - if (codec->spdif_status_reset && (codec->spdif_ctls & AC_DIG1_ENABLE)) - snd_hda_codec_write(codec, -@@ -1409,10 +1425,7 @@ static int nvhdmi_8ch_7x_pcm_prepare(struct hda_pcm_stream *hinfo, - } - } - -- /* set the Audio Info Frame Checksum */ -- snd_hda_codec_write(codec, 0x1, 0, -- Nv_VERB_SET_Info_Frame_Checksum, -- (0x71 - chan - chanmask)); -+ nvhdmi_8ch_7x_set_info_frame_parameters(codec, chs); - - mutex_unlock(&codec->spdif_mutex); - return 0; -@@ -1508,6 +1521,11 @@ static int patch_nvhdmi_8ch_7x(struct hda_codec *codec) - spec->multiout.max_channels = 8; - spec->pcm_playback = &nvhdmi_pcm_playback_8ch_7x; - codec->patch_ops = nvhdmi_patch_ops_8ch_7x; -+ -+ /* Initialize the audio infoframe channel mask and checksum to something -+ * valid */ -+ nvhdmi_8ch_7x_set_info_frame_parameters(codec, 8); -+ - return 0; - } - -diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c -index c2eb6a7..e164a4b 100644 ---- a/sound/pci/hda/patch_realtek.c -+++ b/sound/pci/hda/patch_realtek.c -@@ -1360,7 +1360,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type) - case 0x10ec0883: - case 0x10ec0885: - case 0x10ec0887: -- case 0x10ec0889: -+ /*case 0x10ec0889:*/ /* this causes an SPDIF problem */ - alc889_coef_init(codec); - break; - case 0x10ec0888: -@@ -14191,7 +14191,7 @@ static hda_nid_t alc269vb_capsrc_nids[1] = { - }; - - static hda_nid_t alc269_adc_candidates[] = { -- 0x08, 0x09, 0x07, -+ 0x08, 0x09, 0x07, 0x11, - }; - - #define 
alc269_modes alc260_modes -diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c -index 671ef8d..aab7765 100644 ---- a/sound/soc/imx/imx-pcm-dma-mx2.c -+++ b/sound/soc/imx/imx-pcm-dma-mx2.c -@@ -110,12 +110,12 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream, - slave_config.direction = DMA_TO_DEVICE; - slave_config.dst_addr = dma_params->dma_addr; - slave_config.dst_addr_width = buswidth; -- slave_config.dst_maxburst = dma_params->burstsize; -+ slave_config.dst_maxburst = dma_params->burstsize * buswidth; - } else { - slave_config.direction = DMA_FROM_DEVICE; - slave_config.src_addr = dma_params->dma_addr; - slave_config.src_addr_width = buswidth; -- slave_config.src_maxburst = dma_params->burstsize; -+ slave_config.src_maxburst = dma_params->burstsize * buswidth; - } - - ret = dmaengine_slave_config(iprtd->dma_chan, &slave_config); -@@ -303,6 +303,11 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = { - - static int __devinit imx_soc_platform_probe(struct platform_device *pdev) - { -+ struct imx_ssi *ssi = platform_get_drvdata(pdev); -+ -+ ssi->dma_params_tx.burstsize = 6; -+ ssi->dma_params_rx.burstsize = 4; -+ - return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2); - } - -diff --git a/sound/soc/imx/imx-ssi.h b/sound/soc/imx/imx-ssi.h -index a4406a1..dc8a875 100644 ---- a/sound/soc/imx/imx-ssi.h -+++ b/sound/soc/imx/imx-ssi.h -@@ -234,7 +234,4 @@ void imx_pcm_free(struct snd_pcm *pcm); - */ - #define IMX_SSI_DMABUF_SIZE (64 * 1024) - --#define DMA_RXFIFO_BURST 0x4 --#define DMA_TXFIFO_BURST 0x6 -- - #endif /* _IMX_SSI_H */ -diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c -index 784cff5..9027da4 100644 ---- a/sound/soc/pxa/corgi.c -+++ b/sound/soc/pxa/corgi.c -@@ -310,7 +310,7 @@ static struct snd_soc_dai_link corgi_dai = { - .cpu_dai_name = "pxa2xx-i2s", - .codec_dai_name = "wm8731-hifi", - .platform_name = "pxa-pcm-audio", -- .codec_name = "wm8731-codec-0.001b", -+ .codec_name = 
"wm8731-codec.0-001b", - .init = corgi_wm8731_init, - .ops = &corgi_ops, - }; diff --git a/patches.qubes/nuke_balloon_minimum_target.patch b/patches.qubes/nuke_balloon_minimum_target.patch deleted file mode 100644 index 5c660ed..0000000 --- a/patches.qubes/nuke_balloon_minimum_target.patch +++ /dev/null @@ -1,12 +0,0 @@ ---- linux-2.6.34.1/drivers/xen/balloon/balloon.c.orig 2010-09-03 12:41:58.369787360 +0200 -+++ linux-2.6.34.1/drivers/xen/balloon/balloon.c 2010-09-03 12:43:27.771891174 +0200 -@@ -269,6 +269,9 @@ - min_pages = MB2PAGES(296) + (max_pfn >> 5); - #undef MB2PAGES - -+ /* We don't want any artificial limit */ -+ min_pages = 0; -+ - /* Don't enforce growth */ - return min(min_pages, curr_pages); - #ifndef CONFIG_XEN diff --git a/patches.rpmify/buildhost b/patches.rpmify/buildhost deleted file mode 100644 index 5f64d12..0000000 --- a/patches.rpmify/buildhost +++ /dev/null @@ -1,37 +0,0 @@ -From: Andreas Gruenbacher -Subject: Hide the build hostname -Patch-mainline: Never, SuSE-specific - -Instead of the real build host and user name, use "buildhost.suse.de" -and "geeko". 
- -Signed-off-by: Andreas Gruenbacher - - scripts/mkcompile_h | 16 +++------------- - 1 file changed, 3 insertions(+), 13 deletions(-) - ---- a/scripts/mkcompile_h -+++ b/scripts/mkcompile_h -@@ -64,19 +64,9 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" - echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\" - - echo \#define LINUX_COMPILE_TIME \"`date +%T`\" -- echo \#define LINUX_COMPILE_BY \"`whoami`\" -- echo \#define LINUX_COMPILE_HOST \"`hostname | $UTS_TRUNCATE`\" -- -- domain=`dnsdomainname 2> /dev/null` -- if [ -z "$domain" ]; then -- domain=`domainname 2> /dev/null` -- fi -- -- if [ -n "$domain" ]; then -- echo \#define LINUX_COMPILE_DOMAIN \"`echo $domain | $UTS_TRUNCATE`\" -- else -- echo \#define LINUX_COMPILE_DOMAIN -- fi -+ echo \#define LINUX_COMPILE_BY \"geeko\" -+ echo \#define LINUX_COMPILE_HOST \"buildhost\" -+ echo \#define LINUX_COMPILE_DOMAIN \"suse.de\" - - echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\" - ) > .tmpcompile diff --git a/patches.rpmify/cloneconfig.diff b/patches.rpmify/cloneconfig.diff deleted file mode 100644 index 4bfb615..0000000 --- a/patches.rpmify/cloneconfig.diff +++ /dev/null @@ -1,41 +0,0 @@ -From: Andreas Gruenbacher -Subject: Add ``cloneconfig'' target -Patch-mainline: Submitted 24 Feb 2011 - -Cloneconfig takes the first configuration it finds which appears -to belong to the running kernel, and configures the kernel sources -to match this configuration as closely as possible. 
- -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Jeff Mahoney ---- - - scripts/kconfig/Makefile | 17 +++++++++++++++++ - 1 file changed, 17 insertions(+) - ---- a/scripts/kconfig/Makefile -+++ b/scripts/kconfig/Makefile -@@ -99,6 +99,23 @@ PHONY += allnoconfig allyesconfig allmod - - allnoconfig allyesconfig allmodconfig alldefconfig randconfig: $(obj)/conf - $< --$@ $(Kconfig) -+ -+UNAME_RELEASE := $(shell uname -r) -+CLONECONFIG := $(firstword $(wildcard /proc/config.gz \ -+ /lib/modules/$(UNAME_RELEASE)/.config \ -+ /etc/kernel-config \ -+ /boot/config-$(UNAME_RELEASE))) -+cloneconfig: $(obj)/conf -+ $(Q)case "$(CLONECONFIG)" in \ -+ '') echo -e "The configuration of the running" \ -+ "kernel could not be determined\n"; \ -+ false ;; \ -+ *.gz) gzip -cd $(CLONECONFIG) > .config.running ;; \ -+ *) cat $(CLONECONFIG) > .config.running ;; \ -+ esac && \ -+ echo -e "Cloning configuration file $(CLONECONFIG)\n" -+ $(Q)$< --defconfig=.config.running arch/$(SRCARCH)/Kconfig -+ - - PHONY += listnewconfig oldnoconfig savedefconfig defconfig - diff --git a/patches.rpmify/dw_spi-fix-PPC-build.patch b/patches.rpmify/dw_spi-fix-PPC-build.patch deleted file mode 100644 index dfaf46d..0000000 --- a/patches.rpmify/dw_spi-fix-PPC-build.patch +++ /dev/null @@ -1,37 +0,0 @@ -From: Jiri Slaby -Date: Fri, 18 Mar 2011 10:37:34 +0100 -Subject: SPI: dw_spi, fix PPC build -Patch-mainline: submitted Mar 18 - -Currently, build on PPC dies with: -In file included from drivers/spi/dw_spi_mmio.c:16: -include/linux/spi/dw_spi.h:147: error: field ‘tx_sgl’ has incomplete type -include/linux/spi/dw_spi.h:149: error: field ‘rx_sgl’ has incomplete type - -Add linux/scatterlist.h include to dw_spi.h, because we need to know -the contents of the structure. 
- -Signed-off-by: Jiri Slaby -Cc: David Brownell -Cc: Grant Likely -Cc: Benjamin Herrenschmidt -Cc: Paul Mackerras ---- - include/linux/spi/dw_spi.h | 1 + - 1 files changed, 1 insertions(+), 0 deletions(-) - -diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h -index 6cd10f6..fb0bce5 100644 ---- a/include/linux/spi/dw_spi.h -+++ b/include/linux/spi/dw_spi.h -@@ -2,6 +2,7 @@ - #define DW_SPI_HEADER_H - - #include -+#include - - /* Bit fields in CTRLR0 */ - #define SPI_DFS_OFFSET 0 --- -1.7.4.1 - diff --git a/patches.rpmify/firmware-path b/patches.rpmify/firmware-path deleted file mode 100644 index 1aa3d9f..0000000 --- a/patches.rpmify/firmware-path +++ /dev/null @@ -1,26 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH] firmware: Allow release-specific firmware dir -Patch-mainline: not yet - - Every kernel package trying to provide files under /lib/firmware runs - into problems really quickly with multiple kernels installed. - - This patch moves them to /lib/firmware/$KERNELRELEASE. udev v127's - firmware.sh looks there first before falling back to /lib/firmware. 
- -Signed-off-by: Jeff Mahoney ---- - Makefile | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/Makefile -+++ b/Makefile -@@ -975,7 +975,7 @@ depend dep: - - # --------------------------------------------------------------------------- - # Firmware install --INSTALL_FW_PATH=$(INSTALL_MOD_PATH)/lib/firmware -+INSTALL_FW_PATH=$(INSTALL_MOD_PATH)/lib/firmware/$(KERNELRELEASE) - export INSTALL_FW_PATH - - PHONY += firmware_install diff --git a/patches.rpmify/ia64-mca-fix-cast-from-integer-to-pointer-warning b/patches.rpmify/ia64-mca-fix-cast-from-integer-to-pointer-warning deleted file mode 100644 index 88149bb..0000000 --- a/patches.rpmify/ia64-mca-fix-cast-from-integer-to-pointer-warning +++ /dev/null @@ -1,35 +0,0 @@ -From: Jeff Mahoney -Subject: ia64/mca: Fix cast from integer to pointer warning -Patch-mainline: Submitted 24 Feb 2011 - - ia64_mca_cpu_init has a void *data local variable that is assigned - the value from either __get_free_pages() or mca_bootmem(). The problem - is that __get_free_pages returns an unsigned long and mca_bootmem, via - alloc_bootmem(), returns a void *. format_mca_init_stack takes the void *, - and it's also used with __pa(), but that casts it to long anyway. - - This results in the following build warning: - - arch/ia64/kernel/mca.c:1898: warning: assignment makes pointer from - integer without a cast - - This patch casts the return of __get_free_pages to a void * to avoid - the warning. 
- -Signed-off-by: Jeff Mahoney ---- - arch/ia64/kernel/mca.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/arch/ia64/kernel/mca.c -+++ b/arch/ia64/kernel/mca.c -@@ -1859,7 +1859,8 @@ ia64_mca_cpu_init(void *cpu_data) - data = mca_bootmem(); - first_time = 0; - } else -- data = __get_free_pages(GFP_KERNEL, get_order(sz)); -+ data = (void *)__get_free_pages(GFP_KERNEL, -+ get_order(sz)); - if (!data) - panic("Could not allocate MCA memory for cpu %d\n", - cpu); diff --git a/patches.rpmify/qla4xx-missing-readq-definition b/patches.rpmify/qla4xx-missing-readq-definition deleted file mode 100644 index 2fa247f..0000000 --- a/patches.rpmify/qla4xx-missing-readq-definition +++ /dev/null @@ -1,38 +0,0 @@ -From: Jeff Mahoney -Subject: qla4xxx: add workaround for missing readq/writeq -Patch-mainline: submitted Sep 21, 2010 - - Commit f4f5df23 added support for ISP82XX devices but unconditionally - used readq/writeq without defining it for architectures that don't - support it natively. - - This patch copies the readq/writeq definitions from the qla2xxx driver - to allow the code to build on e.g. ppc32 hardware. 
- -Signed-off-by: Jeff Mahoney ---- - drivers/scsi/qla4xxx/ql4_nx.h | 15 +++++++++++++++ - 1 file changed, 15 insertions(+) - ---- a/drivers/scsi/qla4xxx/ql4_nx.h -+++ b/drivers/scsi/qla4xxx/ql4_nx.h -@@ -776,4 +776,19 @@ struct crb_addr_pair { - #define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0) - #define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4) - -+#ifndef readq -+static inline u64 readq(void __iomem *addr) -+{ -+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL); -+} -+#endif -+ -+#ifndef writeq -+static inline void writeq(u64 val, void __iomem *addr) -+{ -+ writel(((u32) (val)), (addr)); -+ writel(((u32) (val >> 32)), (addr + 4)); -+} -+#endif -+ - #endif diff --git a/patches.rpmify/rpm-kernel-config b/patches.rpmify/rpm-kernel-config deleted file mode 100644 index 63e25ac..0000000 --- a/patches.rpmify/rpm-kernel-config +++ /dev/null @@ -1,22 +0,0 @@ -From: Andreas Gruenbacher -Subject: Add the CONFIG_SUSE_KERNEL option -Patch-mainline: Never, SuSE-specific - -CONFIG_SUSE_KERNEL is set automatically in our config files. It must -still be added in kconfig so that the option does not disappear -whenever the kernel is reconfigured (e.g., ``make oldconfig''). - -Signed-off-by: Andreas Gruenbacher - - init/Kconfig | 4 ++++ - 1 file changed, 4 insertions(+) - ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -1,3 +1,6 @@ -+config SUSE_KERNEL -+ def_bool y -+ - config ARCH - string - option env="ARCH" diff --git a/patches.rpmify/split-package b/patches.rpmify/split-package deleted file mode 100644 index 2c636eb..0000000 --- a/patches.rpmify/split-package +++ /dev/null @@ -1,33 +0,0 @@ -From: Jeff Mahoney -Subject: Add SPLIT_PACKAGE option -Patch-mainline: Never - - This patch adds a SPLIT_PACKAGE option which allows the packager to - make decisions on a per-config basis. 
- -Signed-off-by: Jeff Mahoney ---- - init/Kconfig | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -1,6 +1,18 @@ config SUSE_KERNEL - config SUSE_KERNEL - def_bool y - -+config SPLIT_PACKAGE -+ bool "Split the kernel package into multiple RPMs" -+ depends on SUSE_KERNEL && MODULES -+ help -+ This is an option used by the kernel packaging infrastructure -+ to split kernel modules into different packages. It isn't used -+ by the kernel itself, but allows the the packager to make -+ decisions on a per-config basis. -+ -+ If you aren't packaging a kernel for distribution, it's safe to -+ say n. -+ - config ARCH - string - option env="ARCH" diff --git a/patches.rpmify/tioca-fix-assignment-from-incompatible-pointer-warnings b/patches.rpmify/tioca-fix-assignment-from-incompatible-pointer-warnings deleted file mode 100644 index 2af1b42..0000000 --- a/patches.rpmify/tioca-fix-assignment-from-incompatible-pointer-warnings +++ /dev/null @@ -1,23 +0,0 @@ -From: Jeff Mahoney -Subject: tioca: Fix assignment from incompatible pointer warnings -Patch-mainline: Submitted 24 Feb 2011 - - The prototype for sn_pci_provider->{dma_map,dma_map_consistent} expects - an unsigned long instead of a u64. - -Signed-off-by: Jeff Mahoney ---- - arch/ia64/sn/pci/tioca_provider.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/ia64/sn/pci/tioca_provider.c -+++ b/arch/ia64/sn/pci/tioca_provider.c -@@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dm - * use the GART mapped mode. 
- */ - static u64 --tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) -+tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) - { - u64 mapaddr; - diff --git a/patches.suse/0001-vfs-Hooks-for-more-fine-grained-directory-permission.patch b/patches.suse/0001-vfs-Hooks-for-more-fine-grained-directory-permission.patch deleted file mode 100644 index 792ea23..0000000 --- a/patches.suse/0001-vfs-Hooks-for-more-fine-grained-directory-permission.patch +++ /dev/null @@ -1,221 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:44 +0530 -Subject: [PATCH 01/16] vfs: Hooks for more fine-grained directory permission checking -Patch-mainline: not yet - -Add iop->may_create and iop->may_delete for overriding the POSIX file -permission checks when creating and deleting files. File systems can -implement these hooks to support permission models which use different -rules for file creation and deletion. - -When these hooks are not used, the vfs behavior remains unchanged. - -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/namei.c | 82 ++++++++++++++++++++++++++++++++++++++++++----------- - include/linux/fs.h | 4 ++ - 2 files changed, 69 insertions(+), 17 deletions(-) - ---- a/fs/namei.c -+++ b/fs/namei.c -@@ -1283,6 +1283,26 @@ static inline int check_sticky(struct in - } - - /* -+ * Do the directory specific tests of inode_permission() and call the -+ * may_delete inode operation. The may_delete inode operation must do the -+ * sticky check when needed. 
-+ */ -+static int may_delete_iop(struct inode *dir, struct inode *inode, int replace) -+{ -+ int error; -+ -+ if (IS_RDONLY(dir)) -+ return -EROFS; -+ if (IS_IMMUTABLE(dir)) -+ return -EACCES; -+ error = dir->i_op->may_delete(dir, inode, replace); -+ if (!error) -+ error = security_inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ -+ return error; -+} -+ -+/* - * Check whether we can remove a link victim from directory dir, check - * whether the type of victim is right. - * 1. We can't do it if dir is read-only (done in permission()) -@@ -1301,7 +1321,8 @@ static inline int check_sticky(struct in - * 10. We don't allow removal of NFS sillyrenamed files; it's handled by - * nfs_async_unlink(). - */ --static int may_delete(struct inode *dir,struct dentry *victim,int isdir) -+static int may_delete(struct inode *dir, struct dentry *victim, -+ int isdir, int replace) - { - int error; - -@@ -1310,14 +1331,19 @@ static int may_delete(struct inode *dir, - - BUG_ON(victim->d_parent->d_inode != dir); - audit_inode_child(victim, dir); -- -- error = inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ if (dir->i_op->may_delete) -+ error = may_delete_iop(dir, victim->d_inode, replace); -+ else { -+ error = inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ if (!error && check_sticky(dir, victim->d_inode)) -+ error = -EPERM; -+ } - if (error) - return error; - if (IS_APPEND(dir)) - return -EPERM; -- if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)|| -- IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) -+ if (IS_APPEND(victim->d_inode) || IS_IMMUTABLE(victim->d_inode) || -+ IS_SWAPFILE(victim->d_inode)) - return -EPERM; - if (isdir) { - if (!S_ISDIR(victim->d_inode->i_mode)) -@@ -1333,6 +1359,25 @@ static int may_delete(struct inode *dir, - return 0; - } - -+/* -+ * Do the directory specific tests of inode_permission() and call the -+ * may_create inode operation. 
-+ */ -+static int may_create_iop(struct inode *dir, int isdir) -+{ -+ int error; -+ -+ if (IS_RDONLY(dir)) -+ return -EROFS; -+ if (IS_IMMUTABLE(dir)) -+ return -EACCES; -+ error = dir->i_op->may_create(dir, isdir); -+ if (!error) -+ error = security_inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ -+ return error; -+} -+ - /* Check whether we can create an object with dentry child in directory - * dir. - * 1. We can't do it if child already exists (open has special treatment for -@@ -1341,13 +1386,16 @@ static int may_delete(struct inode *dir, - * 3. We should have write and exec permissions on dir - * 4. We can't do it if dir is immutable (done in permission()) - */ --static inline int may_create(struct inode *dir, struct dentry *child) -+static inline int may_create(struct inode *dir, struct dentry *child, int isdir) - { - if (child->d_inode) - return -EEXIST; - if (IS_DEADDIR(dir)) - return -ENOENT; -- return inode_permission(dir, MAY_WRITE | MAY_EXEC); -+ if (dir->i_op->may_create) -+ return may_create_iop(dir, isdir); -+ else -+ return inode_permission(dir, MAY_WRITE | MAY_EXEC); - } - - /* -@@ -1395,7 +1443,7 @@ void unlock_rename(struct dentry *p1, st - int vfs_create(struct inode *dir, struct dentry *dentry, int mode, - struct nameidata *nd) - { -- int error = may_create(dir, dentry); -+ int error = may_create(dir, dentry, 0); - - if (error) - return error; -@@ -1957,7 +2005,7 @@ EXPORT_SYMBOL_GPL(lookup_create); - - int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) - { -- int error = may_create(dir, dentry); -+ int error = may_create(dir, dentry, 0); - - if (error) - return error; -@@ -2061,7 +2109,7 @@ SYSCALL_DEFINE3(mknod, const char __user - - int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) - { -- int error = may_create(dir, dentry); -+ int error = may_create(dir, dentry, 1); - - if (error) - return error; -@@ -2151,7 +2199,7 @@ void dentry_unhash(struct dentry *dentry - - int vfs_rmdir(struct inode *dir, 
struct dentry *dentry) - { -- int error = may_delete(dir, dentry, 1); -+ int error = may_delete(dir, dentry, 1, 0); - - if (error) - return error; -@@ -2238,7 +2286,7 @@ SYSCALL_DEFINE1(rmdir, const char __user - - int vfs_unlink(struct inode *dir, struct dentry *dentry) - { -- int error = may_delete(dir, dentry, 0); -+ int error = may_delete(dir, dentry, 0, 0); - - if (error) - return error; -@@ -2346,7 +2394,7 @@ SYSCALL_DEFINE1(unlink, const char __use - - int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) - { -- int error = may_create(dir, dentry); -+ int error = may_create(dir, dentry, 0); - - if (error) - return error; -@@ -2419,7 +2467,7 @@ int vfs_link(struct dentry *old_dentry, - if (!inode) - return -ENOENT; - -- error = may_create(dir, new_dentry); -+ error = may_create(dir, new_dentry, S_ISDIR(inode->i_mode)); - if (error) - return error; - -@@ -2630,14 +2678,14 @@ int vfs_rename(struct inode *old_dir, st - if (old_dentry->d_inode == new_dentry->d_inode) - return 0; - -- error = may_delete(old_dir, old_dentry, is_dir); -+ error = may_delete(old_dir, old_dentry, is_dir, 0); - if (error) - return error; - - if (!new_dentry->d_inode) -- error = may_create(new_dir, new_dentry); -+ error = may_create(new_dir, new_dentry, is_dir); - else -- error = may_delete(new_dir, new_dentry, is_dir); -+ error = may_delete(new_dir, new_dentry, is_dir, 1); - if (error) - return error; - ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -1542,6 +1542,10 @@ struct inode_operations { - void (*truncate_range)(struct inode *, loff_t, loff_t); - int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, - u64 len); -+ int (*may_create) (struct inode *, int); -+ int (*may_delete) (struct inode *, struct inode *, int); -+ -+ - } ____cacheline_aligned; - - struct seq_file; diff --git a/patches.suse/0002-vfs-Add-generic-IS_ACL-test-for-acl-support.patch b/patches.suse/0002-vfs-Add-generic-IS_ACL-test-for-acl-support.patch deleted file 
mode 100644 index 2146eef..0000000 --- a/patches.suse/0002-vfs-Add-generic-IS_ACL-test-for-acl-support.patch +++ /dev/null @@ -1,73 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:45 +0530 -Subject: [PATCH 02/16] vfs: Add generic IS_ACL() test for acl support -Patch-mainline: not yet - -When IS_POSIXACL() is true, the vfs does not apply the umask. Other acl -models will need the same exception, so introduce a separate IS_ACL() -test. - -The IS_POSIX_ACL() test is still needed so that nfsd can determine when -the underlying file system supports POSIX ACLs (as opposed to some other -kind). - -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/namei.c | 6 +++--- - include/linux/fs.h | 8 +++++++- - 2 files changed, 10 insertions(+), 4 deletions(-) - ---- a/fs/namei.c -+++ b/fs/namei.c -@@ -2215,7 +2215,7 @@ static int __open_namei_create(struct na - int error; - struct dentry *dir = nd->path.dentry; - -- if (!IS_POSIXACL(dir->d_inode)) -+ if (!IS_ACL(dir->d_inode)) - mode &= ~current_umask(); - error = security_path_mknod(&nd->path, path->dentry, mode, 0); - if (error) -@@ -2749,7 +2749,7 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const - error = PTR_ERR(dentry); - goto out_unlock; - } -- if (!IS_POSIXACL(nd.path.dentry->d_inode)) -+ if (!IS_ACL(nd.path.dentry->d_inode)) - mode &= ~current_umask(); - error = may_mknod(mode); - if (error) -@@ -2826,7 +2826,7 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const - if (IS_ERR(dentry)) - goto out_unlock; - -- if (!IS_POSIXACL(nd.path.dentry->d_inode)) -+ if (!IS_ACL(nd.path.dentry->d_inode)) - mode &= ~current_umask(); - error = mnt_want_write(nd.path.mnt); - if (error) ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -205,7 +205,7 @@ struct inodes_stat_t { - #define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. - MS_VERBOSE is deprecated. 
*/ - #define MS_SILENT 32768 --#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ -+#define MS_POSIXACL (1<<16) /* Supports POSIX ACLs */ - #define MS_UNBINDABLE (1<<17) /* change to unbindable */ - #define MS_PRIVATE (1<<18) /* change to private */ - #define MS_SLAVE (1<<19) /* change to slave */ -@@ -280,6 +280,12 @@ struct inodes_stat_t { - #define IS_IMA(inode) ((inode)->i_flags & S_IMA) - #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) - -+/* -+ * IS_ACL() tells the VFS to not apply the umask -+ * and use iop->check_acl for acl permission checks when defined. -+ */ -+#define IS_ACL(inode) __IS_FLG(inode, MS_POSIXACL) -+ - /* the read-only stuff doesn't really belong here, but any other place is - probably as bad and I don't want to create yet another include file. */ - diff --git a/patches.suse/0003-vfs-Add-IS_RICHACL-test-for-richacl-support.patch b/patches.suse/0003-vfs-Add-IS_RICHACL-test-for-richacl-support.patch deleted file mode 100644 index 9707339..0000000 --- a/patches.suse/0003-vfs-Add-IS_RICHACL-test-for-richacl-support.patch +++ /dev/null @@ -1,42 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:45 +0530 -Subject: [PATCH 03/16] vfs: Add IS_RICHACL() test for richacl support -Patch-mainline: not yet - -Introduce a new MS_RICHACL super-block flag and a new IS_RICHACL() test -which file systems like nfs can use. IS_ACL() is true if IS_POSIXACL() -or IS_RICHACL() is true. 
- -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - include/linux/fs.h | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -217,6 +217,7 @@ struct inodes_stat_t { - #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ - #define MS_I_VERSION (1<<23) /* Update inode I_version field */ - #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ -+#define MS_RICHACL (1<<25) /* Supports richacls */ - #define MS_BORN (1<<29) - #define MS_ACTIVE (1<<30) - #define MS_NOUSER (1<<31) -@@ -273,6 +274,7 @@ struct inodes_stat_t { - #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) - #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) - #define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL) -+#define IS_RICHACL(inode) __IS_FLG(inode, MS_RICHACL) - - #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) - #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) -@@ -283,7 +285,7 @@ struct inodes_stat_t { - * IS_ACL() tells the VFS to not apply the umask - * and use iop->check_acl for acl permission checks when defined. - */ --#define IS_ACL(inode) __IS_FLG(inode, MS_POSIXACL) -+#define IS_ACL(inode) __IS_FLG(inode, MS_POSIXACL | MS_RICHACL) - - /* the read-only stuff doesn't really belong here, but any other place is - probably as bad and I don't want to create yet another include file. */ diff --git a/patches.suse/0004-richacl-In-memory-representation-and-helper-function.patch b/patches.suse/0004-richacl-In-memory-representation-and-helper-function.patch deleted file mode 100644 index 67206b5..0000000 --- a/patches.suse/0004-richacl-In-memory-representation-and-helper-function.patch +++ /dev/null @@ -1,415 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:46 +0530 -Subject: [PATCH 04/16] richacl: In-memory representation and helper functions -Patch-mainline: not yet - -A richacl consists of an NFSv4 acl and an owner, group, and other mask. 
-These three masks correspond to the owner, group, and other file -permission bits, but they contain NFSv4 permissions instead of POSIX -permissions. - -Each entry in the NFSv4 acl applies to the file owner (OWNER@), the -owning group (GROUP@), literally everyone (EVERYONE@), or to a specific -uid or gid. - -As in the standard POSIX file permission model, each process is the -owner, group, or other file class. A richacl grants a requested access -only if the NFSv4 acl in the richacl grants the access (according to the -NFSv4 permission check algorithm), and the file mask that applies to the -process includes the requested permissions. - -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/Kconfig | 4 - fs/Makefile | 3 - fs/richacl_base.c | 109 +++++++++++++++++++++ - include/linux/richacl.h | 245 ++++++++++++++++++++++++++++++++++++++++++++++++ - 4 files changed, 361 insertions(+) - create mode 100644 fs/richacl_base.c - create mode 100644 include/linux/richacl.h - ---- a/fs/Kconfig -+++ b/fs/Kconfig -@@ -39,6 +39,9 @@ config FS_POSIX_ACL - source "fs/reiserfs/Kconfig" - source "fs/jfs/Kconfig" - -+config FS_RICHACL -+ bool -+ - source "fs/xfs/Kconfig" - source "fs/gfs2/Kconfig" - source "fs/ocfs2/Kconfig" ---- a/fs/Makefile -+++ b/fs/Makefile -@@ -51,6 +51,9 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl. - obj-$(CONFIG_NFS_COMMON) += nfs_common/ - obj-$(CONFIG_GENERIC_ACL) += generic_acl.o - -+obj-$(CONFIG_FS_RICHACL) += richacl.o -+richacl-y := richacl_base.o -+ - obj-y += quota/ - - obj-$(CONFIG_PROC_FS) += proc/ ---- /dev/null -+++ b/fs/richacl_base.c -@@ -0,0 +1,109 @@ -+/* -+ * Copyright (C) 2006, 2010 Novell, Inc. -+ * Written by Andreas Gruenbacher -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2, or (at your option) any -+ * later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ */ -+ -+#include -+#include -+#include -+#include -+ -+MODULE_LICENSE("GPL"); -+ -+/* -+ * Special e_who identifiers: ACEs which have ACE4_SPECIAL_WHO set in -+ * ace->e_flags use these constants in ace->u.e_who. -+ * -+ * For efficiency, we compare pointers instead of comparing strings. -+ */ -+const char richace_owner_who[] = "OWNER@"; -+EXPORT_SYMBOL_GPL(richace_owner_who); -+const char richace_group_who[] = "GROUP@"; -+EXPORT_SYMBOL_GPL(richace_group_who); -+const char richace_everyone_who[] = "EVERYONE@"; -+EXPORT_SYMBOL_GPL(richace_everyone_who); -+ -+/** -+ * richacl_alloc - allocate a richacl -+ * @count: number of entries -+ */ -+struct richacl * -+richacl_alloc(int count) -+{ -+ size_t size = sizeof(struct richacl) + count * sizeof(struct richace); -+ struct richacl *acl = kzalloc(size, GFP_KERNEL); -+ -+ if (acl) { -+ atomic_set(&acl->a_refcount, 1); -+ acl->a_count = count; -+ } -+ return acl; -+} -+EXPORT_SYMBOL_GPL(richacl_alloc); -+ -+/** -+ * richacl_clone - create a copy of a richacl -+ */ -+static struct richacl * -+richacl_clone(const struct richacl *acl) -+{ -+ int count = acl->a_count; -+ size_t size = sizeof(struct richacl) + count * sizeof(struct richace); -+ struct richacl *dup = kmalloc(size, GFP_KERNEL); -+ -+ if (dup) { -+ memcpy(dup, acl, size); -+ atomic_set(&dup->a_refcount, 1); -+ } -+ return dup; -+} -+ -+/** -+ * richace_is_same_identifier - are both identifiers the same? 
-+ */ -+int -+richace_is_same_identifier(const struct richace *a, const struct richace *b) -+{ -+#define WHO_FLAGS (ACE4_SPECIAL_WHO | ACE4_IDENTIFIER_GROUP) -+ if ((a->e_flags & WHO_FLAGS) != (b->e_flags & WHO_FLAGS)) -+ return 0; -+ if (a->e_flags & ACE4_SPECIAL_WHO) -+ return a->u.e_who == b->u.e_who; -+ else -+ return a->u.e_id == b->u.e_id; -+#undef WHO_FLAGS -+} -+ -+/** -+ * richacl_set_who - set a special who value -+ * @ace: acl entry -+ * @who: who value to use -+ */ -+int -+richace_set_who(struct richace *ace, const char *who) -+{ -+ if (!strcmp(who, richace_owner_who)) -+ who = richace_owner_who; -+ else if (!strcmp(who, richace_group_who)) -+ who = richace_group_who; -+ else if (!strcmp(who, richace_everyone_who)) -+ who = richace_everyone_who; -+ else -+ return -EINVAL; -+ -+ ace->u.e_who = who; -+ ace->e_flags |= ACE4_SPECIAL_WHO; -+ ace->e_flags &= ~ACE4_IDENTIFIER_GROUP; -+ return 0; -+} -+EXPORT_SYMBOL_GPL(richace_set_who); ---- /dev/null -+++ b/include/linux/richacl.h -@@ -0,0 +1,245 @@ -+/* -+ * Copyright (C) 2006, 2010 Novell, Inc. -+ * Written by Andreas Gruenbacher -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2, or (at your option) any -+ * later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ */ -+ -+#ifndef __RICHACL_H -+#define __RICHACL_H -+#include -+ -+struct richace { -+ unsigned short e_type; -+ unsigned short e_flags; -+ unsigned int e_mask; -+ union { -+ unsigned int e_id; -+ const char *e_who; -+ } u; -+}; -+ -+struct richacl { -+ atomic_t a_refcount; -+ unsigned int a_owner_mask; -+ unsigned int a_group_mask; -+ unsigned int a_other_mask; -+ unsigned short a_count; -+ unsigned short a_flags; -+ struct richace a_entries[0]; -+}; -+ -+#define richacl_for_each_entry(_ace, _acl) \ -+ for (_ace = _acl->a_entries; \ -+ _ace != _acl->a_entries + _acl->a_count; \ -+ _ace++) -+ -+#define richacl_for_each_entry_reverse(_ace, _acl) \ -+ for (_ace = _acl->a_entries + _acl->a_count - 1; \ -+ _ace != _acl->a_entries - 1; \ -+ _ace--) -+ -+/* e_type values */ -+#define ACE4_ACCESS_ALLOWED_ACE_TYPE 0x0000 -+#define ACE4_ACCESS_DENIED_ACE_TYPE 0x0001 -+/*#define ACE4_SYSTEM_AUDIT_ACE_TYPE 0x0002*/ -+/*#define ACE4_SYSTEM_ALARM_ACE_TYPE 0x0003*/ -+ -+/* e_flags bitflags */ -+#define ACE4_FILE_INHERIT_ACE 0x0001 -+#define ACE4_DIRECTORY_INHERIT_ACE 0x0002 -+#define ACE4_NO_PROPAGATE_INHERIT_ACE 0x0004 -+#define ACE4_INHERIT_ONLY_ACE 0x0008 -+/*#define ACE4_SUCCESSFUL_ACCESS_ACE_FLAG 0x0010*/ -+/*#define ACE4_FAILED_ACCESS_ACE_FLAG 0x0020*/ -+#define ACE4_IDENTIFIER_GROUP 0x0040 -+/* in-memory representation only */ -+#define ACE4_SPECIAL_WHO 0x4000 -+ -+#define ACE4_VALID_FLAGS ( \ -+ ACE4_FILE_INHERIT_ACE | \ -+ ACE4_DIRECTORY_INHERIT_ACE | \ -+ ACE4_NO_PROPAGATE_INHERIT_ACE | \ -+ ACE4_INHERIT_ONLY_ACE | \ -+ ACE4_IDENTIFIER_GROUP) -+ -+/* e_mask bitflags */ -+#define ACE4_READ_DATA 0x00000001 -+#define ACE4_LIST_DIRECTORY 0x00000001 -+#define ACE4_WRITE_DATA 0x00000002 -+#define ACE4_ADD_FILE 0x00000002 -+#define ACE4_APPEND_DATA 0x00000004 -+#define ACE4_ADD_SUBDIRECTORY 0x00000004 -+#define ACE4_READ_NAMED_ATTRS 0x00000008 -+#define ACE4_WRITE_NAMED_ATTRS 0x00000010 -+#define ACE4_EXECUTE 0x00000020 -+#define ACE4_DELETE_CHILD 0x00000040 -+#define 
ACE4_READ_ATTRIBUTES 0x00000080 -+#define ACE4_WRITE_ATTRIBUTES 0x00000100 -+#define ACE4_WRITE_RETENTION 0x00000200 -+#define ACE4_WRITE_RETENTION_HOLD 0x00000400 -+#define ACE4_DELETE 0x00010000 -+#define ACE4_READ_ACL 0x00020000 -+#define ACE4_WRITE_ACL 0x00040000 -+#define ACE4_WRITE_OWNER 0x00080000 -+#define ACE4_SYNCHRONIZE 0x00100000 -+ -+/* Valid ACE4_* flags for directories and non-directories */ -+#define ACE4_VALID_MASK ( \ -+ ACE4_READ_DATA | ACE4_LIST_DIRECTORY | \ -+ ACE4_WRITE_DATA | ACE4_ADD_FILE | \ -+ ACE4_APPEND_DATA | ACE4_ADD_SUBDIRECTORY | \ -+ ACE4_READ_NAMED_ATTRS | \ -+ ACE4_WRITE_NAMED_ATTRS | \ -+ ACE4_EXECUTE | \ -+ ACE4_DELETE_CHILD | \ -+ ACE4_READ_ATTRIBUTES | \ -+ ACE4_WRITE_ATTRIBUTES | \ -+ ACE4_WRITE_RETENTION | \ -+ ACE4_WRITE_RETENTION_HOLD | \ -+ ACE4_DELETE | \ -+ ACE4_READ_ACL | \ -+ ACE4_WRITE_ACL | \ -+ ACE4_WRITE_OWNER | \ -+ ACE4_SYNCHRONIZE) -+ -+/* These permissions are always allowed no matter what the acl says. */ -+#define ACE4_POSIX_ALWAYS_ALLOWED ( \ -+ ACE4_SYNCHRONIZE | \ -+ ACE4_READ_ATTRIBUTES | \ -+ ACE4_READ_ACL) -+ -+/** -+ * richacl_get - grab another reference to a richacl handle -+ */ -+static inline struct richacl * -+richacl_get(struct richacl *acl) -+{ -+ if (acl) -+ atomic_inc(&acl->a_refcount); -+ return acl; -+} -+ -+/** -+ * richacl_put - free a richacl handle -+ */ -+static inline void -+richacl_put(struct richacl *acl) -+{ -+ if (acl && atomic_dec_and_test(&acl->a_refcount)) -+ kfree(acl); -+} -+ -+/* -+ * Special e_who identifiers: we use these pointer values in comparisons -+ * instead of doing a strcmp. 
-+ */ -+extern const char richace_owner_who[]; -+extern const char richace_group_who[]; -+extern const char richace_everyone_who[]; -+ -+/** -+ * richace_is_owner - check if @ace is an OWNER@ entry -+ */ -+static inline int -+richace_is_owner(const struct richace *ace) -+{ -+ return (ace->e_flags & ACE4_SPECIAL_WHO) && -+ ace->u.e_who == richace_owner_who; -+} -+ -+/** -+ * richace_is_group - check if @ace is a GROUP@ entry -+ */ -+static inline int -+richace_is_group(const struct richace *ace) -+{ -+ return (ace->e_flags & ACE4_SPECIAL_WHO) && -+ ace->u.e_who == richace_group_who; -+} -+ -+/** -+ * richace_is_everyone - check if @ace is an EVERYONE@ entry -+ */ -+static inline int -+richace_is_everyone(const struct richace *ace) -+{ -+ return (ace->e_flags & ACE4_SPECIAL_WHO) && -+ ace->u.e_who == richace_everyone_who; -+} -+ -+/** -+ * richace_is_unix_id - check if @ace applies to a specific uid or gid -+ */ -+static inline int -+richace_is_unix_id(const struct richace *ace) -+{ -+ return !(ace->e_flags & ACE4_SPECIAL_WHO); -+} -+ -+/** -+ * richace_is_inherit_only - check if @ace is for inheritance only -+ * -+ * ACEs with the %ACE4_INHERIT_ONLY_ACE flag set have no effect during -+ * permission checking. 
-+ */ -+static inline int -+richace_is_inherit_only(const struct richace *ace) -+{ -+ return ace->e_flags & ACE4_INHERIT_ONLY_ACE; -+} -+ -+/** -+ * richace_is_inheritable - check if @ace is inheritable -+ */ -+static inline int -+richace_is_inheritable(const struct richace *ace) -+{ -+ return ace->e_flags & (ACE4_FILE_INHERIT_ACE | -+ ACE4_DIRECTORY_INHERIT_ACE); -+} -+ -+/** -+ * richace_clear_inheritance_flags - clear all inheritance flags in @ace -+ */ -+static inline void -+richace_clear_inheritance_flags(struct richace *ace) -+{ -+ ace->e_flags &= ~(ACE4_FILE_INHERIT_ACE | -+ ACE4_DIRECTORY_INHERIT_ACE | -+ ACE4_NO_PROPAGATE_INHERIT_ACE | -+ ACE4_INHERIT_ONLY_ACE); -+} -+ -+/** -+ * richace_is_allow - check if @ace is an %ALLOW type entry -+ */ -+static inline int -+richace_is_allow(const struct richace *ace) -+{ -+ return ace->e_type == ACE4_ACCESS_ALLOWED_ACE_TYPE; -+} -+ -+/** -+ * richace_is_deny - check if @ace is a %DENY type entry -+ */ -+static inline int -+richace_is_deny(const struct richace *ace) -+{ -+ return ace->e_type == ACE4_ACCESS_DENIED_ACE_TYPE; -+} -+ -+extern struct richacl *richacl_alloc(int); -+extern int richace_is_same_identifier(const struct richace *, -+ const struct richace *); -+extern int richace_set_who(struct richace *, const char *); -+ -+#endif /* __RICHACL_H */ diff --git a/patches.suse/0005-richacl-Permission-mapping-functions.patch b/patches.suse/0005-richacl-Permission-mapping-functions.patch deleted file mode 100644 index cb7adc8..0000000 --- a/patches.suse/0005-richacl-Permission-mapping-functions.patch +++ /dev/null @@ -1,167 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:47 +0530 -Subject: [PATCH 05/16] richacl: Permission mapping functions -Patch-mainline: not yet - -We need to map from POSIX permissions to NFSv4 permissions when a -chmod() is done, from NFSv4 permissions to POSIX permissions when an acl -is set (which implicitly sets the file permission bits), and from the 
-MAY_READ/MAY_WRITE/MAY_EXEC/MAY_APPEND flags to NFSv4 permissions when -doing an access check in a richacl. - -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/richacl_base.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++++ - include/linux/richacl.h | 27 +++++++++++++ - 2 files changed, 125 insertions(+) - ---- a/fs/richacl_base.c -+++ b/fs/richacl_base.c -@@ -69,6 +69,104 @@ richacl_clone(const struct richacl *acl) - } - - /** -+ * richacl_mask_to_mode - compute the file permission bits which correspond to @mask -+ * @mask: %ACE4_* permission mask -+ * -+ * See richacl_masks_to_mode(). -+ */ -+static int -+richacl_mask_to_mode(unsigned int mask) -+{ -+ int mode = 0; -+ -+ if (mask & ACE4_POSIX_MODE_READ) -+ mode |= MAY_READ; -+ if (mask & ACE4_POSIX_MODE_WRITE) -+ mode |= MAY_WRITE; -+ if (mask & ACE4_POSIX_MODE_EXEC) -+ mode |= MAY_EXEC; -+ -+ return mode; -+} -+ -+/** -+ * richacl_masks_to_mode - compute the file permission bits from the file masks -+ * -+ * When setting a richacl, we set the file permission bits to indicate maximum -+ * permissions: for example, we set the Write permission when a mask contains -+ * ACE4_APPEND_DATA even if it does not also contain ACE4_WRITE_DATA. -+ * -+ * Permissions which are not in ACE4_POSIX_MODE_READ, ACE4_POSIX_MODE_WRITE, or -+ * ACE4_POSIX_MODE_EXEC cannot be represented in the file permission bits. -+ * Such permissions can still be effective, but not for new files or after a -+ * chmod(), and only if they were set explicitly, for example, by setting a -+ * richacl. 
-+ */ -+int -+richacl_masks_to_mode(const struct richacl *acl) -+{ -+ return richacl_mask_to_mode(acl->a_owner_mask) << 6 | -+ richacl_mask_to_mode(acl->a_group_mask) << 3 | -+ richacl_mask_to_mode(acl->a_other_mask); -+} -+EXPORT_SYMBOL_GPL(richacl_masks_to_mode); -+ -+/** -+ * richacl_mode_to_mask - compute a file mask from the lowest three mode bits -+ * -+ * When the file permission bits of a file are set with chmod(), this specifies -+ * the maximum permissions that processes will get. All permissions beyond -+ * that will be removed from the file masks, and become ineffective. -+ * -+ * We also add in the permissions which are always allowed no matter what the -+ * acl says. -+ */ -+unsigned int -+richacl_mode_to_mask(mode_t mode) -+{ -+ unsigned int mask = ACE4_POSIX_ALWAYS_ALLOWED; -+ -+ if (mode & MAY_READ) -+ mask |= ACE4_POSIX_MODE_READ; -+ if (mode & MAY_WRITE) -+ mask |= ACE4_POSIX_MODE_WRITE; -+ if (mode & MAY_EXEC) -+ mask |= ACE4_POSIX_MODE_EXEC; -+ -+ return mask; -+} -+ -+/** -+ * richacl_want_to_mask - convert the iop->permission want argument to a mask -+ * @want: @want argument of the permission inode operation -+ * -+ * When checking for append, @want is (MAY_WRITE | MAY_APPEND). -+ * -+ * Richacls use the iop->may_create and iop->may_delete hooks which are -+ * used for checking if creating and deleting files is allowed. These hooks do -+ * not use richacl_want_to_mask(), so we do not have to deal with mapping -+ * MAY_WRITE to ACE4_ADD_FILE, ACE4_ADD_SUBDIRECTORY, and ACE4_DELETE_CHILD -+ * here. -+ */ -+unsigned int -+richacl_want_to_mask(int want) -+{ -+ unsigned int mask = 0; -+ -+ if (want & MAY_READ) -+ mask |= ACE4_READ_DATA; -+ if (want & MAY_APPEND) -+ mask |= ACE4_APPEND_DATA; -+ else if (want & MAY_WRITE) -+ mask |= ACE4_WRITE_DATA; -+ if (want & MAY_EXEC) -+ mask |= ACE4_EXECUTE; -+ -+ return mask; -+} -+EXPORT_SYMBOL_GPL(richacl_want_to_mask); -+ -+/** - * richace_is_same_identifier - are both identifiers the same? 
- */ - int ---- a/include/linux/richacl.h -+++ b/include/linux/richacl.h -@@ -111,6 +111,30 @@ struct richacl { - ACE4_WRITE_OWNER | \ - ACE4_SYNCHRONIZE) - -+/* -+ * The POSIX permissions are supersets of the following NFSv4 permissions: -+ * -+ * - MAY_READ maps to READ_DATA or LIST_DIRECTORY, depending on the type -+ * of the file system object. -+ * -+ * - MAY_WRITE maps to WRITE_DATA or ACE4_APPEND_DATA for files, and to -+ * ADD_FILE, ACE4_ADD_SUBDIRECTORY, or ACE4_DELETE_CHILD for directories. -+ * -+ * - MAY_EXECUTE maps to ACE4_EXECUTE. -+ * -+ * (Some of these NFSv4 permissions have the same bit values.) -+ */ -+#define ACE4_POSIX_MODE_READ ( \ -+ ACE4_READ_DATA | ACE4_LIST_DIRECTORY) -+#define ACE4_POSIX_MODE_WRITE ( \ -+ ACE4_WRITE_DATA | ACE4_ADD_FILE | \ -+ ACE4_APPEND_DATA | ACE4_ADD_SUBDIRECTORY | \ -+ ACE4_DELETE_CHILD) -+#define ACE4_POSIX_MODE_EXEC ( \ -+ ACE4_EXECUTE) -+#define ACE4_POSIX_MODE_ALL (ACE4_POSIX_MODE_READ | ACE4_POSIX_MODE_WRITE | \ -+ ACE4_POSIX_MODE_EXEC) -+ - /* These permissions are always allowed no matter what the acl says. 
*/ - #define ACE4_POSIX_ALWAYS_ALLOWED ( \ - ACE4_SYNCHRONIZE | \ -@@ -241,5 +265,8 @@ extern struct richacl *richacl_alloc(int - extern int richace_is_same_identifier(const struct richace *, - const struct richace *); - extern int richace_set_who(struct richace *, const char *); -+extern int richacl_masks_to_mode(const struct richacl *); -+extern unsigned int richacl_mode_to_mask(mode_t); -+extern unsigned int richacl_want_to_mask(int); - - #endif /* __RICHACL_H */ diff --git a/patches.suse/0006-richacl-Compute-maximum-file-masks-from-an-acl.patch b/patches.suse/0006-richacl-Compute-maximum-file-masks-from-an-acl.patch deleted file mode 100644 index de094b3..0000000 --- a/patches.suse/0006-richacl-Compute-maximum-file-masks-from-an-acl.patch +++ /dev/null @@ -1,164 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:47 +0530 -Subject: [PATCH 06/16] richacl: Compute maximum file masks from an acl -Patch-mainline: not yet - -Compute upper bound owner, group, and other file masks with as few -permissions as possible without denying any permissions that the NFSv4 -acl in a richacl grants. - -This algorithm is used when a file inherits an acl at create time and -when an acl is set via a mechanism that does not specify file modes -(such as via nfsd). When user-space sets an acl, the file masks are -passed in as part of the xattr. - -When setting a richacl, the file masks determine what the file -permission bits will be set to; see richacl_masks_to_mode(). 
- -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/richacl_base.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++ - include/linux/richacl.h | 1 - 2 files changed, 126 insertions(+) - ---- a/fs/richacl_base.c -+++ b/fs/richacl_base.c -@@ -205,3 +205,128 @@ richace_set_who(struct richace *ace, con - return 0; - } - EXPORT_SYMBOL_GPL(richace_set_who); -+ -+/** -+ * richacl_allowed_to_who - mask flags allowed to a specific who value -+ * -+ * Computes the mask values allowed to a specific who value, taking -+ * EVERYONE@ entries into account. -+ */ -+static unsigned int richacl_allowed_to_who(struct richacl *acl, -+ struct richace *who) -+{ -+ struct richace *ace; -+ unsigned int allowed = 0; -+ -+ richacl_for_each_entry_reverse(ace, acl) { -+ if (richace_is_inherit_only(ace)) -+ continue; -+ if (richace_is_same_identifier(ace, who) || -+ richace_is_everyone(ace)) { -+ if (richace_is_allow(ace)) -+ allowed |= ace->e_mask; -+ else if (richace_is_deny(ace)) -+ allowed &= ~ace->e_mask; -+ } -+ } -+ return allowed; -+} -+ -+/** -+ * richacl_group_class_allowed - maximum permissions the group class is allowed -+ * -+ * See richacl_compute_max_masks(). 
-+ */ -+static unsigned int richacl_group_class_allowed(struct richacl *acl) -+{ -+ struct richace *ace; -+ unsigned int everyone_allowed = 0, group_class_allowed = 0; -+ int had_group_ace = 0; -+ -+ richacl_for_each_entry_reverse(ace, acl) { -+ if (richace_is_inherit_only(ace) || -+ richace_is_owner(ace)) -+ continue; -+ -+ if (richace_is_everyone(ace)) { -+ if (richace_is_allow(ace)) -+ everyone_allowed |= ace->e_mask; -+ else if (richace_is_deny(ace)) -+ everyone_allowed &= ~ace->e_mask; -+ } else { -+ group_class_allowed |= -+ richacl_allowed_to_who(acl, ace); -+ -+ if (richace_is_group(ace)) -+ had_group_ace = 1; -+ } -+ } -+ if (!had_group_ace) -+ group_class_allowed |= everyone_allowed; -+ return group_class_allowed; -+} -+ -+/** -+ * richacl_compute_max_masks - compute upper bound masks -+ * -+ * Computes upper bound owner, group, and other masks so that none of -+ * the mask flags allowed by the acl are disabled (for any choice of the -+ * file owner or group membership). -+ */ -+void richacl_compute_max_masks(struct richacl *acl) -+{ -+ unsigned int gmask = ~0; -+ struct richace *ace; -+ -+ /* -+ * @gmask contains all permissions which the group class is ever -+ * allowed. We use it to avoid adding permissions to the group mask -+ * from everyone@ allow aces which the group class is always denied -+ * through other aces. For example, the following acl would otherwise -+ * result in a group mask or rw: -+ * -+ * group@:w::deny -+ * everyone@:rw::allow -+ * -+ * Avoid computing @gmask for acls which do not include any group class -+ * deny aces: in such acls, the group class is never denied any -+ * permissions from everyone@ allow aces. 
-+ */ -+ -+restart: -+ acl->a_owner_mask = 0; -+ acl->a_group_mask = 0; -+ acl->a_other_mask = 0; -+ -+ richacl_for_each_entry_reverse(ace, acl) { -+ if (richace_is_inherit_only(ace)) -+ continue; -+ -+ if (richace_is_owner(ace)) { -+ if (richace_is_allow(ace)) -+ acl->a_owner_mask |= ace->e_mask; -+ else if (richace_is_deny(ace)) -+ acl->a_owner_mask &= ~ace->e_mask; -+ } else if (richace_is_everyone(ace)) { -+ if (richace_is_allow(ace)) { -+ acl->a_owner_mask |= ace->e_mask; -+ acl->a_group_mask |= ace->e_mask & gmask; -+ acl->a_other_mask |= ace->e_mask; -+ } else if (richace_is_deny(ace)) { -+ acl->a_owner_mask &= ~ace->e_mask; -+ acl->a_group_mask &= ~ace->e_mask; -+ acl->a_other_mask &= ~ace->e_mask; -+ } -+ } else { -+ if (richace_is_allow(ace)) { -+ acl->a_owner_mask |= ace->e_mask & gmask; -+ acl->a_group_mask |= ace->e_mask & gmask; -+ } else if (richace_is_deny(ace) && gmask == ~0) { -+ gmask = richacl_group_class_allowed(acl); -+ if (likely(gmask != ~0)) /* should always be true */ -+ goto restart; -+ } -+ } -+ } -+} -+EXPORT_SYMBOL_GPL(richacl_compute_max_masks); ---- a/include/linux/richacl.h -+++ b/include/linux/richacl.h -@@ -268,5 +268,6 @@ extern int richace_set_who(struct richac - extern int richacl_masks_to_mode(const struct richacl *); - extern unsigned int richacl_mode_to_mask(mode_t); - extern unsigned int richacl_want_to_mask(int); -+extern void richacl_compute_max_masks(struct richacl *); - - #endif /* __RICHACL_H */ diff --git a/patches.suse/0007-richacl-Update-the-file-masks-in-chmod.patch b/patches.suse/0007-richacl-Update-the-file-masks-in-chmod.patch deleted file mode 100644 index 281d4cb..0000000 --- a/patches.suse/0007-richacl-Update-the-file-masks-in-chmod.patch +++ /dev/null @@ -1,79 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:47 +0530 -Subject: [PATCH 07/16] richacl: Update the file masks in chmod() -Patch-mainline: not yet - -Doing a chmod() sets the file mode, which includes the file permission -bits. 
When a file has a richacl, the permissions that the richacl -grants need to be limited to what the new file permission bits allow. - -This is done by setting the file masks in the richacl to what the file -permission bits map to. The richacl access check algorithm takes the -file masks into account, which ensures that the richacl cannot grant too -many permissions. - -It is possible to explicitly add permissions to the file masks which go -beyond what the file permission bits can grant (like the ACE4_WRITE_ACL -permission). The POSIX.1 standard calls this an alternate file access -control mechanism. A subsequent chmod() would ensure that those -permissions are disabled again. - -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/richacl_base.c | 37 +++++++++++++++++++++++++++++++++++++ - include/linux/richacl.h | 1 + - 2 files changed, 38 insertions(+) - ---- a/fs/richacl_base.c -+++ b/fs/richacl_base.c -@@ -330,3 +330,40 @@ restart: - } - } - EXPORT_SYMBOL_GPL(richacl_compute_max_masks); -+ -+/** -+ * richacl_chmod - update the file masks to reflect the new mode -+ * @mode: new file permission bits -+ * -+ * Return a copy of @acl where the file masks have been replaced by the file -+ * masks corresponding to the file permission bits in @mode, or returns @acl -+ * itself if the file masks are already up to date. Takes over a reference -+ * to @acl. 
-+ */ -+struct richacl * -+richacl_chmod(struct richacl *acl, mode_t mode) -+{ -+ unsigned int owner_mask, group_mask, other_mask; -+ struct richacl *clone; -+ -+ owner_mask = richacl_mode_to_mask(mode >> 6); -+ group_mask = richacl_mode_to_mask(mode >> 3); -+ other_mask = richacl_mode_to_mask(mode); -+ -+ if (acl->a_owner_mask == owner_mask && -+ acl->a_group_mask == group_mask && -+ acl->a_other_mask == other_mask) -+ return acl; -+ -+ clone = richacl_clone(acl); -+ richacl_put(acl); -+ if (!clone) -+ return ERR_PTR(-ENOMEM); -+ -+ clone->a_owner_mask = owner_mask; -+ clone->a_group_mask = group_mask; -+ clone->a_other_mask = other_mask; -+ -+ return clone; -+} -+EXPORT_SYMBOL_GPL(richacl_chmod); ---- a/include/linux/richacl.h -+++ b/include/linux/richacl.h -@@ -269,5 +269,6 @@ extern int richacl_masks_to_mode(const s - extern unsigned int richacl_mode_to_mask(mode_t); - extern unsigned int richacl_want_to_mask(int); - extern void richacl_compute_max_masks(struct richacl *); -+extern struct richacl *richacl_chmod(struct richacl *, mode_t); - - #endif /* __RICHACL_H */ diff --git a/patches.suse/0008-richacl-Permission-check-algorithm.patch b/patches.suse/0008-richacl-Permission-check-algorithm.patch deleted file mode 100644 index 8d5eb7f..0000000 --- a/patches.suse/0008-richacl-Permission-check-algorithm.patch +++ /dev/null @@ -1,130 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:48 +0530 -Subject: [PATCH 08/16] richacl: Permission check algorithm -Patch-mainline: not yet - -As in the standard POSIX file permission model, each process is the -owner, group, or other file class. A process is - - - in the owner file class if it owns the file, - - in the group file class if it is in the file's owning group or it - matches any of the user or group entries, and - - in the other file class otherwise. - -Each file class is associated with a file mask. 
- -A richacl grants a requested access if the NFSv4 acl in the richacl -grants the requested permissions (according to the NFSv4 permission -check algorithm) and the file mask that applies to the process includes -the requested permissions. - -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/richacl_base.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++++ - include/linux/richacl.h | 2 + - 2 files changed, 89 insertions(+) - ---- a/fs/richacl_base.c -+++ b/fs/richacl_base.c -@@ -367,3 +367,90 @@ richacl_chmod(struct richacl *acl, mode_ - return clone; - } - EXPORT_SYMBOL_GPL(richacl_chmod); -+ -+/** -+ * richacl_permission - richacl permission check algorithm -+ * @inode: inode to check -+ * @acl: rich acl of the inode -+ * @mask: requested access (ACE4_* bitmask) -+ * -+ * Checks if the current process is granted @mask flags in @acl. -+ */ -+int -+richacl_permission(struct inode *inode, const struct richacl *acl, -+ unsigned int mask) -+{ -+ const struct richace *ace; -+ unsigned int file_mask, requested = mask, denied = 0; -+ int in_owning_group = in_group_p(inode->i_gid); -+ int in_owner_or_group_class = in_owning_group; -+ -+ /* -+ * A process is -+ * - in the owner file class if it owns the file, -+ * - in the group file class if it is in the file's owning group or -+ * it matches any of the user or group entries, and -+ * - in the other file class otherwise. -+ */ -+ -+ /* -+ * Check if the acl grants the requested access and determine which -+ * file class the process is in. 
-+ */ -+ richacl_for_each_entry(ace, acl) { -+ unsigned int ace_mask = ace->e_mask; -+ -+ if (richace_is_inherit_only(ace)) -+ continue; -+ if (richace_is_owner(ace)) { -+ if (current_fsuid() != inode->i_uid) -+ continue; -+ goto is_owner; -+ } else if (richace_is_group(ace)) { -+ if (!in_owning_group) -+ continue; -+ } else if (richace_is_unix_id(ace)) { -+ if (ace->e_flags & ACE4_IDENTIFIER_GROUP) { -+ if (!in_group_p(ace->u.e_id)) -+ continue; -+ } else { -+ if (current_fsuid() != ace->u.e_id) -+ continue; -+ } -+ } else -+ goto is_everyone; -+ -+is_owner: -+ /* The process is in the owner or group file class. */ -+ in_owner_or_group_class = 1; -+ -+is_everyone: -+ /* Check which mask flags the ACE allows or denies. */ -+ if (richace_is_deny(ace)) -+ denied |= ace_mask & mask; -+ mask &= ~ace_mask; -+ -+ /* -+ * Keep going until we know which file class -+ * the process is in. -+ */ -+ if (!mask && in_owner_or_group_class) -+ break; -+ } -+ denied |= mask; -+ -+ /* -+ * The file class a process is in determines which file mask applies. -+ * Check if that file mask also grants the requested access. -+ */ -+ if (current_fsuid() == inode->i_uid) -+ file_mask = acl->a_owner_mask; -+ else if (in_owner_or_group_class) -+ file_mask = acl->a_group_mask; -+ else -+ file_mask = acl->a_other_mask; -+ denied |= requested & ~file_mask; -+ -+ return denied ? 
-EACCES : 0; -+} -+EXPORT_SYMBOL_GPL(richacl_permission); ---- a/include/linux/richacl.h -+++ b/include/linux/richacl.h -@@ -270,5 +270,7 @@ extern unsigned int richacl_mode_to_mask - extern unsigned int richacl_want_to_mask(int); - extern void richacl_compute_max_masks(struct richacl *); - extern struct richacl *richacl_chmod(struct richacl *, mode_t); -+extern int richacl_permission(struct inode *, const struct richacl *, -+ unsigned int); - - #endif /* __RICHACL_H */ diff --git a/patches.suse/0009-richacl-Helper-functions-for-implementing-richacl-in.patch b/patches.suse/0009-richacl-Helper-functions-for-implementing-richacl-in.patch deleted file mode 100644 index 4f96841..0000000 --- a/patches.suse/0009-richacl-Helper-functions-for-implementing-richacl-in.patch +++ /dev/null @@ -1,252 +0,0 @@ -From: Andreas Gruenbacher -Date: Sat, 12 Jun 2010 19:48:47 +0200 -Subject: [PATCH 09/16] richacl: Helper functions for implementing richacl inode operations -Patch-mainline: not yet - -These functions are supposed to be used by file systems so that the file -system independent code remains in the vfs. - -Signed-off-by: Andreas Gruenbacher ---- - fs/Makefile | 2 - fs/richacl_inode.c | 194 ++++++++++++++++++++++++++++++++++++++++++++++++ - include/linux/richacl.h | 21 +++++ - 3 files changed, 216 insertions(+), 1 deletion(-) - create mode 100644 fs/richacl_inode.c - ---- a/fs/Makefile -+++ b/fs/Makefile -@@ -52,7 +52,7 @@ obj-$(CONFIG_NFS_COMMON) += nfs_common/ - obj-$(CONFIG_GENERIC_ACL) += generic_acl.o - - obj-$(CONFIG_FS_RICHACL) += richacl.o --richacl-y := richacl_base.o -+richacl-y := richacl_base.o richacl_inode.o - - obj-y += quota/ - ---- /dev/null -+++ b/fs/richacl_inode.c -@@ -0,0 +1,194 @@ -+/* -+ * Copyright (C) 2010 Novell, Inc. 
-+ * Written by Andreas Gruenbacher -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2, or (at your option) any -+ * later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ */ -+ -+#include -+#include -+#include -+#include -+ -+/** -+ * richacl_may_create - helper for implementing iop->may_create -+ */ -+int -+richacl_may_create(struct inode *dir, int isdir, -+ int (*richacl_permission)(struct inode *, unsigned int)) -+{ -+ if (IS_RICHACL(dir)) -+ return richacl_permission(dir, -+ ACE4_EXECUTE | (isdir ? -+ ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE)); -+ else -+ return generic_permission(dir, MAY_WRITE | MAY_EXEC, -+ dir->i_op->check_acl); -+} -+EXPORT_SYMBOL(richacl_may_create); -+ -+static int -+check_sticky(struct inode *dir, struct inode *inode) -+{ -+ if (!(dir->i_mode & S_ISVTX)) -+ return 0; -+ if (inode->i_uid == current_fsuid()) -+ return 0; -+ if (dir->i_uid == current_fsuid()) -+ return 0; -+ return !capable(CAP_FOWNER); -+} -+ -+/** -+ * richacl_may_delete - helper for implementing iop->may_delete -+ */ -+int -+richacl_may_delete(struct inode *dir, struct inode *inode, int replace, -+ int (*richacl_permission)(struct inode *, unsigned int)) -+{ -+ int error; -+ -+ if (IS_RICHACL(inode)) { -+ error = richacl_permission(dir, -+ ACE4_EXECUTE | ACE4_DELETE_CHILD); -+ if (!error && check_sticky(dir, inode)) -+ error = -EPERM; -+ if (error && !richacl_permission(inode, ACE4_DELETE)) -+ error = 0; -+ if (!error && replace) -+ error = richacl_permission(dir, -+ ACE4_EXECUTE | (S_ISDIR(inode->i_mode) ? 
-+ ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE)); -+ } else { -+ error = generic_permission(dir, MAY_WRITE | MAY_EXEC, -+ dir->i_op->check_acl); -+ if (!error && check_sticky(dir, inode)) -+ error = -EPERM; -+ } -+ -+ return error; -+} -+EXPORT_SYMBOL(richacl_may_delete); -+ -+/** -+ * richacl_inode_permission - helper for implementing iop->permission -+ * @inode: inode to check -+ * @acl: rich acl of the inode (may be NULL) -+ * @mask: requested access (ACE4_* bitmask) -+ * -+ * This function is supposed to be used by file systems for implementing the -+ * permission inode operation. -+ */ -+int -+richacl_inode_permission(struct inode *inode, const struct richacl *acl, -+ unsigned int mask) -+{ -+ if (acl) { -+ if (!richacl_permission(inode, acl, mask)) -+ return 0; -+ } else { -+ int mode = inode->i_mode; -+ -+ if (current_fsuid() == inode->i_uid) -+ mode >>= 6; -+ else if (in_group_p(inode->i_gid)) -+ mode >>= 3; -+ if (!(mask & ~richacl_mode_to_mask(mode))) -+ return 0; -+ } -+ -+ /* -+ * Keep in sync with the capability checks in generic_permission(). -+ */ -+ if (!(mask & ~ACE4_POSIX_MODE_ALL)) { -+ /* -+ * Read/write DACs are always overridable. -+ * Executable DACs are overridable if at -+ * least one exec bit is set. -+ */ -+ if (!(mask & ACE4_POSIX_MODE_EXEC) || execute_ok(inode)) -+ if (capable(CAP_DAC_OVERRIDE)) -+ return 0; -+ } -+ /* -+ * Searching includes executable on directories, else just read. -+ */ -+ if (!(mask & ~(ACE4_READ_DATA | ACE4_LIST_DIRECTORY | ACE4_EXECUTE)) && -+ (S_ISDIR(inode->i_mode) || !(mask & ACE4_EXECUTE))) -+ if (capable(CAP_DAC_READ_SEARCH)) -+ return 0; -+ -+ return -EACCES; -+} -+EXPORT_SYMBOL_GPL(richacl_inode_permission); -+ -+/** -+ * richacl_inode_change_ok - helper for implementing iop->setattr -+ * @inode: inode to check -+ * @attr: requested inode attribute changes -+ * @richacl_permission: permission function taking an inode and ACE4_* flags -+ * -+ * Keep in sync with inode_change_ok(). 
-+ */ -+int -+richacl_inode_change_ok(struct inode *inode, struct iattr *attr, -+ int (*richacl_permission)(struct inode *, unsigned int)) -+{ -+ unsigned int ia_valid = attr->ia_valid; -+ -+ /* If force is set do it anyway. */ -+ if (ia_valid & ATTR_FORCE) -+ return 0; -+ -+ /* Make sure a caller can chown. */ -+ if ((ia_valid & ATTR_UID) && -+ (current_fsuid() != inode->i_uid || -+ attr->ia_uid != inode->i_uid) && -+ (current_fsuid() != attr->ia_uid || -+ richacl_permission(inode, ACE4_WRITE_OWNER)) && -+ !capable(CAP_CHOWN)) -+ goto error; -+ -+ /* Make sure caller can chgrp. */ -+ if ((ia_valid & ATTR_GID)) { -+ int in_group = in_group_p(attr->ia_gid); -+ if ((current_fsuid() != inode->i_uid || -+ (!in_group && attr->ia_gid != inode->i_gid)) && -+ (!in_group || -+ richacl_permission(inode, ACE4_WRITE_OWNER)) && -+ !capable(CAP_CHOWN)) -+ goto error; -+ } -+ -+ /* Make sure a caller can chmod. */ -+ if (ia_valid & ATTR_MODE) { -+ if (current_fsuid() != inode->i_uid && -+ richacl_permission(inode, ACE4_WRITE_ACL) && -+ !capable(CAP_FOWNER)) -+ goto error; -+ /* Also check the setgid bit! */ -+ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : -+ inode->i_gid) && !capable(CAP_FSETID)) -+ attr->ia_mode &= ~S_ISGID; -+ } -+ -+ /* Check for setting the inode time. 
*/ -+ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) { -+ if (current_fsuid() != inode->i_uid && -+ richacl_permission(inode, ACE4_WRITE_ATTRIBUTES) && -+ !capable(CAP_FOWNER)) -+ goto error; -+ } -+ return 0; -+error: -+ return -EPERM; -+} -+EXPORT_SYMBOL_GPL(richacl_inode_change_ok); ---- a/include/linux/richacl.h -+++ b/include/linux/richacl.h -@@ -273,4 +273,25 @@ extern struct richacl *richacl_chmod(str - extern int richacl_permission(struct inode *, const struct richacl *, - unsigned int); - -+/* richacl_inode.c */ -+ -+#ifdef CONFIG_FS_RICHACL -+extern int richacl_may_create(struct inode *, int, -+ int (*)(struct inode *, unsigned int)); -+extern int richacl_may_delete(struct inode *, struct inode *, int, -+ int (*)(struct inode *, unsigned int)); -+extern int richacl_inode_permission(struct inode *, const struct richacl *, -+ unsigned int); -+extern int richacl_inode_change_ok(struct inode *, struct iattr *, -+ int (*)(struct inode *, unsigned int)); -+#else -+static inline int -+richacl_inode_change_ok(struct inode *inode, struct iattr *attr, -+ int (*richacl_permission)(struct inode *inode, -+ unsigned int mask)) -+{ -+ return -EPERM; -+} -+#endif -+ - #endif /* __RICHACL_H */ diff --git a/patches.suse/0010-richacl-Create-time-inheritance.patch b/patches.suse/0010-richacl-Create-time-inheritance.patch deleted file mode 100644 index 5e81a2b..0000000 --- a/patches.suse/0010-richacl-Create-time-inheritance.patch +++ /dev/null @@ -1,127 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:49 +0530 -Subject: [PATCH 10/16] richacl: Create-time inheritance -Patch-mainline: not yet - -When a new file is created, it can inherit an acl from its parent -directory; this is similar to how default acls work in POSIX (draft) -ACLs. 
- -As with POSIX ACLs, if a file inherits an acl from its parent directory, -the intersection between the create mode and the permissions granted by -the inherited acl determines the file masks and file permission bits, -and the umask is ignored. - -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/richacl_base.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++ - include/linux/richacl.h | 1 - 2 files changed, 91 insertions(+) - ---- a/fs/richacl_base.c -+++ b/fs/richacl_base.c -@@ -454,3 +454,93 @@ is_everyone: - return denied ? -EACCES : 0; - } - EXPORT_SYMBOL_GPL(richacl_permission); -+ -+/** -+ * richacl_inherit - compute the inherited acl of a new file -+ * @dir_acl: acl of the containing direcory -+ * @inode: inode of the new file (create mode in i_mode) -+ * -+ * A directory can have acl entries which files and/or directories created -+ * inside the directory will inherit. This function computes the acl for such -+ * a new file. If there is no inheritable acl, it will return %NULL. -+ * -+ * The file permission bits in inode->i_mode must be set to the create mode. -+ * If there is an inheritable acl, the maximum permissions that the acl grants -+ * will be computed and permissions not granted by the acl will be removed from -+ * inode->i_mode. If there is no inheritable acl, the umask will be applied -+ * instead. 
-+ */ -+struct richacl * -+richacl_inherit(const struct richacl *dir_acl, struct inode *inode) -+{ -+ const struct richace *dir_ace; -+ struct richacl *acl = NULL; -+ struct richace *ace; -+ int count = 0; -+ mode_t mask = ~current_umask(); -+ -+ if (S_ISDIR(inode->i_mode)) { -+ richacl_for_each_entry(dir_ace, dir_acl) { -+ if (!richace_is_inheritable(dir_ace)) -+ continue; -+ count++; -+ } -+ if (!count) -+ goto mask; -+ acl = richacl_alloc(count); -+ if (!acl) -+ return ERR_PTR(-ENOMEM); -+ ace = acl->a_entries; -+ richacl_for_each_entry(dir_ace, dir_acl) { -+ if (!richace_is_inheritable(dir_ace)) -+ continue; -+ memcpy(ace, dir_ace, sizeof(struct richace)); -+ if (dir_ace->e_flags & ACE4_NO_PROPAGATE_INHERIT_ACE) -+ richace_clear_inheritance_flags(ace); -+ if ((dir_ace->e_flags & ACE4_FILE_INHERIT_ACE) && -+ !(dir_ace->e_flags & ACE4_DIRECTORY_INHERIT_ACE)) -+ ace->e_flags |= ACE4_INHERIT_ONLY_ACE; -+ ace++; -+ } -+ } else { -+ richacl_for_each_entry(dir_ace, dir_acl) { -+ if (!(dir_ace->e_flags & ACE4_FILE_INHERIT_ACE)) -+ continue; -+ count++; -+ } -+ if (!count) -+ goto mask; -+ acl = richacl_alloc(count); -+ if (!acl) -+ return ERR_PTR(-ENOMEM); -+ ace = acl->a_entries; -+ richacl_for_each_entry(dir_ace, dir_acl) { -+ if (!(dir_ace->e_flags & ACE4_FILE_INHERIT_ACE)) -+ continue; -+ memcpy(ace, dir_ace, sizeof(struct richace)); -+ richace_clear_inheritance_flags(ace); -+ /* -+ * ACE4_DELETE_CHILD is meaningless for -+ * non-directories, so clear it. -+ */ -+ ace->e_mask &= ~ACE4_DELETE_CHILD; -+ ace++; -+ } -+ } -+ -+ richacl_compute_max_masks(acl); -+ -+ /* -+ * Ensure that the acl will not grant any permissions beyond the create -+ * mode. 
-+ */ -+ acl->a_owner_mask &= richacl_mode_to_mask(inode->i_mode >> 6); -+ acl->a_group_mask &= richacl_mode_to_mask(inode->i_mode >> 3); -+ acl->a_other_mask &= richacl_mode_to_mask(inode->i_mode); -+ mask = ~S_IRWXUGO | richacl_masks_to_mode(acl); -+ -+mask: -+ inode->i_mode &= mask; -+ return acl; -+} -+EXPORT_SYMBOL_GPL(richacl_inherit); ---- a/include/linux/richacl.h -+++ b/include/linux/richacl.h -@@ -272,6 +272,7 @@ extern void richacl_compute_max_masks(st - extern struct richacl *richacl_chmod(struct richacl *, mode_t); - extern int richacl_permission(struct inode *, const struct richacl *, - unsigned int); -+extern struct richacl *richacl_inherit(const struct richacl *, struct inode *); - - /* richacl_inode.c */ - diff --git a/patches.suse/0011-richacl-Check-if-an-acl-is-equivalent-to-a-file-mode.patch b/patches.suse/0011-richacl-Check-if-an-acl-is-equivalent-to-a-file-mode.patch deleted file mode 100644 index cc53564..0000000 --- a/patches.suse/0011-richacl-Check-if-an-acl-is-equivalent-to-a-file-mode.patch +++ /dev/null @@ -1,79 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:50 +0530 -Subject: [PATCH 11/16] richacl: Check if an acl is equivalent to a file mode -Patch-mainline: not yet - -This function is used to avoid storing richacls on disk if the acl can -be computed from the file permission bits. 
- -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/richacl_base.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ - include/linux/richacl.h | 1 + - 2 files changed, 49 insertions(+) - ---- a/fs/richacl_base.c -+++ b/fs/richacl_base.c -@@ -544,3 +544,51 @@ mask: - return acl; - } - EXPORT_SYMBOL_GPL(richacl_inherit); -+ -+/** -+ * richacl_equiv_mode - check if @acl is equivalent to file permission bits -+ * @mode_p: the file mode (including the file type) -+ * -+ * If @acl can be fully represented by file permission bits, this function -+ * returns 0, and the file permission bits in @mode_p are set to the equivalent -+ * of @acl. -+ * -+ * This function is used to avoid storing richacls on disk if the acl can be -+ * computed from the file permission bits. It allows user-space to make sure -+ * that a file has no explicit richacl set. -+ */ -+int -+richacl_equiv_mode(const struct richacl *acl, mode_t *mode_p) -+{ -+ const struct richace *ace = acl->a_entries; -+ unsigned int x; -+ mode_t mode; -+ -+ if (acl->a_count != 1 || -+ acl->a_flags || -+ !richace_is_everyone(ace) || -+ !richace_is_allow(ace) || -+ ace->e_flags & ~ACE4_SPECIAL_WHO) -+ return -1; -+ -+ /* -+ * Figure out the permissions we care about: ACE4_DELETE_CHILD is -+ * meaningless for non-directories, so we ignore it. 
-+ */ -+ x = ~ACE4_POSIX_ALWAYS_ALLOWED; -+ if (!S_ISDIR(*mode_p)) -+ x &= ~ACE4_DELETE_CHILD; -+ -+ if ((ace->e_mask & x) != (ACE4_POSIX_MODE_ALL & x)) -+ return -1; -+ -+ mode = richacl_masks_to_mode(acl); -+ if ((acl->a_owner_mask & x) != (richacl_mode_to_mask(mode >> 6) & x) || -+ (acl->a_group_mask & x) != (richacl_mode_to_mask(mode >> 3) & x) || -+ (acl->a_other_mask & x) != (richacl_mode_to_mask(mode) & x)) -+ return -1; -+ -+ *mode_p = (*mode_p & ~S_IRWXUGO) | mode; -+ return 0; -+} -+EXPORT_SYMBOL_GPL(richacl_equiv_mode); ---- a/include/linux/richacl.h -+++ b/include/linux/richacl.h -@@ -273,6 +273,7 @@ extern struct richacl *richacl_chmod(str - extern int richacl_permission(struct inode *, const struct richacl *, - unsigned int); - extern struct richacl *richacl_inherit(const struct richacl *, struct inode *); -+extern int richacl_equiv_mode(const struct richacl *, mode_t *); - - /* richacl_inode.c */ - diff --git a/patches.suse/0012-richacl-Automatic-Inheritance.patch b/patches.suse/0012-richacl-Automatic-Inheritance.patch deleted file mode 100644 index 8e23aef..0000000 --- a/patches.suse/0012-richacl-Automatic-Inheritance.patch +++ /dev/null @@ -1,143 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:52 +0530 -Subject: [PATCH 12/16] richacl: Automatic Inheritance -Patch-mainline: not yet - -Automatic Inheritance (AI) allows changes to the acl of a directory to -recursively propagate down to files and directories in the directory. - -To implement this, the kernel keeps track of which permissions have been -inherited, and makes sure that permission propagation is turned off when -the file permission bits of a file are changed (upon create or chmod). - -The actual permission propagation is implemented in user space. - -AI works as follows: - - - When the ACL4_AUTO_INHERIT flag in the acl of a file is cleared, the - file is not affected by AI. 
- - - When the ACL4_AUTO_INHERIT flag in the acl of a directory is set and - a file or subdirectory is created in that directory, files created in - the directory will have the ACL4_AUTO_INHERIT flag set, and all - inherited aces will have the ACE4_INHERITED_ACE flag set. This - allows user space to distinguish between aces which have been - inherited, and aces which have been explicitly added. - - - When the ACL4_PROTECTED acl flag in the acl of a file is set, AI will - not modify the acl of the file. This does not affect propagation of - permissions from the file to its children (if the file is a - directory). - -Linux does not have a way of creating files without setting the file -permission bits, so all files created inside a directory with -ACL4_AUTO_INHERIT set will also have the ACL4_PROTECTED flag set. This -effectively disables AI. - -Protocols which support creating files without specifying permissions -can explicitly clear the ACL4_PROTECTED flag after creating a file (and -reset the file masks to "undo" applying the create mode; see -richacl_compute_max_masks()). This is a workaround; a per-create or -per-process flag indicating to ignore the create mode when AI is in -effect would fix this problem. 
- -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/richacl_base.c | 15 ++++++++++++++- - include/linux/richacl.h | 25 ++++++++++++++++++++++++- - 2 files changed, 38 insertions(+), 2 deletions(-) - ---- a/fs/richacl_base.c -+++ b/fs/richacl_base.c -@@ -352,7 +352,8 @@ richacl_chmod(struct richacl *acl, mode_ - - if (acl->a_owner_mask == owner_mask && - acl->a_group_mask == group_mask && -- acl->a_other_mask == other_mask) -+ acl->a_other_mask == other_mask && -+ (!richacl_is_auto_inherit(acl) || richacl_is_protected(acl))) - return acl; - - clone = richacl_clone(acl); -@@ -363,6 +364,8 @@ richacl_chmod(struct richacl *acl, mode_ - clone->a_owner_mask = owner_mask; - clone->a_group_mask = group_mask; - clone->a_other_mask = other_mask; -+ if (richacl_is_auto_inherit(clone)) -+ clone->a_flags |= ACL4_PROTECTED; - - return clone; - } -@@ -539,6 +542,16 @@ richacl_inherit(const struct richacl *di - acl->a_other_mask &= richacl_mode_to_mask(inode->i_mode); - mask = ~S_IRWXUGO | richacl_masks_to_mode(acl); - -+ if (richacl_is_auto_inherit(dir_acl)) { -+ /* -+ * We need to set ACL4_PROTECTED because we are -+ * doing an implicit chmod -+ */ -+ acl->a_flags = ACL4_AUTO_INHERIT | ACL4_PROTECTED; -+ richacl_for_each_entry(ace, acl) -+ ace->e_flags |= ACE4_INHERITED_ACE; -+ } -+ - mask: - inode->i_mode &= mask; - return acl; ---- a/include/linux/richacl.h -+++ b/include/linux/richacl.h -@@ -47,6 +47,15 @@ struct richacl { - _ace != _acl->a_entries - 1; \ - _ace--) - -+/* a_flags values */ -+#define ACL4_AUTO_INHERIT 0x01 -+#define ACL4_PROTECTED 0x02 -+/*#define ACL4_DEFAULTED 0x04*/ -+ -+#define ACL4_VALID_FLAGS ( \ -+ ACL4_AUTO_INHERIT | \ -+ ACL4_PROTECTED) -+ - /* e_type values */ - #define ACE4_ACCESS_ALLOWED_ACE_TYPE 0x0000 - #define ACE4_ACCESS_DENIED_ACE_TYPE 0x0001 -@@ -61,6 +70,7 @@ struct richacl { - /*#define ACE4_SUCCESSFUL_ACCESS_ACE_FLAG 0x0010*/ - /*#define ACE4_FAILED_ACCESS_ACE_FLAG 0x0020*/ - #define ACE4_IDENTIFIER_GROUP 
0x0040 -+#define ACE4_INHERITED_ACE 0x0080 - /* in-memory representation only */ - #define ACE4_SPECIAL_WHO 0x4000 - -@@ -69,7 +79,8 @@ struct richacl { - ACE4_DIRECTORY_INHERIT_ACE | \ - ACE4_NO_PROPAGATE_INHERIT_ACE | \ - ACE4_INHERIT_ONLY_ACE | \ -- ACE4_IDENTIFIER_GROUP) -+ ACE4_IDENTIFIER_GROUP | \ -+ ACE4_INHERITED_ACE) - - /* e_mask bitflags */ - #define ACE4_READ_DATA 0x00000001 -@@ -162,6 +173,18 @@ richacl_put(struct richacl *acl) - kfree(acl); - } - -+static inline int -+richacl_is_auto_inherit(const struct richacl *acl) -+{ -+ return acl->a_flags & ACL4_AUTO_INHERIT; -+} -+ -+static inline int -+richacl_is_protected(const struct richacl *acl) -+{ -+ return acl->a_flags & ACL4_PROTECTED; -+} -+ - /* - * Special e_who identifiers: we use these pointer values in comparisons - * instead of doing a strcmp. diff --git a/patches.suse/0013-richacl-Restrict-access-check-algorithm.patch b/patches.suse/0013-richacl-Restrict-access-check-algorithm.patch deleted file mode 100644 index 9d77726..0000000 --- a/patches.suse/0013-richacl-Restrict-access-check-algorithm.patch +++ /dev/null @@ -1,52 +0,0 @@ -From: Andreas Gruenbacher -Date: Mon, 14 Jun 2010 09:22:14 +0530 -Subject: [PATCH 13/16] richacl: Restrict access check algorithm -Patch-mainline: not yet - -We want to avoid applying the file masks to an acl when changing the -file permission bits or performing an access check. On the other hand, -when we *do* apply the file masks to the acl, we want the resulting acl -to produce the same access check results with the standard nfs4 access -check algorithm as the richacl access check algorithm with the original -acl. This is already the case, except in the following scenario: - -With file masks equivalent to file mode 0600, the following acl would -grant the owner rw access if the owner is in the owning group: - - group@:rw::allow - -There is no way to express this in an nfs4 acl; the result is always a -more restrictive acl. 
There are two approaches to deal with this -difference: either accept that it exists and that applying the file -masks is imperfect, or change the richacl access check algorithm so that -such accesses are denied. - -This patch denies such accesses and makes sure that the richacl access -check algorithm grants the same accesses as the nfsv4 acl with the file -masks applied. - -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/richacl_base.c | 10 ++++++++++ - 1 file changed, 10 insertions(+) - ---- a/fs/richacl_base.c -+++ b/fs/richacl_base.c -@@ -423,6 +423,16 @@ richacl_permission(struct inode *inode, - } else - goto is_everyone; - -+ /* -+ * Apply the group file mask to entries other than OWNER@ and -+ * EVERYONE@. This is not required for correct access checking -+ * but ensures that we grant the same permissions as the acl -+ * computed by richacl_apply_masks() would grant. See -+ * richacl_apply_masks() for a more detailed explanation. -+ */ -+ if (richace_is_allow(ace)) -+ ace_mask &= acl->a_group_mask; -+ - is_owner: - /* The process is in the owner or group file class. */ - in_owner_or_group_class = 1; diff --git a/patches.suse/0014-richacl-xattr-mapping-functions.patch b/patches.suse/0014-richacl-xattr-mapping-functions.patch deleted file mode 100644 index c2eb771..0000000 --- a/patches.suse/0014-richacl-xattr-mapping-functions.patch +++ /dev/null @@ -1,237 +0,0 @@ -From: Andreas Gruenbacher -Date: Fri, 11 Jun 2010 16:12:50 +0530 -Subject: [PATCH 14/16] richacl: xattr mapping functions -Patch-mainline: not yet - -Map between "system.richacl" xattrs and the in-kernel representation. 
- -Signed-off-by: Andreas Gruenbacher -Signed-off-by: Aneesh Kumar K.V ---- - fs/Makefile | 2 - fs/richacl_xattr.c | 156 ++++++++++++++++++++++++++++++++++++++++++ - include/linux/richacl_xattr.h | 47 ++++++++++++ - 3 files changed, 204 insertions(+), 1 deletion(-) - create mode 100644 fs/richacl_xattr.c - create mode 100644 include/linux/richacl_xattr.h - ---- a/fs/Makefile -+++ b/fs/Makefile -@@ -52,7 +52,7 @@ obj-$(CONFIG_NFS_COMMON) += nfs_common/ - obj-$(CONFIG_GENERIC_ACL) += generic_acl.o - - obj-$(CONFIG_FS_RICHACL) += richacl.o --richacl-y := richacl_base.o richacl_inode.o -+richacl-y := richacl_base.o richacl_inode.o richacl_xattr.o - - obj-y += quota/ - ---- /dev/null -+++ b/fs/richacl_xattr.c -@@ -0,0 +1,156 @@ -+/* -+ * Copyright (C) 2006, 2010 Novell, Inc. -+ * Written by Andreas Gruenbacher -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2, or (at your option) any -+ * later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_LICENSE("GPL"); -+ -+/** -+ * richacl_from_xattr - convert a richacl xattr into the in-memory representation -+ */ -+struct richacl * -+richacl_from_xattr(const void *value, size_t size) -+{ -+ const struct richacl_xattr *xattr_acl = value; -+ const struct richace_xattr *xattr_ace = (void *)(xattr_acl + 1); -+ struct richacl *acl; -+ struct richace *ace; -+ int count; -+ -+ if (size < sizeof(struct richacl_xattr) || -+ xattr_acl->a_version != ACL4_XATTR_VERSION || -+ (xattr_acl->a_flags & ~ACL4_VALID_FLAGS)) -+ return ERR_PTR(-EINVAL); -+ -+ count = le16_to_cpu(xattr_acl->a_count); -+ if (count > ACL4_XATTR_MAX_COUNT) -+ return ERR_PTR(-EINVAL); -+ -+ acl = richacl_alloc(count); -+ if (!acl) -+ return ERR_PTR(-ENOMEM); -+ -+ acl->a_flags = xattr_acl->a_flags; -+ acl->a_owner_mask = le32_to_cpu(xattr_acl->a_owner_mask); -+ if (acl->a_owner_mask & ~ACE4_VALID_MASK) -+ goto fail_einval; -+ acl->a_group_mask = le32_to_cpu(xattr_acl->a_group_mask); -+ if (acl->a_group_mask & ~ACE4_VALID_MASK) -+ goto fail_einval; -+ acl->a_other_mask = le32_to_cpu(xattr_acl->a_other_mask); -+ if (acl->a_other_mask & ~ACE4_VALID_MASK) -+ goto fail_einval; -+ -+ richacl_for_each_entry(ace, acl) { -+ const char *who = (void *)(xattr_ace + 1), *end; -+ ssize_t used = (void *)who - value; -+ -+ if (used > size) -+ goto fail_einval; -+ end = memchr(who, 0, size - used); -+ if (!end) -+ goto fail_einval; -+ -+ ace->e_type = le16_to_cpu(xattr_ace->e_type); -+ ace->e_flags = le16_to_cpu(xattr_ace->e_flags); -+ ace->e_mask = le32_to_cpu(xattr_ace->e_mask); -+ ace->u.e_id = le32_to_cpu(xattr_ace->e_id); -+ -+ if (ace->e_flags & ~ACE4_VALID_FLAGS) -+ goto fail_einval; -+ if (ace->e_type > ACE4_ACCESS_DENIED_ACE_TYPE || -+ (ace->e_mask & ~ACE4_VALID_MASK)) -+ goto fail_einval; -+ -+ if (who == end) { -+ if (ace->u.e_id == -1) -+ goto fail_einval; /* uid/gid needed */ -+ } else if (richace_set_who(ace, who)) -+ goto 
fail_einval; -+ -+ xattr_ace = (void *)who + ALIGN(end - who + 1, 4); -+ } -+ -+ return acl; -+ -+fail_einval: -+ richacl_put(acl); -+ return ERR_PTR(-EINVAL); -+} -+EXPORT_SYMBOL_GPL(richacl_from_xattr); -+ -+/** -+ * richacl_xattr_size - compute the size of the xattr representation of @acl -+ */ -+size_t -+richacl_xattr_size(const struct richacl *acl) -+{ -+ size_t size = sizeof(struct richacl_xattr); -+ const struct richace *ace; -+ -+ richacl_for_each_entry(ace, acl) { -+ size += sizeof(struct richace_xattr) + -+ (richace_is_unix_id(ace) ? 4 : -+ ALIGN(strlen(ace->u.e_who) + 1, 4)); -+ } -+ return size; -+} -+EXPORT_SYMBOL_GPL(richacl_xattr_size); -+ -+/** -+ * richacl_to_xattr - convert @acl into its xattr representation -+ * @acl: the richacl to convert -+ * @buffer: buffer of size richacl_xattr_size(@acl) for the result -+ */ -+void -+richacl_to_xattr(const struct richacl *acl, void *buffer) -+{ -+ struct richacl_xattr *xattr_acl = buffer; -+ struct richace_xattr *xattr_ace; -+ const struct richace *ace; -+ -+ xattr_acl->a_version = ACL4_XATTR_VERSION; -+ xattr_acl->a_flags = acl->a_flags; -+ xattr_acl->a_count = cpu_to_le16(acl->a_count); -+ -+ xattr_acl->a_owner_mask = cpu_to_le32(acl->a_owner_mask); -+ xattr_acl->a_group_mask = cpu_to_le32(acl->a_group_mask); -+ xattr_acl->a_other_mask = cpu_to_le32(acl->a_other_mask); -+ -+ xattr_ace = (void *)(xattr_acl + 1); -+ richacl_for_each_entry(ace, acl) { -+ xattr_ace->e_type = cpu_to_le16(ace->e_type); -+ xattr_ace->e_flags = cpu_to_le16(ace->e_flags & -+ ACE4_VALID_FLAGS); -+ xattr_ace->e_mask = cpu_to_le32(ace->e_mask); -+ if (richace_is_unix_id(ace)) { -+ xattr_ace->e_id = cpu_to_le32(ace->u.e_id); -+ memset(xattr_ace->e_who, 0, 4); -+ xattr_ace = (void *)xattr_ace->e_who + 4; -+ } else { -+ int sz = ALIGN(strlen(ace->u.e_who) + 1, 4); -+ -+ xattr_ace->e_id = cpu_to_le32(-1); -+ memset(xattr_ace->e_who + sz - 4, 0, 4); -+ strcpy(xattr_ace->e_who, ace->u.e_who); -+ xattr_ace = (void *)xattr_ace->e_who + sz; 
-+ } -+ } -+} -+EXPORT_SYMBOL_GPL(richacl_to_xattr); ---- /dev/null -+++ b/include/linux/richacl_xattr.h -@@ -0,0 +1,47 @@ -+/* -+ * Copyright (C) 2006, 2010 Novell, Inc. -+ * Written by Andreas Gruenbacher -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2, or (at your option) any -+ * later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ */ -+ -+#ifndef __RICHACL_XATTR_H -+#define __RICHACL_XATTR_H -+ -+#include -+ -+#define RICHACL_XATTR "system.richacl" -+ -+struct richace_xattr { -+ __le16 e_type; -+ __le16 e_flags; -+ __le32 e_mask; -+ __le32 e_id; -+ char e_who[0]; -+}; -+ -+struct richacl_xattr { -+ unsigned char a_version; -+ unsigned char a_flags; -+ __le16 a_count; -+ __le32 a_owner_mask; -+ __le32 a_group_mask; -+ __le32 a_other_mask; -+}; -+ -+#define ACL4_XATTR_VERSION 0 -+#define ACL4_XATTR_MAX_COUNT 1024 -+ -+extern struct richacl *richacl_from_xattr(const void *, size_t); -+extern size_t richacl_xattr_size(const struct richacl *acl); -+extern void richacl_to_xattr(const struct richacl *, void *); -+ -+#endif /* __RICHACL_XATTR_H */ diff --git a/patches.suse/0015-ext4-Use-IS_POSIXACL-to-check-for-POSIX-ACL-support.patch b/patches.suse/0015-ext4-Use-IS_POSIXACL-to-check-for-POSIX-ACL-support.patch deleted file mode 100644 index 9c04b2c..0000000 --- a/patches.suse/0015-ext4-Use-IS_POSIXACL-to-check-for-POSIX-ACL-support.patch +++ /dev/null @@ -1,156 +0,0 @@ -From: Aneesh Kumar K.V -Date: Fri, 11 Jun 2010 16:12:51 +0530 -Subject: [PATCH 15/16] ext4: Use IS_POSIXACL() to check for POSIX ACL support -Patch-mainline: not yet - -Use IS_POSIXACL() instead of a file system specific mount 
flag since we -have IS_POSIXACL() in the vfs already, anyway. - -Signed-off-by: Aneesh Kumar K.V -Signed-off-by: Andreas Gruenbacher ---- - fs/ext4/acl.c | 16 ++++++++-------- - fs/ext4/ext4.h | 1 - - fs/ext4/super.c | 16 +++++----------- - 3 files changed, 13 insertions(+), 20 deletions(-) - ---- a/fs/ext4/acl.c -+++ b/fs/ext4/acl.c -@@ -139,7 +139,7 @@ ext4_get_acl(struct inode *inode, int ty - struct posix_acl *acl; - int retval; - -- if (!test_opt(inode->i_sb, POSIX_ACL)) -+ if (!IS_POSIXACL(inode)) - return NULL; - - acl = get_cached_acl(inode, type); -@@ -266,7 +266,7 @@ ext4_init_acl(handle_t *handle, struct i - int error = 0; - - if (!S_ISLNK(inode->i_mode)) { -- if (test_opt(dir->i_sb, POSIX_ACL)) { -+ if (IS_POSIXACL(inode)) { - acl = ext4_get_acl(dir, ACL_TYPE_DEFAULT); - if (IS_ERR(acl)) - return PTR_ERR(acl); -@@ -274,7 +274,7 @@ ext4_init_acl(handle_t *handle, struct i - if (!acl) - inode->i_mode &= ~current_umask(); - } -- if (test_opt(inode->i_sb, POSIX_ACL) && acl) { -+ if (IS_POSIXACL(inode) && acl) { - struct posix_acl *clone; - mode_t mode; - -@@ -328,7 +328,7 @@ ext4_acl_chmod(struct inode *inode) - - if (S_ISLNK(inode->i_mode)) - return -EOPNOTSUPP; -- if (!test_opt(inode->i_sb, POSIX_ACL)) -+ if (!IS_POSIXACL(inode)) - return 0; - acl = ext4_get_acl(inode, ACL_TYPE_ACCESS); - if (IS_ERR(acl) || !acl) -@@ -370,7 +370,7 @@ ext4_xattr_list_acl_access(struct dentry - { - const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS); - -- if (!test_opt(dentry->d_sb, POSIX_ACL)) -+ if (!IS_POSIXACL(dentry->d_inode)) - return 0; - if (list && size <= list_len) - memcpy(list, POSIX_ACL_XATTR_ACCESS, size); -@@ -383,7 +383,7 @@ ext4_xattr_list_acl_default(struct dentr - { - const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT); - -- if (!test_opt(dentry->d_sb, POSIX_ACL)) -+ if (!IS_POSIXACL(dentry->d_inode)) - return 0; - if (list && size <= list_len) - memcpy(list, POSIX_ACL_XATTR_DEFAULT, size); -@@ -399,7 +399,7 @@ ext4_xattr_get_acl(struct dentry *dentry - - 
if (strcmp(name, "") != 0) - return -EINVAL; -- if (!test_opt(dentry->d_sb, POSIX_ACL)) -+ if (!IS_POSIXACL(dentry->d_inode)) - return -EOPNOTSUPP; - - acl = ext4_get_acl(dentry->d_inode, type); -@@ -424,7 +424,7 @@ ext4_xattr_set_acl(struct dentry *dentry - - if (strcmp(name, "") != 0) - return -EINVAL; -- if (!test_opt(inode->i_sb, POSIX_ACL)) -+ if (!IS_POSIXACL(dentry->d_inode)) - return -EOPNOTSUPP; - if (!is_owner_or_cap(inode)) - return -EPERM; ---- a/fs/ext4/ext4.h -+++ b/fs/ext4/ext4.h -@@ -875,7 +875,6 @@ struct ext4_inode_info { - #define EXT4_MOUNT_UPDATE_JOURNAL 0x01000 /* Update the journal format */ - #define EXT4_MOUNT_NO_UID32 0x02000 /* Disable 32-bit UIDs */ - #define EXT4_MOUNT_XATTR_USER 0x04000 /* Extended user attributes */ --#define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */ - #define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */ - #define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */ - #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ ---- a/fs/ext4/super.c -+++ b/fs/ext4/super.c -@@ -974,9 +974,9 @@ static int ext4_show_options(struct seq_ - } - #endif - #ifdef CONFIG_EXT4_FS_POSIX_ACL -- if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL)) -+ if ((sb->s_flags & MS_POSIXACL) && !(def_mount_opts & EXT4_DEFM_ACL)) - seq_puts(seq, ",acl"); -- if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL)) -+ if (!(sb->s_flags & MS_POSIXACL) && (def_mount_opts & EXT4_DEFM_ACL)) - seq_puts(seq, ",noacl"); - #endif - if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { -@@ -1481,10 +1481,10 @@ static int parse_options(char *options, - #endif - #ifdef CONFIG_EXT4_FS_POSIX_ACL - case Opt_acl: -- set_opt(sb, POSIX_ACL); -+ sb->s_flags |= MS_POSIXACL; - break; - case Opt_noacl: -- clear_opt(sb, POSIX_ACL); -+ sb->s_flags &= ~MS_POSIXACL; - break; - #else - case Opt_acl: -@@ -2644,7 +2644,7 @@ static int ext4_fill_super(struct super_ - #endif - #ifdef 
CONFIG_EXT4_FS_POSIX_ACL - if (def_mount_opts & EXT4_DEFM_ACL) -- set_opt(sb, POSIX_ACL); -+ sb->s_flags |= MS_POSIXACL; - #endif - if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) - set_opt(sb, JOURNAL_DATA); -@@ -2691,9 +2691,6 @@ static int ext4_fill_super(struct super_ - &journal_ioprio, NULL, 0)) - goto failed_mount; - -- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | -- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); -- - if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && - (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) || - EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) || -@@ -3753,9 +3750,6 @@ static int ext4_remount(struct super_blo - if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) - ext4_abort(sb, "Abort forced by user"); - -- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | -- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); -- - es = sbi->s_es; - - if (sbi->s_journal) { diff --git a/patches.suse/0016-ext4-Implement-richacl-support-in-ext4.patch b/patches.suse/0016-ext4-Implement-richacl-support-in-ext4.patch deleted file mode 100644 index 74601f4..0000000 --- a/patches.suse/0016-ext4-Implement-richacl-support-in-ext4.patch +++ /dev/null @@ -1,690 +0,0 @@ -From: Aneesh Kumar K.V -Date: Fri, 11 Jun 2010 16:12:52 +0530 -Subject: [PATCH 16/16] ext4: Implement richacl support in ext4 -Patch-mainline: not yet - -Support the richacl permission model in ext4. The richacls are stored -in "system.richacl" xattrs. 
- -Signed-off-by: Aneesh Kumar K.V -Signed-off-by: Andreas Gruenbacher ---- - fs/ext4/Kconfig | 10 + - fs/ext4/Makefile | 1 - fs/ext4/ext4.h | 4 - fs/ext4/file.c | 4 - fs/ext4/ialloc.c | 7 + - fs/ext4/inode.c | 19 ++- - fs/ext4/namei.c | 7 + - fs/ext4/richacl.c | 293 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ - fs/ext4/richacl.h | 56 ++++++++++ - fs/ext4/super.c | 45 ++++++-- - fs/ext4/xattr.c | 6 + - fs/ext4/xattr.h | 5 - 12 files changed, 441 insertions(+), 16 deletions(-) - create mode 100644 fs/ext4/richacl.c - create mode 100644 fs/ext4/richacl.h - ---- a/fs/ext4/Kconfig -+++ b/fs/ext4/Kconfig -@@ -83,3 +83,13 @@ config EXT4_DEBUG - - If you select Y here, then you will be able to turn on debugging - with a command such as "echo 1 > /sys/kernel/debug/ext4/mballoc-debug" -+ -+config EXT4_FS_RICHACL -+ bool "Ext4 Rich Access Control Lists (EXPERIMENTAL)" -+ depends on EXT4_FS_XATTR && EXPERIMENTAL -+ select FS_RICHACL -+ help -+ Rich ACLs are an implementation of NFSv4 ACLs, extended by file masks -+ to fit into the standard POSIX file permission model. They are -+ designed to work seamlessly locally as well as across the NFSv4 and -+ CIFS/SMB2 network file system protocols. ---- a/fs/ext4/Makefile -+++ b/fs/ext4/Makefile -@@ -11,3 +11,4 @@ ext4-y := balloc.o bitmap.o dir.o file.o - ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o - ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o - ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o -+ext4-$(CONFIG_EXT4_FS_RICHACL) += richacl.o ---- a/fs/ext4/ext4.h -+++ b/fs/ext4/ext4.h -@@ -841,6 +841,10 @@ struct ext4_inode_info { - */ - tid_t i_sync_tid; - tid_t i_datasync_tid; -+#ifdef CONFIG_EXT4_FS_RICHACL -+ struct richacl *i_richacl; -+#endif -+ - }; - - /* ---- a/fs/ext4/file.c -+++ b/fs/ext4/file.c -@@ -28,6 +28,7 @@ - #include "ext4_jbd2.h" - #include "xattr.h" - #include "acl.h" -+#include "richacl.h" - - /* - * Called when an inode is released. 
Note that this is different -@@ -161,5 +162,8 @@ const struct inode_operations ext4_file_ - #endif - .check_acl = ext4_check_acl, - .fiemap = ext4_fiemap, -+ .permission = ext4_permission, -+ .may_create = ext4_may_create, -+ .may_delete = ext4_may_delete, - }; - ---- a/fs/ext4/ialloc.c -+++ b/fs/ext4/ialloc.c -@@ -28,6 +28,7 @@ - #include "ext4_jbd2.h" - #include "xattr.h" - #include "acl.h" -+#include "richacl.h" - - #include - -@@ -1023,7 +1024,11 @@ got: - if (err) - goto fail_drop; - -- err = ext4_init_acl(handle, inode, dir); -+ if (EXT4_IS_RICHACL(dir)) -+ err = ext4_init_richacl(handle, inode, dir); -+ else -+ err = ext4_init_acl(handle, inode, dir); -+ - if (err) - goto fail_free_drop; - ---- a/fs/ext4/inode.c -+++ b/fs/ext4/inode.c -@@ -45,6 +45,7 @@ - #include "xattr.h" - #include "acl.h" - #include "ext4_extents.h" -+#include "richacl.h" - - #include - -@@ -5041,6 +5042,9 @@ struct inode *ext4_iget(struct super_blo - inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); - - ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ -+#ifdef CONFIG_EXT4_FS_RICHACL -+ ei->i_richacl = EXT4_RICHACL_NOT_CACHED; -+#endif - ei->i_dir_start_lookup = 0; - ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); - /* We now have enough fields to check if the inode was active or not. 
-@@ -5466,7 +5470,11 @@ int ext4_setattr(struct dentry *dentry, - int orphan = 0; - const unsigned int ia_valid = attr->ia_valid; - -- error = inode_change_ok(inode, attr); -+ if (EXT4_IS_RICHACL(inode)) -+ error = richacl_inode_change_ok(inode, attr, -+ ext4_richacl_permission); -+ else -+ error = inode_change_ok(inode, attr); - if (error) - return error; - -@@ -5563,9 +5571,12 @@ int ext4_setattr(struct dentry *dentry, - if (orphan && inode->i_nlink) - ext4_orphan_del(NULL, inode); - -- if (!rc && (ia_valid & ATTR_MODE)) -- rc = ext4_acl_chmod(inode); -- -+ if (!rc && (ia_valid & ATTR_MODE)) { -+ if (EXT4_IS_RICHACL(inode)) -+ rc = ext4_richacl_chmod(inode); -+ else -+ rc = ext4_acl_chmod(inode); -+ } - err_out: - ext4_std_error(inode->i_sb, error); - if (!error) ---- a/fs/ext4/namei.c -+++ b/fs/ext4/namei.c -@@ -39,6 +39,7 @@ - - #include "xattr.h" - #include "acl.h" -+#include "richacl.h" - - /* - * define how far ahead to read directories while searching them. -@@ -2522,6 +2523,9 @@ const struct inode_operations ext4_dir_i - #endif - .check_acl = ext4_check_acl, - .fiemap = ext4_fiemap, -+ .permission = ext4_permission, -+ .may_create = ext4_may_create, -+ .may_delete = ext4_may_delete, - }; - - const struct inode_operations ext4_special_inode_operations = { -@@ -2533,4 +2537,7 @@ const struct inode_operations ext4_speci - .removexattr = generic_removexattr, - #endif - .check_acl = ext4_check_acl, -+ .permission = ext4_permission, -+ .may_create = ext4_may_create, -+ .may_delete = ext4_may_delete, - }; ---- /dev/null -+++ b/fs/ext4/richacl.c -@@ -0,0 +1,293 @@ -+/* -+ * Copyright IBM Corporation, 2010 -+ * Author Aneesh Kumar K.V -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2.1 of the GNU Lesser General Public License -+ * as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+ * -+ */ -+ -+#include -+#include -+#include -+ -+#include "ext4.h" -+#include "ext4_jbd2.h" -+#include "xattr.h" -+#include "acl.h" -+#include "richacl.h" -+ -+static inline struct richacl * -+ext4_iget_richacl(struct inode *inode) -+{ -+ struct richacl *acl = EXT4_RICHACL_NOT_CACHED; -+ struct ext4_inode_info *ei = EXT4_I(inode); -+ -+ spin_lock(&inode->i_lock); -+ if (ei->i_richacl != EXT4_RICHACL_NOT_CACHED) -+ acl = richacl_get(ei->i_richacl); -+ spin_unlock(&inode->i_lock); -+ -+ return acl; -+} -+ -+static inline void -+ext4_iset_richacl(struct inode *inode, struct richacl *acl) -+{ -+ struct ext4_inode_info *ei = EXT4_I(inode); -+ -+ spin_lock(&inode->i_lock); -+ if (ei->i_richacl != EXT4_RICHACL_NOT_CACHED) -+ richacl_put(ei->i_richacl); -+ ei->i_richacl = richacl_get(acl); -+ spin_unlock(&inode->i_lock); -+} -+ -+static struct richacl * -+ext4_get_richacl(struct inode *inode) -+{ -+ const int name_index = EXT4_XATTR_INDEX_RICHACL; -+ void *value = NULL; -+ struct richacl *acl; -+ int retval; -+ -+ if (!IS_RICHACL(inode)) -+ return ERR_PTR(-EOPNOTSUPP); -+ acl = ext4_iget_richacl(inode); -+ if (acl != EXT4_RICHACL_NOT_CACHED) -+ return acl; -+ retval = ext4_xattr_get(inode, name_index, "", NULL, 0); -+ if (retval > 0) { -+ value = kmalloc(retval, GFP_KERNEL); -+ if (!value) -+ return ERR_PTR(-ENOMEM); -+ retval = ext4_xattr_get(inode, name_index, "", value, retval); -+ } -+ if (retval > 0) { -+ acl = richacl_from_xattr(value, retval); -+ if (acl == ERR_PTR(-EINVAL)) -+ acl = ERR_PTR(-EIO); -+ } else if (retval == -ENODATA || retval == -ENOSYS) -+ acl = NULL; -+ else -+ acl = ERR_PTR(retval); -+ kfree(value); -+ -+ if (!IS_ERR_OR_NULL(acl)) -+ ext4_iset_richacl(inode, acl); -+ -+ return acl; -+} -+ -+static int -+ext4_set_richacl(handle_t *handle, struct 
inode *inode, struct richacl *acl) -+{ -+ const int name_index = EXT4_XATTR_INDEX_RICHACL; -+ size_t size = 0; -+ void *value = NULL; -+ int retval; -+ -+ if (acl) { -+ mode_t mode = inode->i_mode; -+ if (richacl_equiv_mode(acl, &mode) == 0) { -+ inode->i_mode = mode; -+ ext4_mark_inode_dirty(handle, inode); -+ acl = NULL; -+ } -+ } -+ if (acl) { -+ size = richacl_xattr_size(acl); -+ value = kmalloc(size, GFP_KERNEL); -+ if (!value) -+ return -ENOMEM; -+ richacl_to_xattr(acl, value); -+ } -+ if (handle) -+ retval = ext4_xattr_set_handle(handle, inode, name_index, "", -+ value, size, 0); -+ else -+ retval = ext4_xattr_set(inode, name_index, "", value, size, 0); -+ kfree(value); -+ if (!retval) -+ ext4_iset_richacl(inode, acl); -+ -+ return retval; -+} -+ -+int -+ext4_richacl_permission(struct inode *inode, unsigned int mask) -+{ -+ struct richacl *acl; -+ int retval; -+ -+ if (!IS_RICHACL(inode)) -+ BUG(); -+ -+ acl = ext4_get_richacl(inode); -+ if (acl && IS_ERR(acl)) -+ retval = PTR_ERR(acl); -+ else { -+ retval = richacl_inode_permission(inode, acl, mask); -+ richacl_put(acl); -+ } -+ -+ return retval; -+} -+ -+int ext4_permission(struct inode *inode, int mask) -+{ -+ if (IS_RICHACL(inode)) -+ return ext4_richacl_permission(inode, -+ richacl_want_to_mask(mask)); -+ else -+ return generic_permission(inode, mask, ext4_check_acl); -+} -+ -+int ext4_may_create(struct inode *dir, int isdir) -+{ -+ return richacl_may_create(dir, isdir, ext4_richacl_permission); -+} -+ -+int ext4_may_delete(struct inode *dir, struct inode *inode, int replace) -+{ -+ return richacl_may_delete(dir, inode, replace, ext4_richacl_permission); -+} -+ -+int -+ext4_init_richacl(handle_t *handle, struct inode *inode, struct inode *dir) -+{ -+ struct richacl *dir_acl = NULL; -+ -+ if (!S_ISLNK(inode->i_mode)) { -+ dir_acl = ext4_get_richacl(dir); -+ if (IS_ERR(dir_acl)) -+ return PTR_ERR(dir_acl); -+ } -+ if (dir_acl) { -+ struct richacl *acl; -+ int retval; -+ -+ acl = richacl_inherit(dir_acl, 
inode); -+ richacl_put(dir_acl); -+ -+ retval = PTR_ERR(acl); -+ if (acl && !IS_ERR(acl)) { -+ retval = ext4_set_richacl(handle, inode, acl); -+ richacl_put(acl); -+ } -+ return retval; -+ } else { -+ inode->i_mode &= ~current_umask(); -+ return 0; -+ } -+} -+ -+int -+ext4_richacl_chmod(struct inode *inode) -+{ -+ struct richacl *acl; -+ int retval; -+ -+ if (S_ISLNK(inode->i_mode)) -+ return -EOPNOTSUPP; -+ acl = ext4_get_richacl(inode); -+ if (IS_ERR_OR_NULL(acl)) -+ return PTR_ERR(acl); -+ acl = richacl_chmod(acl, inode->i_mode); -+ if (IS_ERR(acl)) -+ return PTR_ERR(acl); -+ retval = ext4_set_richacl(NULL, inode, acl); -+ richacl_put(acl); -+ -+ return retval; -+} -+ -+static size_t -+ext4_xattr_list_richacl(struct dentry *dentry, char *list, size_t list_len, -+ const char *name, size_t name_len, int type) -+{ -+ const size_t size = sizeof(RICHACL_XATTR); -+ if (!IS_RICHACL(dentry->d_inode)) -+ return 0; -+ if (list && size <= list_len) -+ memcpy(list, RICHACL_XATTR, size); -+ return size; -+} -+ -+static int -+ext4_xattr_get_richacl(struct dentry *dentry, const char *name, void *buffer, -+ size_t buffer_size, int type) -+{ -+ struct richacl *acl; -+ size_t size; -+ -+ if (strcmp(name, "") != 0) -+ return -EINVAL; -+ acl = ext4_get_richacl(dentry->d_inode); -+ if (IS_ERR(acl)) -+ return PTR_ERR(acl); -+ if (acl == NULL) -+ return -ENODATA; -+ size = richacl_xattr_size(acl); -+ if (buffer) { -+ if (size > buffer_size) -+ return -ERANGE; -+ richacl_to_xattr(acl, buffer); -+ } -+ richacl_put(acl); -+ -+ return size; -+} -+ -+static int -+ext4_xattr_set_richacl(struct dentry *dentry, const char *name, -+ const void *value, size_t size, int flags, int type) -+{ -+ handle_t *handle; -+ struct richacl *acl = NULL; -+ int retval, retries = 0; -+ struct inode *inode = dentry->d_inode; -+ -+ if (!IS_RICHACL(dentry->d_inode)) -+ return -EOPNOTSUPP; -+ if (S_ISLNK(inode->i_mode)) -+ return -EOPNOTSUPP; -+ if (strcmp(name, "") != 0) -+ return -EINVAL; -+ if (current_fsuid() 
!= inode->i_uid && -+ ext4_richacl_permission(inode, ACE4_WRITE_ACL) && -+ !capable(CAP_FOWNER)) -+ return -EPERM; -+ if (value) { -+ acl = richacl_from_xattr(value, size); -+ if (IS_ERR(acl)) -+ return PTR_ERR(acl); -+ -+ inode->i_mode &= ~S_IRWXUGO; -+ inode->i_mode |= richacl_masks_to_mode(acl); -+ } -+ -+retry: -+ handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb)); -+ if (IS_ERR(handle)) -+ return PTR_ERR(handle); -+ ext4_mark_inode_dirty(handle, inode); -+ retval = ext4_set_richacl(handle, inode, acl); -+ ext4_journal_stop(handle); -+ if (retval == ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) -+ goto retry; -+ richacl_put(acl); -+ return retval; -+} -+ -+const struct xattr_handler ext4_richacl_xattr_handler = { -+ .prefix = RICHACL_XATTR, -+ .list = ext4_xattr_list_richacl, -+ .get = ext4_xattr_get_richacl, -+ .set = ext4_xattr_set_richacl, -+}; ---- /dev/null -+++ b/fs/ext4/richacl.h -@@ -0,0 +1,56 @@ -+/* -+ * Copyright IBM Corporation, 2010 -+ * Author Aneesh Kumar K.V -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2.1 of the GNU Lesser General Public License -+ * as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it would be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
-+ * -+ */ -+ -+#ifndef __FS_EXT4_RICHACL_H -+#define __FS_EXT4_RICHACL_H -+ -+#include -+ -+#ifdef CONFIG_EXT4_FS_RICHACL -+ -+# define EXT4_IS_RICHACL(inode) IS_RICHACL(inode) -+ -+/* Value for i_richacl if RICHACL has not been cached */ -+# define EXT4_RICHACL_NOT_CACHED ((void *)-1) -+ -+extern int ext4_permission(struct inode *, int); -+extern int ext4_richacl_permission(struct inode *, unsigned int); -+extern int ext4_may_create(struct inode *, int); -+extern int ext4_may_delete(struct inode *, struct inode *, int); -+extern int ext4_init_richacl(handle_t *, struct inode *, struct inode *); -+extern int ext4_richacl_chmod(struct inode *); -+ -+#else /* CONFIG_FS_EXT4_RICHACL */ -+ -+# define EXT4_IS_RICHACL(inode) (0) -+ -+# define ext4_permission NULL -+# define ext4_may_create NULL -+# define ext4_may_delete NULL -+# define ext4_richacl_permission NULL -+ -+static inline int -+ext4_init_richacl(handle_t *handle, struct inode *inode, struct inode *dir) -+{ -+ return 0; -+} -+ -+static inline int -+ext4_richacl_chmod(struct inode *inode) -+{ -+ return 0; -+} -+ -+#endif /* CONFIG_FS_EXT4_RICHACL */ -+#endif /* __FS_EXT4_RICHACL_H */ ---- a/fs/ext4/super.c -+++ b/fs/ext4/super.c -@@ -46,6 +46,7 @@ - #include "xattr.h" - #include "acl.h" - #include "mballoc.h" -+#include "richacl.h" - - #define CREATE_TRACE_POINTS - #include -@@ -795,7 +796,9 @@ static struct inode *ext4_alloc_inode(st - ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS); - if (!ei) - return NULL; -- -+#ifdef CONFIG_EXT4_FS_RICHACL -+ ei->i_richacl = EXT4_RICHACL_NOT_CACHED; -+#endif - ei->vfs_inode.i_version = 1; - ei->vfs_inode.i_data.writeback_index = 0; - memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache)); -@@ -873,6 +876,13 @@ void ext4_clear_inode(struct inode *inod - invalidate_inode_buffers(inode); - end_writeback(inode); - dquot_drop(inode); -+#ifdef CONFIG_EXT4_FS_RICHACL -+ if (EXT4_I(inode)->i_richacl && -+ EXT4_I(inode)->i_richacl != EXT4_RICHACL_NOT_CACHED) { -+ 
richacl_put(EXT4_I(inode)->i_richacl); -+ EXT4_I(inode)->i_richacl = EXT4_RICHACL_NOT_CACHED; -+ } -+#endif - ext4_discard_preallocations(inode); - if (EXT4_I(inode)->jinode) { - jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), -@@ -973,10 +983,13 @@ static int ext4_show_options(struct seq_ - seq_puts(seq, ",nouser_xattr"); - } - #endif --#ifdef CONFIG_EXT4_FS_POSIX_ACL -- if ((sb->s_flags & MS_POSIXACL) && !(def_mount_opts & EXT4_DEFM_ACL)) -- seq_puts(seq, ",acl"); -- if (!(sb->s_flags & MS_POSIXACL) && (def_mount_opts & EXT4_DEFM_ACL)) -+#if defined(CONFIG_EXT4_FS_POSIX_ACL) || defined(CONFIG_EXT4_FS_RICHACL) -+ if (sb->s_flags & MS_POSIXACL) { -+ if (!(def_mount_opts & EXT4_DEFM_ACL)) -+ seq_puts(seq, ",acl"); -+ } else if (sb->s_flags & MS_RICHACL) -+ seq_puts(seq, ",richacl"); -+ else if (def_mount_opts & EXT4_DEFM_ACL) - seq_puts(seq, ",noacl"); - #endif - if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { -@@ -1203,7 +1216,7 @@ enum { - Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, - Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, - Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov, -- Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, -+ Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_richacl, Opt_noacl, - Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_nobh, Opt_bh, - Opt_commit, Opt_min_batch_time, Opt_max_batch_time, - Opt_journal_update, Opt_journal_dev, -@@ -1241,6 +1254,7 @@ static const match_table_t tokens = { - {Opt_user_xattr, "user_xattr"}, - {Opt_nouser_xattr, "nouser_xattr"}, - {Opt_acl, "acl"}, -+ {Opt_richacl, "richacl"}, - {Opt_noacl, "noacl"}, - {Opt_noload, "noload"}, - {Opt_noload, "norecovery"}, -@@ -1479,17 +1493,26 @@ static int parse_options(char *options, - ext4_msg(sb, KERN_ERR, "(no)user_xattr options not supported"); - break; - #endif --#ifdef CONFIG_EXT4_FS_POSIX_ACL -+#if defined(CONFIG_EXT4_FS_POSIX_ACL) || defined(CONFIG_EXT4_FS_RICHACL) -+# ifdef CONFIG_EXT4_FS_POSIX_ACL - 
case Opt_acl: -- sb->s_flags |= MS_POSIXACL; -+ if (!(sb->s_flags & MS_RICHACL)) -+ sb->s_flags |= MS_POSIXACL; - break; -- case Opt_noacl: -+# endif -+# ifdef CONFIG_EXT4_FS_RICHACL -+ case Opt_richacl: - sb->s_flags &= ~MS_POSIXACL; -+ sb->s_flags |= MS_RICHACL; -+ break; -+# endif -+ case Opt_noacl: -+ sb->s_flags &= ~(MS_POSIXACL | MS_RICHACL); - break; - #else - case Opt_acl: - case Opt_noacl: -- ext4_msg(sb, KERN_ERR, "(no)acl options not supported"); -+ ext4_msg(sb, KERN_ERR, "(no)acl/richacl options not supported"); - break; - #endif - case Opt_journal_update: -@@ -2642,7 +2665,7 @@ static int ext4_fill_super(struct super_ - if (def_mount_opts & EXT4_DEFM_XATTR_USER) - set_opt(sb, XATTR_USER); - #endif --#ifdef CONFIG_EXT4_FS_POSIX_ACL -+#if defined(CONFIG_EXT4_FS_POSIX_ACL) - if (def_mount_opts & EXT4_DEFM_ACL) - sb->s_flags |= MS_POSIXACL; - #endif ---- a/fs/ext4/xattr.c -+++ b/fs/ext4/xattr.c -@@ -107,6 +107,9 @@ static const struct xattr_handler *ext4_ - #ifdef CONFIG_EXT4_FS_SECURITY - [EXT4_XATTR_INDEX_SECURITY] = &ext4_xattr_security_handler, - #endif -+#ifdef CONFIG_EXT4_FS_RICHACL -+ [EXT4_XATTR_INDEX_RICHACL] = &ext4_richacl_xattr_handler, -+#endif - }; - - const struct xattr_handler *ext4_xattr_handlers[] = { -@@ -119,6 +122,9 @@ const struct xattr_handler *ext4_xattr_h - #ifdef CONFIG_EXT4_FS_SECURITY - &ext4_xattr_security_handler, - #endif -+#ifdef CONFIG_EXT4_FS_RICHACL -+ &ext4_richacl_xattr_handler, -+#endif - NULL - }; - ---- a/fs/ext4/xattr.h -+++ b/fs/ext4/xattr.h -@@ -21,6 +21,7 @@ - #define EXT4_XATTR_INDEX_TRUSTED 4 - #define EXT4_XATTR_INDEX_LUSTRE 5 - #define EXT4_XATTR_INDEX_SECURITY 6 -+#define EXT4_XATTR_INDEX_RICHACL 7 - - struct ext4_xattr_header { - __le32 h_magic; /* magic number for identification */ -@@ -70,6 +71,10 @@ extern const struct xattr_handler ext4_x - extern const struct xattr_handler ext4_xattr_acl_access_handler; - extern const struct xattr_handler ext4_xattr_acl_default_handler; - extern const struct 
xattr_handler ext4_xattr_security_handler; -+extern const struct xattr_handler ext4_xattr_acl_access_handler; -+extern const struct xattr_handler ext4_xattr_acl_default_handler; -+extern const struct xattr_handler ext4_xattr_security_handler; -+extern const struct xattr_handler ext4_richacl_xattr_handler; - - extern ssize_t ext4_listxattr(struct dentry *, char *, size_t); - diff --git a/patches.suse/8250-sysrq-ctrl_o.patch b/patches.suse/8250-sysrq-ctrl_o.patch deleted file mode 100644 index da7db84..0000000 --- a/patches.suse/8250-sysrq-ctrl_o.patch +++ /dev/null @@ -1,135 +0,0 @@ -Subject: no sysrq on Cell QS21/QS22 serial console -From: olh@suse.de -References: 422987 - LTC47675, 96313 - LTC16841 -Patch-mainline: not yet - - -a POWER4 system in 'full-system-partition' mode has the console device -on ttyS0. But the user interface to the Linux system console may still -be on the hardware management console (HMC). If this is the case, there -is no way to send a break to trigger a sysrq. -Other setups do already use 'ctrl o' to trigger sysrq. This includes iSeries -virtual console on tty1 or hvc0, and pSeries LPAR console on hvc0 or hvsi0. - -This affects also Cell Blades QS2x. - -To limit the 'ctrl o' only to the affected systems, query the model property -in the device-tree. The patch makes the serial console not-eight-bit-clean. -Booting with 'console=ttyS0' will disable 'ctrl o', it is only enabled -with console autodetection. 
- -'ctrl o' is currently mapped to 'flush output', see 'stty -a' - -Signed-off-by: Olaf Hering ---- - arch/powerpc/include/asm/serial.h | 6 ++++ - arch/powerpc/kernel/legacy_serial.c | 52 ++++++++++++++++++++++++++++++++++++ - drivers/tty/serial/8250.c | 6 ++++ - 3 files changed, 64 insertions(+) - ---- a/arch/powerpc/include/asm/serial.h -+++ b/arch/powerpc/include/asm/serial.h -@@ -15,6 +15,12 @@ - /* Default baud base if not found in device-tree */ - #define BASE_BAUD ( 1843200 / 16 ) - -+#if defined(SUPPORT_SYSRQ) && defined(CONFIG_PPC_PSERIES) -+#undef arch_8250_sysrq_via_ctrl_o -+extern int do_sysrq_via_ctrl_o; -+#define arch_8250_sysrq_via_ctrl_o(ch, port) ((ch) == '\x0f' && do_sysrq_via_ctrl_o && uart_handle_break((port))) -+#endif -+ - #ifdef CONFIG_PPC_UDBG_16550 - extern void find_legacy_serial_ports(void); - #else ---- a/arch/powerpc/kernel/legacy_serial.c -+++ b/arch/powerpc/kernel/legacy_serial.c -@@ -495,6 +495,55 @@ device_initcall(serial_dev_init); - - - #ifdef CONFIG_SERIAL_8250_CONSOLE -+#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SERIAL_8250_CONSOLE) -+/* -+ * Handle the SysRq ^O Hack also via ttyS0 on POWER4 systems -+ * but only on the system console, see asm/serial.h -+ * If they run in FullSystemPartition mode, the firmware console comes in via ttyS0 -+ * But BREAK does not work via the HMC, to trigger sysrq. 
-+ * The same is required for Cell blades -+ */ -+int do_sysrq_via_ctrl_o; -+static const char __initdata *need_ctrl_o[] = { -+ "IBM,079", /* QS2x */ -+ "IBM,0792-32G", /* QS21 */ -+ "IBM,0793-2RZ", /* QS22 */ -+ "IBM,7040-681", /* p690 */ -+ "IBM,7040-671", /* p670 */ -+ "IBM,7039-651", /* p655 */ -+ "IBM,7038-6M2", /* p650 */ -+ "IBM,7028-6E4", /* p630 tower */ -+ "IBM,7028-6C4", /* p630 rack */ -+ "IBM,7029-6E3", /* p615 tower */ -+ "IBM,7029-6C3", /* p615 rack */ -+ NULL -+}; -+static void __init detect_need_for_ctrl_o(void) -+{ -+ struct device_node *root; -+ const char *model, *p; -+ int i; -+ -+ root = of_find_node_by_path("/"); -+ if (!root) -+ return; -+ model = of_get_property(root, "model", NULL); -+ if (model) { -+ i = 0; -+ while (need_ctrl_o[i]) { -+ p = need_ctrl_o[i]; -+ if (strncmp(p, model, strlen(p)) == 0) { -+ do_sysrq_via_ctrl_o = 1; -+ DBG("Enable sysrq via CTRL o on model %s\n", model); -+ break; -+ } -+ i++; -+ } -+ } -+ of_node_put(root); -+} -+#endif -+ - /* - * This is called very early, as part of console_init() (typically just after - * time_init()). This function is respondible for trying to find a good -@@ -563,6 +612,9 @@ static int __init check_legacy_serial_co - if (i >= legacy_serial_count) - goto not_found; - -+#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SERIAL_8250_CONSOLE) -+ detect_need_for_ctrl_o(); -+#endif - of_node_put(prom_stdout); - - DBG("Found serial console at ttyS%d\n", offset); ---- a/drivers/tty/serial/8250.c -+++ b/drivers/tty/serial/8250.c -@@ -102,6 +102,8 @@ static unsigned int skip_txen_test; /* f - #define CONFIG_SERIAL_MANY_PORTS 1 - #endif - -+#define arch_8250_sysrq_via_ctrl_o(a,b) 0 -+ - /* - * HUB6 is always on. This will be removed once the header - * files have been cleaned. 
-@@ -1423,7 +1425,11 @@ receive_chars(struct uart_8250_port *up, - - do { - if (likely(lsr & UART_LSR_DR)) -+ { - ch = serial_inp(up, UART_RX); -+ if (arch_8250_sysrq_via_ctrl_o(ch, &up->port)) -+ goto ignore_char; -+ } - else - /* - * Intel 82571 has a Serial Over Lan device that will diff --git a/patches.suse/Cleanup-and-make-boot-splash-work-with-KMS.patch b/patches.suse/Cleanup-and-make-boot-splash-work-with-KMS.patch deleted file mode 100644 index c9484a4..0000000 --- a/patches.suse/Cleanup-and-make-boot-splash-work-with-KMS.patch +++ /dev/null @@ -1,1500 +0,0 @@ -From 1319de907e12d28894d8db0b3215a0443ff4bd5d Mon Sep 17 00:00:00 2001 -From: Egbert Eich -Date: Thu, 22 Oct 2009 13:32:11 +0200 -Subject: [PATCH] Cleanup and make boot splash work with KMS -References: bnc#544645 -Patch-mainline: not yet - - - Fix API: remove unneeded function argument - - Remove unneeded function splash_putc(). - - Remove need for 2 framebuffer basis: when swiching from silent to - verbose rely on update_region() to redraw the verbose picture. - This removes the need to double the framebuffer size when using - splash. - - Use worker to switch to verbose mode. - - Add support for 24 and 32bpp (24bpp still disabled due to lack of testing). 
- -Acked-by: Michal Marek ---- - drivers/tty/vt/vt.c | 10 - drivers/video/bootsplash/bootsplash.c | 407 +++++++++++++++++++++--------- - drivers/video/bootsplash/bootsplash.h | 23 - - drivers/video/bootsplash/decode-jpg.c | 50 +++ - drivers/video/bootsplash/render.c | 451 +++++++++++++++++++++------------- - drivers/video/console/bitblit.c | 12 - drivers/video/console/fbcon.c | 4 - drivers/video/vesafb.c | 8 - include/linux/fb.h | 3 - 9 files changed, 652 insertions(+), 316 deletions(-) - ---- a/drivers/tty/vt/vt.c -+++ b/drivers/tty/vt/vt.c -@@ -4216,7 +4216,7 @@ void vcs_scr_updated(struct vc_data *vc) - #ifdef CONFIG_BOOTSPLASH - void con_remap_def_color(struct vc_data *vc, int new_color) - { -- unsigned short *sbuf = vc->vc_screenbuf; -+ unsigned short *sbuf = screenpos(vc, 0, 1); - unsigned c, len = vc->vc_screenbuf_size >> 1; - int old_color; - -@@ -4224,11 +4224,13 @@ void con_remap_def_color(struct vc_data - old_color = vc->vc_def_color << 8; - new_color <<= 8; - while(len--) { -- c = *sbuf; -+ c = scr_readw(sbuf); - if (((c ^ old_color) & 0xf000) == 0) -- *sbuf ^= (old_color ^ new_color) & 0xf000; -+ scr_writew(c ^ ((old_color ^ new_color) & 0xf000), sbuf); -+ *sbuf ^= (old_color ^ new_color) & 0xf000; - if (((c ^ old_color) & 0x0f00) == 0) -- *sbuf ^= (old_color ^ new_color) & 0x0f00; -+ scr_writew(c ^ ((old_color ^ new_color) & 0x0f00), sbuf); -+ *sbuf ^= (old_color ^ new_color) & 0x0f00; - sbuf++; - } - new_color >>= 8; ---- a/drivers/video/bootsplash/bootsplash.c -+++ b/drivers/video/bootsplash/bootsplash.c -@@ -19,6 +19,8 @@ - #include - #include - #include -+#include -+#include - - #include - #include -@@ -27,7 +29,12 @@ - #include "bootsplash.h" - #include "decode-jpg.h" - --extern struct fb_ops vesafb_ops; -+#ifndef DEBUG -+# define SPLASH_DEBUG(fmt, args...) -+#else -+# define SPLASH_DEBUG(fmt, args...) 
\ -+ printk(KERN_WARNING "%s: " fmt "\n",__FUNCTION__, ##args) -+#endif - extern signed char con2fb_map[MAX_NR_CONSOLES]; - - #define SPLASH_VERSION "3.1.6-2004/03/31" -@@ -113,18 +120,20 @@ static int boxextract(unsigned char *buf - return 12; - } - --static void boxit(unsigned char *pic, int bytes, unsigned char *buf, int num, int percent, int overpaint) -+static void boxit(unsigned char *pic, int bytes, unsigned char *buf, int num, int percent, int overpaint, int octpp) - { -- int x, y, i, p, doblend, r, g, b, a, add; -+ int x, y, p, doblend, r, g, b, a, add; -+ unsigned int i = 0; - unsigned short data1[4]; - unsigned char cols1[16]; - unsigned short data2[4]; - unsigned char cols2[16]; - unsigned char *bufend; -- unsigned short *picp; -+ union pt picp; - unsigned int stipple[32], sti, stin, stinn, stixs, stixe, stiys, stiye; - int xs, xe, ys, ye, xo, yo; - -+ SPLASH_DEBUG(); - if (num == 0) - return; - bufend = buf + num * 12; -@@ -236,11 +245,21 @@ static void boxit(unsigned char *pic, in - } - add = (xs & 1); - add ^= (add ^ y) & 1 ? 1 : 3; /* 2x2 ordered dithering */ -- picp = (unsigned short *)(pic + xs * 2 + y * bytes); -+ picp.ub = (pic + xs * octpp + y * bytes); - for (x = xs; x <= xe; x++) { - if (!(sti & 0x80000000)) { - sti <<= 1; -- picp++; -+ switch (octpp) { -+ case 2: -+ picp.us++; -+ break; -+ case 3: -+ picp.ub += 3; -+ break; -+ case 4: -+ picp.ul++; -+ break; -+ } - add ^= 3; - continue; - } -@@ -255,18 +274,37 @@ static void boxit(unsigned char *pic, in - r = cols2[0]; - g = cols2[1]; - b = cols2[2]; -- if (a != 255) { -- i = *picp; -- r = ((i >> 8 & 0xf8) * (255 - a) + r * a) / 255; -- g = ((i >> 3 & 0xfc) * (255 - a) + g * a) / 255; -- b = ((i << 3 & 0xf8) * (255 - a) + b * a) / 255; -- } -- #define CLAMP(x) ((x) >= 256 ? 255 : (x)) -- i = ((CLAMP(r + add*2+1) & 0xf8) << 8) | -- ((CLAMP(g + add ) & 0xfc) << 3) | -- ((CLAMP(b + add*2+1) ) >> 3); -- *picp++ = i; -- add ^= 3; -+#define CLAMP(x) ((x) >= 256 ? 
255 : (x)) -+ switch (octpp) { -+ case 2: -+ i = *picp.us; -+ if (a != 255) { -+ r = ((i >> 8 & 0xf8) * (255 - a) + r * a) / 255; -+ g = ((i >> 3 & 0xfc) * (255 - a) + g * a) / 255; -+ b = ((i << 3 & 0xf8) * (255 - a) + b * a) / 255; -+ } -+ i = ((CLAMP(r + add*2+1) & 0xf8) << 8) | -+ ((CLAMP(g + add ) & 0xfc) << 3) | -+ ((CLAMP(b + add*2+1) ) >> 3); -+ *(picp.us++) = i; -+ break; -+ case 3: -+ *(picp.ub++) = CLAMP(a == 255 ? r : (((i & 0xff) * (255 - a) + r * a) / 255)); -+ *(picp.ub++) = CLAMP(a == 255 ? r : (((i >> 8 & 0xff) * (255 - a) + r * a) / 255)); -+ *(picp.ub++) = CLAMP(a == 255 ? r : (((i >> 16 & 0xff) * (255 - a) + r * a) / 255)); -+ break; -+ case 4: -+ i = *picp.ul; -+ if (a != 255) { -+ r = ((i >> 16 & 0xff) * (255 - a) + r * a) / 255; -+ g = ((i >> 8 & 0xff) * (255 - a) + r * a) / 255; -+ b = ((i & 0xff) * (255 - a) + r * a) / 255; -+ } -+ i = ((CLAMP(r) << 16) | (CLAMP(g) << 8) | (CLAMP(b))); -+ *(picp.ul++) = i; -+ break; -+ } -+ add ^= 3; - } - } - } -@@ -293,16 +331,14 @@ static int splash_check_jpeg(unsigned ch - - static void splash_free(struct vc_data *vc, struct fb_info *info) - { -- if (!vc->vc_splash_data) -- return; -- if (info->silent_screen_base) -- info->screen_base = info->silent_screen_base; -- info->silent_screen_base = 0; -- if (vc->vc_splash_data->splash_silentjpeg) -- vfree(vc->vc_splash_data->splash_sboxes); -- vfree(vc->vc_splash_data); -- vc->vc_splash_data = 0; -- info->splash_data = 0; -+ SPLASH_DEBUG(); -+ if (!vc->vc_splash_data) -+ return; -+ if (vc->vc_splash_data->splash_silentjpeg) -+ vfree(vc->vc_splash_data->splash_sboxes); -+ vfree(vc->vc_splash_data); -+ vc->vc_splash_data = 0; -+ info->splash_data = 0; - } - - static int splash_mkpenguin(struct splash_data *data, int pxo, int pyo, int pwi, int phe, int pr, int pg, int pb) -@@ -590,37 +626,69 @@ static int splash_getraw(unsigned char * - return -1; - } - --int splash_verbose(void) -+int splash_do_verbose(void) - { -- struct vc_data *vc; -- struct fb_info *info; -+ 
struct vc_data *vc; -+ struct fb_info *info; -+ int ret = 0; - -- if (!splash_usesilent) -- return 0; -+ SPLASH_DEBUG(); -+ if (!oops_in_progress) -+ console_lock(); - -- vc = vc_cons[0].d; -+ if (!splash_usesilent) -+ goto done; - -- if (!vc || !vc->vc_splash_data || !vc->vc_splash_data->splash_state) -- return 0; -- if (fg_console != vc->vc_num) -- return 0; -- if (!vc->vc_splash_data->splash_silentjpeg || !vc->vc_splash_data->splash_dosilent) -- return 0; -- vc->vc_splash_data->splash_dosilent = 0; -- info = registered_fb[(int)con2fb_map[0]]; -- if (!info->silent_screen_base) -+ vc = vc_cons[0].d; -+ -+ if (!vc || !vc->vc_splash_data || !vc->vc_splash_data->splash_state) -+ goto done; -+ if (fg_console != vc->vc_num) -+ goto done; -+ if (!vc->vc_splash_data->splash_silentjpeg) -+ goto done; -+ -+ if(!vc->vc_splash_data->splash_dosilent) -+ goto done; -+ vc->vc_splash_data->splash_dosilent = 0; -+ -+ info = registered_fb[(int)con2fb_map[0]]; -+ -+ if (!info->splash_data) -+ goto done; -+ -+ update_region(vc, -+ vc->vc_origin + vc->vc_size_row * vc->vc_top, -+ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2); -+ splash_clear_margins(vc, info, 0); -+ ret = 0; -+ -+ done: -+ if (!oops_in_progress) -+ console_unlock(); -+ -+ return ret; -+} -+ -+static void splash_verbose_callback(struct work_struct *ignored) -+{ -+ splash_do_verbose(); -+} -+ -+static DECLARE_WORK(splash_work, splash_verbose_callback); -+ -+int splash_verbose(void) -+{ -+ if (!oops_in_progress) -+ schedule_work(&splash_work); -+ else -+ return splash_do_verbose(); - return 0; -- splashcopy(info->silent_screen_base, info->screen_base, info->var.yres, info->var.xres, info->fix.line_length, info->fix.line_length); -- info->screen_base = info->silent_screen_base; -- info->silent_screen_base = 0; -- return 1; - } - - static void splash_off(struct fb_info *info) - { -- if (info->silent_screen_base) -- info->screen_base = info->silent_screen_base; -- info->silent_screen_base = 0; -+ SPLASH_DEBUG(); - 
info->splash_data = 0; - if (info->splash_pic) - vfree(info->splash_pic); -@@ -631,8 +699,9 @@ static void splash_off(struct fb_info *i - int splash_prepare(struct vc_data *vc, struct fb_info *info) - { - int err; -- int width, height, depth, size, sbytes; -+ int width, height, depth, octpp, size, sbytes; - -+ SPLASH_DEBUG("vc_num: %i", vc->vc_num); - if (!vc->vc_splash_data || !vc->vc_splash_data->splash_state) { - if (decdata) - vfree(decdata); -@@ -644,15 +713,19 @@ int splash_prepare(struct vc_data *vc, s - width = info->var.xres; - height = info->var.yres; - depth = info->var.bits_per_pixel; -- if (depth != 16) { /* Other targets might need fixing */ -+ octpp = (depth + 1) >> 3; -+ -+ if (depth == 24 || depth < 15) { /* Other targets might need fixing */ - splash_off(info); - return -2; - } - -- sbytes = ((width + 15) & ~15) * (depth >> 3); -+ sbytes = ((width + 15) & ~15) * octpp; - size = sbytes * ((height + 15) & ~15); -- if (size != info->splash_pic_size) -- splash_off(info); -+ if (size != info->splash_pic_size) { -+ vfree(info->splash_pic); -+ info->splash_pic = NULL; -+ } - if (!info->splash_pic) - info->splash_pic = vmalloc(size); - -@@ -668,38 +741,52 @@ int splash_prepare(struct vc_data *vc, s - if (vc->vc_splash_data->splash_silentjpeg && vc->vc_splash_data->splash_dosilent) { - /* fill area after framebuffer with other jpeg */ - if ((err = jpeg_decode(vc->vc_splash_data->splash_silentjpeg, info->splash_pic, -- ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) { -- printk(KERN_INFO "bootsplash: error while decompressing silent picture: %s (%d)\n", jpg_errors[err - 1], err); -- if (info->silent_screen_base) -- info->screen_base = info->silent_screen_base; -+ ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) { -+ printk(KERN_INFO "bootsplash: error while decompressing silent picture: %s (%d)\n", -+ jpg_errors[err - 1], err); - vc->vc_splash_data->splash_dosilent = 0; - } else { - if (vc->vc_splash_data->splash_sboxcount) -- 
boxit(info->splash_pic, sbytes, vc->vc_splash_data->splash_sboxes, -- vc->vc_splash_data->splash_sboxcount, vc->vc_splash_data->splash_percent, 0); -- -- if (!info->silent_screen_base) -- info->silent_screen_base = info->screen_base; -- splashcopy(info->silent_screen_base, info->splash_pic, info->var.yres, info->var.xres, info->fix.line_length, sbytes); -- info->screen_base = info->silent_screen_base + info->fix.line_length * info->var.yres; -+ boxit(info->splash_pic, -+ sbytes, -+ vc->vc_splash_data->splash_sboxes, -+ vc->vc_splash_data->splash_sboxcount, -+ vc->vc_splash_data->splash_percent, -+ 0, -+ octpp); -+ splashcopy(info->screen_base, -+ info->splash_pic, -+ info->var.yres, -+ info->var.xres, -+ info->fix.line_length, sbytes, -+ octpp ); - } -- } else if (info->silent_screen_base) -- info->screen_base = info->silent_screen_base; -+ } else -+ vc->vc_splash_data->splash_dosilent = 0; - - if ((err = jpeg_decode(vc->vc_splash_data->splash_jpeg, info->splash_pic, - ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) { -- printk(KERN_INFO "bootsplash: error while decompressing picture: %s (%d) .\n", jpg_errors[err - 1], err); -+ printk(KERN_INFO "bootsplash: error while decompressing picture: %s (%d) .\n", -+ jpg_errors[err - 1], err); - splash_off(info); - return -4; - } - info->splash_pic_size = size; -- info->splash_bytes = sbytes; -+ info->splash_pic_stride = sbytes; - if (vc->vc_splash_data->splash_boxcount) -- boxit(info->splash_pic, sbytes, vc->vc_splash_data->splash_boxes, vc->vc_splash_data->splash_boxcount, vc->vc_splash_data->splash_percent, 0); -+ boxit(info->splash_pic, -+ sbytes, -+ vc->vc_splash_data->splash_boxes, -+ vc->vc_splash_data->splash_boxcount, -+ vc->vc_splash_data->splash_percent, -+ 0, -+ octpp); - if (vc->vc_splash_data->splash_state) - info->splash_data = vc->vc_splash_data; -- else -+ else { - splash_off(info); -+ return -5; -+ } - return 0; - } - -@@ -720,6 +807,7 @@ static struct proc_dir_entry *proc_splas - - static 
int splash_recolor(struct vc_data *vc) - { -+ SPLASH_DEBUG(); - if (!vc->vc_splash_data) - return -1; - if (!vc->vc_splash_data->splash_state) -@@ -736,20 +824,27 @@ static int splash_recolor(struct vc_data - static int splash_status(struct vc_data *vc) - { - struct fb_info *info; -- printk(KERN_INFO "bootsplash: status on console %d changed to %s\n", vc->vc_num, vc->vc_splash_data && vc->vc_splash_data->splash_state ? "on" : "off"); -+ SPLASH_DEBUG("vc_num: %i",vc->vc_num); -+ printk(KERN_INFO "bootsplash: status on console %d changed to %s\n", -+ vc->vc_num, -+ vc->vc_splash_data && vc->vc_splash_data->splash_state ? "on" : "off"); - - info = registered_fb[(int) con2fb_map[vc->vc_num]]; -+ - if (fg_console == vc->vc_num) - splash_prepare(vc, info); - if (vc->vc_splash_data && vc->vc_splash_data->splash_state) { -- con_remap_def_color(vc, vc->vc_splash_data->splash_color << 4 | vc->vc_splash_data->splash_fg_color); -- /* vc_resize also calls con_switch which resets yscroll */ -- vc_resize(vc, vc->vc_splash_data->splash_text_wi / vc->vc_font.width, vc->vc_splash_data->splash_text_he / vc->vc_font.height); -- if (fg_console == vc->vc_num) { -- update_region(vc, -- vc->vc_origin + vc->vc_size_row * vc->vc_top, -- vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2); -- splash_clear_margins(vc->vc_splash_data, vc, info, 0); -+ if (info->splash_data) { -+ con_remap_def_color(vc, info->splash_data->splash_color << 4 | info->splash_data->splash_fg_color); -+ /* vc_resize also calls con_switch which resets yscroll */ -+ vc_resize(vc, info->splash_data->splash_text_wi / vc->vc_font.width, -+ info->splash_data->splash_text_he / vc->vc_font.height); -+ if (fg_console == vc->vc_num) { -+ update_region(vc, -+ vc->vc_origin + vc->vc_size_row * vc->vc_top, -+ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2); -+ splash_clear_margins(vc, info, 0); -+ } - } - } else { - /* Switch bootsplash off */ -@@ -787,6 +882,8 @@ void splash_set_percent(struct vc_data * - struct fbcon_ops 
*ops; - int oldpe; - -+ SPLASH_DEBUG(); -+ - if (pe < 0) - pe = 0; - if (pe > 65535) -@@ -805,15 +902,38 @@ void splash_set_percent(struct vc_data * - ops = info->fbcon_par; - if (ops->blank_state) - return; -- if (!vc->vc_splash_data->splash_overpaintok || pe == 65536 || pe < oldpe) { -- if (splash_hasinter(vc->vc_splash_data->splash_boxes, vc->vc_splash_data->splash_boxcount)) -- splash_status(vc); -- else -- splash_prepare(vc, info); -+ if (!vc->vc_splash_data->splash_overpaintok -+ || pe == 65536 -+ || pe < oldpe) { -+ if (splash_hasinter(vc->vc_splash_data->splash_boxes, -+ vc->vc_splash_data->splash_boxcount)) { -+ splash_status(vc); -+ } -+ else -+ splash_prepare(vc, info); - } else { -- if (vc->vc_splash_data->splash_silentjpeg && vc->vc_splash_data->splash_dosilent && info->silent_screen_base) -- boxit(info->silent_screen_base, info->fix.line_length, vc->vc_splash_data->splash_sboxes, vc->vc_splash_data->splash_sboxcount, vc->vc_splash_data->splash_percent, 1); -- boxit(info->screen_base, info->fix.line_length, vc->vc_splash_data->splash_boxes, vc->vc_splash_data->splash_boxcount, vc->vc_splash_data->splash_percent, 1); -+ int octpp = (info->var.bits_per_pixel + 1) >> 3; -+ if (info->splash_data) { -+ if ( info->splash_data->splash_silentjpeg -+ && info->splash_data->splash_dosilent) -+ boxit(info->screen_base, -+ info->fix.line_length, -+ info->splash_data->splash_sboxes, -+ info->splash_data->splash_sboxcount, -+ info->splash_data->splash_percent, -+ 1, -+ octpp); -+#if 0 -+ if (!info->splash_dosilent) -+ boxit(info->screen_base, -+ info->fix.line_length, -+ info->splash_data->splash_boxes, -+ info->splash_data->splash_boxcount, -+ info->splash_data->splash_percent, -+ 1, -+ octpp); -+#endif -+ } - } - } - -@@ -823,6 +943,8 @@ static int splash_write_proc(struct file - int new, unit; - struct vc_data *vc; - -+ SPLASH_DEBUG(); -+ - if (!buffer || !splash_default) - return count; - -@@ -842,8 +964,10 @@ static int splash_write_proc(struct file - return 
count; - } - } -+ SPLASH_DEBUG(" unit: %i",unit); - vc = vc_cons[unit].d; - if (!strncmp(buffer, "redraw", 6)) { -+ SPLASH_DEBUG( " redraw"); - splash_status(vc); - console_unlock(); - return count; -@@ -851,6 +975,7 @@ static int splash_write_proc(struct file - if (!strncmp(buffer, "show", 4) || !strncmp(buffer, "hide", 4)) { - int pe; - -+ SPLASH_DEBUG( " show/hide"); - if (buffer[4] == ' ' && buffer[5] == 'p') - pe = 0; - else if (buffer[4] == '\n') -@@ -867,51 +992,77 @@ static int splash_write_proc(struct file - console_unlock(); - return count; - } -+ - if (!strncmp(buffer,"silent\n",7) || !strncmp(buffer,"verbose\n",8)) { -+ SPLASH_DEBUG( " silent/verbose"); - if (vc->vc_splash_data && vc->vc_splash_data->splash_silentjpeg) { -- if (vc->vc_splash_data->splash_dosilent != (buffer[0] == 's')) { -- vc->vc_splash_data->splash_dosilent = buffer[0] == 's'; -- splash_status(vc); -- } -+ if (vc->vc_splash_data->splash_dosilent != (buffer[0] == 's')) { -+ vc->vc_splash_data->splash_dosilent = buffer[0] == 's'; -+ splash_status(vc); -+ } - } - console_unlock(); - return count; - } - if (!strncmp(buffer,"freesilent\n",11)) { -+ SPLASH_DEBUG( " freesilent"); - if (vc->vc_splash_data && vc->vc_splash_data->splash_silentjpeg) { -- printk(KERN_INFO "bootsplash: freeing silent jpeg\n"); -- vc->vc_splash_data->splash_silentjpeg = 0; -- vfree(vc->vc_splash_data->splash_sboxes); -- vc->vc_splash_data->splash_sboxes = 0; -- vc->vc_splash_data->splash_sboxcount = 0; -- if (vc->vc_splash_data->splash_dosilent) -- splash_status(vc); -- vc->vc_splash_data->splash_dosilent = 0; -+ printk(KERN_INFO "bootsplash: freeing silent jpeg\n"); -+ vc->vc_splash_data->splash_silentjpeg = 0; -+ vfree(vc->vc_splash_data->splash_sboxes); -+ vc->vc_splash_data->splash_sboxes = 0; -+ vc->vc_splash_data->splash_sboxcount = 0; -+ if (vc->vc_splash_data->splash_dosilent) { -+ splash_status(vc); -+ } -+ vc->vc_splash_data->splash_dosilent = 0; - } - console_unlock(); - return count; - } -- - if 
(!strncmp(buffer, "BOOTSPL", 7)) { -- int up = -1; -- unit = splash_getraw((unsigned char *)buffer, (unsigned char *)buffer + count, &up); -- if (unit >= 0) { -- vc = vc_cons[unit].d; -- if (up == -1) -- splash_status(vc); -- else { -- struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]]; -- struct fbcon_ops *ops = info->fbcon_par; -- if (ops->blank_state) -- up = 0; -- if ((up & 2) != 0 && vc->vc_splash_data->splash_silentjpeg && vc->vc_splash_data->splash_dosilent && info->silent_screen_base) -- boxit(info->silent_screen_base, info->fix.line_length, vc->vc_splash_data->splash_sboxes, vc->vc_splash_data->splash_sboxcount, vc->vc_splash_data->splash_percent, 1); -- if ((up & 1) != 0) -- boxit(info->screen_base, info->fix.line_length, vc->vc_splash_data->splash_boxes, vc->vc_splash_data->splash_boxcount, vc->vc_splash_data->splash_percent, 1); -+ int up = -1; -+ -+ SPLASH_DEBUG( " BOOTSPL"); -+ unit = splash_getraw((unsigned char *)buffer, -+ (unsigned char *)buffer + count, -+ &up); -+ SPLASH_DEBUG( " unit: %i up: %i",unit,up); -+ if (unit >= 0) { -+ struct fb_info *info; -+ -+ vc = vc_cons[unit].d; -+ info = registered_fb[(int) con2fb_map[vc->vc_num]]; -+ if (up == -1) { -+ splash_status(vc); -+ } else { -+ struct fbcon_ops *ops = info->fbcon_par; -+ int octpp = (info->var.bits_per_pixel + 1) >> 3; -+ if (ops->blank_state || !vc->vc_splash_data || !info->splash_data) -+ up = 0; -+ if ((up & 2) != 0 -+ && info->splash_data->splash_silentjpeg -+ && info->splash_data->splash_dosilent) { -+ boxit(info->screen_base, -+ info->fix.line_length, -+ info->splash_data->splash_sboxes, -+ info->splash_data->splash_sboxcount, -+ info->splash_data->splash_percent, -+ 1, -+ octpp); -+ } else if ((up & 1) != 0) { -+ boxit(info->screen_base, -+ info->fix.line_length, -+ info->splash_data->splash_boxes, -+ info->splash_data->splash_boxcount, -+ info->splash_data->splash_percent, -+ 1, -+ octpp); -+ } -+ } - } -- } -- console_unlock(); -- return count; -+ 
console_unlock(); -+ return count; - } - if (!vc->vc_splash_data) { - console_unlock(); -@@ -919,6 +1070,7 @@ static int splash_write_proc(struct file - } - if (buffer[0] == 't') { - vc->vc_splash_data->splash_state ^= 1; -+ SPLASH_DEBUG(" t"); - splash_status(vc); - console_unlock(); - return count; -@@ -959,6 +1111,8 @@ static int splash_proc_unregister(void) - # endif - #endif /* CONFIG_PROC_FS */ - -+#define INIT_CONSOLE 0 -+ - void splash_init(void) - { - struct fb_info *info; -@@ -971,9 +1125,12 @@ void splash_init(void) - - if (splash_registered) - return; -- vc = vc_cons[0].d; -- info = registered_fb[0]; -- if (!vc || !info || info->var.bits_per_pixel != 16) -+ vc = vc_cons[INIT_CONSOLE].d; -+ info = registered_fb[(int)con2fb_map[INIT_CONSOLE]]; -+ if (!vc -+ || !info -+ || info->var.bits_per_pixel == 24 /* not tested */ -+ || info->var.bits_per_pixel < 15) /* not supported */ - return; - #ifdef CONFIG_PROC_FS - splash_proc_register(); -@@ -1004,7 +1161,9 @@ void splash_init(void) - mem = vmalloc(len); - if (mem) { - console_lock(); -- if ((int)sys_read(fd, mem, len) == len && splash_getraw((unsigned char *)mem, (unsigned char *)mem + len, (int *)0) == 0 && vc->vc_splash_data) -+ if ((int)sys_read(fd, mem, len) == len -+ && splash_getraw((unsigned char *)mem, (unsigned char *)mem + len, (int *)0) == INIT_CONSOLE -+ && vc->vc_splash_data) - vc->vc_splash_data->splash_state = splash_default & 1; - console_unlock(); - vfree(mem); ---- a/drivers/video/bootsplash/bootsplash.h -+++ b/drivers/video/bootsplash/bootsplash.h -@@ -12,27 +12,30 @@ - #define __BOOTSPLASH_H - - struct fb_info; -+union pt { -+ u32 *ul; -+ u16 *us; -+ u8 *ub; -+}; - - /* splash.c */ - extern int splash_prepare(struct vc_data *, struct fb_info *); - extern void splash_init(void); - - /* splash_render.c */ --extern void splash_putcs(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+extern void splash_putcs(struct vc_data *vc, struct fb_info *info, - const unsigned short *s, 
int count, int ypos, int xpos); --extern void splash_putc(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -- int c, int ypos, int xpos); --extern void splashcopy(u8 *dst, u8 *src, int height, int width, int dstbytes, int srcbytes); --extern void splash_clear(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int sy, -+extern void splashcopy(u8 *dst, u8 *src, int height, int width, int dstbytes, int srcbytes, int depth); -+extern void splash_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width); --extern void splash_bmove(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int sy, -+extern void splash_bmove(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int dy, int dx, int height, int width); --extern void splash_clear_margins(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+extern void splash_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only); --extern int splash_cursor(struct splash_data *sd, struct fb_info *info, struct fb_cursor *cursor); --extern void splash_bmove_redraw(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+extern int splash_cursor(struct fb_info *info, struct fb_cursor *cursor); -+extern void splash_bmove_redraw(struct vc_data *vc, struct fb_info *info, - int y, int sx, int dx, int width); --extern void splash_blank(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+extern void splash_blank(struct vc_data *vc, struct fb_info *info, - int blank); - - /* vt.c */ ---- a/drivers/video/bootsplash/decode-jpg.c -+++ b/drivers/video/bootsplash/decode-jpg.c -@@ -86,6 +86,7 @@ static void initcol __P((PREC[][64])); - - static void col221111 __P((int *, unsigned char *, int)); - static void col221111_16 __P((int *, unsigned char *, int)); -+static void col221111_32 __P((int *, unsigned char *, int)); - - /*********************************/ - -@@ -369,6 +370,9 @@ struct jpeg_decdata 
*decdata; - idct(decdata->dcts + 320, decdata->out + 320, decdata->dquant[2], IFIX(0.5), max[5]); - - switch (depth) { -+ case 32: -+ col221111_32(decdata->out, pic + (my * 16 * mcusx + mx) * 16 * 4, mcusx * 16 * 4); -+ break; - case 24: - col221111(decdata->out, pic + (my * 16 * mcusx + mx) * 16 * 3, mcusx * 16 * 3); - break; -@@ -882,6 +886,15 @@ PREC q[][64]; - #endif - #endif - -+#define PIC_32(yin, xin, p, xout) \ -+( \ -+ y = outy[(yin) * 8 + xin], \ -+ STORECLAMP(p[(xout) * 4 + 0], y + cr), \ -+ STORECLAMP(p[(xout) * 4 + 1], y - cg), \ -+ STORECLAMP(p[(xout) * 4 + 2], y + cb), \ -+ p[(xout) * 4 + 3] = 0 \ -+) -+ - #define PIC221111(xin) \ - ( \ - CBCRCG(0, xin), \ -@@ -900,6 +913,15 @@ PREC q[][64]; - PIC_16(xin / 4 * 8 + 1, (xin & 3) * 2 + 1, pic1, xin * 2 + 1, 2) \ - ) - -+#define PIC221111_32(xin) \ -+( \ -+ CBCRCG(0, xin), \ -+ PIC_32(xin / 4 * 8 + 0, (xin & 3) * 2 + 0, pic0, xin * 2 + 0),\ -+ PIC_32(xin / 4 * 8 + 0, (xin & 3) * 2 + 1, pic0, xin * 2 + 1),\ -+ PIC_32(xin / 4 * 8 + 1, (xin & 3) * 2 + 0, pic1, xin * 2 + 0),\ -+ PIC_32(xin / 4 * 8 + 1, (xin & 3) * 2 + 1, pic1, xin * 2 + 1) \ -+) -+ - static void col221111(out, pic, width) - int *out; - unsigned char *pic; -@@ -949,6 +971,34 @@ int width; - } - outc += 8; - outy += 16; -+ pic0 += 2 * width; -+ pic1 += 2 * width; -+ } -+ outy += 64 * 2 - 16 * 4; -+ } -+} -+ -+static void col221111_32(out, pic, width) -+int *out; -+unsigned char *pic; -+int width; -+{ -+ int i, j, k; -+ unsigned char *pic0, *pic1; -+ int *outy, *outc; -+ int cr, cg, cb, y; -+ -+ pic0 = pic; -+ pic1 = pic + width; -+ outy = out; -+ outc = out + 64 * 4; -+ for (i = 2; i > 0; i--) { -+ for (j = 4; j > 0; j--) { -+ for (k = 0; k < 8; k++) { -+ PIC221111_32(k); -+ } -+ outc += 8; -+ outy += 16; - pic0 += 2 * width; - pic1 += 2 * width; - } ---- a/drivers/video/bootsplash/render.c -+++ b/drivers/video/bootsplash/render.c -@@ -13,82 +13,131 @@ - #include "../console/fbcon.h" - #include "bootsplash.h" - --void splash_putcs(struct 
splash_data *sd, struct vc_data *vc, struct fb_info *info, -- const unsigned short *s, int count, int ypos, int xpos) --{ -- unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; -- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; -- int fgshift = (vc->vc_hi_font_mask) ? 9 : 8; -- u8 *src; -- u8 *dst, *splashsrc; -- unsigned int d, x, y; -- u32 dd, fgx, bgx; -- u16 c = scr_readw(s); -+#ifndef DEBUG -+# define SPLASH_DEBUG(fmt, args...) -+#else -+# define SPLASH_DEBUG(fmt, args...) \ -+ printk(KERN_WARNING "%s: " fmt "\n",__FUNCTION__, ##args) -+#endif - -- int fg_color, bg_color, transparent; -- if (console_blanked) -- return; -- fg_color = attr_fgcol(fgshift, c); -- bg_color = attr_bgcol(bgshift, c); -- transparent = sd->splash_color == bg_color; -- xpos = xpos * vc->vc_font.width + sd->splash_text_xo; -- ypos = ypos * vc->vc_font.height + sd->splash_text_yo; -- splashsrc = (u8 *)(info->splash_pic + ypos * info->splash_bytes + xpos * 2); -- dst = (u8 *)(info->screen_base + ypos * info->fix.line_length + xpos * 2); -- -- fgx = ((u32 *)info->pseudo_palette)[fg_color]; -- if (transparent && sd->splash_color == 15) { -- if (fgx == 0xffea) -- fgx = 0xfe4a; -- else if (fgx == 0x57ea) -- fgx = 0x0540; -- else if (fgx == 0xffff) -- fgx = 0x52aa; -- } -- bgx = ((u32 *)info->pseudo_palette)[bg_color]; -- d = 0; -- -- while (count--) { -- c = scr_readw(s++); -- src = vc->vc_font.data + (c & charmask) * vc->vc_font.height * ((vc->vc_font.width + 7) >> 3); -- -- for (y = 0; y < vc->vc_font.height; y++) { -- for (x = 0; x < vc->vc_font.width; x += 2) { -- if ((x & 7) == 0) -- d = *src++; -- if (d & 0x80) -- dd = fgx; -- else -- dd = transparent ? *(u16 *)splashsrc : bgx; -- splashsrc += 2; -- if (d & 0x40) -- dd |= fgx << 16; -- else -- dd |= (transparent ? 
*(u16 *)splashsrc : bgx) << 16; -- splashsrc += 2; -- d <<= 2; -- fb_writel(dd, dst); -- dst += 4; -- } -- dst += info->fix.line_length - vc->vc_font.width * 2; -- splashsrc += info->splash_bytes - vc->vc_font.width * 2; -- } -- dst -= info->fix.line_length * vc->vc_font.height - vc->vc_font.width * 2; -- splashsrc -= info->splash_bytes * vc->vc_font.height - vc->vc_font.width * 2; -- } --} -- --static void splash_renderc(struct splash_data *sd, struct fb_info *info, int fg_color, int bg_color, u8 *src, int ypos, int xpos, int height, int width) -+void splash_putcs(struct vc_data *vc, struct fb_info *info, -+ const unsigned short *s, int count, int ypos, int xpos) -+{ -+ struct splash_data *sd; -+ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; -+ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; -+ int fgshift = (vc->vc_hi_font_mask) ? 9 : 8; -+ union pt src; -+ union pt dst, splashsrc; -+ unsigned int d, x, y; -+ u32 dd, fgx, bgx; -+ u16 c = scr_readw(s); -+ int fg_color, bg_color, transparent; -+ int n; -+ int octpp = (info->var.bits_per_pixel + 1) >> 3; -+ -+ if (!oops_in_progress && (console_blanked || info->splash_data->splash_dosilent)) -+ return; -+ sd = info->splash_data; -+ -+ fg_color = attr_fgcol(fgshift, c); -+ bg_color = attr_bgcol(bgshift, c); -+ transparent = sd->splash_color == bg_color; -+ xpos = xpos * vc->vc_font.width + sd->splash_text_xo; -+ ypos = ypos * vc->vc_font.height + sd->splash_text_yo; -+ splashsrc.ub = (u8 *)(info->splash_pic + ypos * info->splash_pic_stride + xpos * octpp); -+ dst.ub = (u8 *)(info->screen_base + ypos * info->fix.line_length + xpos * octpp); -+ fgx = ((u32 *)info->pseudo_palette)[fg_color]; -+ if (transparent && sd->splash_color == 15) { -+ if (fgx == 0xffea) -+ fgx = 0xfe4a; -+ else if (fgx == 0x57ea) -+ fgx = 0x0540; -+ else if (fgx == 0xffff) -+ fgx = 0x52aa; -+ } -+ bgx = ((u32 *)info->pseudo_palette)[bg_color]; -+ d = 0; -+ while (count--) { -+ c = scr_readw(s++); -+ src.ub = vc->vc_font.data + (c & 
charmask) * vc->vc_font.height * ((vc->vc_font.width + 7) >> 3); -+ for (y = 0; y < vc->vc_font.height; y++) { -+ for (x = 0; x < vc->vc_font.width; ) { -+ if ((x & 7) == 0) -+ d = *src.ub++; -+ switch (octpp) { -+ case 2: -+ if (d & 0x80) -+ dd = fgx; -+ else -+ dd = transparent ? *splashsrc.us : bgx; -+ splashsrc.us += 1; -+ if (d & 0x40) -+ dd |= fgx << 16; -+ else -+ dd |= (transparent ? *splashsrc.us : bgx) << 16; -+ splashsrc.us += 1; -+ d <<= 2; -+ x += 2; -+ fb_writel(dd, dst.ul); -+ dst.ul += 1; -+ break; -+ case 3: -+ for (n = 0; n <= 16; n += 8) { -+ if (d & 0x80) -+ dd = (fgx >> n) && 0xff; -+ else -+ dd = (transparent ? *splashsrc.ul : ((bgx >> n) & 0xff) ); -+ splashsrc.ub += 1; -+ fb_writeb(dd, dst.ub); -+ dst.ub += 1; -+ } -+ d <<= 1; -+ x += 1; -+ break; -+ case 4: -+ if (d & 0x80) -+ dd = fgx; -+ else -+ dd = (transparent ? *splashsrc.ul : bgx); -+ splashsrc.ul += 1; -+ d <<= 1; -+ x += 1; -+ fb_writel(dd, dst.ul); -+ dst.ul += 1; -+ break; -+ } -+ } -+ dst.ub += info->fix.line_length - vc->vc_font.width * octpp; -+ splashsrc.ub += info->splash_pic_stride - vc->vc_font.width * octpp; -+ } -+ dst.ub -= info->fix.line_length * vc->vc_font.height - vc->vc_font.width * octpp; -+ splashsrc.ub -= info->splash_pic_stride * vc->vc_font.height - vc->vc_font.width * octpp; -+ } -+} -+ -+static void splash_renderc(struct fb_info *info, -+ int fg_color, int bg_color, -+ u8 *src, -+ int ypos, int xpos, -+ int height, int width) - { -- int transparent = sd->splash_color == bg_color; -+ struct splash_data *sd; -+ int transparent; - u32 dd, fgx, bgx; -- u8 *dst, *splashsrc; -+ union pt dst, splashsrc; - unsigned int d, x, y; -+ int n; -+ int octpp = (info->var.bits_per_pixel + 1) >> 3; - -- if (console_blanked) -- return; -- splashsrc = (u8 *)(info->splash_pic + ypos * info->splash_bytes + xpos * 2); -- dst = (u8 *)(info->screen_base + ypos * info->fix.line_length + xpos * 2); -+ if (!oops_in_progress && (console_blanked || info->splash_data->splash_dosilent)) -+ 
return; -+ -+ sd = info->splash_data; -+ -+ transparent = sd->splash_color == bg_color; -+ splashsrc.ub = (u8*)(info->splash_pic + ypos * info->splash_pic_stride + xpos * octpp); -+ dst.ub = (u8*)(info->screen_base + ypos * info->fix.line_length + xpos * octpp); - fgx = ((u32 *)info->pseudo_palette)[fg_color]; - if (transparent && sd->splash_color == 15) { - if (fgx == 0xffea) -@@ -101,93 +150,136 @@ static void splash_renderc(struct splash - bgx = ((u32 *)info->pseudo_palette)[bg_color]; - d = 0; - for (y = 0; y < height; y++) { -- for (x = 0; x < width; x += 2) { -- if ((x & 7) == 0) -- d = *src++; -- if (d & 0x80) -- dd = fgx; -- else -- dd = transparent ? *(u16 *)splashsrc : bgx; -- splashsrc += 2; -- if (d & 0x40) -- dd |= fgx << 16; -- else -- dd |= (transparent ? *(u16 *)splashsrc : bgx) << 16; -- splashsrc += 2; -- d <<= 2; -- fb_writel(dd, dst); -- dst += 4; -- } -- dst += info->fix.line_length - width * 2; -- splashsrc += info->splash_bytes - width * 2; -+ for (x = 0; x < width; ) { -+ if ((x & 7) == 0) -+ d = *src++; -+ switch (octpp) { -+ case 2: -+ if (d & 0x80) -+ dd = fgx; -+ else -+ dd = transparent ? *splashsrc.us : bgx; -+ splashsrc.us += 1; -+ if (d & 0x40) -+ dd |= fgx << 16; -+ else -+ dd |= (transparent ? *splashsrc.us : bgx) << 16; -+ splashsrc.us += 1; -+ d <<= 2; -+ x += 2; -+ fb_writel(dd, dst.ul); -+ dst.ul += 1; -+ break; -+ case 3: -+ for (n = 0; n <= 16; n += 8) { -+ if (d & 0x80) -+ dd = (fgx >> n) & 0xff; -+ else -+ dd = transparent ? *splashsrc.ub : bgx; -+ splashsrc.ub += 1; -+ fb_writeb(dd, dst.ub); -+ dst.ub += 1; -+ } -+ d <<= 1; -+ x += 1; -+ break; -+ case 4: -+ if (d & 0x80) -+ dd = fgx; -+ else -+ dd = transparent ? 
*splashsrc.ul : bgx; -+ splashsrc.ul += 1; -+ d <<= 1; -+ x += 1; -+ fb_writel(dd, dst.ul); -+ dst.ul += 1; -+ break; -+ } -+ } -+ dst.ub += info->fix.line_length - width * octpp; -+ splashsrc.ub += info->splash_pic_stride - width * octpp; - } - } - --void splash_putc(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -- int c, int ypos, int xpos) --{ -- unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; -- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; -- int fgshift = (vc->vc_hi_font_mask) ? 9 : 8; -- u8 *src = vc->vc_font.data + (c & charmask) * vc->vc_font.height * ((vc->vc_font.width + 7) >> 3); -- xpos = xpos * vc->vc_font.width + sd->splash_text_xo; -- ypos = ypos * vc->vc_font.height + sd->splash_text_yo; -- splash_renderc(sd, info, attr_fgcol(fgshift, c), attr_bgcol(bgshift, c), src, ypos, xpos, vc->vc_font.height, vc->vc_font.width); --} -- --void splashcopy(u8 *dst, u8 *src, int height, int width, int dstbytes, int srcbytes) -+void splashcopy(u8 *dst, u8 *src, int height, int width, int dstbytes, int srcbytes, int octpp) - { - int i; - -+ width *= octpp; - while (height-- > 0) { -- u32 *p = (u32 *)dst; -- u32 *q = (u32 *)src; -- for (i=0; i < width/4; i++) { -- fb_writel(*q++,p++); -- fb_writel(*q++,p++); -- } -- if (width & 2) -- fb_writel(*q++,p++); -- if (width & 1) -- fb_writew(*(u16*)q,(u16*)p); -- dst += dstbytes; -- src += srcbytes; -+ union pt p, q; -+ p.ul = (u32 *)dst; -+ q.ul = (u32 *)src; -+ for (i=0; i < width/8; i++) { -+ fb_writel(*q.ul++,p.ul++); -+ fb_writel(*q.ul++,p.ul++); -+ } -+ if (width & 4) -+ fb_writel(*q.ul++,p.ul++); -+ if (width & 2) -+ fb_writew(*q.us++,p.us++); -+ if (width & 1) -+ fb_writeb(*q.ub,p.ub); -+ dst += dstbytes; -+ src += srcbytes; - } - } - --static void splashset(u8 *dst, int height, int width, int dstbytes, u32 bgx) { -+static void splashset(u8 *dst, int height, int width, int dstbytes, u32 bgx, int octpp) { - int i; - -- bgx |= bgx << 16; -+ width *= octpp; -+ if (octpp == 2) -+ bgx 
|= bgx << 16; - while (height-- > 0) { -- u32 *p = (u32 *)dst; -- for (i=0; i < width/4; i++) { -- fb_writel(bgx,p++); -- fb_writel(bgx,p++); -- } -- if (width & 2) -- fb_writel(bgx,p++); -- if (width & 1) -- fb_writew(bgx,(u16*)p); -- dst += dstbytes; -+ union pt p; -+ p.ul = (u32 *)dst; -+ if (octpp != 3) { -+ for (i=0; i < width/8; i++) { -+ fb_writel(bgx,p.ul++); -+ fb_writel(bgx,p.ul++); -+ } -+ if (width & 4) -+ fb_writel(bgx,p.ul++); -+ if (width & 2) -+ fb_writew(bgx,p.us++); -+ if (width & 1) -+ fb_writeb(bgx,p.ub); -+ dst += dstbytes; -+ } else { /* slow! */ -+ for (i=0; i < width; i++) -+ fb_writeb((bgx >> ((i & 0x3) * 8)) && 0xff,p.ub++); -+ } - } - } - - static void splashfill(struct fb_info *info, int sy, int sx, int height, int width) { -- splashcopy((u8 *)(info->screen_base + sy * info->fix.line_length + sx * 2), (u8 *)(info->splash_pic + sy * info->splash_bytes + sx * 2), height, width, info->fix.line_length, info->splash_bytes); -+ int octpp = (info->var.bits_per_pixel + 1) >> 3; -+ -+ splashcopy((u8 *)(info->screen_base + sy * info->fix.line_length + sx * octpp), -+ (u8 *)(info->splash_pic + sy * info->splash_pic_stride + sx * octpp), -+ height, width, info->fix.line_length, info->splash_pic_stride, -+ octpp); - } - --void splash_clear(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int sy, -+void splash_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) - { -+ struct splash_data *sd; - int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; - int bg_color = attr_bgcol_ec(bgshift, vc, info); -- int transparent = sd->splash_color == bg_color; -+ int transparent; -+ int octpp = (info->var.bits_per_pixel + 1) >> 3; - u32 bgx; - u8 *dst; - -- if (console_blanked) -- return; -+ if (!oops_in_progress && (console_blanked || info->splash_data->splash_dosilent)) -+ return; -+ -+ sd = info->splash_data; -+ -+ transparent = sd->splash_color == bg_color; -+ - sy = sy * vc->vc_font.height + sd->splash_text_yo; - sx = sx * vc->vc_font.width + sd->splash_text_xo; - height *= vc->vc_font.height; -@@ -196,18 +288,26 @@ void splash_clear(struct splash_data *sd - splashfill(info, sy, sx, height, width); - return; - } -- dst = (u8 *)(info->screen_base + sy * info->fix.line_length + sx * 2); -+ dst = (u8 *)(info->screen_base + sy * info->fix.line_length + sx * octpp); - bgx = ((u32 *)info->pseudo_palette)[bg_color]; -- splashset(dst, height, width, info->fix.line_length, bgx); -+ splashset(dst, -+ height, width, -+ info->fix.line_length, -+ bgx, -+ (info->var.bits_per_pixel + 1) >> 3); - } - --void splash_bmove(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int sy, -+void splash_bmove(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int dy, int dx, int height, int width) - { -+ struct splash_data *sd; - struct fb_copyarea area; - -- if (console_blanked) -- return; -+ if (!oops_in_progress && (console_blanked || info->splash_data->splash_dosilent)) -+ return; -+ -+ sd = info->splash_data; -+ - area.sx = sx * vc->vc_font.width; - area.sy = sy * vc->vc_font.height; - area.dx = dx * vc->vc_font.width; -@@ -222,34 +322,57 @@ void splash_bmove(struct splash_data *sd - info->fbops->fb_copyarea(info, &area); - } - --void splash_clear_margins(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+void splash_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) - { -+ struct splash_data *sd; - unsigned int tw = vc->vc_cols*vc->vc_font.width; - unsigned int th 
= vc->vc_rows*vc->vc_font.height; -+ SPLASH_DEBUG(); -+ -+ if (!oops_in_progress && (console_blanked || info->splash_data->splash_dosilent)) -+ return; -+ -+ sd = info->splash_data; - -- if (console_blanked) -- return; - if (!bottom_only) { - /* top margin */ -- splashfill(info, 0, 0, sd->splash_text_yo, info->var.xres); -+ splashfill(info, -+ 0, -+ 0, -+ sd->splash_text_yo, -+ info->var.xres); - /* left margin */ -- splashfill(info, sd->splash_text_yo, 0, th, sd->splash_text_xo); -+ splashfill(info, -+ sd->splash_text_yo, -+ 0, -+ th, -+ sd->splash_text_xo); - /* right margin */ -- splashfill(info, sd->splash_text_yo, sd->splash_text_xo + tw, th, info->var.xres - sd->splash_text_xo - tw); -- -+ splashfill(info, -+ sd->splash_text_yo, -+ sd->splash_text_xo + tw, -+ th, -+ info->var.xres - sd->splash_text_xo - tw); - } -- splashfill(info, sd->splash_text_yo + th, 0, info->var.yres - sd->splash_text_yo - th, info->var.xres); -+ splashfill(info, -+ sd->splash_text_yo + th, -+ 0, -+ info->var.yres - sd->splash_text_yo - th, -+ info->var.xres); - } - --int splash_cursor(struct splash_data *sd, struct fb_info *info, struct fb_cursor *cursor) -+int splash_cursor(struct fb_info *info, struct fb_cursor *cursor) - { -+ struct splash_data *sd; - int i; - unsigned int dsize, s_pitch; - - if (info->state != FBINFO_STATE_RUNNING) - return 0; - -+ sd = info->splash_data; -+ - s_pitch = (cursor->image.width + 7) >> 3; - dsize = s_pitch * cursor->image.height; - if (cursor->enable) { -@@ -267,13 +390,15 @@ int splash_cursor(struct splash_data *sd - } else if (info->fb_cursordata != cursor->image.data) - memcpy(info->fb_cursordata, cursor->image.data, dsize); - cursor->image.data = info->fb_cursordata; -- splash_renderc(sd, info, cursor->image.fg_color, cursor->image.bg_color, (u8 *)info->fb_cursordata, cursor->image.dy + sd->splash_text_yo, cursor->image.dx + sd->splash_text_xo, cursor->image.height, cursor->image.width); -+ splash_renderc(info, cursor->image.fg_color, 
cursor->image.bg_color, (u8 *)info->fb_cursordata, cursor->image.dy + sd->splash_text_yo, cursor->image.dx + sd->splash_text_xo, cursor->image.height, cursor->image.width); - return 0; - } - --void splash_bmove_redraw(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) -+void splash_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) - { -- unsigned short *d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * y + dx * 2); -+ struct splash_data *sd; -+ int octpp = (info->var.bits_per_pixel + 1) >> 3; -+ unsigned short *d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * y + dx * octpp); - unsigned short *s = d + (dx - sx); - unsigned short *start = d; - unsigned short *ls = d; -@@ -282,21 +407,24 @@ void splash_bmove_redraw(struct splash_d - int x = dx; - unsigned short attr = 1; - -- if (console_blanked) -+ if (console_blanked || info->splash_data->splash_dosilent) - return; -+ -+ sd = info->splash_data; -+ - do { - c = scr_readw(d); - if (attr != (c & 0xff00)) { - attr = c & 0xff00; - if (d > start) { -- splash_putcs(sd, vc, info, start, d - start, y, x); -+ splash_putcs(vc, info, start, d - start, y, x); - x += d - start; - start = d; - } - } - if (s >= ls && s < le && c == scr_readw(s)) { - if (d > start) { -- splash_putcs(sd, vc, info, start, d - start, y, x); -+ splash_putcs(vc, info, start, d - start, y, x); - x += d - start + 1; - start = d + 1; - } else { -@@ -308,21 +436,22 @@ void splash_bmove_redraw(struct splash_d - d++; - } while (d < le); - if (d > start) -- splash_putcs(sd, vc, info, start, d - start, y, x); -+ splash_putcs(vc, info, start, d - start, y, x); - } - --void splash_blank(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int blank) -+void splash_blank(struct vc_data *vc, struct fb_info *info, int blank) - { -+ SPLASH_DEBUG(); - if (blank) { -- if (info->silent_screen_base) -- splashset((u8 *)info->silent_screen_base, 
info->var.yres, info->var.xres, info->fix.line_length, 0); -- splashset((u8 *)info->screen_base, info->var.yres, info->var.xres, info->fix.line_length, 0); -+ splashset((u8 *)info->screen_base, -+ info->var.yres, info->var.xres, -+ info->fix.line_length, -+ 0, -+ (info->var.bits_per_pixel + 1) >> 3); - } else { -- if (info->silent_screen_base) -- splash_prepare(vc, info); -- splash_clear_margins(vc->vc_splash_data, vc, info, 0); -+ // splash_prepare(vc, info); /* do we really need this? */ -+ splash_clear_margins(vc, info, 0); - /* no longer needed, done in fbcon_blank */ - /* update_screen(vc->vc_num); */ - } - } -- ---- a/drivers/video/console/bitblit.c -+++ b/drivers/video/console/bitblit.c -@@ -53,7 +53,7 @@ static void bit_bmove(struct vc_data *vc - - #ifdef CONFIG_BOOTSPLASH - if (info->splash_data) { -- splash_bmove(info->splash_data, vc, info, -+ splash_bmove(vc, info, - sy, sx, dy, dx, height, width); - return; - } -@@ -76,8 +76,8 @@ static void bit_clear(struct vc_data *vc - - #ifdef CONFIG_BOOTSPLASH - if (info->splash_data) { -- splash_clear(info->splash_data, vc, info, -- sy, sx, height, width); -+ splash_clear(vc, info, -+ sy, sx, height, width); - return; - } - #endif -@@ -180,7 +180,7 @@ static void bit_putcs(struct vc_data *vc - - #ifdef CONFIG_BOOTSPLASH - if (info->splash_data) { -- splash_putcs(info->splash_data, vc, info, s, count, yy, xx); -+ splash_putcs(vc, info, s, count, yy, xx); - return; - } - #endif -@@ -240,7 +240,7 @@ static void bit_clear_margins(struct vc_ - - #ifdef CONFIG_BOOTSPLASH - if (info->splash_data) { -- splash_clear_margins(info->splash_data, vc, info, bottom_only); -+ splash_clear_margins(vc, info, bottom_only); - return; - } - #endif -@@ -413,7 +413,7 @@ static void bit_cursor(struct vc_data *v - - #ifdef CONFIG_BOOTSPLASH - if (info->splash_data) { -- splash_cursor(info->splash_data, info, &cursor); -+ splash_cursor(info, &cursor); - ops->cursor_reset = 0; - return; - } ---- a/drivers/video/console/fbcon.c -+++ 
b/drivers/video/console/fbcon.c -@@ -2074,7 +2074,7 @@ static void fbcon_bmove_rec(struct vc_da - #ifdef CONFIG_BOOTSPLASH - if (info->splash_data && sy == dy && height == 1) { - /* must use slower redraw bmove to keep background pic intact */ -- splash_bmove_redraw(info->splash_data, vc, info, sy, sx, dx, width); -+ splash_bmove_redraw(vc, info, sy, sx, dx, width); - return; - } - #endif -@@ -2325,7 +2325,7 @@ static void fbcon_generic_blank(struct v - - #ifdef CONFIG_BOOTSPLASH - if (info->splash_data) { -- splash_blank(info->splash_data, vc, info, blank); -+ splash_blank(vc, info, blank); - return; - } - #endif ---- a/drivers/video/vesafb.c -+++ b/drivers/video/vesafb.c -@@ -181,10 +181,7 @@ static void vesafb_destroy(struct fb_inf - framebuffer_release(info); - } - --#ifndef CONFIG_BOOTSPLASH --static --#endif --struct fb_ops vesafb_ops = { -+static struct fb_ops vesafb_ops = { - .owner = THIS_MODULE, - .fb_destroy = vesafb_destroy, - .fb_setcolreg = vesafb_setcolreg, -@@ -269,9 +266,6 @@ static int __init vesafb_probe(struct pl - * option to simply use size_total as that - * wastes plenty of kernel address space. 
*/ - size_remap = size_vmode * 2; --#ifdef CONFIG_BOOTSPLASH -- size_remap *= 2; /* some more for the images */ --#endif - if (vram_remap) - size_remap = vram_remap * 1024 * 1024; - if (size_remap < size_vmode) ---- a/include/linux/fb.h -+++ b/include/linux/fb.h -@@ -879,8 +879,7 @@ struct fb_info { - struct splash_data *splash_data; - unsigned char *splash_pic; - int splash_pic_size; -- int splash_bytes; -- char *silent_screen_base; /* real screen base */ -+ int splash_pic_stride; - char fb_cursordata[64]; - #endif - /* we need the PCI or similiar aperture base/size not diff --git a/patches.suse/SoN-01-mm-setup_per_zone_wmarks.patch b/patches.suse/SoN-01-mm-setup_per_zone_wmarks.patch deleted file mode 100644 index 8e16adf..0000000 --- a/patches.suse/SoN-01-mm-setup_per_zone_wmarks.patch +++ /dev/null @@ -1,65 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 01/31] mm: serialize access to min_free_kbytes -Patch-mainline: not yet - -There is a small race between the procfs caller and the memory hotplug caller -of setup_per_zone_wmarks(). Not a big deal, but the next patch will add yet -another caller. Time to close the gap. - -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - mm/page_alloc.c | 16 +++++++++++++--- - 1 file changed, 13 insertions(+), 3 deletions(-) - ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -165,6 +165,7 @@ static char * const zone_names[MAX_NR_ZO - "Movable", - }; - -+static DEFINE_SPINLOCK(min_free_lock); - int min_free_kbytes = 1024; - - static unsigned long __meminitdata nr_kernel_pages; -@@ -4845,13 +4846,13 @@ static void setup_per_zone_lowmem_reserv - } - - /** -- * setup_per_zone_wmarks - called when min_free_kbytes changes -+ * __setup_per_zone_wmarks - called when min_free_kbytes changes - * or when memory is hot-{added|removed} - * - * Ensures that the watermark[min,low,high] values for each zone are set - * correctly with respect to min_free_kbytes. 
- */ --void setup_per_zone_wmarks(void) -+static void __setup_per_zone_wmarks(void) - { - unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); - unsigned long lowmem_pages = 0; -@@ -4949,6 +4950,15 @@ static void __init setup_per_zone_inacti - calculate_zone_inactive_ratio(zone); - } - -+void setup_per_zone_wmarks(void) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&min_free_lock, flags); -+ __setup_per_zone_wmarks(); -+ spin_unlock_irqrestore(&min_free_lock, flags); -+} -+ - /* - * Initialise min_free_kbytes. - * -@@ -4984,7 +4994,7 @@ static int __init init_per_zone_wmark_mi - min_free_kbytes = 128; - if (min_free_kbytes > 65536) - min_free_kbytes = 65536; -- setup_per_zone_wmarks(); -+ __setup_per_zone_wmarks(); - setup_per_zone_lowmem_reserve(); - setup_per_zone_inactive_ratio(); - return 0; diff --git a/patches.suse/SoN-02-doc.patch b/patches.suse/SoN-02-doc.patch deleted file mode 100644 index 69fbacc..0000000 --- a/patches.suse/SoN-02-doc.patch +++ /dev/null @@ -1,286 +0,0 @@ -From: Neil Brown -Subject: [PATCH 02/31] swap over network documentation -Patch-mainline: not yet - -Document describing the problem and proposed solution - -Signed-off-by: Peter Zijlstra -Signed-off-by: Neil Brown -Signed-off-by: Suresh Jayaraman ---- - Documentation/network-swap.txt | 270 +++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 270 insertions(+) - ---- /dev/null -+++ b/Documentation/network-swap.txt -@@ -0,0 +1,270 @@ -+ -+Problem: -+ When Linux needs to allocate memory it may find that there is -+ insufficient free memory so it needs to reclaim space that is in -+ use but not needed at the moment. There are several options: -+ -+ 1/ Shrink a kernel cache such as the inode or dentry cache. This -+ is fairly easy but provides limited returns. -+ 2/ Discard 'clean' pages from the page cache. This is easy, and -+ works well as long as there are clean pages in the page cache. -+ Similarly clean 'anonymous' pages can be discarded - if there -+ are any. 
-+ 3/ Write out some dirty page-cache pages so that they become clean. -+ The VM limits the number of dirty page-cache pages to e.g. 40% -+ of available memory so that (among other reasons) a "sync" will -+ not take excessively long. So there should never be excessive -+ amounts of dirty pagecache. -+ Writing out dirty page-cache pages involves work by the -+ filesystem which may need to allocate memory itself. To avoid -+ deadlock, filesystems use GFP_NOFS when allocating memory on the -+ write-out path. When this is used, cleaning dirty page-cache -+ pages is not an option so if the filesystem finds that memory -+ is tight, another option must be found. -+ 4/ Write out dirty anonymous pages to the "Swap" partition/file. -+ This is the most interesting for a couple of reasons. -+ a/ Unlike dirty page-cache pages, there is no need to write anon -+ pages out unless we are actually short of memory. Thus they -+ tend to be left to last. -+ b/ Anon pages tend to be updated randomly and unpredictably, and -+ flushing them out of memory can have a very significant -+ performance impact on the process using them. This contrasts -+ with page-cache pages which are often written sequentially -+ and often treated as "write-once, read-many". -+ So anon pages tend to be left until last to be cleaned, and may -+ be the only cleanable pages while there are still some dirty -+ page-cache pages (which are waiting on a GFP_NOFS allocation). -+ -+[I don't find the above wholly satisfying. There seems to be too much -+ hand-waving. If someone can provide better text explaining why -+ swapout is a special case, that would be great.] -+ -+So we need to be able to write to the swap file/partition without -+needing to allocate any memory ... or only a small well controlled -+amount. -+ -+The VM reserves a small amount of memory that can only be allocated -+for use as part of the swap-out procedure. 
It is only available to -+processes with the PF_MEMALLOC flag set, which is typically just the -+memory cleaner. -+ -+Traditionally swap-out is performed directly to block devices (swap -+files on block-device filesystems are supported by examining the -+mapping from file offset to device offset in advance, and then using -+the device offsets to write directly to the device). Block devices -+are (required to be) written to pre-allocate any memory that might be -+needed during write-out, and to block when the pre-allocated memory is -+exhausted and no other memory is available. They can be sure not to -+block forever as the pre-allocated memory will be returned as soon as -+the data it is being used for has been written out. The primary -+mechanism for pre-allocating memory is called "mempools". -+ -+This approach does not work for writing anonymous pages -+(i.e. swapping) over a network, using e.g NFS or NBD or iSCSI. -+ -+ -+The main reason that it does not work is that when data from an anon -+page is written to the network, we must wait for a reply to confirm -+the data is safe. Receiving that reply will consume memory and, -+significantly, we need to allocate memory to an incoming packet before -+we can tell if it is the reply we are waiting for or not. -+ -+The secondary reason is that the network code is not written to use -+mempools and in most cases does not need to use them. Changing all -+allocations in the networking layer to use mempools would be quite -+intrusive, and would waste memory, and probably cause a slow-down in -+the common case of not swapping over the network. -+ -+These problems are addressed by enhancing the system of memory -+reserves used by PF_MEMALLOC and requiring any in-kernel networking -+client that is used for swap-out to indicate which sockets are used -+for swapout so they can be handled specially in low memory situations. 
-+ -+There are several major parts to this enhancement: -+ -+1/ page->reserve, GFP_MEMALLOC -+ -+ To handle low memory conditions we need to know when those -+ conditions exist. Having a global "low on memory" flag seems easy, -+ but its implementation is problematic. Instead we make it possible -+ to tell if a recent memory allocation required use of the emergency -+ memory pool. -+ For pages returned by alloc_page, the new page->reserve flag -+ can be tested. If this is set, then a low memory condition was -+ current when the page was allocated, so the memory should be used -+ carefully. (Because low memory conditions are transient, this -+ state is kept in an overloaded member instead of in page flags, which -+ would suggest a more permanent state.) -+ -+ For memory allocated using slab/slub: If a page that is added to a -+ kmem_cache is found to have page->reserve set, then a s->reserve -+ flag is set for the whole kmem_cache. Further allocations will only -+ be returned from that page (or any other page in the cache) if they -+ are emergency allocation (i.e. PF_MEMALLOC or GFP_MEMALLOC is set). -+ Non-emergency allocations will block in alloc_page until a -+ non-reserve page is available. Once a non-reserve page has been -+ added to the cache, the s->reserve flag on the cache is removed. -+ -+ Because slab objects have no individual state its hard to pass -+ reserve state along, the current code relies on a regular alloc -+ failing. There are various allocation wrappers help here. -+ -+ This allows us to -+ a/ request use of the emergency pool when allocating memory -+ (GFP_MEMALLOC), and -+ b/ to find out if the emergency pool was used. -+ -+2/ SK_MEMALLOC, sk_buff->emergency. -+ -+ When memory from the reserve is used to store incoming network -+ packets, the memory must be freed (and the packet dropped) as soon -+ as we find out that the packet is not for a socket that is used for -+ swap-out. 
-+ To achieve this we have an ->emergency flag for skbs, and an -+ SK_MEMALLOC flag for sockets. -+ When memory is allocated for an skb, it is allocated with -+ GFP_MEMALLOC (if we are currently swapping over the network at -+ all). If a subsequent test shows that the emergency pool was used, -+ ->emergency is set. -+ When the skb is finally attached to its destination socket, the -+ SK_MEMALLOC flag on the socket is tested. If the skb has -+ ->emergency set, but the socket does not have SK_MEMALLOC set, then -+ the skb is immediately freed and the packet is dropped. -+ This ensures that reserve memory is never queued on a socket that is -+ not used for swapout. -+ -+ Similarly, if an skb is ever queued for delivery to user-space for -+ example by netfilter, the ->emergency flag is tested and the skb is -+ released if ->emergency is set. (so obviously the storage route may -+ not pass through a userspace helper, otherwise the packets will never -+ arrive and we'll deadlock) -+ -+ This ensures that memory from the emergency reserve can be used to -+ allow swapout to proceed, but will not get caught up in any other -+ network queue. -+ -+ -+3/ pages_emergency -+ -+ The above would be sufficient if the total memory below the lowest -+ memory watermark (i.e the size of the emergency reserve) were known -+ to be enough to hold all transient allocations needed for writeout. -+ I'm a little blurry on how big the current emergency pool is, but it -+ isn't big and certainly hasn't been sized to allow network traffic -+ to consume any. -+ -+ We could simply make the size of the reserve bigger. However in the -+ common case that we are not swapping over the network, that would be -+ a waste of memory. -+ -+ So a new "watermark" is defined: pages_emergency. This is -+ effectively added to the current low water marks, so that pages from -+ this emergency pool can only be allocated if one of PF_MEMALLOC or -+ GFP_MEMALLOC are set. 
-+ -+ pages_emergency can be changed dynamically based on need. When -+ swapout over the network is required, pages_emergency is increased -+ to cover the maximum expected load. When network swapout is -+ disabled, pages_emergency is decreased. -+ -+ To determine how much to increase it by, we introduce reservation -+ groups.... -+ -+3a/ reservation groups -+ -+ The memory used transiently for swapout can be in a number of -+ different places. e.g. the network route cache, the network -+ fragment cache, in transit between network card and socket, or (in -+ the case of NFS) in sunrpc data structures awaiting a reply. -+ We need to ensure each of these is limited in the amount of memory -+ they use, and that the maximum is included in the reserve. -+ -+ The memory required by the network layer only needs to be reserved -+ once, even if there are multiple swapout paths using the network -+ (e.g. NFS and NDB and iSCSI, though using all three for swapout at -+ the same time would be unusual). -+ -+ So we create a tree of reservation groups. The network might -+ register a collection of reservations, but not mark them as being in -+ use. NFS and sunrpc might similarly register a collection of -+ reservations, and attach it to the network reservations as it -+ depends on them. -+ When swapout over NFS is requested, the NFS/sunrpc reservations are -+ activated which implicitly activates the network reservations. -+ -+ The total new reservation is added to pages_emergency. -+ -+ Provided each memory usage stays beneath the registered limit (at -+ least when allocating memory from reserves), the system will never -+ run out of emergency memory, and swapout will not deadlock. -+ -+ It is worth noting here that it is not critical that each usage -+ stays beneath the limit 100% of the time. 
Occasional excess is -+ acceptable provided that the memory will be freed again within a -+ short amount of time that does *not* require waiting for any event -+ that itself might require memory. -+ This is because, at all stages of transmit and receive, it is -+ acceptable to discard all transient memory associated with a -+ particular writeout and try again later. On transmit, the page can -+ be re-queued for later transmission. On receive, the packet can be -+ dropped assuming that the peer will resend after a timeout. -+ -+ Thus allocations that are truly transient and will be freed without -+ blocking do not strictly need to be reserved for. Doing so might -+ still be a good idea to ensure forward progress doesn't take too -+ long. -+ -+4/ low-mem accounting -+ -+ Most places that might hold on to emergency memory (e.g. route -+ cache, fragment cache etc) already place a limit on the amount of -+ memory that they can use. This limit can simply be reserved using -+ the above mechanism and no more needs to be done. -+ -+ However some memory usage might not be accounted with sufficient -+ firmness to allow an appropriate emergency reservation. The -+ in-flight skbs for incoming packets is one such example. -+ -+ To support this, a low-overhead mechanism for accounting memory -+ usage against the reserves is provided. This mechanism uses the -+ same data structure that is used to store the emergency memory -+ reservations through the addition of a 'usage' field. -+ -+ Before we attempt allocation from the memory reserves, we much check -+ if the resulting 'usage' is below the reservation. If so, we increase -+ the usage and attempt the allocation (which should succeed). If -+ the projected 'usage' exceeds the reservation we'll either fail the -+ allocation, or wait for 'usage' to decrease enough so that it would -+ succeed, depending on __GFP_WAIT. -+ -+ When memory that was allocated for that purpose is freed, the -+ 'usage' field is checked again. 
If it is non-zero, then the size of -+ the freed memory is subtracted from the usage, making sure the usage -+ never becomes less than zero. -+ -+ This provides adequate accounting with minimal overheads when not in -+ a low memory condition. When a low memory condition is encountered -+ it does add the cost of a spin lock necessary to serialise updates -+ to 'usage'. -+ -+ -+ -+5/ swapon/swapoff/swap_out/swap_in -+ -+ So that a filesystem (e.g. NFS) can know when to set SK_MEMALLOC on -+ any network socket that it uses, and can know when to account -+ reserve memory carefully, new address_space_operations are -+ available. -+ "swapon" requests that an address space (i.e a file) be make ready -+ for swapout. swap_out and swap_in request the actual IO. They -+ together must ensure that each swap_out request can succeed without -+ allocating more emergency memory that was reserved by swapon. swapoff -+ is used to reverse the state changes caused by swapon when we disable -+ the swap file. -+ -+ -+Thanks for reading this far. I hope it made sense :-) -+ -+Neil Brown (with updates from Peter Zijlstra) -+ -+ diff --git a/patches.suse/SoN-03-mm-gfp-to-alloc_flags-expose.patch b/patches.suse/SoN-03-mm-gfp-to-alloc_flags-expose.patch deleted file mode 100644 index c7982ec..0000000 --- a/patches.suse/SoN-03-mm-gfp-to-alloc_flags-expose.patch +++ /dev/null @@ -1,70 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 03/31] mm: expose gfp_to_alloc_flags() -Patch-mainline: not yet - -Expose the gfp to alloc_flags mapping, so we can use it in other parts -of the vm. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - mm/internal.h | 15 +++++++++++++++ - mm/page_alloc.c | 16 +--------------- - 2 files changed, 16 insertions(+), 15 deletions(-) - ---- a/mm/internal.h -+++ b/mm/internal.h -@@ -185,6 +185,21 @@ static inline struct page *mem_map_next( - #define __paginginit __init - #endif - -+/* The ALLOC_WMARK bits are used as an index to zone->watermark */ -+#define ALLOC_WMARK_MIN WMARK_MIN -+#define ALLOC_WMARK_LOW WMARK_LOW -+#define ALLOC_WMARK_HIGH WMARK_HIGH -+#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */ -+ -+/* Mask to get the watermark bits */ -+#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) -+ -+#define ALLOC_HARDER 0x10 /* try to alloc harder */ -+#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ -+#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ -+ -+int gfp_to_alloc_flags(gfp_t gfp_mask); -+ - /* Memory initialisation debug and verification */ - enum mminit_level { - MMINIT_WARNING, ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -1345,19 +1345,6 @@ failed: - return NULL; - } - --/* The ALLOC_WMARK bits are used as an index to zone->watermark */ --#define ALLOC_WMARK_MIN WMARK_MIN --#define ALLOC_WMARK_LOW WMARK_LOW --#define ALLOC_WMARK_HIGH WMARK_HIGH --#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */ -- --/* Mask to get the watermark bits */ --#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) -- --#define ALLOC_HARDER 0x10 /* try to alloc harder */ --#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ --#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ -- - #ifdef CONFIG_FAIL_PAGE_ALLOC - - static struct fail_page_alloc_attr { -@@ -1911,8 +1898,7 @@ void wake_all_kswapd(unsigned int order, - wakeup_kswapd(zone, order, classzone_idx); - } - --static inline int --gfp_to_alloc_flags(gfp_t gfp_mask) -+int gfp_to_alloc_flags(gfp_t gfp_mask) - { - int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; - const gfp_t wait = gfp_mask & __GFP_WAIT; diff 
--git a/patches.suse/SoN-04-page_alloc-reserve.patch b/patches.suse/SoN-04-page_alloc-reserve.patch deleted file mode 100644 index c780d6d..0000000 --- a/patches.suse/SoN-04-page_alloc-reserve.patch +++ /dev/null @@ -1,43 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 04/31] mm: tag reseve pages -Patch-mainline: not yet - -Tag pages allocated from the reserves with a non-zero page->reserve. -This allows us to distinguish and account reserve pages. - -Since low-memory situations are transient, and unrelated the the actual -page (any page can be on the freelist when we run low), don't mark the -page in any permanent way - just pass along the information to the -allocatee. - -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/linux/mm_types.h | 1 + - mm/page_alloc.c | 4 +++- - 2 files changed, 4 insertions(+), 1 deletion(-) - ---- a/include/linux/mm_types.h -+++ b/include/linux/mm_types.h -@@ -71,6 +71,7 @@ struct page { - union { - pgoff_t index; /* Our offset within mapping. */ - void *freelist; /* SLUB: freelist req. slab lock */ -+ int reserve; /* page_alloc: page is a reserve page */ - }; - struct list_head lru; /* Pageout list, eg. active_list - * protected by zone->lru_lock ! 
---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -1656,8 +1656,10 @@ zonelist_scan: - try_this_zone: - page = buffered_rmqueue(preferred_zone, zone, order, - gfp_mask, migratetype); -- if (page) -+ if (page) { -+ page->reserve = !!(alloc_flags & ALLOC_NO_WATERMARKS); - break; -+ } - this_zone_full: - if (NUMA_BUILD) - zlc_mark_zone_full(zonelist, z); diff --git a/patches.suse/SoN-05-reserve-slub.patch b/patches.suse/SoN-05-reserve-slub.patch deleted file mode 100644 index fd47f19..0000000 --- a/patches.suse/SoN-05-reserve-slub.patch +++ /dev/null @@ -1,425 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 05/31] mm: sl[au]b: add knowledge of reserve pages -Patch-mainline: Not yet - -Restrict objects from reserve slabs (ALLOC_NO_WATERMARKS) to allocation -contexts that are entitled to it. This is done to ensure reserve pages don't -leak out and get consumed. - -The basic pattern used for all # allocators is the following, for each active -slab page we store if it came from an emergency allocation. When we find it -did, make sure the current allocation context would have been able to allocate -page from the emergency reserves as well. In that case allow the allocation. If -not, force a new slab allocation. When that works the memory pressure has -lifted enough to allow this context to get an object, otherwise fail the -allocation. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/linux/slub_def.h | 1 - mm/slab.c | 61 ++++++++++++++++++++++++++++++++++++++++------- - mm/slob.c | 16 +++++++++++- - mm/slub.c | 41 ++++++++++++++++++++++++++----- - 4 files changed, 103 insertions(+), 16 deletions(-) - -Index: linux-2.6.35-master/include/linux/slub_def.h -=================================================================== ---- linux-2.6.35-master.orig/include/linux/slub_def.h -+++ linux-2.6.35-master/include/linux/slub_def.h -@@ -39,6 +39,7 @@ struct kmem_cache_cpu { - void **freelist; /* Pointer to first free per cpu object */ - struct page *page; /* The slab from which we are allocating */ - int node; /* The node of the page (or -1 for debug) */ -+ int reserve; /* Did the current page come from the reserve */ - #ifdef CONFIG_SLUB_STATS - unsigned stat[NR_SLUB_STAT_ITEMS]; - #endif -Index: linux-2.6.35-master/mm/slab.c -=================================================================== ---- linux-2.6.35-master.orig/mm/slab.c -+++ linux-2.6.35-master/mm/slab.c -@@ -120,6 +120,8 @@ - #include - #include - -+#include "internal.h" -+ - /* - * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. - * 0 for faster, smaller code (especially in the critical paths). -@@ -244,7 +246,8 @@ struct array_cache { - unsigned int avail; - unsigned int limit; - unsigned int batchcount; -- unsigned int touched; -+ unsigned int touched:1, -+ reserve:1; - spinlock_t lock; - void *entry[]; /* - * Must have this definition in here for the proper -@@ -680,6 +683,27 @@ static inline struct array_cache *cpu_ca - return cachep->array[smp_processor_id()]; - } - -+/* -+ * If the last page came from the reserves, and the current allocation context -+ * does not have access to them, force an allocation to test the watermarks. 
-+ */ -+static inline int slab_force_alloc(struct kmem_cache *cachep, gfp_t flags) -+{ -+ if (unlikely(cpu_cache_get(cachep)->reserve) && -+ !(gfp_to_alloc_flags(flags) & ALLOC_NO_WATERMARKS)) -+ return 1; -+ -+ return 0; -+} -+ -+static inline void slab_set_reserve(struct kmem_cache *cachep, int reserve) -+{ -+ struct array_cache *ac = cpu_cache_get(cachep); -+ -+ if (unlikely(ac->reserve != reserve)) -+ ac->reserve = reserve; -+} -+ - static inline struct kmem_cache *__find_general_cachep(size_t size, - gfp_t gfpflags) - { -@@ -886,6 +910,7 @@ static struct array_cache *alloc_arrayca - nc->limit = entries; - nc->batchcount = batchcount; - nc->touched = 0; -+ nc->reserve = 0; - spin_lock_init(&nc->lock); - } - return nc; -@@ -1674,7 +1699,8 @@ __initcall(cpucache_init); - * did not request dmaable memory, we might get it, but that - * would be relatively rare and ignorable. - */ --static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) -+static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid, -+ int *reserve) - { - struct page *page; - int nr_pages; -@@ -1696,6 +1722,7 @@ static void *kmem_getpages(struct kmem_c - if (!page) - return NULL; - -+ *reserve = page->reserve; - nr_pages = (1 << cachep->gfporder); - if (cachep->flags & SLAB_RECLAIM_ACCOUNT) - add_zone_page_state(page_zone(page), -@@ -2128,6 +2155,7 @@ static int __init_refok setup_cpu_cache( - cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; - cpu_cache_get(cachep)->batchcount = 1; - cpu_cache_get(cachep)->touched = 0; -+ cpu_cache_get(cachep)->reserve = 0; - cachep->batchcount = 1; - cachep->limit = BOOT_CPUCACHE_ENTRIES; - return 0; -@@ -2813,6 +2841,7 @@ static int cache_grow(struct kmem_cache - size_t offset; - gfp_t local_flags; - struct kmem_list3 *l3; -+ int reserve; - - /* - * Be lazy and only check for valid flags here, keeping it out of the -@@ -2851,7 +2880,7 @@ static int cache_grow(struct kmem_cache - * 'nodeid'. 
- */ - if (!objp) -- objp = kmem_getpages(cachep, local_flags, nodeid); -+ objp = kmem_getpages(cachep, local_flags, nodeid, &reserve); - if (!objp) - goto failed; - -@@ -2868,6 +2897,7 @@ static int cache_grow(struct kmem_cache - if (local_flags & __GFP_WAIT) - local_irq_disable(); - check_irq_off(); -+ slab_set_reserve(cachep, reserve); - spin_lock(&l3->list_lock); - - /* Make slab active. */ -@@ -3002,7 +3032,8 @@ bad: - #define check_slabp(x,y) do { } while(0) - #endif - --static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) -+static void *cache_alloc_refill(struct kmem_cache *cachep, -+ gfp_t flags, int must_refill) - { - int batchcount; - struct kmem_list3 *l3; -@@ -3012,6 +3043,8 @@ static void *cache_alloc_refill(struct k - retry: - check_irq_off(); - node = numa_mem_id(); -+ if (unlikely(must_refill)) -+ goto force_grow; - ac = cpu_cache_get(cachep); - batchcount = ac->batchcount; - if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { -@@ -3081,11 +3114,14 @@ alloc_done: - - if (unlikely(!ac->avail)) { - int x; -+force_grow: - x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); - - /* cache_grow can reenable interrupts, then ac could change. */ - ac = cpu_cache_get(cachep); -- if (!x && ac->avail == 0) /* no objects in sight? abort */ -+ -+ /* no objects in sight? abort */ -+ if (!x && (ac->avail == 0 || must_refill)) - return NULL; - - if (!ac->avail) /* objects refilled by interrupt? 
*/ -@@ -3175,17 +3211,18 @@ static inline void *____cache_alloc(stru - { - void *objp; - struct array_cache *ac; -+ int must_refill = slab_force_alloc(cachep, flags); - - check_irq_off(); - - ac = cpu_cache_get(cachep); -- if (likely(ac->avail)) { -+ if (likely(ac->avail && !must_refill)) { - STATS_INC_ALLOCHIT(cachep); - ac->touched = 1; - objp = ac->entry[--ac->avail]; - } else { - STATS_INC_ALLOCMISS(cachep); -- objp = cache_alloc_refill(cachep, flags); -+ objp = cache_alloc_refill(cachep, flags, must_refill); - /* - * the 'ac' may be updated by cache_alloc_refill(), - * and kmemleak_erase() requires its correct value. -@@ -3243,7 +3280,7 @@ static void *fallback_alloc(struct kmem_ - struct zone *zone; - enum zone_type high_zoneidx = gfp_zone(flags); - void *obj = NULL; -- int nid; -+ int nid, reserve; - - if (flags & __GFP_THISNODE) - return NULL; -@@ -3280,10 +3317,12 @@ retry: - if (local_flags & __GFP_WAIT) - local_irq_enable(); - kmem_flagcheck(cache, flags); -- obj = kmem_getpages(cache, local_flags, numa_mem_id()); -+ obj = kmem_getpages(cache, local_flags, numa_mem_id(), -+ &reserve); - if (local_flags & __GFP_WAIT) - local_irq_disable(); - if (obj) { -+ slab_set_reserve(cache, reserve); - /* - * Insert into the appropriate per node queues - */ -@@ -3323,6 +3362,9 @@ static void *____cache_alloc_node(struct - l3 = cachep->nodelists[nodeid]; - BUG_ON(!l3); - -+ if (unlikely(slab_force_alloc(cachep, flags))) -+ goto force_grow; -+ - retry: - check_irq_off(); - spin_lock(&l3->list_lock); -@@ -3360,6 +3402,7 @@ retry: - - must_grow: - spin_unlock(&l3->list_lock); -+force_grow: - x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); - if (x) - goto retry; -Index: linux-2.6.35-master/mm/slob.c -=================================================================== ---- linux-2.6.35-master.orig/mm/slob.c -+++ linux-2.6.35-master/mm/slob.c -@@ -71,6 +71,7 @@ - #include - - #include -+#include "internal.h" - - /* - * slob_block has a field 'units', which 
indicates size of block if +ve, -@@ -193,6 +194,11 @@ struct slob_rcu { - static DEFINE_SPINLOCK(slob_lock); - - /* -+ * tracks the reserve state for the allocator. -+ */ -+static int slob_reserve; -+ -+/* - * Encode the given size and next info into a free slob block s. - */ - static void set_slob(slob_t *s, slobidx_t size, slob_t *next) -@@ -242,7 +248,7 @@ static int slob_last(slob_t *s) - - static void *slob_new_pages(gfp_t gfp, int order, int node) - { -- void *page; -+ struct page *page; - - #ifdef CONFIG_NUMA - if (node != -1) -@@ -254,6 +260,8 @@ static void *slob_new_pages(gfp_t gfp, i - if (!page) - return NULL; - -+ slob_reserve = page->reserve; -+ - return page_address(page); - } - -@@ -326,6 +334,11 @@ static void *slob_alloc(size_t size, gfp - slob_t *b = NULL; - unsigned long flags; - -+ if (unlikely(slob_reserve)) { -+ if (!(gfp_to_alloc_flags(gfp) & ALLOC_NO_WATERMARKS)) -+ goto grow; -+ } -+ - if (size < SLOB_BREAK1) - slob_list = &free_slob_small; - else if (size < SLOB_BREAK2) -@@ -364,6 +377,7 @@ static void *slob_alloc(size_t size, gfp - } - spin_unlock_irqrestore(&slob_lock, flags); - -+grow: - /* Not enough space: must allocate a new page */ - if (!b) { - b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node); -Index: linux-2.6.35-master/mm/slub.c -=================================================================== ---- linux-2.6.35-master.orig/mm/slub.c -+++ linux-2.6.35-master/mm/slub.c -@@ -27,6 +27,8 @@ - #include - #include - #include -+#include "internal.h" -+ - - #include - -@@ -1145,7 +1147,8 @@ static void setup_object(struct kmem_cac - s->ctor(object); - } - --static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) -+static -+struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node, int *reserve) - { - struct page *page; - void *start; -@@ -1159,6 +1162,8 @@ static struct page *new_slab(struct kmem - if (!page) - goto out; - -+ *reserve = page->reserve; -+ - inc_slabs_node(s, page_to_nid(page), page->objects); - 
page->slab = s; - page->flags |= 1 << PG_slab; -@@ -1607,10 +1612,20 @@ static void *__slab_alloc(struct kmem_ca - { - void **object; - struct page *new; -+ int reserve; - - /* We handle __GFP_ZERO in the caller */ - gfpflags &= ~__GFP_ZERO; - -+ if (unlikely(c->reserve)) { -+ /* -+ * If the current slab is a reserve slab and the current -+ * allocation context does not allow access to the reserves we -+ * must force an allocation to test the current levels. -+ */ -+ if (!(gfp_to_alloc_flags(gfpflags) & ALLOC_NO_WATERMARKS)) -+ goto grow_slab; -+ } - if (!c->page) - goto new_slab; - -@@ -1624,7 +1639,7 @@ load_freelist: - object = c->page->freelist; - if (unlikely(!object)) - goto another_slab; -- if (kmem_cache_debug(s)) -+ if (unlikely(kmem_cache_debug(s) || c->reserve)) - goto debug; - - c->freelist = get_freepointer(s, object); -@@ -1647,17 +1662,19 @@ new_slab: - goto load_freelist; - } - -+grow_slab: - gfpflags &= gfp_allowed_mask; - if (gfpflags & __GFP_WAIT) - local_irq_enable(); - -- new = new_slab(s, gfpflags, node); -+ new = new_slab(s, gfpflags, node, &reserve); - - if (gfpflags & __GFP_WAIT) - local_irq_disable(); - - if (new) { - c = __this_cpu_ptr(s->cpu_slab); -+ c->reserve = reserve; - stat(s, ALLOC_SLAB); - if (c->page) - flush_slab(s, c); -@@ -1668,10 +1685,21 @@ new_slab: - if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) - slab_out_of_memory(s, gfpflags, node); - return NULL; -+ - debug: -- if (!alloc_debug_processing(s, c->page, object, addr)) -+ if (kmem_cache_debug(s) && -+ !alloc_debug_processing(s, c->page, object, addr)) - goto another_slab; - -+ /* -+ * Avoid the slub fast path in slab_alloc() by not setting -+ * c->freelist and the fast path in slab_free() by making -+ * node_match() fail by setting c->node to -1. -+ * -+ * We use this for for debug and reserve checks which need -+ * to be done for each allocation. 
-+ */ -+ - c->page->inuse++; - c->page->freelist = get_freepointer(s, object); - c->node = NUMA_NO_NODE; -@@ -2096,10 +2124,11 @@ static void early_kmem_cache_node_alloc( - struct page *page; - struct kmem_cache_node *n; - unsigned long flags; -+ int reserve; - - BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); - -- page = new_slab(kmem_cache_node, GFP_NOWAIT, node); -+ page = new_slab(kmem_cache_node, GFP_NOWAIT, node, &reserve); - - BUG_ON(!page); - if (page_to_nid(page) != node) { diff --git a/patches.suse/SoN-06-mm-kmem_estimate_pages.patch b/patches.suse/SoN-06-mm-kmem_estimate_pages.patch deleted file mode 100644 index 7cd202c..0000000 --- a/patches.suse/SoN-06-mm-kmem_estimate_pages.patch +++ /dev/null @@ -1,314 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 06/31] mm: kmem_alloc_estimate() -Patch-mainline: not yet - -Feb 8 2011: Refreshed patch to accomodate an upstream change - commit 55136592 -removed dynamic dma slab allocation. - -Provide a method to get the upper bound on the pages needed to allocate -a given number of objects from a given kmem_cache. - -This lays the foundation for a generic reserve framework as presented in -a later patch in this series. This framework needs to convert object demand -(kmalloc() bytes, kmem_cache_alloc() objects) to pages. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/linux/slab.h | 4 ++ - mm/slab.c | 75 +++++++++++++++++++++++++++++++++++++++++++ - mm/slob.c | 67 +++++++++++++++++++++++++++++++++++++++ - mm/slub.c | 87 +++++++++++++++++++++++++++++++++++++++++++++++++++ - 4 files changed, 233 insertions(+) - -Index: linux-2.6.37-master/include/linux/slab.h -=================================================================== ---- linux-2.6.37-master.orig/include/linux/slab.h -+++ linux-2.6.37-master/include/linux/slab.h -@@ -106,6 +106,8 @@ int kmem_cache_shrink(struct kmem_cache - void kmem_cache_free(struct kmem_cache *, void *); - unsigned int kmem_cache_size(struct kmem_cache *); - const char *kmem_cache_name(struct kmem_cache *); -+unsigned kmem_alloc_estimate(struct kmem_cache *cachep, -+ gfp_t flags, int objects); - - /* - * Please use this macro to create slab caches. Simply specify the -@@ -142,6 +144,8 @@ void * __must_check krealloc(const void - void kfree(const void *); - void kzfree(const void *); - size_t ksize(const void *); -+unsigned kmalloc_estimate_objs(size_t, gfp_t, int); -+unsigned kmalloc_estimate_bytes(gfp_t, size_t); - - /* - * Allocator specific definitions. These are mainly used to establish optimized -Index: linux-2.6.37-master/mm/slab.c -=================================================================== ---- linux-2.6.37-master.orig/mm/slab.c -+++ linux-2.6.37-master/mm/slab.c -@@ -3890,6 +3890,81 @@ const char *kmem_cache_name(struct kmem_ - EXPORT_SYMBOL_GPL(kmem_cache_name); - - /* -+ * Calculate the upper bound of pages required to sequentially allocate -+ * @objects objects from @cachep. 
-+ */ -+unsigned kmem_alloc_estimate(struct kmem_cache *cachep, -+ gfp_t flags, int objects) -+{ -+ /* -+ * (1) memory for objects, -+ */ -+ unsigned nr_slabs = DIV_ROUND_UP(objects, cachep->num); -+ unsigned nr_pages = nr_slabs << cachep->gfporder; -+ -+ /* -+ * (2) memory for each per-cpu queue (nr_cpu_ids), -+ * (3) memory for each per-node alien queues (nr_cpu_ids), and -+ * (4) some amount of memory for the slab management structures -+ * -+ * XXX: truely account these -+ */ -+ nr_pages += 1 + ilog2(nr_pages); -+ -+ return nr_pages; -+} -+ -+/* -+ * Calculate the upper bound of pages required to sequentially allocate -+ * @count objects of @size bytes from kmalloc given @flags. -+ */ -+unsigned kmalloc_estimate_objs(size_t size, gfp_t flags, int count) -+{ -+ struct kmem_cache *s = kmem_find_general_cachep(size, flags); -+ if (!s) -+ return 0; -+ -+ return kmem_alloc_estimate(s, flags, count); -+} -+EXPORT_SYMBOL_GPL(kmalloc_estimate_objs); -+ -+/* -+ * Calculate the upper bound of pages requires to sequentially allocate @bytes -+ * from kmalloc in an unspecified number of allocations of nonuniform size. -+ */ -+unsigned kmalloc_estimate_bytes(gfp_t flags, size_t bytes) -+{ -+ unsigned long pages; -+ struct cache_sizes *csizep = malloc_sizes; -+ -+ /* -+ * multiply by two, in order to account the worst case slack space -+ * due to the power-of-two allocation sizes. -+ */ -+ pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE); -+ -+ /* -+ * add the kmem_cache overhead of each possible kmalloc cache -+ */ -+ for (csizep = malloc_sizes; csizep->cs_cachep; csizep++) { -+ struct kmem_cache *s; -+ -+#ifdef CONFIG_ZONE_DMA -+ if (unlikely(flags & __GFP_DMA)) -+ s = csizep->cs_dmacachep; -+ else -+#endif -+ s = csizep->cs_cachep; -+ -+ if (s) -+ pages += kmem_alloc_estimate(s, flags, 0); -+ } -+ -+ return pages; -+} -+EXPORT_SYMBOL_GPL(kmalloc_estimate_bytes); -+ -+/* - * This initializes kmem_list3 or resizes various caches for all nodes. 
- */ - static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) -Index: linux-2.6.37-master/mm/slob.c -=================================================================== ---- linux-2.6.37-master.orig/mm/slob.c -+++ linux-2.6.37-master/mm/slob.c -@@ -699,6 +699,73 @@ int slab_is_available(void) - return slob_ready; - } - -+static __slob_estimate(unsigned size, unsigned align, unsigned objects) -+{ -+ unsigned nr_pages; -+ -+ size = SLOB_UNIT * SLOB_UNITS(size + align - 1); -+ -+ if (size <= PAGE_SIZE) { -+ nr_pages = DIV_ROUND_UP(objects, PAGE_SIZE / size); -+ } else { -+ nr_pages = objects << get_order(size); -+ } -+ -+ return nr_pages; -+} -+ -+/* -+ * Calculate the upper bound of pages required to sequentially allocate -+ * @objects objects from @cachep. -+ */ -+unsigned kmem_alloc_estimate(struct kmem_cache *c, gfp_t flags, int objects) -+{ -+ unsigned size = c->size; -+ -+ if (c->flags & SLAB_DESTROY_BY_RCU) -+ size += sizeof(struct slob_rcu); -+ -+ return __slob_estimate(size, c->align, objects); -+} -+ -+/* -+ * Calculate the upper bound of pages required to sequentially allocate -+ * @count objects of @size bytes from kmalloc given @flags. -+ */ -+unsigned kmalloc_estimate_objs(size_t size, gfp_t flags, int count) -+{ -+ unsigned align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); -+ -+ return __slob_estimate(size, align, count); -+} -+EXPORT_SYMBOL_GPL(kmalloc_estimate_objs); -+ -+/* -+ * Calculate the upper bound of pages requires to sequentially allocate @bytes -+ * from kmalloc in an unspecified number of allocations of nonuniform size. -+ */ -+unsigned kmalloc_estimate_bytes(gfp_t flags, size_t bytes) -+{ -+ unsigned long pages; -+ -+ /* -+ * Multiply by two, in order to account the worst case slack space -+ * due to the power-of-two allocation sizes. -+ * -+ * While not true for slob, it cannot do worse than that for sequential -+ * allocations. 
-+ */ -+ pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE); -+ -+ /* -+ * Our power of two series starts at PAGE_SIZE, so add one page. -+ */ -+ pages++; -+ -+ return pages; -+} -+EXPORT_SYMBOL_GPL(kmalloc_estimate_bytes); -+ - void __init kmem_cache_init(void) - { - slob_ready = 1; -Index: linux-2.6.37-master/mm/slub.c -=================================================================== ---- linux-2.6.37-master.orig/mm/slub.c -+++ linux-2.6.37-master/mm/slub.c -@@ -2434,6 +2434,42 @@ const char *kmem_cache_name(struct kmem_ - } - EXPORT_SYMBOL(kmem_cache_name); - -+/* -+ * Calculate the upper bound of pages required to sequentially allocate -+ * @objects objects from @cachep. -+ * -+ * We should use s->min_objects because those are the least efficient. -+ */ -+unsigned kmem_alloc_estimate(struct kmem_cache *s, gfp_t flags, int objects) -+{ -+ unsigned long pages; -+ struct kmem_cache_order_objects x; -+ -+ if (WARN_ON(!s) || WARN_ON(!oo_objects(s->min))) -+ return 0; -+ -+ x = s->min; -+ pages = DIV_ROUND_UP(objects, oo_objects(x)) << oo_order(x); -+ -+ /* -+ * Account the possible additional overhead if the slab holds more that -+ * one object. Use s->max_objects because that's the worst case. -+ */ -+ x = s->oo; -+ if (oo_objects(x) > 1) { -+ /* -+ * Account the possible additional overhead if per cpu slabs -+ * are currently empty and have to be allocated. This is very -+ * unlikely but a possible scenario immediately after -+ * kmem_cache_shrink. -+ */ -+ pages += num_possible_cpus() << oo_order(x); -+ } -+ -+ return pages; -+} -+EXPORT_SYMBOL_GPL(kmem_alloc_estimate); -+ - static void list_slab_objects(struct kmem_cache *s, struct page *page, - const char *text) - { -@@ -2783,6 +2819,57 @@ void kfree(const void *x) - EXPORT_SYMBOL(kfree); - - /* -+ * Calculate the upper bound of pages required to sequentially allocate -+ * @count objects of @size bytes from kmalloc given @flags. 
-+ */ -+unsigned kmalloc_estimate_objs(size_t size, gfp_t flags, int count) -+{ -+ struct kmem_cache *s = get_slab(size, flags); -+ if (!s) -+ return 0; -+ -+ return kmem_alloc_estimate(s, flags, count); -+ -+} -+EXPORT_SYMBOL_GPL(kmalloc_estimate_objs); -+ -+/* -+ * Calculate the upper bound of pages requires to sequentially allocate @bytes -+ * from kmalloc in an unspecified number of allocations of nonuniform size. -+ */ -+unsigned kmalloc_estimate_bytes(gfp_t flags, size_t bytes) -+{ -+ int i; -+ unsigned long pages; -+ -+ /* -+ * multiply by two, in order to account the worst case slack space -+ * due to the power-of-two allocation sizes. -+ */ -+ pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE); -+ -+ /* -+ * add the kmem_cache overhead of each possible kmalloc cache -+ */ -+ for (i = 1; i < PAGE_SHIFT; i++) { -+ struct kmem_cache *s; -+ -+#ifdef CONFIG_ZONE_DMA -+ if (unlikely(flags & SLUB_DMA)) -+ s = kmalloc_dma_caches[i]; -+ else -+#endif -+ s = kmalloc_caches[i]; -+ -+ if (s) -+ pages += kmem_alloc_estimate(s, flags, 0); -+ } -+ -+ return pages; -+} -+EXPORT_SYMBOL_GPL(kmalloc_estimate_bytes); -+ -+/* - * kmem_cache_shrink removes empty slabs from the partial lists and sorts - * the remaining slabs by the number of items in use. The slabs with the - * most items in use come first. New allocations will then fill those up diff --git a/patches.suse/SoN-07-mm-PF_MEMALLOC-softirq.patch b/patches.suse/SoN-07-mm-PF_MEMALLOC-softirq.patch deleted file mode 100644 index 7e0dfe7..0000000 --- a/patches.suse/SoN-07-mm-PF_MEMALLOC-softirq.patch +++ /dev/null @@ -1,83 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 07/31] mm: allow PF_MEMALLOC from softirq context -Patch-mainline: not yet - -This is needed to allow network softirq packet processing to make use of -PF_MEMALLOC. 
- -Currently softirq context cannot use PF_MEMALLOC due to it not being associated -with a task, and therefore not having task flags to fiddle with - thus the gfp -to alloc flag mapping ignores the task flags when in interrupts (hard or soft) -context. - -Allowing softirqs to make use of PF_MEMALLOC therefore requires some trickery. -We basically borrow the task flags from whatever process happens to be -preempted by the softirq. - -So we modify the gfp to alloc flags mapping to not exclude task flags in -softirq context, and modify the softirq code to save, clear and restore the -PF_MEMALLOC flag. - -The save and clear, ensures the preempted task's PF_MEMALLOC flag doesn't -leak into the softirq. The restore ensures a softirq's PF_MEMALLOC flag cannot -leak back into the preempted process. - -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/linux/sched.h | 7 +++++++ - kernel/softirq.c | 3 +++ - mm/page_alloc.c | 7 ++++--- - 3 files changed, 14 insertions(+), 3 deletions(-) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1761,6 +1761,13 @@ static inline void rcu_copy_process(stru - - #endif - -+static inline void tsk_restore_flags(struct task_struct *p, -+ unsigned long pflags, unsigned long mask) -+{ -+ p->flags &= ~mask; -+ p->flags |= pflags & mask; -+} -+ - #ifdef CONFIG_SMP - extern int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask); ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -194,6 +194,8 @@ asmlinkage void __do_softirq(void) - __u32 pending; - int max_restart = MAX_SOFTIRQ_RESTART; - int cpu; -+ unsigned long pflags = current->flags; -+ current->flags &= ~PF_MEMALLOC; - - pending = local_softirq_pending(); - account_system_vtime(current); -@@ -246,6 +248,7 @@ restart: - - account_system_vtime(current); - __local_bh_enable(SOFTIRQ_OFFSET); -+ tsk_restore_flags(current, pflags, PF_MEMALLOC); - } - - #ifndef __ARCH_HAS_DO_SOFTIRQ ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c 
-@@ -1928,9 +1928,10 @@ int gfp_to_alloc_flags(gfp_t gfp_mask) - alloc_flags |= ALLOC_HARDER; - - if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { -- if (!in_interrupt() && -- ((current->flags & PF_MEMALLOC) || -- unlikely(test_thread_flag(TIF_MEMDIE)))) -+ if (!in_irq() && (current->flags & PF_MEMALLOC)) -+ alloc_flags |= ALLOC_NO_WATERMARKS; -+ else if (!in_interrupt() && -+ unlikely(test_thread_flag(TIF_MEMDIE))) - alloc_flags |= ALLOC_NO_WATERMARKS; - } - diff --git a/patches.suse/SoN-08-mm-page_alloc-emerg.patch b/patches.suse/SoN-08-mm-page_alloc-emerg.patch deleted file mode 100644 index 08e34a0..0000000 --- a/patches.suse/SoN-08-mm-page_alloc-emerg.patch +++ /dev/null @@ -1,219 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 08/31] mm: emergency pool -Patch-mainline: not yet - -Provide means to reserve a specific amount of pages. - -The emergency pool is separated from the min watermark because ALLOC_HARDER -and ALLOC_HIGH modify the watermark in a relative way and thus do not ensure -a strict minimum. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/linux/mmzone.h | 3 + - mm/page_alloc.c | 85 +++++++++++++++++++++++++++++++++++++++++++------ - mm/vmstat.c | 6 +-- - 3 files changed, 81 insertions(+), 13 deletions(-) - ---- a/include/linux/mmzone.h -+++ b/include/linux/mmzone.h -@@ -282,6 +282,7 @@ struct zone_reclaim_stat { - - struct zone { - /* Fields commonly accessed by the page allocator */ -+ unsigned long pages_emerg; /* emergency pool */ - - /* zone watermarks, access with *_wmark_pages(zone) macros */ - unsigned long watermark[NR_WMARK]; -@@ -776,6 +777,8 @@ int sysctl_min_unmapped_ratio_sysctl_han - int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); - -+int adjust_memalloc_reserve(int pages); -+ - extern int numa_zonelist_order_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); - extern char numa_zonelist_order[]; ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -173,6 +173,8 @@ static char * const zone_names[MAX_NR_ZO - - static DEFINE_SPINLOCK(min_free_lock); - int min_free_kbytes = 1024; -+static DEFINE_MUTEX(var_free_mutex); -+int var_free_kbytes; - - static unsigned long __meminitdata nr_kernel_pages; - static unsigned long __meminitdata nr_all_pages; -@@ -1457,7 +1459,7 @@ static bool __zone_watermark_ok(struct z - if (alloc_flags & ALLOC_HARDER) - min -= min / 4; - -- if (free_pages <= min + z->lowmem_reserve[classzone_idx]) -+ if (free_pages <= min+z->lowmem_reserve[classzone_idx]+z->pages_emerg) - return false; - for (o = 0; o < order; o++) { - /* At the next order, this order's pages become unavailable */ -@@ -1985,7 +1987,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, u - { - const gfp_t wait = gfp_mask & __GFP_WAIT; - struct page *page = NULL; -- int alloc_flags; -+ int alloc_flags = 0; - unsigned long pages_reclaimed = 0; - unsigned long did_some_progress; - bool sync_migration = false; -@@ -2144,8 +2146,9 @@ nopage: - printk(KERN_INFO 
"perfectly reliable and the kernel is designed to handle that.\n"); - } - printk(KERN_INFO "%s: page allocation failure." -- " order:%d, mode:0x%x\n", -- current->comm, order, gfp_mask); -+ " order:%d, mode:0x%x, alloc_flags:0x%x pflags:0x%x\n", -+ current->comm, order, gfp_mask, alloc_flags, -+ current->flags); - dump_stack(); - show_mem(); - } -@@ -2480,9 +2483,9 @@ void show_free_areas(void) - "\n", - zone->name, - K(zone_page_state(zone, NR_FREE_PAGES)), -- K(min_wmark_pages(zone)), -- K(low_wmark_pages(zone)), -- K(high_wmark_pages(zone)), -+ K(zone->pages_emerg + min_wmark_pages(zone)), -+ K(zone->pages_emerg + low_wmark_pages(zone)), -+ K(zone->pages_emerg + high_wmark_pages(zone)), - K(zone_page_state(zone, NR_ACTIVE_ANON)), - K(zone_page_state(zone, NR_INACTIVE_ANON)), - K(zone_page_state(zone, NR_ACTIVE_FILE)), -@@ -4863,7 +4866,7 @@ static void calculate_totalreserve_pages - } - - /* we treat the high watermark as reserved pages. */ -- max += high_wmark_pages(zone); -+ max += high_wmark_pages(zone) + zone->pages_emerg; - - if (max > zone->present_pages) - max = zone->present_pages; -@@ -4921,7 +4924,8 @@ static void setup_per_zone_lowmem_reserv - */ - static void __setup_per_zone_wmarks(void) - { -- unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); -+ unsigned pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); -+ unsigned pages_emerg = var_free_kbytes >> (PAGE_SHIFT - 10); - unsigned long lowmem_pages = 0; - struct zone *zone; - unsigned long flags; -@@ -4933,11 +4937,13 @@ static void __setup_per_zone_wmarks(void - } - - for_each_zone(zone) { -- u64 tmp; -+ u64 tmp, tmp_emerg; - - spin_lock_irqsave(&zone->lock, flags); - tmp = (u64)pages_min * zone->present_pages; - do_div(tmp, lowmem_pages); -+ tmp_emerg = (u64)pages_emerg * zone->present_pages; -+ do_div(tmp_emerg, lowmem_pages); - if (is_highmem(zone)) { - /* - * __GFP_HIGH and PF_MEMALLOC allocations usually don't -@@ -4956,12 +4962,14 @@ static void __setup_per_zone_wmarks(void - if 
(min_pages > 128) - min_pages = 128; - zone->watermark[WMARK_MIN] = min_pages; -+ zone->pages_emerg = 0; - } else { - /* - * If it's a lowmem zone, reserve a number of pages - * proportionate to the zone's size. - */ - zone->watermark[WMARK_MIN] = tmp; -+ zone->pages_emerg = tmp_emerg; - } - - zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); -@@ -5026,6 +5034,63 @@ void setup_per_zone_wmarks(void) - spin_unlock_irqrestore(&min_free_lock, flags); - } - -+static void __adjust_memalloc_reserve(int pages) -+{ -+ var_free_kbytes += pages << (PAGE_SHIFT - 10); -+ BUG_ON(var_free_kbytes < 0); -+ setup_per_zone_wmarks(); -+} -+ -+static int test_reserve_limits(void) -+{ -+ struct zone *zone; -+ int node; -+ -+ for_each_zone(zone) -+ wakeup_kswapd(zone, 0); -+ -+ for_each_online_node(node) { -+ struct page *page = alloc_pages_node(node, GFP_KERNEL, 0); -+ if (!page) -+ return -ENOMEM; -+ -+ __free_page(page); -+ } -+ -+ return 0; -+} -+ -+/** -+ * adjust_memalloc_reserve - adjust the memalloc reserve -+ * @pages: number of pages to add -+ * -+ * It adds a number of pages to the memalloc reserve; if -+ * the number was positive it kicks reclaim into action to -+ * satisfy the higher watermarks. -+ * -+ * returns -ENOMEM when it failed to satisfy the watermarks. -+ */ -+int adjust_memalloc_reserve(int pages) -+{ -+ int err = 0; -+ -+ mutex_lock(&var_free_mutex); -+ __adjust_memalloc_reserve(pages); -+ if (pages > 0) { -+ err = test_reserve_limits(); -+ if (err) { -+ __adjust_memalloc_reserve(-pages); -+ goto unlock; -+ } -+ } -+ printk(KERN_DEBUG "Emergency reserve: %d\n", var_free_kbytes); -+ -+unlock: -+ mutex_unlock(&var_free_mutex); -+ return err; -+} -+EXPORT_SYMBOL_GPL(adjust_memalloc_reserve); -+ - /* - * Initialise min_free_kbytes. 
- * ---- a/mm/vmstat.c -+++ b/mm/vmstat.c -@@ -957,9 +957,9 @@ static void zoneinfo_show_print(struct s - "\n spanned %lu" - "\n present %lu", - zone_page_state(zone, NR_FREE_PAGES), -- min_wmark_pages(zone), -- low_wmark_pages(zone), -- high_wmark_pages(zone), -+ zone->pages_emerg + min_wmark_pages(zone), -+ zone->pages_emerg + min_wmark_pages(zone), -+ zone->pages_emerg + high_wmark_pages(zone), - zone->pages_scanned, - zone->spanned_pages, - zone->present_pages); diff --git a/patches.suse/SoN-08a-mm-page_alloc-emerg.patch b/patches.suse/SoN-08a-mm-page_alloc-emerg.patch deleted file mode 100644 index 96cfc1b..0000000 --- a/patches.suse/SoN-08a-mm-page_alloc-emerg.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 4d2cfa9116b4651cf959e5a02feac0334590dbd9 Mon Sep 17 00:00:00 2001 -From: Mel Gorman -Date: Wed, 9 Mar 2011 19:23:55 +0000 -Subject: [PATCH] collapse: mm: Report the low watermark correctly. -Patch-mainline: Not yet -References: bnc#678497 - -Report the correct low watermark plus the emergency pool offset -properly. Currently it is printing out an offset from the min watermark. 
- -Signed-off-by: Mel Gorman -Reviewed-by: NeilBrown -Signed-off-by: Suresh Jayaraman ---- - mm/vmstat.c | 2 +- - 1 files changed, 1 insertions(+), 1 deletions(-) - -Index: linux-2.6.37-openSUSE-11.4/mm/vmstat.c -=================================================================== ---- linux-2.6.37-openSUSE-11.4.orig/mm/vmstat.c -+++ linux-2.6.37-openSUSE-11.4/mm/vmstat.c -@@ -880,7 +880,7 @@ static void zoneinfo_show_print(struct s - "\n present %lu", - zone_page_state(zone, NR_FREE_PAGES), - zone->pages_emerg + min_wmark_pages(zone), -- zone->pages_emerg + min_wmark_pages(zone), -+ zone->pages_emerg + low_wmark_pages(zone), - zone->pages_emerg + high_wmark_pages(zone), - zone->pages_scanned, - zone->spanned_pages, diff --git a/patches.suse/SoN-09-global-ALLOC_NO_WATERMARKS.patch b/patches.suse/SoN-09-global-ALLOC_NO_WATERMARKS.patch deleted file mode 100644 index f5530f3..0000000 --- a/patches.suse/SoN-09-global-ALLOC_NO_WATERMARKS.patch +++ /dev/null @@ -1,36 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 09/31] mm: system wide ALLOC_NO_WATERMARK -Patch-mainline: not yet - -The reserve is proportionally distributed over all (!highmem) zones in the -system. So we need to allow an emergency allocation access to all zones. In -order to do that we need to break out of any mempolicy boundaries we might -have. - -In my opinion that does not break mempolicies as those are user oriented -and not system oriented. That is, system allocations are not guaranteed to be -within mempolicy boundaries. For instance IRQs don't even have a mempolicy. - -So breaking out of mempolicy boundaries for 'rare' emergency allocations, -which are always system allocations (as opposed to user) is ok. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - mm/page_alloc.c | 5 +++++ - 1 file changed, 5 insertions(+) - ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -1995,6 +1995,11 @@ restart: - rebalance: - /* Allocate without watermarks if the context allows */ - if (alloc_flags & ALLOC_NO_WATERMARKS) { -+ /* -+ * break out mempolicy boundaries -+ */ -+ zonelist = node_zonelist(numa_node_id(), gfp_mask); -+ - page = __alloc_pages_high_priority(gfp_mask, order, - zonelist, high_zoneidx, nodemask, - preferred_zone, migratetype); diff --git a/patches.suse/SoN-10-mm-page_alloc-GFP_EMERGENCY.patch b/patches.suse/SoN-10-mm-page_alloc-GFP_EMERGENCY.patch deleted file mode 100644 index 074af89..0000000 --- a/patches.suse/SoN-10-mm-page_alloc-GFP_EMERGENCY.patch +++ /dev/null @@ -1,57 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 10/31] mm: __GFP_MEMALLOC -Patch-mainline: not yet - -__GFP_MEMALLOC will allow the allocation to disregard the watermarks, -much like PF_MEMALLOC. - -It allows one to pass along the memalloc state in object related allocation -flags as opposed to task related flags, such as sk->sk_allocation. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/linux/gfp.h | 4 +++- - mm/page_alloc.c | 4 +++- - 2 files changed, 6 insertions(+), 2 deletions(-) - ---- a/include/linux/gfp.h -+++ b/include/linux/gfp.h -@@ -23,6 +23,7 @@ struct vm_area_struct; - #define ___GFP_REPEAT 0x400u - #define ___GFP_NOFAIL 0x800u - #define ___GFP_NORETRY 0x1000u -+#define ___GFP_MEMALLOC 0x2000u - #define ___GFP_COMP 0x4000u - #define ___GFP_ZERO 0x8000u - #define ___GFP_NOMEMALLOC 0x10000u -@@ -73,6 +74,7 @@ struct vm_area_struct; - #define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ - #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ - #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ -+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Use emergency reserves */ - #define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ - #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ - #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */ -@@ -119,7 +121,7 @@ struct vm_area_struct; - /* Control page allocator reclaim behavior */ - #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ - __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ -- __GFP_NORETRY|__GFP_NOMEMALLOC) -+ __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) - - /* Control slab gfp mask during early boot */ - #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -1944,7 +1944,9 @@ int gfp_to_alloc_flags(gfp_t gfp_mask) - alloc_flags |= ALLOC_HARDER; - - if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { -- if (!in_irq() && (current->flags & PF_MEMALLOC)) -+ if (gfp_mask & __GFP_MEMALLOC) -+ alloc_flags |= ALLOC_NO_WATERMARKS; -+ else if (!in_irq() && (current->flags & PF_MEMALLOC)) - alloc_flags |= ALLOC_NO_WATERMARKS; - else if (!in_interrupt() && - 
unlikely(test_thread_flag(TIF_MEMDIE))) diff --git a/patches.suse/SoN-11-mm-reserve.patch b/patches.suse/SoN-11-mm-reserve.patch deleted file mode 100644 index ac7e64d..0000000 --- a/patches.suse/SoN-11-mm-reserve.patch +++ /dev/null @@ -1,873 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 11/31] mm: memory reserve management -Patch-mainline: not yet - -Generic reserve management code. - -It provides methods to reserve and charge. Upon this, generic alloc/free style -reserve pools could be build, which could fully replace mempool_t -functionality. - -It should also allow for a Banker's algorithm replacement of __GFP_NOFAIL. - -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/linux/reserve.h | 197 ++++++++++++++ - mm/Makefile | 2 - mm/reserve.c | 637 ++++++++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 835 insertions(+), 1 deletion(-) - ---- /dev/null -+++ b/include/linux/reserve.h -@@ -0,0 +1,197 @@ -+/* -+ * Memory reserve management. -+ * -+ * Copyright (C) 2007-2008 Red Hat, Inc., -+ * Peter Zijlstra -+ * -+ * This file contains the public data structure and API definitions. 
-+ */ -+ -+#ifndef _LINUX_RESERVE_H -+#define _LINUX_RESERVE_H -+ -+#include -+#include -+#include -+#include -+ -+struct mem_reserve { -+ struct mem_reserve *parent; -+ struct list_head children; -+ struct list_head siblings; -+ -+ const char *name; -+ -+ long pages; -+ long limit; -+ long usage; -+ spinlock_t lock; /* protects limit and usage */ -+ -+ wait_queue_head_t waitqueue; -+}; -+ -+extern struct mem_reserve mem_reserve_root; -+ -+void mem_reserve_init(struct mem_reserve *res, const char *name, -+ struct mem_reserve *parent); -+int mem_reserve_connect(struct mem_reserve *new_child, -+ struct mem_reserve *node); -+void mem_reserve_disconnect(struct mem_reserve *node); -+ -+int mem_reserve_pages_set(struct mem_reserve *res, long pages); -+int mem_reserve_pages_add(struct mem_reserve *res, long pages); -+int mem_reserve_pages_charge(struct mem_reserve *res, long pages); -+ -+int mem_reserve_kmalloc_set(struct mem_reserve *res, long bytes); -+int mem_reserve_kmalloc_charge(struct mem_reserve *res, long bytes); -+ -+struct kmem_cache; -+ -+int mem_reserve_kmem_cache_set(struct mem_reserve *res, -+ struct kmem_cache *s, -+ int objects); -+int mem_reserve_kmem_cache_charge(struct mem_reserve *res, -+ struct kmem_cache *s, long objs); -+ -+void *___kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip, -+ struct mem_reserve *res, int *emerg); -+ -+static inline -+void *__kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip, -+ struct mem_reserve *res, int *emerg) -+{ -+ void *obj; -+ -+ obj = kmalloc_node_track_caller(size, -+ flags | __GFP_NOMEMALLOC | __GFP_NOWARN, node); -+ if (!obj) -+ obj = ___kmalloc_reserve(size, flags, node, ip, res, emerg); -+ -+ return obj; -+} -+ -+/** -+ * kmalloc_reserve() - kmalloc() and charge against @res for @emerg allocations -+ * @size - size of the requested memory region -+ * @gfp - allocation flags to use for this allocation -+ * @node - preferred memory node for this allocation -+ * @res - 
reserve to charge emergency allocations against -+ * @emerg - bit 0 is set when the allocation was an emergency allocation -+ * -+ * Returns NULL on failure -+ */ -+#define kmalloc_reserve(size, gfp, node, res, emerg) \ -+ __kmalloc_reserve(size, gfp, node, _RET_IP_, res, emerg) -+ -+void __kfree_reserve(void *obj, struct mem_reserve *res, int emerg); -+ -+/** -+ * kfree_reserve() - kfree() and uncharge against @res for @emerg allocations -+ * @obj - memory to free -+ * @res - reserve to uncharge emergency allocations from -+ * @emerg - was this an emergency allocation -+ */ -+static inline -+void kfree_reserve(void *obj, struct mem_reserve *res, int emerg) -+{ -+ if (unlikely(obj && res && emerg)) -+ __kfree_reserve(obj, res, emerg); -+ else -+ kfree(obj); -+} -+ -+void *__kmem_cache_alloc_reserve(struct kmem_cache *s, gfp_t flags, int node, -+ struct mem_reserve *res, int *emerg); -+ -+/** -+ * kmem_cache_alloc_reserve() - kmem_cache_alloc() and charge against @res -+ * @s - kmem_cache to allocate from -+ * @gfp - allocation flags to use for this allocation -+ * @node - preferred memory node for this allocation -+ * @res - reserve to charge emergency allocations against -+ * @emerg - bit 0 is set when the allocation was an emergency allocation -+ * -+ * Returns NULL on failure -+ */ -+static inline -+void *kmem_cache_alloc_reserve(struct kmem_cache *s, gfp_t flags, int node, -+ struct mem_reserve *res, int *emerg) -+{ -+ void *obj; -+ -+ obj = kmem_cache_alloc_node(s, -+ flags | __GFP_NOMEMALLOC | __GFP_NOWARN, node); -+ if (!obj) -+ obj = __kmem_cache_alloc_reserve(s, flags, node, res, emerg); -+ -+ return obj; -+} -+ -+void __kmem_cache_free_reserve(struct kmem_cache *s, void *obj, -+ struct mem_reserve *res, int emerg); -+ -+/** -+ * kmem_cache_free_reserve() - kmem_cache_free() and uncharge against @res -+ * @s - kmem_cache to free to -+ * @obj - memory to free -+ * @res - reserve to uncharge emergency allocations from -+ * @emerg - was this an emergency 
allocation -+ */ -+static inline -+void kmem_cache_free_reserve(struct kmem_cache *s, void *obj, -+ struct mem_reserve *res, int emerg) -+{ -+ if (unlikely(obj && res && emerg)) -+ __kmem_cache_free_reserve(s, obj, res, emerg); -+ else -+ kmem_cache_free(s, obj); -+} -+ -+struct page *__alloc_pages_reserve(int node, gfp_t flags, int order, -+ struct mem_reserve *res, int *emerg); -+ -+/** -+ * alloc_pages_reserve() - alloc_pages() and charge against @res -+ * @node - preferred memory node for this allocation -+ * @gfp - allocation flags to use for this allocation -+ * @order - page order -+ * @res - reserve to charge emergency allocations against -+ * @emerg - bit 0 is set when the allocation was an emergency allocation -+ * -+ * Returns NULL on failure -+ */ -+static inline -+struct page *alloc_pages_reserve(int node, gfp_t flags, int order, -+ struct mem_reserve *res, int *emerg) -+{ -+ struct page *page; -+ -+ page = alloc_pages_node(node, -+ flags | __GFP_NOMEMALLOC | __GFP_NOWARN, order); -+ if (!page) -+ page = __alloc_pages_reserve(node, flags, order, res, emerg); -+ -+ return page; -+} -+ -+void __free_pages_reserve(struct page *page, int order, -+ struct mem_reserve *res, int emerg); -+ -+/** -+ * free_pages_reserve() - __free_pages() and uncharge against @res -+ * @page - page to free -+ * @order - page order -+ * @res - reserve to uncharge emergency allocations from -+ * @emerg - was this an emergency allocation -+ */ -+static inline -+void free_pages_reserve(struct page *page, int order, -+ struct mem_reserve *res, int emerg) -+{ -+ if (unlikely(page && res && emerg)) -+ __free_pages_reserve(page, order, res, emerg); -+ else -+ __free_pages(page, order); -+} -+ -+#endif /* _LINUX_RESERVE_H */ ---- a/mm/Makefile -+++ b/mm/Makefile -@@ -12,7 +12,7 @@ obj-y := bootmem.o filemap.o mempool.o - readahead.o swap.o truncate.o vmscan.o shmem.o \ - prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ - page_isolation.o mm_init.o mmu_context.o percpu.o \ -- 
$(mmu-y) -+ reserve.o $(mmu-y) - obj-y += init-mm.o - - obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o ---- /dev/null -+++ b/mm/reserve.c -@@ -0,0 +1,639 @@ -+/* -+ * Memory reserve management. -+ * -+ * Copyright (C) 2007-2008, Red Hat, Inc., -+ * Peter Zijlstra -+ * -+ * Description: -+ * -+ * Manage a set of memory reserves. -+ * -+ * A memory reserve is a reserve for a specified number of object of specified -+ * size. Since memory is managed in pages, this reserve demand is then -+ * translated into a page unit. -+ * -+ * So each reserve has a specified object limit, an object usage count and a -+ * number of pages required to back these objects. -+ * -+ * Usage is charged against a reserve, if the charge fails, the resource must -+ * not be allocated/used. -+ * -+ * The reserves are managed in a tree, and the resource demands (pages and -+ * limit) are propagated up the tree. Obviously the object limit will be -+ * meaningless as soon as the unit starts mixing, but the required page reserve -+ * (being of one unit) is still valid at the root. -+ * -+ * It is the page demand of the root node that is used to set the global -+ * reserve (adjust_memalloc_reserve() which sets zone->pages_emerg). -+ * -+ * As long as a subtree has the same usage unit, an aggregate node can be used -+ * to charge against, instead of the leaf nodes. However, do be consistent with -+ * who is charged, resource usage is not propagated up the tree (for -+ * performance reasons). -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "internal.h" -+ -+static DEFINE_MUTEX(mem_reserve_mutex); -+ -+/** -+ * @mem_reserve_root - the global reserve root -+ * -+ * The global reserve is empty, and has no limit unit, it merely -+ * acts as an aggregation point for reserves and an interface to -+ * adjust_memalloc_reserve(). 
-+ */ -+struct mem_reserve mem_reserve_root = { -+ .children = LIST_HEAD_INIT(mem_reserve_root.children), -+ .siblings = LIST_HEAD_INIT(mem_reserve_root.siblings), -+ .name = "total reserve", -+ .lock = __SPIN_LOCK_UNLOCKED(mem_reserve_root.lock), -+ .waitqueue = __WAIT_QUEUE_HEAD_INITIALIZER(mem_reserve_root.waitqueue), -+}; -+EXPORT_SYMBOL_GPL(mem_reserve_root); -+ -+/** -+ * mem_reserve_init() - initialize a memory reserve object -+ * @res - the new reserve object -+ * @name - a name for this reserve -+ * @parent - when non NULL, the parent to connect to. -+ */ -+void mem_reserve_init(struct mem_reserve *res, const char *name, -+ struct mem_reserve *parent) -+{ -+ memset(res, 0, sizeof(*res)); -+ INIT_LIST_HEAD(&res->children); -+ INIT_LIST_HEAD(&res->siblings); -+ res->name = name; -+ spin_lock_init(&res->lock); -+ init_waitqueue_head(&res->waitqueue); -+ -+ if (parent) -+ mem_reserve_connect(res, parent); -+} -+EXPORT_SYMBOL_GPL(mem_reserve_init); -+ -+/* -+ * propagate the pages and limit changes up the (sub)tree. -+ */ -+static void __calc_reserve(struct mem_reserve *res, long pages, long limit) -+{ -+ unsigned long flags; -+ -+ for ( ; res; res = res->parent) { -+ res->pages += pages; -+ -+ if (limit) { -+ spin_lock_irqsave(&res->lock, flags); -+ res->limit += limit; -+ spin_unlock_irqrestore(&res->lock, flags); -+ } -+ } -+} -+ -+/** -+ * __mem_reserve_add() - primitive to change the size of a reserve -+ * @res - reserve to change -+ * @pages - page delta -+ * @limit - usage limit delta -+ * -+ * Returns -ENOMEM when a size increase is not possible atm. -+ */ -+static int __mem_reserve_add(struct mem_reserve *res, long pages, long limit) -+{ -+ int ret = 0; -+ long reserve; -+ -+ /* -+ * This looks more complex than need be, that is because we handle -+ * the case where @res isn't actually connected to mem_reserve_root. 
-+ * -+ * So, by propagating the new pages up the (sub)tree and computing -+ * the difference in mem_reserve_root.pages we find if this action -+ * affects the actual reserve. -+ * -+ * The (partial) propagation also makes that mem_reserve_connect() -+ * needs only look at the direct child, since each disconnected -+ * sub-tree is fully up-to-date. -+ */ -+ reserve = mem_reserve_root.pages; -+ __calc_reserve(res, pages, 0); -+ reserve = mem_reserve_root.pages - reserve; -+ -+ if (reserve) { -+ ret = adjust_memalloc_reserve(reserve); -+ if (ret) -+ __calc_reserve(res, -pages, 0); -+ } -+ -+ /* -+ * Delay updating the limits until we've acquired the resources to -+ * back it. -+ */ -+ if (!ret) -+ __calc_reserve(res, 0, limit); -+ -+ return ret; -+} -+ -+/** -+ * __mem_reserve_charge() - primitive to charge object usage of a reserve -+ * @res - reserve to charge -+ * @charge - size of the charge -+ * -+ * Returns non-zero on success, zero on failure. -+ */ -+static -+int __mem_reserve_charge(struct mem_reserve *res, long charge) -+{ -+ unsigned long flags; -+ int ret = 0; -+ -+ spin_lock_irqsave(&res->lock, flags); -+ if (charge < 0 || res->usage + charge < res->limit) { -+ res->usage += charge; -+ if (unlikely(res->usage < 0)) -+ res->usage = 0; -+ ret = 1; -+ } -+ if (charge < 0) -+ wake_up_all(&res->waitqueue); -+ spin_unlock_irqrestore(&res->lock, flags); -+ -+ return ret; -+} -+ -+/** -+ * mem_reserve_connect() - connect a reserve to another in a child-parent relation -+ * @new_child - the reserve node to connect (child) -+ * @node - the reserve node to connect to (parent) -+ * -+ * Connecting a node results in an increase of the reserve by the amount of -+ * pages in @new_child->pages if @node has a connection to mem_reserve_root. -+ * -+ * Returns -ENOMEM when the new connection would increase the reserve (parent -+ * is connected to mem_reserve_root) and there is no memory to do so. -+ * -+ * On error, the child is _NOT_ connected. 
-+ */ -+int mem_reserve_connect(struct mem_reserve *new_child, struct mem_reserve *node) -+{ -+ int ret; -+ -+ WARN_ON(!new_child->name); -+ -+ mutex_lock(&mem_reserve_mutex); -+ if (new_child->parent) { -+ ret = -EEXIST; -+ goto unlock; -+ } -+ new_child->parent = node; -+ list_add(&new_child->siblings, &node->children); -+ ret = __mem_reserve_add(node, new_child->pages, new_child->limit); -+ if (ret) { -+ new_child->parent = NULL; -+ list_del_init(&new_child->siblings); -+ } -+unlock: -+ mutex_unlock(&mem_reserve_mutex); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mem_reserve_connect); -+ -+/** -+ * mem_reserve_disconnect() - sever a nodes connection to the reserve tree -+ * @node - the node to disconnect -+ * -+ * Disconnecting a node results in a reduction of the reserve by @node->pages -+ * if node had a connection to mem_reserve_root. -+ */ -+void mem_reserve_disconnect(struct mem_reserve *node) -+{ -+ int ret; -+ -+ BUG_ON(!node->parent); -+ -+ mutex_lock(&mem_reserve_mutex); -+ if (!node->parent) { -+ ret = -ENOENT; -+ goto unlock; -+ } -+ ret = __mem_reserve_add(node->parent, -node->pages, -node->limit); -+ if (!ret) { -+ node->parent = NULL; -+ list_del_init(&node->siblings); -+ } -+unlock: -+ mutex_unlock(&mem_reserve_mutex); -+ -+ /* -+ * We cannot fail to shrink the reserves, can we? 
-+ */ -+ WARN_ON(ret); -+} -+EXPORT_SYMBOL_GPL(mem_reserve_disconnect); -+ -+#ifdef CONFIG_PROC_FS -+ -+/* -+ * Simple output of the reserve tree in: /proc/reserve_info -+ * Example: -+ * -+ * localhost ~ # cat /proc/reserve_info -+ * 1:0 "total reserve" 6232K 0/278581 -+ * 2:1 "total network reserve" 6232K 0/278581 -+ * 3:2 "network TX reserve" 212K 0/53 -+ * 4:3 "protocol TX pages" 212K 0/53 -+ * 5:2 "network RX reserve" 6020K 0/278528 -+ * 6:5 "IPv4 route cache" 5508K 0/16384 -+ * 7:5 "SKB data reserve" 512K 0/262144 -+ * 8:7 "IPv4 fragment cache" 512K 0/262144 -+ */ -+ -+static void mem_reserve_show_item(struct seq_file *m, struct mem_reserve *res, -+ unsigned int parent, unsigned int *id) -+{ -+ struct mem_reserve *child; -+ unsigned int my_id = ++*id; -+ -+ seq_printf(m, "%d:%d \"%s\" %ldK %ld/%ld\n", -+ my_id, parent, res->name, -+ res->pages << (PAGE_SHIFT - 10), -+ res->usage, res->limit); -+ -+ list_for_each_entry(child, &res->children, siblings) -+ mem_reserve_show_item(m, child, my_id, id); -+} -+ -+static int mem_reserve_show(struct seq_file *m, void *v) -+{ -+ unsigned int ident = 0; -+ -+ mutex_lock(&mem_reserve_mutex); -+ mem_reserve_show_item(m, &mem_reserve_root, ident, &ident); -+ mutex_unlock(&mem_reserve_mutex); -+ -+ return 0; -+} -+ -+static int mem_reserve_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, mem_reserve_show, NULL); -+} -+ -+static const struct file_operations mem_reserve_opterations = { -+ .open = mem_reserve_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static __init int mem_reserve_proc_init(void) -+{ -+ proc_create("reserve_info", S_IRUSR, NULL, &mem_reserve_opterations); -+ return 0; -+} -+ -+module_init(mem_reserve_proc_init); -+ -+#endif -+ -+/* -+ * alloc_page helpers -+ */ -+ -+/** -+ * mem_reserve_pages_set() - set reserves size in pages -+ * @res - reserve to set -+ * @pages - size in pages to set it to -+ * -+ * Returns -ENOMEM when it fails to 
set the reserve. On failure the old size -+ * is preserved. -+ */ -+int mem_reserve_pages_set(struct mem_reserve *res, long pages) -+{ -+ int ret; -+ -+ mutex_lock(&mem_reserve_mutex); -+ pages -= res->pages; -+ ret = __mem_reserve_add(res, pages, pages * PAGE_SIZE); -+ mutex_unlock(&mem_reserve_mutex); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mem_reserve_pages_set); -+ -+/** -+ * mem_reserve_pages_add() - change the size in a relative way -+ * @res - reserve to change -+ * @pages - number of pages to add (or subtract when negative) -+ * -+ * Similar to mem_reserve_pages_set, except that the argument is relative -+ * instead of absolute. -+ * -+ * Returns -ENOMEM when it fails to increase. -+ */ -+int mem_reserve_pages_add(struct mem_reserve *res, long pages) -+{ -+ int ret; -+ -+ mutex_lock(&mem_reserve_mutex); -+ ret = __mem_reserve_add(res, pages, pages * PAGE_SIZE); -+ mutex_unlock(&mem_reserve_mutex); -+ -+ return ret; -+} -+ -+/** -+ * mem_reserve_pages_charge() - charge page usage to a reserve -+ * @res - reserve to charge -+ * @pages - size to charge -+ * -+ * Returns non-zero on success. -+ */ -+int mem_reserve_pages_charge(struct mem_reserve *res, long pages) -+{ -+ return __mem_reserve_charge(res, pages * PAGE_SIZE); -+} -+EXPORT_SYMBOL_GPL(mem_reserve_pages_charge); -+ -+/* -+ * kmalloc helpers -+ */ -+ -+/** -+ * mem_reserve_kmalloc_set() - set this reserve to bytes worth of kmalloc -+ * @res - reserve to change -+ * @bytes - size in bytes to reserve -+ * -+ * Returns -ENOMEM on failure. 
-+ */ -+int mem_reserve_kmalloc_set(struct mem_reserve *res, long bytes) -+{ -+ int ret; -+ long pages; -+ -+ mutex_lock(&mem_reserve_mutex); -+ pages = kmalloc_estimate_bytes(GFP_ATOMIC, bytes); -+ pages -= res->pages; -+ bytes -= res->limit; -+ ret = __mem_reserve_add(res, pages, bytes); -+ mutex_unlock(&mem_reserve_mutex); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mem_reserve_kmalloc_set); -+ -+/** -+ * mem_reserve_kmalloc_charge() - charge bytes to a reserve -+ * @res - reserve to charge -+ * @bytes - bytes to charge -+ * -+ * Returns non-zero on success. -+ */ -+int mem_reserve_kmalloc_charge(struct mem_reserve *res, long bytes) -+{ -+ if (bytes < 0) -+ bytes = -roundup_pow_of_two(-bytes); -+ else -+ bytes = roundup_pow_of_two(bytes); -+ -+ return __mem_reserve_charge(res, bytes); -+} -+EXPORT_SYMBOL_GPL(mem_reserve_kmalloc_charge); -+ -+/* -+ * kmem_cache helpers -+ */ -+ -+/** -+ * mem_reserve_kmem_cache_set() - set reserve to @objects worth of kmem_cache_alloc of @s -+ * @res - reserve to set -+ * @s - kmem_cache to reserve from -+ * @objects - number of objects to reserve -+ * -+ * Returns -ENOMEM on failure. -+ */ -+int mem_reserve_kmem_cache_set(struct mem_reserve *res, struct kmem_cache *s, -+ int objects) -+{ -+ int ret; -+ long pages, bytes; -+ -+ mutex_lock(&mem_reserve_mutex); -+ pages = kmem_alloc_estimate(s, GFP_ATOMIC, objects); -+ pages -= res->pages; -+ bytes = objects * kmem_cache_size(s) - res->limit; -+ ret = __mem_reserve_add(res, pages, bytes); -+ mutex_unlock(&mem_reserve_mutex); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mem_reserve_kmem_cache_set); -+ -+/** -+ * mem_reserve_kmem_cache_charge() - charge (or uncharge) usage of objs -+ * @res - reserve to charge -+ * @objs - objects to charge for -+ * -+ * Returns non-zero on success. 
-+ */ -+int mem_reserve_kmem_cache_charge(struct mem_reserve *res, struct kmem_cache *s, -+ long objs) -+{ -+ return __mem_reserve_charge(res, objs * kmem_cache_size(s)); -+} -+EXPORT_SYMBOL_GPL(mem_reserve_kmem_cache_charge); -+ -+/* -+ * Alloc wrappers. -+ * -+ * Actual usage is commented in linux/reserve.h where the interface functions -+ * live. Furthermore, the code is 3 instances of the same paradigm, hence only -+ * the first contains extensive comments. -+ */ -+ -+/* -+ * kmalloc/kfree -+ */ -+ -+void *___kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip, -+ struct mem_reserve *res, int *emerg) -+{ -+ void *obj; -+ gfp_t gfp; -+ -+ /* -+ * Try a regular allocation, when that fails and we're not entitled -+ * to the reserves, fail. -+ */ -+ gfp = flags | __GFP_NOMEMALLOC | __GFP_NOWARN; -+ obj = kmalloc_node_track_caller(size, gfp, node); -+ -+ if (obj || !(gfp_to_alloc_flags(flags) & ALLOC_NO_WATERMARKS)) -+ goto out; -+ -+ /* -+ * If we were given a reserve to charge against, try that. -+ */ -+ if (res && !mem_reserve_kmalloc_charge(res, size)) { -+ /* -+ * If we failed to charge and we're not allowed to wait for -+ * it to succeed, bail. -+ */ -+ if (!(flags & __GFP_WAIT)) -+ goto out; -+ -+ /* -+ * Wait for a successfull charge against the reserve. All -+ * uncharge operations against this reserve will wake us up. -+ */ -+ wait_event(res->waitqueue, -+ mem_reserve_kmalloc_charge(res, size)); -+ -+ /* -+ * After waiting for it, again try a regular allocation. -+ * Pressure could have lifted during our sleep. If this -+ * succeeds, uncharge the reserve. -+ */ -+ obj = kmalloc_node_track_caller(size, gfp, node); -+ if (obj) { -+ mem_reserve_kmalloc_charge(res, -size); -+ goto out; -+ } -+ } -+ -+ /* -+ * Regular allocation failed, and we've successfully charged our -+ * requested usage against the reserve. Do the emergency allocation. 
-+ */ -+ obj = kmalloc_node_track_caller(size, flags, node); -+ WARN_ON(!obj); -+ if (emerg) -+ *emerg = 1; -+ -+out: -+ return obj; -+} -+ -+void __kfree_reserve(void *obj, struct mem_reserve *res, int emerg) -+{ -+ /* -+ * ksize gives the full allocated size vs the requested size we used to -+ * charge; however since we round up to the nearest power of two, this -+ * should all work nicely. -+ */ -+ size_t size = ksize(obj); -+ -+ kfree(obj); -+ /* -+ * Free before uncharge, this ensures memory is actually present when -+ * a subsequent charge succeeds. -+ */ -+ mem_reserve_kmalloc_charge(res, -size); -+} -+ -+/* -+ * kmem_cache_alloc/kmem_cache_free -+ */ -+ -+void *__kmem_cache_alloc_reserve(struct kmem_cache *s, gfp_t flags, int node, -+ struct mem_reserve *res, int *emerg) -+{ -+ void *obj; -+ gfp_t gfp; -+ -+ gfp = flags | __GFP_NOMEMALLOC | __GFP_NOWARN; -+ obj = kmem_cache_alloc_node(s, gfp, node); -+ -+ if (obj || !(gfp_to_alloc_flags(flags) & ALLOC_NO_WATERMARKS)) -+ goto out; -+ -+ if (res && !mem_reserve_kmem_cache_charge(res, s, 1)) { -+ if (!(flags & __GFP_WAIT)) -+ goto out; -+ -+ wait_event(res->waitqueue, -+ mem_reserve_kmem_cache_charge(res, s, 1)); -+ -+ obj = kmem_cache_alloc_node(s, gfp, node); -+ if (obj) { -+ mem_reserve_kmem_cache_charge(res, s, -1); -+ goto out; -+ } -+ } -+ -+ obj = kmem_cache_alloc_node(s, flags, node); -+ WARN_ON(!obj); -+ if (emerg) -+ *emerg = 1; -+ -+out: -+ return obj; -+} -+ -+void __kmem_cache_free_reserve(struct kmem_cache *s, void *obj, -+ struct mem_reserve *res, int emerg) -+{ -+ kmem_cache_free(s, obj); -+ mem_reserve_kmem_cache_charge(res, s, -1); -+} -+ -+/* -+ * alloc_pages/free_pages -+ */ -+ -+struct page *__alloc_pages_reserve(int node, gfp_t flags, int order, -+ struct mem_reserve *res, int *emerg) -+{ -+ struct page *page; -+ gfp_t gfp; -+ long pages = 1 << order; -+ -+ gfp = flags | __GFP_NOMEMALLOC | __GFP_NOWARN; -+ page = alloc_pages_node(node, gfp, order); -+ -+ if (page || 
!(gfp_to_alloc_flags(flags) & ALLOC_NO_WATERMARKS)) -+ goto out; -+ -+ if (res && !mem_reserve_pages_charge(res, pages)) { -+ if (!(flags & __GFP_WAIT)) -+ goto out; -+ -+ wait_event(res->waitqueue, -+ mem_reserve_pages_charge(res, pages)); -+ -+ page = alloc_pages_node(node, gfp, order); -+ if (page) { -+ mem_reserve_pages_charge(res, -pages); -+ goto out; -+ } -+ } -+ -+ page = alloc_pages_node(node, flags, order); -+ WARN_ON(!page); -+ if (emerg) -+ *emerg = 1; -+ -+out: -+ return page; -+} -+EXPORT_SYMBOL_GPL(__alloc_pages_reserve); -+ -+void __free_pages_reserve(struct page *page, int order, -+ struct mem_reserve *res, int emerg) -+{ -+ __free_pages(page, order); -+ mem_reserve_pages_charge(res, -(1 << order)); -+} -+EXPORT_SYMBOL_GPL(__free_pages_reserve); diff --git a/patches.suse/SoN-12-mm-selinux-emergency.patch b/patches.suse/SoN-12-mm-selinux-emergency.patch deleted file mode 100644 index 5c20707..0000000 --- a/patches.suse/SoN-12-mm-selinux-emergency.patch +++ /dev/null @@ -1,24 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 12/31] selinux: tag avc cache alloc as non-critical -Patch-mainline: not yet - -Failing to allocate a cache entry will only harm performance not correctness. -Do not consume valuable reserve pages for something like that. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - security/selinux/avc.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/security/selinux/avc.c -+++ b/security/selinux/avc.c -@@ -284,7 +284,7 @@ static struct avc_node *avc_alloc_node(v - { - struct avc_node *node; - -- node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC); -+ node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC); - if (!node) - goto out; - diff --git a/patches.suse/SoN-13-net-ps_rx.patch b/patches.suse/SoN-13-net-ps_rx.patch deleted file mode 100644 index 086f061..0000000 --- a/patches.suse/SoN-13-net-ps_rx.patch +++ /dev/null @@ -1,184 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 13/31] net: packet split receive api -Patch-mainline: Not yet - -Add some packet-split receive hooks. - -For one this allows to do NUMA node affine page allocs. Later on these hooks -will be extended to do emergency reserve allocations for fragments. - -Thanks to Jiri Bohac for fixing a bug in bnx2. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Jiri Bohac -Signed-off-by: Suresh Jayaraman ---- - drivers/net/bnx2.c | 9 +++------ - drivers/net/e1000e/netdev.c | 7 ++----- - drivers/net/igb/igb_main.c | 6 +----- - drivers/net/ixgbe/ixgbe_main.c | 14 ++++++-------- - drivers/net/sky2.c | 16 ++++++---------- - include/linux/skbuff.h | 3 +++ - 6 files changed, 21 insertions(+), 34 deletions(-) - ---- a/drivers/net/bnx2.c -+++ b/drivers/net/bnx2.c -@@ -2676,7 +2676,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, stru - struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; - struct rx_bd *rxbd = - &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)]; -- struct page *page = alloc_page(gfp); -+ struct page *page = __netdev_alloc_page(bp->dev, gfp); - - if (!page) - return -ENOMEM; -@@ -2706,7 +2706,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struc - dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), - PAGE_SIZE, PCI_DMA_FROMDEVICE); - -- __free_page(page); -+ netdev_free_page(bp->dev, page); - rx_pg->page = NULL; - } - -@@ -3041,7 +3041,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2 - if (i == pages - 1) - frag_len -= 4; - -- skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len); -+ skb_add_rx_frag(skb, i, rx_pg->page, 0, frag_len); - rx_pg->page = NULL; - - err = bnx2_alloc_rx_page(bp, rxr, -@@ -3059,9 +3059,6 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2 - PAGE_SIZE, PCI_DMA_FROMDEVICE); - - frag_size -= frag_len; -- skb->data_len += frag_len; -- skb->truesize += frag_len; -- skb->len += frag_len; - - pg_prod = NEXT_RX_BD(pg_prod); - pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons)); ---- a/drivers/net/e1000e/netdev.c -+++ b/drivers/net/e1000e/netdev.c -@@ -604,7 +604,7 @@ static void e1000_alloc_rx_buffers_ps(st - continue; - } - if (!ps_page->page) { -- ps_page->page = alloc_page(GFP_ATOMIC); -+ ps_page->page = netdev_alloc_page(netdev); - if (!ps_page->page) { - adapter->alloc_rx_buff_failed++; - goto no_buffers; -@@ -1188,11 +1188,8 @@ static bool e1000_clean_rx_irq_ps(struct 
- dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, - DMA_FROM_DEVICE); - ps_page->dma = 0; -- skb_fill_page_desc(skb, j, ps_page->page, 0, length); -+ skb_add_rx_frag(skb, j, ps_page->page, 0, length); - ps_page->page = NULL; -- skb->len += length; -- skb->data_len += length; -- skb->truesize += length; - } - - /* strip the ethernet crc, problem is we're using pages now so ---- a/drivers/net/igb/igb_main.c -+++ b/drivers/net/igb/igb_main.c -@@ -5594,7 +5594,7 @@ static bool igb_clean_rx_irq_adv(struct - PAGE_SIZE / 2, DMA_FROM_DEVICE); - buffer_info->page_dma = 0; - -- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, -+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buffer_info->page, - buffer_info->page_offset, - length); -@@ -5604,10 +5604,6 @@ static bool igb_clean_rx_irq_adv(struct - buffer_info->page = NULL; - else - get_page(buffer_info->page); -- -- skb->len += length; -- skb->data_len += length; -- skb->truesize += length; - } - - if (!(staterr & E1000_RXD_STAT_EOP)) { ---- a/drivers/net/ixgbe/ixgbe_main.c -+++ b/drivers/net/ixgbe/ixgbe_main.c -@@ -1243,10 +1244,10 @@ static bool ixgbe_clean_rx_irq(struct ix - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - rx_buffer_info->page_dma = 0; -- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, -- rx_buffer_info->page, -- rx_buffer_info->page_offset, -- upper_len); -+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, -+ rx_buffer_info->page, -+ rx_buffer_info->page_offset, -+ upper_len); - - if ((page_count(rx_buffer_info->page) == 1) && - (page_to_nid(rx_buffer_info->page) == current_node)) -@@ -1254,9 +1255,6 @@ static bool ixgbe_clean_rx_irq(struct ix - else - rx_buffer_info->page = NULL; - -- skb->len += upper_len; -- skb->data_len += upper_len; -- skb->truesize += upper_len; - } - - i++; ---- a/drivers/net/sky2.c -+++ b/drivers/net/sky2.c -@@ -1394,7 +1394,7 @@ static struct sk_buff *sky2_rx_alloc(str - skb_reserve(skb, NET_IP_ALIGN); - - for (i = 0; i < sky2->rx_nfrags; i++) { -- struct page *page = 
alloc_page(GFP_ATOMIC); -+ struct page *page = netdev_alloc_page(sky2->netdev); - - if (!page) - goto free_partial; -@@ -2353,8 +2353,8 @@ static struct sk_buff *receive_copy(stru - } - - /* Adjust length of skb with fragments to match received data */ --static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, -- unsigned int length) -+static void skb_put_frags(struct sky2_port *sky2, struct sk_buff *skb, -+ unsigned int hdr_space, unsigned int length) - { - int i, num_frags; - unsigned int size; -@@ -2371,15 +2371,11 @@ static void skb_put_frags(struct sk_buff - - if (length == 0) { - /* don't need this page */ -- __free_page(frag->page); -+ netdev_free_page(sky2->netdev, frag->page); - --skb_shinfo(skb)->nr_frags; - } else { - size = min(length, (unsigned) PAGE_SIZE); -- -- frag->size = size; -- skb->data_len += size; -- skb->truesize += size; -- skb->len += size; -+ skb_add_rx_frag(skb, i, frag->page, 0, size); - length -= size; - } - } -@@ -2407,7 +2403,7 @@ static struct sk_buff *receive_new(struc - *re = nre; - - if (skb_shinfo(skb)->nr_frags) -- skb_put_frags(skb, hdr_space, length); -+ skb_put_frags(sky2, skb, hdr_space, length); - else - skb_put(skb, length); - return skb; diff --git a/patches.suse/SoN-14-net-sk_allocation.patch b/patches.suse/SoN-14-net-sk_allocation.patch deleted file mode 100644 index a4de1d7..0000000 --- a/patches.suse/SoN-14-net-sk_allocation.patch +++ /dev/null @@ -1,156 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 14/31] net: sk_allocation() - concentrate socket related allocations -Patch-mainline: Not yet - -Introduce sk_allocation(), this function allows to inject sock specific -flags to each sock related allocation. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/net/sock.h | 5 +++++ - net/ipv4/tcp.c | 3 ++- - net/ipv4/tcp_output.c | 11 ++++++----- - net/ipv6/tcp_ipv6.c | 15 +++++++++++---- - 4 files changed, 24 insertions(+), 10 deletions(-) - ---- a/include/net/sock.h -+++ b/include/net/sock.h -@@ -565,6 +565,11 @@ static inline int sock_flag(struct sock - return test_bit(flag, &sk->sk_flags); - } - -+static inline gfp_t sk_allocation(struct sock *sk, gfp_t gfp_mask) -+{ -+ return gfp_mask; -+} -+ - static inline void sk_acceptq_removed(struct sock *sk) - { - sk->sk_ack_backlog--; ---- a/net/ipv4/tcp.c -+++ b/net/ipv4/tcp.c -@@ -685,7 +685,8 @@ struct sk_buff *sk_stream_alloc_skb(stru - /* The TCP header must be at least 32-bit aligned. */ - size = ALIGN(size, 4); - -- skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); -+ skb = alloc_skb_fclone(size + sk->sk_prot->max_header, -+ sk_allocation(sk, gfp)); - if (skb) { - if (sk_wmem_schedule(sk, skb->truesize)) { - /* ---- a/net/ipv4/tcp_output.c -+++ b/net/ipv4/tcp_output.c -@@ -2313,7 +2313,7 @@ void tcp_send_fin(struct sock *sk) - /* Socket is locked, keep trying until memory is available. */ - for (;;) { - skb = alloc_skb_fclone(MAX_TCP_HEADER, -- sk->sk_allocation); -+ sk_allocation(sk, GFP_KERNEL)); - if (skb) - break; - yield(); -@@ -2339,7 +2339,7 @@ void tcp_send_active_reset(struct sock * - struct sk_buff *skb; - - /* NOTE: No TCP options attached and we never retransmit this. 
*/ -- skb = alloc_skb(MAX_TCP_HEADER, priority); -+ skb = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, priority)); - if (!skb) { - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); - return; -@@ -2412,7 +2412,8 @@ struct sk_buff *tcp_make_synack(struct s - - if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired) - s_data_desired = cvp->s_data_desired; -- skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC); -+ skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, -+ sk_allocation(sk, GFP_ATOMIC)); - if (skb == NULL) - return NULL; - -@@ -2694,7 +2695,7 @@ void tcp_send_ack(struct sock *sk) - * tcp_transmit_skb() will set the ownership to this - * sock. - */ -- buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); -+ buff = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, GFP_ATOMIC)); - if (buff == NULL) { - inet_csk_schedule_ack(sk); - inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; -@@ -2729,7 +2730,7 @@ static int tcp_xmit_probe_skb(struct soc - struct sk_buff *skb; - - /* We don't queue it, tcp_transmit_skb() sets ownership. */ -- skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); -+ skb = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, GFP_ATOMIC)); - if (skb == NULL) - return -1; - ---- a/net/ipv6/tcp_ipv6.c -+++ b/net/ipv6/tcp_ipv6.c -@@ -589,7 +589,8 @@ static int tcp_v6_md5_do_add(struct sock - } else { - /* reallocate new list if current one is full. 
*/ - if (!tp->md5sig_info) { -- tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC); -+ tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), -+ sk_allocation(sk, GFP_ATOMIC)); - if (!tp->md5sig_info) { - kfree(newkey); - return -ENOMEM; -@@ -602,7 +603,8 @@ static int tcp_v6_md5_do_add(struct sock - } - if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) { - keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) * -- (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); -+ (tp->md5sig_info->entries6 + 1)), -+ sk_allocation(sk, GFP_ATOMIC)); - - if (!keys) { - tcp_free_md5sig_pool(); -@@ -726,7 +728,8 @@ static int tcp_v6_parse_md5_keys (struct - struct tcp_sock *tp = tcp_sk(sk); - struct tcp_md5sig_info *p; - -- p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL); -+ p = kzalloc(sizeof(struct tcp_md5sig_info), -+ sk_allocation(sk, GFP_KERNEL)); - if (!p) - return -ENOMEM; - -@@ -997,6 +1000,7 @@ static void tcp_v6_send_response(struct - unsigned int tot_len = sizeof(struct tcphdr); - struct dst_entry *dst; - __be32 *topt; -+ gfp_t gfp_mask = GFP_ATOMIC; - - if (ts) - tot_len += TCPOLEN_TSTAMP_ALIGNED; -@@ -1006,7 +1010,7 @@ static void tcp_v6_send_response(struct - #endif - - buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, -- GFP_ATOMIC); -+ gfp_mask); - if (buff == NULL) - return; - -@@ -1083,6 +1087,7 @@ static void tcp_v6_send_reset(struct soc - struct tcphdr *th = tcp_hdr(skb); - u32 seq = 0, ack_seq = 0; - struct tcp_md5sig_key *key = NULL; -+ gfp_t gfp_mask = GFP_ATOMIC; - - if (th->rst) - return; -@@ -1094,6 +1099,8 @@ static void tcp_v6_send_reset(struct soc - if (sk) - key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr); - #endif -+ if (sk) -+ gfp_mask = sk_allocation(sk, gfp_mask); - - if (th->ack) - seq = ntohl(th->ack_seq); diff --git a/patches.suse/SoN-15-netvm-reserve.patch b/patches.suse/SoN-15-netvm-reserve.patch deleted file mode 100644 index 0ab6f12..0000000 --- a/patches.suse/SoN-15-netvm-reserve.patch +++ /dev/null 
@@ -1,254 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 15/31] netvm: network reserve infrastructure -Patch-mainline: not yet - -Provide the basic infrastructure to reserve and charge/account network memory. - -We provide the following reserve tree: - -1) total network reserve -2) network TX reserve -3) protocol TX pages -4) network RX reserve -5) SKB data reserve - -[1] is used to make all the network reserves a single subtree, for easy -manipulation. - -[2] and [4] are merely for eastetic reasons. - -The TX pages reserve [3] is assumed bounded by it being the upper bound of -memory that can be used for sending pages (not quite true, but good enough) - -The SKB reserve [5] is an aggregate reserve, which is used to charge SKB data -against in the fallback path. - -The consumers for these reserves are sockets marked with: - SOCK_MEMALLOC - -Such sockets are to be used to service the VM (iow. to swap over). They -must be handled kernel side, exposing such a socket to user-space is a BUG. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/net/sock.h | 43 ++++++++++++++++++++- - net/Kconfig | 2 - net/core/sock.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 151 insertions(+), 1 deletion(-) - ---- a/include/net/sock.h -+++ b/include/net/sock.h -@@ -52,6 +52,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -534,6 +535,7 @@ enum sock_flags { - SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */ - SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ - SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ -+ SOCK_MEMALLOC, /* the VM depends on us - make sure we're serviced */ - SOCK_TIMESTAMPING_TX_HARDWARE, /* %SOF_TIMESTAMPING_TX_HARDWARE */ - SOCK_TIMESTAMPING_TX_SOFTWARE, /* %SOF_TIMESTAMPING_TX_SOFTWARE */ - SOCK_TIMESTAMPING_RX_HARDWARE, /* %SOF_TIMESTAMPING_RX_HARDWARE */ -@@ -565,9 +567,48 @@ static inline int sock_flag(struct sock - return test_bit(flag, &sk->sk_flags); - } - -+static inline int sk_has_memalloc(struct sock *sk) -+{ -+ return sock_flag(sk, SOCK_MEMALLOC); -+} -+ -+extern struct mem_reserve net_rx_reserve; -+extern struct mem_reserve net_skb_reserve; -+ -+#ifdef CONFIG_NETVM -+/* -+ * Guestimate the per request queue TX upper bound. -+ * -+ * Max packet size is 64k, and we need to reserve that much since the data -+ * might need to bounce it. Double it to be on the safe side. 
-+ */ -+#define TX_RESERVE_PAGES DIV_ROUND_UP(2*65536, PAGE_SIZE) -+ -+extern int memalloc_socks; -+ -+static inline int sk_memalloc_socks(void) -+{ -+ return memalloc_socks; -+} -+ -+extern int sk_adjust_memalloc(int socks, long tx_reserve_pages); -+extern int sk_set_memalloc(struct sock *sk); -+extern int sk_clear_memalloc(struct sock *sk); -+#else -+static inline int sk_memalloc_socks(void) -+{ -+ return 0; -+} -+ -+static inline int sk_clear_memalloc(struct sock *sk) -+{ -+ return 0; -+} -+#endif -+ - static inline gfp_t sk_allocation(struct sock *sk, gfp_t gfp_mask) - { -- return gfp_mask; -+ return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC); - } - - static inline void sk_acceptq_removed(struct sock *sk) ---- a/net/Kconfig -+++ b/net/Kconfig -@@ -294,5 +294,7 @@ source "net/rfkill/Kconfig" - source "net/caif/Kconfig" - source "net/ceph/Kconfig" - -+config NETVM -+ bool - - endif # if NET ---- a/net/core/sock.c -+++ b/net/core/sock.c -@@ -110,6 +110,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -224,6 +225,105 @@ int net_cls_subsys_id = -1; - EXPORT_SYMBOL_GPL(net_cls_subsys_id); - #endif - -+static struct mem_reserve net_reserve; -+struct mem_reserve net_rx_reserve; -+EXPORT_SYMBOL_GPL(net_rx_reserve); /* modular ipv6 only */ -+struct mem_reserve net_skb_reserve; -+EXPORT_SYMBOL_GPL(net_skb_reserve); /* modular ipv6 only */ -+static struct mem_reserve net_tx_reserve; -+static struct mem_reserve net_tx_pages; -+ -+#ifdef CONFIG_NETVM -+static DEFINE_MUTEX(memalloc_socks_lock); -+int memalloc_socks; -+ -+/** -+ * sk_adjust_memalloc - adjust the global memalloc reserve for critical RX -+ * @socks: number of new %SOCK_MEMALLOC sockets -+ * @tx_resserve_pages: number of pages to (un)reserve for TX -+ * -+ * This function adjusts the memalloc reserve based on system demand. -+ * The RX reserve is a limit, and only added once, not for each socket. 
-+ * -+ * NOTE: -+ * @tx_reserve_pages is an upper-bound of memory used for TX hence -+ * we need not account the pages like we do for RX pages. -+ */ -+int sk_adjust_memalloc(int socks, long tx_reserve_pages) -+{ -+ int err; -+ -+ mutex_lock(&memalloc_socks_lock); -+ err = mem_reserve_pages_add(&net_tx_pages, tx_reserve_pages); -+ if (err) -+ goto unlock; -+ -+ /* -+ * either socks is positive and we need to check for 0 -> !0 -+ * transition and connect the reserve tree when we observe it. -+ */ -+ if (!memalloc_socks && socks > 0) { -+ err = mem_reserve_connect(&net_reserve, &mem_reserve_root); -+ if (err) { -+ /* -+ * if we failed to connect the tree, undo the tx -+ * reserve so that failure has no side effects. -+ */ -+ mem_reserve_pages_add(&net_tx_pages, -tx_reserve_pages); -+ goto unlock; -+ } -+ } -+ memalloc_socks += socks; -+ /* -+ * or socks is negative and we must observe the !0 -> 0 transition -+ * and disconnect the reserve tree. -+ */ -+ if (!memalloc_socks && socks) -+ mem_reserve_disconnect(&net_reserve); -+ -+unlock: -+ mutex_unlock(&memalloc_socks_lock); -+ -+ return err; -+} -+EXPORT_SYMBOL_GPL(sk_adjust_memalloc); -+ -+/** -+ * sk_set_memalloc - sets %SOCK_MEMALLOC -+ * @sk: socket to set it on -+ * -+ * Set %SOCK_MEMALLOC on a socket and increase the memalloc reserve -+ * accordingly. 
-+ */ -+int sk_set_memalloc(struct sock *sk) -+{ -+ int set = sock_flag(sk, SOCK_MEMALLOC); -+ -+ if (!set) { -+ int err = sk_adjust_memalloc(1, 0); -+ if (err) -+ return err; -+ -+ sock_set_flag(sk, SOCK_MEMALLOC); -+ sk->sk_allocation |= __GFP_MEMALLOC; -+ } -+ return !set; -+} -+EXPORT_SYMBOL_GPL(sk_set_memalloc); -+ -+int sk_clear_memalloc(struct sock *sk) -+{ -+ int set = sock_flag(sk, SOCK_MEMALLOC); -+ if (set) { -+ sk_adjust_memalloc(-1, 0); -+ sock_reset_flag(sk, SOCK_MEMALLOC); -+ sk->sk_allocation &= ~__GFP_MEMALLOC; -+ } -+ return set; -+} -+EXPORT_SYMBOL_GPL(sk_clear_memalloc); -+#endif -+ - static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) - { - struct timeval tv; -@@ -1121,6 +1221,7 @@ static void __sk_free(struct sock *sk) - { - struct sk_filter *filter; - -+ sk_clear_memalloc(sk); - if (sk->sk_destruct) - sk->sk_destruct(sk); - -@@ -1300,6 +1401,12 @@ void __init sk_init(void) - sysctl_wmem_max = 131071; - sysctl_rmem_max = 131071; - } -+ -+ mem_reserve_init(&net_reserve, "total network reserve", NULL); -+ mem_reserve_init(&net_rx_reserve, "network RX reserve", &net_reserve); -+ mem_reserve_init(&net_skb_reserve, "SKB data reserve", &net_rx_reserve); -+ mem_reserve_init(&net_tx_reserve, "network TX reserve", &net_reserve); -+ mem_reserve_init(&net_tx_pages, "protocol TX pages", &net_tx_reserve); - } - - /* diff --git a/patches.suse/SoN-16-netvm-reserve-inet.patch b/patches.suse/SoN-16-netvm-reserve-inet.patch deleted file mode 100644 index aa12583..0000000 --- a/patches.suse/SoN-16-netvm-reserve-inet.patch +++ /dev/null @@ -1,504 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 16/31] netvm: INET reserves. -Patch-mainline: Not yet - -Add reserves for INET. - -The two big users seem to be the route cache and ip-fragment cache. - -Reserve the route cache under generic RX reserve, its usage is bounded by -the high reclaim watermark, and thus does not need further accounting. 
- -Reserve the ip-fragement caches under SKB data reserve, these add to the -SKB RX limit. By ensuring we can at least receive as much data as fits in -the reassmbly line we avoid fragment attack deadlocks. - -Adds to the reserve tree: - - total network reserve - network TX reserve - protocol TX pages - network RX reserve -+ IPv6 route cache -+ IPv4 route cache - SKB data reserve -+ IPv6 fragment cache -+ IPv4 fragment cache - -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/net/inet_frag.h | 7 +++++ - include/net/netns/ipv6.h | 4 +++ - net/ipv4/inet_fragment.c | 3 ++ - net/ipv4/ip_fragment.c | 56 +++++++++++++++++++++++++++++++++++++++++++-- - net/ipv4/route.c | 58 +++++++++++++++++++++++++++++++++++++++++++++-- - net/ipv6/reassembly.c | 55 ++++++++++++++++++++++++++++++++++++++++++-- - net/ipv6/route.c | 47 ++++++++++++++++++++++++++++++++++++-- - 7 files changed, 222 insertions(+), 8 deletions(-) - ---- a/include/net/inet_frag.h -+++ b/include/net/inet_frag.h -@@ -1,6 +1,9 @@ - #ifndef __NET_FRAG_H__ - #define __NET_FRAG_H__ - -+#include -+#include -+ - struct netns_frags { - int nqueues; - atomic_t mem; -@@ -10,6 +13,10 @@ struct netns_frags { - int timeout; - int high_thresh; - int low_thresh; -+ -+ /* reserves */ -+ struct mutex lock; -+ struct mem_reserve reserve; - }; - - struct inet_frag_queue { ---- a/include/net/netns/ipv6.h -+++ b/include/net/netns/ipv6.h -@@ -25,6 +25,8 @@ struct netns_sysctl_ipv6 { - int ip6_rt_mtu_expires; - int ip6_rt_min_advmss; - int icmpv6_time; -+ -+ struct mutex ip6_rt_lock; - }; - - struct netns_ipv6 { -@@ -58,6 +60,8 @@ struct netns_ipv6 { - struct sock *ndisc_sk; - struct sock *tcp_sk; - struct sock *igmp_sk; -+ -+ struct mem_reserve ip6_rt_reserve; - #ifdef CONFIG_IPV6_MROUTE - #ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES - struct mr6_table *mrt6; ---- a/net/ipv4/inet_fragment.c -+++ b/net/ipv4/inet_fragment.c -@@ -20,6 +20,7 @@ - #include - #include - #include -+#include - - #include - -@@ -75,6 
+76,8 @@ void inet_frags_init_net(struct netns_fr - nf->nqueues = 0; - atomic_set(&nf->mem, 0); - INIT_LIST_HEAD(&nf->lru_list); -+ mutex_init(&nf->lock); -+ mem_reserve_init(&nf->reserve, "IP fragement cache", NULL); - } - EXPORT_SYMBOL(inet_frags_init_net); - ---- a/net/ipv4/ip_fragment.c -+++ b/net/ipv4/ip_fragment.c -@@ -45,6 +45,8 @@ - #include - #include - #include -+#include -+#include - - /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6 - * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c -@@ -635,6 +637,34 @@ int ip_defrag(struct sk_buff *skb, u32 u - EXPORT_SYMBOL(ip_defrag); - - #ifdef CONFIG_SYSCTL -+static int -+proc_dointvec_fragment(struct ctl_table *table, int write, -+ void __user *buffer, size_t *lenp, loff_t *ppos) -+{ -+ struct net *net = container_of(table->data, struct net, -+ ipv4.frags.high_thresh); -+ ctl_table tmp = *table; -+ int new_bytes, ret; -+ -+ mutex_lock(&net->ipv4.frags.lock); -+ if (write) { -+ tmp.data = &new_bytes; -+ table = &tmp; -+ } -+ -+ ret = proc_dointvec(table, write, buffer, lenp, ppos); -+ -+ if (!ret && write) { -+ ret = mem_reserve_kmalloc_set(&net->ipv4.frags.reserve, -+ new_bytes); -+ if (!ret) -+ net->ipv4.frags.high_thresh = new_bytes; -+ } -+ mutex_unlock(&net->ipv4.frags.lock); -+ -+ return ret; -+} -+ - static int zero; - - static struct ctl_table ip4_frags_ns_ctl_table[] = { -@@ -643,7 +673,7 @@ static struct ctl_table ip4_frags_ns_ctl - .data = &init_net.ipv4.frags.high_thresh, - .maxlen = sizeof(int), - .mode = 0644, -- .proc_handler = proc_dointvec -+ .proc_handler = proc_dointvec_fragment, - }, - { - .procname = "ipfrag_low_thresh", -@@ -741,6 +771,8 @@ static inline void ip4_frags_ctl_registe - - static int __net_init ipv4_frags_init_net(struct net *net) - { -+ int ret; -+ - /* - * Fragment cache limits. We will commit 256K at one time. Should we - * cross that limit we will prune down to 192K. 
This should cope with -@@ -758,11 +790,31 @@ static int __net_init ipv4_frags_init_ne - - inet_frags_init_net(&net->ipv4.frags); - -- return ip4_frags_ns_ctl_register(net); -+ ret = ip4_frags_ns_ctl_register(net); -+ if (ret) -+ goto out_reg; -+ -+ mem_reserve_init(&net->ipv4.frags.reserve, "IPv4 fragment cache", -+ &net_skb_reserve); -+ ret = mem_reserve_kmalloc_set(&net->ipv4.frags.reserve, -+ net->ipv4.frags.high_thresh); -+ if (ret) -+ goto out_reserve; -+ -+ return 0; -+ -+out_reserve: -+ mem_reserve_disconnect(&net->ipv4.frags.reserve); -+ ip4_frags_ns_ctl_unregister(net); -+out_reg: -+ inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); -+ -+ return ret; - } - - static void __net_exit ipv4_frags_exit_net(struct net *net) - { -+ mem_reserve_disconnect(&net->ipv4.frags.reserve); - ip4_frags_ns_ctl_unregister(net); - inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); - } ---- a/net/ipv4/route.c -+++ b/net/ipv4/route.c -@@ -108,6 +108,7 @@ - #ifdef CONFIG_SYSCTL - #include - #endif -+#include - - #define RT_FL_TOS(oldflp) \ - ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK))) -@@ -224,6 +225,7 @@ struct rt_hash_bucket { - # define RT_HASH_LOCK_SZ 256 - # endif - #endif -+#include - - static spinlock_t *rt_hash_locks; - # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)] -@@ -268,6 +270,10 @@ static inline int rt_genid(struct net *n - return atomic_read(&net->ipv4.rt_genid); - } - -+static struct mem_reserve ipv4_route_reserve; -+ -+static struct mem_reserve ipv4_route_reserve; -+ - #ifdef CONFIG_PROC_FS - struct rt_cache_iter_state { - struct seq_net_private p; -@@ -398,6 +404,36 @@ static int rt_cache_seq_show(struct seq_ - return 0; - } - -+static struct mutex ipv4_route_lock; -+ -+static int -+proc_dointvec_route(struct ctl_table *table, int write, void __user *buffer, -+ size_t *lenp, loff_t *ppos) -+{ -+ ctl_table tmp = *table; -+ int new_size, ret; -+ -+ mutex_lock(&ipv4_route_lock); -+ if (write) { -+ tmp.data = &new_size; 
-+ table = &tmp; -+ } -+ -+ ret = proc_dointvec(table, write, buffer, lenp, ppos); -+ -+ if (!ret && write) { -+ ret = mem_reserve_kmem_cache_set(&ipv4_route_reserve, -+ ipv4_dst_ops.kmem_cachep, new_size); -+ if (!ret) -+ ip_rt_max_size = new_size; -+ } -+ mutex_unlock(&ipv4_route_lock); -+ -+ return ret; -+} -+ -+static struct mutex ipv4_route_lock; -+ - static const struct seq_operations rt_cache_seq_ops = { - .start = rt_cache_seq_start, - .next = rt_cache_seq_next, -@@ -3103,7 +3139,7 @@ static ctl_table ipv4_route_table[] = { - .data = &ip_rt_max_size, - .maxlen = sizeof(int), - .mode = 0644, -- .proc_handler = proc_dointvec, -+ .proc_handler = proc_dointvec_route, - }, - { - /* Deprecated. Use gc_min_interval_ms */ -@@ -3140,7 +3176,7 @@ static ctl_table ipv4_route_table[] = { - .data = &ip_rt_redirect_load, - .maxlen = sizeof(int), - .mode = 0644, -- .proc_handler = proc_dointvec, -+ .proc_handler = proc_dointvec_route, - }, - { - .procname = "redirect_number", -@@ -3334,6 +3370,24 @@ int __init ip_rt_init(void) - ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1); - ip_rt_max_size = (rt_hash_mask + 1) * 16; - -+#ifdef CONFIG_PROCFS -+ mutex_init(&ipv4_route_lock); -+#endif -+ -+ mem_reserve_init(&ipv4_route_reserve, "IPv4 route cache", -+ &net_rx_reserve); -+ mem_reserve_kmem_cache_set(&ipv4_route_reserve, -+ ipv4_dst_ops.kmem_cachep, ip_rt_max_size); -+ -+#ifdef CONFIG_PROCFS -+ mutex_init(&ipv4_route_lock); -+#endif -+ -+ mem_reserve_init(&ipv4_route_reserve, "IPv4 route cache", -+ &net_rx_reserve); -+ mem_reserve_kmem_cache_set(&ipv4_route_reserve, -+ ipv4_dst_ops.kmem_cachep, ip_rt_max_size); -+ - devinet_init(); - ip_fib_init(); - ---- a/net/ipv6/reassembly.c -+++ b/net/ipv6/reassembly.c -@@ -42,6 +42,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -639,13 +640,41 @@ static const struct inet6_protocol frag_ - }; - - #ifdef CONFIG_SYSCTL -+static int -+proc_dointvec_fragment(struct ctl_table *table, int write, -+ void __user 
*buffer, size_t *lenp, loff_t *ppos) -+{ -+ struct net *net = container_of(table->data, struct net, -+ ipv6.frags.high_thresh); -+ ctl_table tmp = *table; -+ int new_bytes, ret; -+ -+ mutex_lock(&net->ipv6.frags.lock); -+ if (write) { -+ tmp.data = &new_bytes; -+ table = &tmp; -+ } -+ -+ ret = proc_dointvec(table, write, buffer, lenp, ppos); -+ -+ if (!ret && write) { -+ ret = mem_reserve_kmalloc_set(&net->ipv6.frags.reserve, -+ new_bytes); -+ if (!ret) -+ net->ipv6.frags.high_thresh = new_bytes; -+ } -+ mutex_unlock(&net->ipv6.frags.lock); -+ -+ return ret; -+} -+ - static struct ctl_table ip6_frags_ns_ctl_table[] = { - { - .procname = "ip6frag_high_thresh", - .data = &init_net.ipv6.frags.high_thresh, - .maxlen = sizeof(int), - .mode = 0644, -- .proc_handler = proc_dointvec -+ .proc_handler = proc_dointvec_fragment, - }, - { - .procname = "ip6frag_low_thresh", -@@ -750,17 +779,39 @@ static inline void ip6_frags_sysctl_unre - - static int __net_init ipv6_frags_init_net(struct net *net) - { -+ int ret; -+ - net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH; - net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH; - net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT; - - inet_frags_init_net(&net->ipv6.frags); - -- return ip6_frags_ns_sysctl_register(net); -+ ret = ip6_frags_ns_sysctl_register(net); -+ if (ret) -+ goto out_reg; -+ -+ mem_reserve_init(&net->ipv6.frags.reserve, "IPv6 fragment cache", -+ &net_skb_reserve); -+ ret = mem_reserve_kmalloc_set(&net->ipv6.frags.reserve, -+ net->ipv6.frags.high_thresh); -+ if (ret) -+ goto out_reserve; -+ -+ return 0; -+ -+out_reserve: -+ mem_reserve_disconnect(&net->ipv6.frags.reserve); -+ ip6_frags_ns_sysctl_unregister(net); -+out_reg: -+ inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); -+ -+ return ret; - } - - static void __net_exit ipv6_frags_exit_net(struct net *net) - { -+ mem_reserve_disconnect(&net->ipv6.frags.reserve); - ip6_frags_ns_sysctl_unregister(net); - inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); - } ---- 
a/net/ipv6/route.c -+++ b/net/ipv6/route.c -@@ -37,6 +37,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -2532,6 +2533,34 @@ int ipv6_sysctl_rtcache_flush(ctl_table - return 0; - } - -+static int -+proc_dointvec_route(struct ctl_table *table, int write, -+ void __user *buffer, size_t *lenp, loff_t *ppos) -+{ -+ struct net *net = container_of(table->data, struct net, -+ ipv6.sysctl.ip6_rt_max_size); -+ ctl_table tmp = *table; -+ int new_size, ret; -+ -+ mutex_lock(&net->ipv6.sysctl.ip6_rt_lock); -+ if (write) { -+ tmp.data = &new_size; -+ table = &tmp; -+ } -+ -+ ret = proc_dointvec(table, write, buffer, lenp, ppos); -+ -+ if (!ret && write) { -+ ret = mem_reserve_kmem_cache_set(&net->ipv6.ip6_rt_reserve, -+ net->ipv6.ip6_dst_ops.kmem_cachep, new_size); -+ if (!ret) -+ net->ipv6.sysctl.ip6_rt_max_size = new_size; -+ } -+ mutex_unlock(&net->ipv6.sysctl.ip6_rt_lock); -+ -+ return ret; -+} -+ - ctl_table ipv6_route_table_template[] = { - { - .procname = "flush", -@@ -2552,7 +2581,7 @@ ctl_table ipv6_route_table_template[] = - .data = &init_net.ipv6.sysctl.ip6_rt_max_size, - .maxlen = sizeof(int), - .mode = 0644, -- .proc_handler = proc_dointvec, -+ .proc_handler = proc_dointvec_route, - }, - { - .procname = "gc_min_interval", -@@ -2627,6 +2656,8 @@ struct ctl_table * __net_init ipv6_route - table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; - } - -+ mutex_init(&net->ipv6.sysctl.ip6_rt_lock); -+ - return table; - } - #endif -@@ -2676,6 +2707,14 @@ static int __net_init ip6_route_net_init - net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ; - net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40; - -+ mem_reserve_init(&net->ipv6.ip6_rt_reserve, "IPv6 route cache", -+ &net_rx_reserve); -+ ret = mem_reserve_kmem_cache_set(&net->ipv6.ip6_rt_reserve, -+ net->ipv6.ip6_dst_ops.kmem_cachep, -+ net->ipv6.sysctl.ip6_rt_max_size); -+ if (ret) -+ goto out_reserve_fail; -+ - #ifdef CONFIG_PROC_FS - proc_net_fops_create(net, 
"ipv6_route", 0, &ipv6_route_proc_fops); - proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops); -@@ -2686,12 +2725,15 @@ static int __net_init ip6_route_net_init - out: - return ret; - -+out_reserve_fail: -+ mem_reserve_disconnect(&net->ipv6.ip6_rt_reserve); - #ifdef CONFIG_IPV6_MULTIPLE_TABLES -+ kfree(net->ipv6.ip6_blk_hole_entry); - out_ip6_prohibit_entry: - kfree(net->ipv6.ip6_prohibit_entry); - out_ip6_null_entry: -- kfree(net->ipv6.ip6_null_entry); - #endif -+ kfree(net->ipv6.ip6_null_entry); - out_ip6_dst_entries: - dst_entries_destroy(&net->ipv6.ip6_dst_ops); - out_ip6_dst_ops: -@@ -2702,6 +2744,7 @@ static void __net_exit ip6_route_net_exi - proc_net_remove(net, "ipv6_route"); - proc_net_remove(net, "rt6_stats"); - #endif -+ mem_reserve_disconnect(&net->ipv6.ip6_rt_reserve); - kfree(net->ipv6.ip6_null_entry); - #ifdef CONFIG_IPV6_MULTIPLE_TABLES - kfree(net->ipv6.ip6_prohibit_entry); diff --git a/patches.suse/SoN-16a-netvm-reserve-inet.patch b/patches.suse/SoN-16a-netvm-reserve-inet.patch deleted file mode 100644 index 1f31d06..0000000 --- a/patches.suse/SoN-16a-netvm-reserve-inet.patch +++ /dev/null @@ -1,71 +0,0 @@ -From: Mel Gorman -Subject: [PATCH] netvm: Remove duplicated initialization in net/ipv4/route.c. -Patch-mainline: Not yet -References: bnc#678970 - -Calling mem_reserve_init() twice causes list_add corruption error and -unnecessarily increases reserves. Remove one initialisation. 
- -Signed-off-by: Tetsuo Handa -Signed-off-by: Mel Gorman -Reviewed-by: NeilBrown -Signed-off-by: Suresh Jayaraman ---- - net/ipv4/route.c | 9 --------- - 1 files changed, 0 insertions(+), 9 deletions(-) - -Index: linux-2.6.37-openSUSE-11.4/net/ipv4/route.c -=================================================================== ---- linux-2.6.37-openSUSE-11.4.orig/net/ipv4/route.c -+++ linux-2.6.37-openSUSE-11.4/net/ipv4/route.c -@@ -224,7 +224,6 @@ struct rt_hash_bucket { - # define RT_HASH_LOCK_SZ 256 - # endif - #endif --#include - - static spinlock_t *rt_hash_locks; - # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)] -@@ -271,8 +270,6 @@ static inline int rt_genid(struct net *n - - static struct mem_reserve ipv4_route_reserve; - --static struct mem_reserve ipv4_route_reserve; -- - #ifdef CONFIG_PROC_FS - struct rt_cache_iter_state { - struct seq_net_private p; -@@ -431,8 +428,6 @@ proc_dointvec_route(struct ctl_table *ta - return ret; - } - --static struct mutex ipv4_route_lock; -- - static const struct seq_operations rt_cache_seq_ops = { - .start = rt_cache_seq_start, - .next = rt_cache_seq_next, -@@ -3167,7 +3162,7 @@ static ctl_table ipv4_route_table[] = { - .data = &ip_rt_redirect_load, - .maxlen = sizeof(int), - .mode = 0644, -- .proc_handler = proc_dointvec_route, -+ .proc_handler = proc_dointvec, - }, - { - .procname = "redirect_number", -@@ -3369,15 +3364,6 @@ int __init ip_rt_init(void) - - #ifdef CONFIG_PROCFS - mutex_init(&ipv4_route_lock); --#endif -- -- mem_reserve_init(&ipv4_route_reserve, "IPv4 route cache", -- &net_rx_reserve); -- mem_reserve_kmem_cache_set(&ipv4_route_reserve, -- ipv4_dst_ops.kmem_cachep, ip_rt_max_size); -- --#ifdef CONFIG_PROCFS -- mutex_init(&ipv4_route_lock); - #endif - - mem_reserve_init(&ipv4_route_reserve, "IPv4 route cache", diff --git a/patches.suse/SoN-17-netvm-reserve-inet.patch-fix b/patches.suse/SoN-17-netvm-reserve-inet.patch-fix deleted file mode 100644 index 93240de..0000000 --- 
a/patches.suse/SoN-17-netvm-reserve-inet.patch-fix +++ /dev/null @@ -1,23 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH 17/31] Fix initialization of ipv4_route_lock -Patch-mainline: not yet - - It's CONFIG_PROC_FS, not CONFIG_PROCFS. - -Signed-off-by: Jeff Mahoney -Signed-off-by: Suresh Jayaraman ---- - net/ipv4/route.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/net/ipv4/route.c -+++ b/net/ipv4/route.c -@@ -3370,7 +3370,7 @@ int __init ip_rt_init(void) - ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1); - ip_rt_max_size = (rt_hash_mask + 1) * 16; - --#ifdef CONFIG_PROCFS -+#ifdef CONFIG_PROC_FS - mutex_init(&ipv4_route_lock); - #endif - diff --git a/patches.suse/SoN-18-netvm-skbuff-reserve.patch b/patches.suse/SoN-18-netvm-skbuff-reserve.patch deleted file mode 100644 index db98044..0000000 --- a/patches.suse/SoN-18-netvm-skbuff-reserve.patch +++ /dev/null @@ -1,445 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 18/31] netvm: hook skb allocation to reserves -Patch-mainline: not yet - -Change the skb allocation api to indicate RX usage and use this to fall back to -the reserve when needed. SKBs allocated from the reserve are tagged in -skb->emergency. - -Teach all other skb ops about emergency skbs and the reserve accounting. - -Use the (new) packet split API to allocate and track fragment pages from the -emergency reserve. Do this using an atomic counter in page->index. This is -needed because the fragments have a different sharing semantic than that -indicated by skb_shinfo()->dataref. - -Note that the decision to distinguish between regular and emergency SKBs allows -the accounting overhead to be limited to the later kind. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - - include/linux/mm_types.h | 1 - include/linux/skbuff.h | 35 +++++++++++-- - net/core/skbuff.c | 121 +++++++++++++++++++++++++++++++++++++---------- - 3 files changed, 128 insertions(+), 29 deletions(-) - ---- a/include/linux/mm_types.h -+++ b/include/linux/mm_types.h -@@ -72,6 +72,7 @@ struct page { - pgoff_t index; /* Our offset within mapping. */ - void *freelist; /* SLUB: freelist req. slab lock */ - int reserve; /* page_alloc: page is a reserve page */ -+ atomic_t frag_count; /* skb fragment use count */ - }; - struct list_head lru; /* Pageout list, eg. active_list - * protected by zone->lru_lock ! ---- a/include/linux/skbuff.h -+++ b/include/linux/skbuff.h -@@ -29,6 +29,7 @@ - #include - #include - #include -+#include - - /* Don't change this without changing skb_csum_unnecessary! */ - #define CHECKSUM_NONE 0 -@@ -386,9 +386,12 @@ struct sk_buff { - __u8 deliver_no_wcard:1; - #endif - __u8 ooo_okay:1; -+#ifdef CONFIG_NETVM -+ __u8 emergency:1; -+#endif - kmemcheck_bitfield_end(flags2); - -- /* 0/13 bit hole */ -+ /* 0/12 bit hole */ - - #ifdef CONFIG_NET_DMA - dma_cookie_t dma_cookie; -@@ -423,6 +426,18 @@ struct sk_buff { - - #include - -+#define SKB_ALLOC_FCLONE 0x01 -+#define SKB_ALLOC_RX 0x02 -+ -+static inline bool skb_emergency(const struct sk_buff *skb) -+{ -+#ifdef CONFIG_NETVM -+ return unlikely(skb->emergency); -+#else -+ return false; -+#endif -+} -+ - /* - * skb might have a dst pointer attached, refcounted or not. 
- * _skb_refdst low order bit is set if refcount was _not_ taken -@@ -480,7 +495,7 @@ extern void kfree_skb(struct sk_buff *sk - extern void consume_skb(struct sk_buff *skb); - extern void __kfree_skb(struct sk_buff *skb); - extern struct sk_buff *__alloc_skb(unsigned int size, -- gfp_t priority, int fclone, int node); -+ gfp_t priority, int flags, int node); - static inline struct sk_buff *alloc_skb(unsigned int size, - gfp_t priority) - { -@@ -490,7 +505,7 @@ static inline struct sk_buff *alloc_skb( - static inline struct sk_buff *alloc_skb_fclone(unsigned int size, - gfp_t priority) - { -- return __alloc_skb(size, priority, 1, NUMA_NO_NODE); -+ return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); - } - - extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); -@@ -1511,7 +1526,8 @@ static inline void __skb_queue_purge(str - static inline struct sk_buff *__dev_alloc_skb(unsigned int length, - gfp_t gfp_mask) - { -- struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); -+ struct sk_buff *skb = -+ __alloc_skb(length + NET_SKB_PAD, gfp_mask, SKB_ALLOC_RX, -1); - if (likely(skb)) - skb_reserve(skb, NET_SKB_PAD); - return skb; -@@ -1551,6 +1567,8 @@ static inline struct sk_buff *netdev_all - return skb; - } - -+extern struct mem_reserve net_skb_reserve; -+ - /** - * __netdev_alloc_page - allocate a page for ps-rx on a specific device - * @dev: network device to receive on -@@ -1562,7 +1580,8 @@ static inline struct sk_buff *netdev_all - */ - static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask) - { -- return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0); -+ return alloc_pages_reserve(NUMA_NO_NODE, gfp_mask | __GFP_MEMALLOC, 0, -+ &net_skb_reserve, NULL); - } - - /** -@@ -1578,9 +1597,14 @@ static inline struct page *netdev_alloc_ - return __netdev_alloc_page(dev, GFP_ATOMIC); - } - -+static inline void __netdev_free_page(struct net_device *dev, struct page *page) -+{ -+ free_pages_reserve(page, 0, 
&net_skb_reserve, page->reserve); -+} -+ - static inline void netdev_free_page(struct net_device *dev, struct page *page) - { -- __free_page(page); -+ __netdev_free_page(dev, page); - } - - /** ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -168,14 +168,21 @@ static void skb_under_panic(struct sk_bu - * %GFP_ATOMIC. - */ - struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, -- int fclone, int node) -+ int flags, int node) - { - struct kmem_cache *cache; - struct skb_shared_info *shinfo; - struct sk_buff *skb; - u8 *data; -+ int emergency = 0; -+ int memalloc = sk_memalloc_socks(); - -- cache = fclone ? skbuff_fclone_cache : skbuff_head_cache; -+ size = SKB_DATA_ALIGN(size); -+ cache = (flags & SKB_ALLOC_FCLONE) -+ ? skbuff_fclone_cache : skbuff_head_cache; -+ -+ if (memalloc && (flags & SKB_ALLOC_RX)) -+ gfp_mask |= __GFP_MEMALLOC; - - /* Get the HEAD */ - skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); -@@ -183,9 +190,8 @@ struct sk_buff *__alloc_skb(unsigned int - goto out; - prefetchw(skb); - -- size = SKB_DATA_ALIGN(size); -- data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info), -- gfp_mask, node); -+ data = kmalloc_reserve(size + sizeof(struct skb_shared_info), -+ gfp_mask, node, &net_skb_reserve, &emergency); - if (!data) - goto nodata; - prefetchw(data + size); -@@ -196,6 +202,9 @@ struct sk_buff *__alloc_skb(unsigned int - * the tail pointer in struct sk_buff! 
- */ - memset(skb, 0, offsetof(struct sk_buff, tail)); -+#ifdef CONFIG_NETVM -+ skb->emergency = emergency; -+#endif - skb->truesize = size + sizeof(struct sk_buff); - atomic_set(&skb->users, 1); - skb->head = data; -@@ -211,7 +220,7 @@ struct sk_buff *__alloc_skb(unsigned int - atomic_set(&shinfo->dataref, 1); - kmemcheck_annotate_variable(shinfo->destructor_arg); - -- if (fclone) { -+ if (flags & SKB_ALLOC_FCLONE) { - struct sk_buff *child = skb + 1; - atomic_t *fclone_ref = (atomic_t *) (child + 1); - -@@ -221,6 +230,9 @@ struct sk_buff *__alloc_skb(unsigned int - atomic_set(fclone_ref, 1); - - child->fclone = SKB_FCLONE_UNAVAILABLE; -+#ifdef CONFIG_NETVM -+ child->emergency = skb->emergency; -+#endif - } - out: - return skb; -@@ -249,7 +261,7 @@ struct sk_buff *__netdev_alloc_skb(struc - { - struct sk_buff *skb; - -- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE); -+ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); - if (likely(skb)) { - skb_reserve(skb, NET_SKB_PAD); - skb->dev = dev; -@@ -265,6 +277,27 @@ void skb_add_rx_frag(struct sk_buff *skb - skb->len += size; - skb->data_len += size; - skb->truesize += size; -+ -+#ifdef CONFIG_NETVM -+ /* -+ * In the rare case that skb_emergency() != page->reserved we'll -+ * skew the accounting slightly, but since its only a 'small' constant -+ * shift its ok. -+ */ -+ if (skb_emergency(skb)) { -+ /* -+ * We need to track fragment pages so that we properly -+ * release their reserve in skb_put_page(). -+ */ -+ atomic_set(&page->frag_count, 1); -+ } else if (unlikely(page->reserve)) { -+ /* -+ * Release the reserve now, because normal skbs don't -+ * do the emergency accounting. 
-+ */ -+ mem_reserve_pages_charge(&net_skb_reserve, -1); -+ } -+#endif - } - EXPORT_SYMBOL(skb_add_rx_frag); - -@@ -316,21 +349,38 @@ static void skb_clone_fraglist(struct sk - skb_get(list); - } - -+static void skb_get_page(struct sk_buff *skb, struct page *page) -+{ -+ get_page(page); -+ if (skb_emergency(skb)) -+ atomic_inc(&page->frag_count); -+} -+ -+static void skb_put_page(struct sk_buff *skb, struct page *page) -+{ -+ if (skb_emergency(skb) && atomic_dec_and_test(&page->frag_count)) -+ mem_reserve_pages_charge(&net_skb_reserve, -1); -+ put_page(page); -+} -+ - static void skb_release_data(struct sk_buff *skb) - { - if (!skb->cloned || - !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, - &skb_shinfo(skb)->dataref)) { -+ - if (skb_shinfo(skb)->nr_frags) { - int i; -- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) -- put_page(skb_shinfo(skb)->frags[i].page); -+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { -+ skb_put_page(skb, -+ skb_shinfo(skb)->frags[i].page); -+ } - } - - if (skb_has_frag_list(skb)) - skb_drop_fraglist(skb); - -- kfree(skb->head); -+ kfree_reserve(skb->head, &net_skb_reserve, skb_emergency(skb)); - } - } - -@@ -524,6 +574,9 @@ static void __copy_skb_header(struct sk_ - #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) - new->ipvs_property = old->ipvs_property; - #endif -+#ifdef CONFIG_NETVM -+ new->emergency = old->emergency; -+#endif - new->protocol = old->protocol; - new->mark = old->mark; - new->skb_iif = old->skb_iif; -@@ -618,6 +671,9 @@ struct sk_buff *skb_clone(struct sk_buff - n->fclone = SKB_FCLONE_CLONE; - atomic_inc(fclone_ref); - } else { -+ if (skb_emergency(skb)) -+ gfp_mask |= __GFP_MEMALLOC; -+ - n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); - if (!n) - return NULL; -@@ -654,6 +710,14 @@ static void copy_skb_header(struct sk_bu - skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; - } - -+static inline int skb_alloc_rx_flag(const struct sk_buff *skb) -+{ -+ if (skb_emergency(skb)) -+ 
return SKB_ALLOC_RX; -+ -+ return 0; -+} -+ - /** - * skb_copy - create private copy of an sk_buff - * @skb: buffer to copy -@@ -675,7 +739,8 @@ struct sk_buff *skb_copy(const struct sk - { - int headerlen = skb_headroom(skb); - unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len; -- struct sk_buff *n = alloc_skb(size, gfp_mask); -+ struct sk_buff *n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), -+ NUMA_NO_NODE); - - if (!n) - return NULL; -@@ -709,7 +774,8 @@ EXPORT_SYMBOL(skb_copy); - struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) - { - unsigned int size = skb_end_pointer(skb) - skb->head; -- struct sk_buff *n = alloc_skb(size, gfp_mask); -+ struct sk_buff *n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), -+ NUMA_NO_NODE); - - if (!n) - goto out; -@@ -729,8 +795,9 @@ struct sk_buff *pskb_copy(struct sk_buff - int i; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { -- skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; -- get_page(skb_shinfo(n)->frags[i].page); -+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; -+ skb_shinfo(n)->frags[i] = *frag; -+ skb_get_page(n, frag->page); - } - skb_shinfo(n)->nr_frags = i; - } -@@ -778,7 +845,11 @@ int pskb_expand_head(struct sk_buff *skb - goto adjust_others; - } - -- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); -+ if (skb_emergency(skb)) -+ gfp_mask |= __GFP_MEMALLOC; -+ -+ data = kmalloc_reserve(size + sizeof(struct skb_shared_info), -+ gfp_mask, -1, &net_skb_reserve, NULL); - if (!data) - goto nodata; - -@@ -806,7 +877,7 @@ int pskb_expand_head(struct sk_buff *skb - kfree(skb->head); - } else { - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) -- get_page(skb_shinfo(skb)->frags[i].page); -+ skb_get_page(skb, skb_shinfo(skb)->frags[i].page); - - if (skb_has_frag_list(skb)) - skb_clone_fraglist(skb); -@@ -889,8 +960,8 @@ struct sk_buff *skb_copy_expand(const st - /* - * Allocate the copy buffer - */ -- struct sk_buff *n = alloc_skb(newheadroom + 
skb->len + newtailroom, -- gfp_mask); -+ struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, -+ gfp_mask, skb_alloc_rx_flag(skb), -1); - int oldheadroom = skb_headroom(skb); - int head_copy_len, head_copy_off; - int off; -@@ -1083,7 +1154,7 @@ drop_pages: - skb_shinfo(skb)->nr_frags = i; - - for (; i < nfrags; i++) -- put_page(skb_shinfo(skb)->frags[i].page); -+ skb_put_page(skb, skb_shinfo(skb)->frags[i].page); - - if (skb_has_frag_list(skb)) - skb_drop_fraglist(skb); -@@ -1252,7 +1323,7 @@ pull_pages: - k = 0; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - if (skb_shinfo(skb)->frags[i].size <= eat) { -- put_page(skb_shinfo(skb)->frags[i].page); -+ skb_put_page(skb, skb_shinfo(skb)->frags[i].page); - eat -= skb_shinfo(skb)->frags[i].size; - } else { - skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; -@@ -2034,6 +2105,7 @@ static inline void skb_split_no_header(s - skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; - - if (pos < len) { -+ struct page *page = skb_shinfo(skb)->frags[i].page; - /* Split frag. - * We have two variants in this case: - * 1. Move all the frag to the second -@@ -2042,7 +2114,7 @@ static inline void skb_split_no_header(s - * where splitting is expensive. - * 2. Split is accurately. We make this. 
- */ -- get_page(skb_shinfo(skb)->frags[i].page); -+ skb_get_page(skb1, page); - skb_shinfo(skb1)->frags[0].page_offset += len - pos; - skb_shinfo(skb1)->frags[0].size -= len - pos; - skb_shinfo(skb)->frags[i].size = len - pos; -@@ -2540,8 +2612,9 @@ struct sk_buff *skb_segment(struct sk_bu - skb_release_head_state(nskb); - __skb_push(nskb, doffset); - } else { -- nskb = alloc_skb(hsize + doffset + headroom, -- GFP_ATOMIC); -+ nskb = __alloc_skb(hsize + doffset + headroom, -+ GFP_ATOMIC, skb_alloc_rx_flag(skb), -+ -1); - - if (unlikely(!nskb)) - goto err; -@@ -2587,7 +2660,7 @@ struct sk_buff *skb_segment(struct sk_bu - - while (pos < offset + len && i < nfrags) { - *frag = skb_shinfo(skb)->frags[i]; -- get_page(frag->page); -+ skb_get_page(nskb, frag->page); - size = frag->size; - - if (pos < offset) { diff --git a/patches.suse/SoN-19-netvm-sk_filter.patch b/patches.suse/SoN-19-netvm-sk_filter.patch deleted file mode 100644 index 74adfbe..0000000 --- a/patches.suse/SoN-19-netvm-sk_filter.patch +++ /dev/null @@ -1,28 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 19/31] netvm: filter emergency skbs. -Patch-mainline: not yet - -Toss all emergency packets not for a SOCK_MEMALLOC socket. This ensures our -precious memory reserve doesn't get stuck waiting for user-space. - -The correctness of this approach relies on the fact that networks must be -assumed lossy. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - net/core/filter.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/net/core/filter.c -+++ b/net/core/filter.c -@@ -82,6 +82,9 @@ int sk_filter(struct sock *sk, struct sk - int err; - struct sk_filter *filter; - -+ if (skb_emergency(skb) && !sk_has_memalloc(sk)) -+ return -ENOMEM; -+ - err = security_sock_rcv_skb(sk, skb); - if (err) - return err; diff --git a/patches.suse/SoN-20-netvm-tcp-deadlock.patch b/patches.suse/SoN-20-netvm-tcp-deadlock.patch deleted file mode 100644 index 8820bf4..0000000 --- a/patches.suse/SoN-20-netvm-tcp-deadlock.patch +++ /dev/null @@ -1,118 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 20/31] netvm: prevent a stream specific deadlock -Patch-mainline: not yet - -It could happen that all !SOCK_MEMALLOC sockets have buffered so much data -that we're over the global rmem limit. This will prevent SOCK_MEMALLOC buffers -from receiving data, which will prevent userspace from running, which is needed -to reduce the buffered data. - -Fix this by exempting the SOCK_MEMALLOC sockets from the rmem limit. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - - include/net/sock.h | 7 ++++--- - net/caif/caif_socket.c | 2 +- - net/core/sock.c | 2 +- - net/ipv4/tcp_input.c | 12 ++++++------ - net/sctp/ulpevent.c | 2 +- - 5 files changed, 13 insertions(+), 12 deletions(-) - ---- a/include/net/sock.h -+++ b/include/net/sock.h -@@ -980,12 +980,13 @@ static inline int sk_wmem_schedule(struc - __sk_mem_schedule(sk, size, SK_MEM_SEND); - } - --static inline int sk_rmem_schedule(struct sock *sk, int size) -+static inline int sk_rmem_schedule(struct sock *sk, struct sk_buff *skb) - { - if (!sk_has_account(sk)) - return 1; -- return size <= sk->sk_forward_alloc || -- __sk_mem_schedule(sk, size, SK_MEM_RECV); -+ return skb->truesize <= sk->sk_forward_alloc || -+ __sk_mem_schedule(sk, skb->truesize, SK_MEM_RECV) || -+ skb_emergency(skb); - } - - static inline void sk_mem_reclaim(struct sock *sk) ---- a/net/core/sock.c -+++ b/net/core/sock.c -@@ -399,7 +399,7 @@ int sock_queue_rcv_skb(struct sock *sk, - if (err) - return err; - -- if (!sk_rmem_schedule(sk, skb->truesize)) { -+ if (!sk_rmem_schedule(sk, skb)) { - atomic_inc(&sk->sk_drops); - return -ENOBUFS; - } ---- a/net/ipv4/tcp_input.c -+++ b/net/ipv4/tcp_input.c -@@ -4347,19 +4347,19 @@ static void tcp_ofo_queue(struct sock *s - static int tcp_prune_ofo_queue(struct sock *sk); - static int tcp_prune_queue(struct sock *sk); - --static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) -+static inline int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb) - { - if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || -- !sk_rmem_schedule(sk, size)) { -+ !sk_rmem_schedule(sk, skb)) { - - if (tcp_prune_queue(sk) < 0) - return -1; - -- if (!sk_rmem_schedule(sk, size)) { -+ if (!sk_rmem_schedule(sk, skb)) { - if (!tcp_prune_ofo_queue(sk)) - return -1; - -- if (!sk_rmem_schedule(sk, size)) -+ if (!sk_rmem_schedule(sk, skb)) - return -1; - } - } -@@ -4412,7 +4412,7 @@ static void 
tcp_data_queue(struct sock * - if (eaten <= 0) { - queue_and_out: - if (eaten < 0 && -- tcp_try_rmem_schedule(sk, skb->truesize)) -+ tcp_try_rmem_schedule(sk, skb)) - goto drop; - - skb_set_owner_r(skb, sk); -@@ -4483,7 +4483,7 @@ drop: - - TCP_ECN_check_ce(tp, skb); - -- if (tcp_try_rmem_schedule(sk, skb->truesize)) -+ if (tcp_try_rmem_schedule(sk, skb)) - goto drop; - - /* Disable header prediction. */ ---- a/net/sctp/ulpevent.c -+++ b/net/sctp/ulpevent.c -@@ -702,7 +702,7 @@ struct sctp_ulpevent *sctp_ulpevent_make - if (rx_count >= asoc->base.sk->sk_rcvbuf) { - - if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) || -- (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize))) -+ (!sk_rmem_schedule(asoc->base.sk, chunk->skb))) - goto fail; - } - ---- a/net/caif/caif_socket.c -+++ b/net/caif/caif_socket.c -@@ -170,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc - err = sk_filter(sk, skb); - if (err) - return err; -- if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { -+ if (!sk_rmem_schedule(sk, skb) && rx_flow_is_on(cf_sk)) { - set_rx_flow_off(cf_sk); - pr_debug("sending flow OFF due to rmem_schedule\n"); - dbfs_atomic_inc(&cnt.num_rx_flow_off); diff --git a/patches.suse/SoN-21-emergency-nf_queue.patch b/patches.suse/SoN-21-emergency-nf_queue.patch deleted file mode 100644 index 56c0d08..0000000 --- a/patches.suse/SoN-21-emergency-nf_queue.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 21/31] netfilter: NF_QUEUE vs emergency skbs -Patch-mainline: not yet - -Avoid memory getting stuck waiting for userspace, drop all emergency packets. 
-This of course requires the regular storage route to not include an NF_QUEUE -target ;-) - -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - net/netfilter/core.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/net/netfilter/core.c -+++ b/net/netfilter/core.c -@@ -176,11 +176,14 @@ next_hook: - if (verdict == NF_ACCEPT || verdict == NF_STOP) { - ret = 1; - } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) { -+drop: - kfree_skb(skb); - ret = -(verdict >> NF_VERDICT_BITS); - if (ret == 0) - ret = -EPERM; - } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { -+ if (skb_emergency(skb)) -+ goto drop; - if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn, - verdict >> NF_VERDICT_BITS)) - goto next_hook; diff --git a/patches.suse/SoN-22-netvm.patch b/patches.suse/SoN-22-netvm.patch deleted file mode 100644 index 6543971..0000000 --- a/patches.suse/SoN-22-netvm.patch +++ /dev/null @@ -1,183 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 22/31] netvm: skb processing -Patch-mainline: Not yet - -In order to make sure emergency packets receive all memory needed to proceed -ensure processing of emergency SKBs happens under PF_MEMALLOC. - -Use the (new) sk_backlog_rcv() wrapper to ensure this for backlog processing. - -Skip taps, since those are user-space again. 
- -Signed-off-by: Jiri Slaby [lock imbalance fix] -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/net/sock.h | 5 ++++ - net/core/dev.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++---- - net/core/sock.c | 16 +++++++++++++++ - 3 files changed, 72 insertions(+), 4 deletions(-) - ---- a/include/net/sock.h -+++ b/include/net/sock.h -@@ -682,8 +682,13 @@ static inline __must_check int sk_add_ba - return 0; - } - -+extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb); -+ - static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) - { -+ if (skb_emergency(skb)) -+ return __sk_backlog_rcv(sk, skb); -+ - return sk->sk_backlog_rcv(sk, skb); - } - ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -2814,6 +2814,30 @@ int __skb_bond_should_drop(struct sk_buf - } - EXPORT_SYMBOL(__skb_bond_should_drop); - -+/* -+ * Filter the protocols for which the reserves are adequate. -+ * -+ * Before adding a protocol make sure that it is either covered by the existing -+ * reserves, or add reserves covering the memory need of the new protocol's -+ * packet processing. 
-+ */ -+static int skb_emergency_protocol(struct sk_buff *skb) -+{ -+ if (skb_emergency(skb)) -+ switch (skb->protocol) { -+ case __constant_htons(ETH_P_ARP): -+ case __constant_htons(ETH_P_IP): -+ case __constant_htons(ETH_P_IPV6): -+ case __constant_htons(ETH_P_8021Q): -+ break; -+ -+ default: -+ return 0; -+ } -+ -+ return 1; -+} -+ - static int __netif_receive_skb(struct sk_buff *skb) - { - struct packet_type *ptype, *pt_prev; -@@ -2824,6 +2848,7 @@ static int __netif_receive_skb(struct sk - struct net_device *orig_or_bond; - int ret = NET_RX_DROP; - __be16 type; -+ unsigned long pflags = current->flags; - - if (!netdev_tstamp_prequeue) - net_timestamp_check(skb); -@@ -2831,9 +2856,21 @@ static int __netif_receive_skb(struct sk - - trace_netif_receive_skb(skb); - -+ /* Emergency skb are special, they should -+ * - be delivered to SOCK_MEMALLOC sockets only -+ * - stay away from userspace -+ * - have bounded memory usage -+ * -+ * Use PF_MEMALLOC as a poor mans memory pool - the grouping kind. -+ * This saves us from propagating the allocation context down to all -+ * allocation sites. 
-+ */ -+ if (skb_emergency(skb)) -+ current->flags |= PF_MEMALLOC; -+ - /* if we've gotten here through NAPI, check netpoll */ - if (netpoll_receive_skb(skb)) -- return NET_RX_DROP; -+ goto out; - - if (!skb->skb_iif) - skb->skb_iif = skb->dev->ifindex; -@@ -2875,6 +2912,9 @@ static int __netif_receive_skb(struct sk - } - #endif - -+ if (skb_emergency(skb)) -+ goto skip_taps; -+ - list_for_each_entry_rcu(ptype, &ptype_all, list) { - if (ptype->dev == null_or_orig || ptype->dev == skb->dev || - ptype->dev == orig_dev) { -@@ -2884,13 +2924,17 @@ static int __netif_receive_skb(struct sk - } - } - -+skip_taps: - #ifdef CONFIG_NET_CLS_ACT - skb = handle_ing(skb, &pt_prev, &ret, orig_dev); - if (!skb) -- goto out; -+ goto unlock; - ncls: - #endif - -+ if (!skb_emergency_protocol(skb)) -+ goto drop; -+ - /* Handle special case of bridge or macvlan */ - rx_handler = rcu_dereference(skb->dev->rx_handler); - if (rx_handler) { -@@ -2900,7 +2944,7 @@ ncls: - } - skb = rx_handler(skb); - if (!skb) -- goto out; -+ goto unlock; - } - - if (vlan_tx_tag_present(skb)) { -@@ -2930,6 +2974,7 @@ ncls: - if (pt_prev) { - ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); - } else { -+drop: - atomic_long_inc(&skb->dev->rx_dropped); - kfree_skb(skb); - /* Jamal, now you will not able to escape explaining -@@ -2937,8 +2982,10 @@ ncls: - ret = NET_RX_DROP; - } - --out: -+unlock: - rcu_read_unlock(); -+out: -+ tsk_restore_flags(current, pflags, PF_MEMALLOC); - return ret; - } - ---- a/net/core/sock.c -+++ b/net/core/sock.c -@@ -322,6 +322,22 @@ int sk_clear_memalloc(struct sock *sk) - return set; - } - EXPORT_SYMBOL_GPL(sk_clear_memalloc); -+ -+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) -+{ -+ int ret; -+ unsigned long pflags = current->flags; -+ -+ /* these should have been dropped before queueing */ -+ BUG_ON(!sk_has_memalloc(sk)); -+ -+ current->flags |= PF_MEMALLOC; -+ ret = sk->sk_backlog_rcv(sk, skb); -+ tsk_restore_flags(current, pflags, PF_MEMALLOC); -+ -+ return 
ret; -+} -+EXPORT_SYMBOL(__sk_backlog_rcv); - #endif - - static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) diff --git a/patches.suse/SoN-23-mm-swapfile.patch b/patches.suse/SoN-23-mm-swapfile.patch deleted file mode 100644 index e018fba..0000000 --- a/patches.suse/SoN-23-mm-swapfile.patch +++ /dev/null @@ -1,348 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 23/31] mm: add support for non block device backed swap files -Patch-mainline: not yet - -New addres_space_operations methods are added: - int swapon(struct file *); - int swapoff(struct file *); - int swap_out(struct file *, struct page *, struct writeback_control *); - int swap_in(struct file *, struct page *); - -When during sys_swapon() the ->swapon() method is found and returns no error -the swapper_space.a_ops will proxy to sis->swap_file->f_mapping->a_ops, and -make use of ->swap_{out,in}() to write/read swapcache pages. - -The ->swapon() method will be used to communicate to the file that the VM -relies on it, and the address_space should take adequate measures (like -reserving memory for mempools or the like). The ->swapoff() method will be -called on sys_swapoff() when ->swapon() was found and returned no error. - -This new interface can be used to obviate the need for ->bmap in the swapfile -code. A filesystem would need to load (and maybe even allocate) the full block -map for a file into memory and pin it there on ->swapon() so that -->swap_{out,in}() have instant access to it. It can be released on ->swapoff(). - -The reason to provide ->swap_{out,in}() over using {write,read}page() is to - 1) make a distinction between swapcache and pagecache pages, and - 2) to provide a struct file * for credential context (normally not needed - in the context of writepage, as the page content is normally dirtied - using either of the following interfaces: - write_{begin,end}() - {prepare,commit}_write() - page_mkwrite() - which do have the file context. 
- -[miklos@szeredi.hu: cleanups] -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - Documentation/filesystems/Locking | 22 ++++++++++++++++ - Documentation/filesystems/vfs.txt | 18 +++++++++++++ - include/linux/buffer_head.h | 1 - include/linux/fs.h | 9 ++++++ - include/linux/swap.h | 4 ++ - mm/page_io.c | 52 ++++++++++++++++++++++++++++++++++++++ - mm/swap_state.c | 4 +- - mm/swapfile.c | 30 ++++++++++++++++++++- - 8 files changed, 136 insertions(+), 4 deletions(-) - ---- a/Documentation/filesystems/Locking -+++ b/Documentation/filesystems/Locking -@@ -198,6 +198,10 @@ prototypes: - int (*launder_page)(struct page *); - int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long); - int (*error_remove_page)(struct address_space *, struct page *); -+ int (*swapon) (struct file *); -+ int (*swapoff) (struct file *); -+ int (*swap_out) (struct file *, struct page *, struct writeback_control *); -+ int (*swap_in) (struct file *, struct page *); - - locking rules: - All except set_page_dirty and freepage may block -@@ -221,6 +225,10 @@ migratepage: yes (both) - launder_page: yes - is_partially_uptodate: yes - error_remove_page: yes -+swapon no -+swapoff no -+swap_out no yes, unlocks -+swap_in no yes, unlocks - - ->write_begin(), ->write_end(), ->sync_page() and ->readpage() - may be called from the request handler (/dev/loop). -@@ -322,6 +330,20 @@ cleaned, or an error value if not. Note - getting mapped back in and redirtied, it needs to be kept locked - across the entire operation. - -+ ->swapon() will be called with a non-zero argument on files backing -+(non block device backed) swapfiles. A return value of zero indicates success, -+in which case this file can be used for backing swapspace. The swapspace -+operations will be proxied to the address space operations. -+ -+ ->swapoff() will be called in the sys_swapoff() path when ->swapon() -+returned success. 
-+ -+ ->swap_out() when swapon() returned success, this method is used to -+write the swap page. -+ -+ ->swap_in() when swapon() returned success, this method is used to -+read the swap page. -+ - ----------------------- file_lock_operations ------------------------------ - prototypes: - void (*fl_copy_lock)(struct file_lock *, struct file_lock *); ---- a/Documentation/filesystems/vfs.txt -+++ b/Documentation/filesystems/vfs.txt -@@ -543,6 +543,11 @@ struct address_space_operations { - int (*migratepage) (struct page *, struct page *); - int (*launder_page) (struct page *); - int (*error_remove_page) (struct mapping *mapping, struct page *page); -+ int (*swapon)(struct file *); -+ int (*swapoff)(struct file *); -+ int (*swap_out)(struct file *file, struct page *page, -+ struct writeback_control *wbc); -+ int (*swap_in)(struct file *file, struct page *page); - }; - - writepage: called by the VM to write a dirty page to backing store. -@@ -712,6 +717,19 @@ struct address_space_operations { - unless you have them locked or reference counts increased. - - -+ swapon: Called when swapon is used on a file. A -+ return value of zero indicates success, in which case this -+ file can be used to back swapspace. The swapspace operations -+ will be proxied to this address space's ->swap_{out,in} methods. -+ -+ swapoff: Called during swapoff on files where swapon was successfull. -+ -+ swap_out: Called to write a swapcache page to a backing store, similar to -+ writepage. -+ -+ swap_in: Called to read a swapcache page from a backing store, similar to -+ readpage. 
-+ - The File Object - =============== - ---- a/include/linux/buffer_head.h -+++ b/include/linux/buffer_head.h -@@ -330,6 +330,7 @@ static inline int inode_has_buffers(stru - static inline void invalidate_inode_buffers(struct inode *inode) {} - static inline int remove_inode_buffers(struct inode *inode) { return 1; } - static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } -+static inline void block_sync_page(struct page *) { } - - #endif /* CONFIG_BLOCK */ - #endif /* _LINUX_BUFFER_HEAD_H */ ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -622,6 +622,15 @@ struct address_space_operations { - int (*is_partially_uptodate) (struct page *, read_descriptor_t *, - unsigned long); - int (*error_remove_page)(struct address_space *, struct page *); -+ -+ /* -+ * swapfile support -+ */ -+ int (*swapon)(struct file *file); -+ int (*swapoff)(struct file *file); -+ int (*swap_out)(struct file *file, struct page *page, -+ struct writeback_control *wbc); -+ int (*swap_in)(struct file *file, struct page *page); - }; - - /* ---- a/include/linux/swap.h -+++ b/include/linux/swap.h -@@ -148,6 +148,7 @@ enum { - SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ - SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ - SWP_BLKDEV = (1 << 6), /* its a block device */ -+ SWP_FILE = (1 << 7), /* file swap area */ - /* add others here before... 
*/ - SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ - }; -@@ -303,6 +304,8 @@ extern void swap_unplug_io_fn(struct bac - /* linux/mm/page_io.c */ - extern int swap_readpage(struct page *); - extern int swap_writepage(struct page *page, struct writeback_control *wbc); -+extern void swap_sync_page(struct page *page); -+extern int swap_set_page_dirty(struct page *page); - extern void end_swap_bio_read(struct bio *bio, int err); - - /* linux/mm/swap_state.c */ -@@ -339,6 +342,7 @@ extern int swap_type_of(dev_t, sector_t, - extern unsigned int count_swap_pages(int, int); - extern sector_t map_swap_page(struct page *, struct block_device **); - extern sector_t swapdev_block(int, pgoff_t); -+extern struct swap_info_struct *page_swap_info(struct page *); - extern int reuse_swap_page(struct page *); - extern int try_to_free_swap(struct page *); - struct backing_dev_info; ---- a/mm/page_io.c -+++ b/mm/page_io.c -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -93,11 +94,23 @@ int swap_writepage(struct page *page, st - { - struct bio *bio; - int ret = 0, rw = WRITE; -+ struct swap_info_struct *sis = page_swap_info(page); - - if (try_to_free_swap(page)) { - unlock_page(page); - goto out; - } -+ -+ if (sis->flags & SWP_FILE) { -+ struct file *swap_file = sis->swap_file; -+ struct address_space *mapping = swap_file->f_mapping; -+ -+ ret = mapping->a_ops->swap_out(swap_file, page, wbc); -+ if (!ret) -+ count_vm_event(PSWPOUT); -+ return ret; -+ } -+ - bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); - if (bio == NULL) { - set_page_dirty(page); -@@ -115,13 +128,52 @@ out: - return ret; - } - -+void swap_sync_page(struct page *page) -+{ -+ struct swap_info_struct *sis = page_swap_info(page); -+ -+ if (sis->flags & SWP_FILE) { -+ struct address_space *mapping = sis->swap_file->f_mapping; -+ -+ if (mapping->a_ops->sync_page) -+ mapping->a_ops->sync_page(page); -+ } else { -+ block_sync_page(page); -+ } -+} -+ -+int 
swap_set_page_dirty(struct page *page) -+{ -+ struct swap_info_struct *sis = page_swap_info(page); -+ -+ if (sis->flags & SWP_FILE) { -+ struct address_space *mapping = sis->swap_file->f_mapping; -+ -+ return mapping->a_ops->set_page_dirty(page); -+ } else { -+ return __set_page_dirty_nobuffers(page); -+ } -+} -+ - int swap_readpage(struct page *page) - { - struct bio *bio; - int ret = 0; -+ struct swap_info_struct *sis = page_swap_info(page); - - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(PageUptodate(page)); -+ -+ if (sis->flags & SWP_FILE) { -+ struct file *swap_file = sis->swap_file; -+ struct address_space *mapping = swap_file->f_mapping; -+ -+ ret = mapping->a_ops->swap_in(swap_file, page); -+ if (!ret) -+ count_vm_event(PSWPIN); -+ return ret; -+ } -+ - bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); - if (bio == NULL) { - unlock_page(page); ---- a/mm/swap_state.c -+++ b/mm/swap_state.c -@@ -29,8 +29,8 @@ - */ - static const struct address_space_operations swap_aops = { - .writepage = swap_writepage, -- .sync_page = block_sync_page, -- .set_page_dirty = __set_page_dirty_nobuffers, -+ .sync_page = swap_sync_page, -+ .set_page_dirty = swap_set_page_dirty, - .migratepage = migrate_page, - }; - ---- a/mm/swapfile.c -+++ b/mm/swapfile.c -@@ -1373,6 +1373,14 @@ static void destroy_swap_extents(struct - list_del(&se->list); - kfree(se); - } -+ -+ if (sis->flags & SWP_FILE) { -+ struct file *swap_file = sis->swap_file; -+ struct address_space *mapping = swap_file->f_mapping; -+ -+ sis->flags &= ~SWP_FILE; -+ mapping->a_ops->swapoff(swap_file); -+ } - } - - /* -@@ -1454,7 +1462,9 @@ add_swap_extent(struct swap_info_struct - */ - static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) - { -- struct inode *inode; -+ struct file *swap_file = sis->swap_file; -+ struct address_space *mapping = swap_file->f_mapping; -+ struct inode *inode = mapping->host; - unsigned blocks_per_page; - unsigned long page_no; - unsigned blkbits; -@@ -1465,13 
+1475,22 @@ static int setup_swap_extents(struct swa - int nr_extents = 0; - int ret; - -- inode = sis->swap_file->f_mapping->host; - if (S_ISBLK(inode->i_mode)) { - ret = add_swap_extent(sis, 0, sis->max, 0); - *span = sis->pages; - goto out; - } - -+ if (mapping->a_ops->swapon) { -+ ret = mapping->a_ops->swapon(swap_file); -+ if (!ret) { -+ sis->flags |= SWP_FILE; -+ ret = add_swap_extent(sis, 0, sis->max, 0); -+ *span = sis->pages; -+ } -+ goto out; -+ } -+ - blkbits = inode->i_blkbits; - blocks_per_page = PAGE_SIZE >> blkbits; - -@@ -2290,6 +2309,13 @@ int swapcache_prepare(swp_entry_t entry) - return __swap_duplicate(entry, SWAP_HAS_CACHE); - } - -+struct swap_info_struct *page_swap_info(struct page *page) -+{ -+ swp_entry_t swap = { .val = page_private(page) }; -+ BUG_ON(!PageSwapCache(page)); -+ return swap_info[swp_type(swap)]; -+} -+ - /* - * swap_lock prevents swap_map being freed. Don't grab an extra - * reference on the swaphandle, it doesn't matter if it becomes unused. diff --git a/patches.suse/SoN-24-mm-page_file_methods.patch b/patches.suse/SoN-24-mm-page_file_methods.patch deleted file mode 100644 index c16612e..0000000 --- a/patches.suse/SoN-24-mm-page_file_methods.patch +++ /dev/null @@ -1,112 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 24/31] mm: methods for teaching filesystems about PG_swapcache pages -Patch-mainline: not yet - -In order to teach filesystems to handle swap cache pages, three new page -functions are introduced: - - pgoff_t page_file_index(struct page *); - loff_t page_file_offset(struct page *); - struct address_space *page_file_mapping(struct page *); - -page_file_index() - gives the offset of this page in the file in -PAGE_CACHE_SIZE blocks. Like page->index is for mapped pages, this function -also gives the correct index for PG_swapcache pages. - -page_file_offset() - uses page_file_index(), so that it will give the expected -result, even for PG_swapcache pages. 
- -page_file_mapping() - gives the mapping backing the actual page; that is for -swap cache pages it will give swap_file->f_mapping. - -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - include/linux/mm.h | 25 +++++++++++++++++++++++++ - include/linux/pagemap.h | 5 +++++ - mm/swapfile.c | 19 +++++++++++++++++++ - 3 files changed, 49 insertions(+) - ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -663,6 +663,17 @@ static inline void *page_rmapping(struct - return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); - } - -+extern struct address_space *__page_file_mapping(struct page *); -+ -+static inline -+struct address_space *page_file_mapping(struct page *page) -+{ -+ if (unlikely(PageSwapCache(page))) -+ return __page_file_mapping(page); -+ -+ return page->mapping; -+} -+ - static inline int PageAnon(struct page *page) - { - return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; -@@ -679,6 +690,20 @@ static inline pgoff_t page_index(struct - return page->index; - } - -+extern pgoff_t __page_file_index(struct page *page); -+ -+/* -+ * Return the file index of the page. 
Regular pagecache pages use ->index -+ * whereas swapcache pages use swp_offset(->private) -+ */ -+static inline pgoff_t page_file_index(struct page *page) -+{ -+ if (unlikely(PageSwapCache(page))) -+ return __page_file_index(page); -+ -+ return page->index; -+} -+ - /* - * The atomic page->_mapcount, like _count, starts from -1: - * so that transitions both from it and to it can be tracked, ---- a/include/linux/pagemap.h -+++ b/include/linux/pagemap.h -@@ -282,6 +282,11 @@ static inline loff_t page_offset(struct - return ((loff_t)page->index) << PAGE_CACHE_SHIFT; - } - -+static inline loff_t page_file_offset(struct page *page) -+{ -+ return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT; -+} -+ - extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, - unsigned long address); - ---- a/mm/swapfile.c -+++ b/mm/swapfile.c -@@ -2307,6 +2307,25 @@ struct swap_info_struct *page_swap_info( - } - - /* -+ * out-of-line __page_file_ methods to avoid include hell. -+ */ -+ -+struct address_space *__page_file_mapping(struct page *page) -+{ -+ VM_BUG_ON(!PageSwapCache(page)); -+ return page_swap_info(page)->swap_file->f_mapping; -+} -+EXPORT_SYMBOL_GPL(__page_file_mapping); -+ -+pgoff_t __page_file_index(struct page *page) -+{ -+ swp_entry_t swap = { .val = page_private(page) }; -+ VM_BUG_ON(!PageSwapCache(page)); -+ return swp_offset(swap); -+} -+EXPORT_SYMBOL_GPL(__page_file_index); -+ -+/* - * swap_lock prevents swap_map being freed. Don't grab an extra - * reference on the swaphandle, it doesn't matter if it becomes unused. 
- */ diff --git a/patches.suse/SoN-25-nfs-swapcache.patch b/patches.suse/SoN-25-nfs-swapcache.patch deleted file mode 100644 index bb56be1..0000000 --- a/patches.suse/SoN-25-nfs-swapcache.patch +++ /dev/null @@ -1,292 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 25/31] nfs: teach the NFS client how to treat PG_swapcache pages -Patch-mainline: Not yet - -Replace all relevant occurences of page->index and page->mapping in the NFS -client with the new page_file_index() and page_file_mapping() functions. - -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - fs/nfs/file.c | 6 +++--- - fs/nfs/internal.h | 7 ++++--- - fs/nfs/pagelist.c | 6 +++--- - fs/nfs/read.c | 6 +++--- - fs/nfs/write.c | 43 +++++++++++++++++++++++-------------------- - 5 files changed, 36 insertions(+), 32 deletions(-) - ---- a/fs/nfs/file.c -+++ b/fs/nfs/file.c -@@ -472,7 +472,7 @@ static void nfs_invalidate_page(struct p - if (offset != 0) - return; - /* Cancel any unstarted writes on this page */ -- nfs_wb_page_cancel(page->mapping->host, page); -+ nfs_wb_page_cancel(page_file_mapping(page)->host, page); - - nfs_fscache_invalidate_page(page, page->mapping->host); - } -@@ -514,7 +514,7 @@ static int nfs_release_page(struct page - */ - static int nfs_launder_page(struct page *page) - { -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - struct nfs_inode *nfsi = NFS_I(inode); - - dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n", -@@ -563,7 +563,7 @@ static int nfs_vm_page_mkwrite(struct vm - nfs_fscache_wait_on_page_write(NFS_I(dentry->d_inode), page); - - lock_page(page); -- mapping = page->mapping; -+ mapping = page_file_mapping(page); - if (mapping != dentry->d_inode->i_mapping) - goto out_unlock; - ---- a/fs/nfs/internal.h -+++ b/fs/nfs/internal.h -@@ -343,13 +343,14 @@ void nfs_super_set_maxbytes(struct super - static inline - unsigned int nfs_page_length(struct page *page) - { -- loff_t i_size = 
i_size_read(page->mapping->host); -+ loff_t i_size = i_size_read(page_file_mapping(page)->host); - - if (i_size > 0) { -+ pgoff_t page_index = page_file_index(page); - pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; -- if (page->index < end_index) -+ if (page_index < end_index) - return PAGE_CACHE_SIZE; -- if (page->index == end_index) -+ if (page_index == end_index) - return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1; - } - return 0; ---- a/fs/nfs/pagelist.c -+++ b/fs/nfs/pagelist.c -@@ -70,11 +70,11 @@ nfs_create_request(struct nfs_open_conte - * update_nfs_request below if the region is not locked. */ - req->wb_page = page; - atomic_set(&req->wb_complete, 0); -- req->wb_index = page->index; -+ req->wb_index = page_file_index(page); - page_cache_get(page); - BUG_ON(PagePrivate(page)); - BUG_ON(!PageLocked(page)); -- BUG_ON(page->mapping->host != inode); -+ BUG_ON(page_file_mapping(page)->host != inode); - req->wb_offset = offset; - req->wb_pgbase = offset; - req->wb_bytes = count; -@@ -369,7 +369,7 @@ void nfs_pageio_cond_complete(struct nfs - * nfs_scan_list - Scan a list for matching requests - * @nfsi: NFS inode - * @dst: Destination list -- * @idx_start: lower bound of page->index to scan -+ * @idx_start: lower bound of page_file_index(page) to scan - * @npages: idx_start + npages sets the upper bound to scan. 
- * @tag: tag to scan for - * ---- a/fs/nfs/read.c -+++ b/fs/nfs/read.c -@@ -502,11 +502,11 @@ static const struct rpc_call_ops nfs_rea - int nfs_readpage(struct file *file, struct page *page) - { - struct nfs_open_context *ctx; -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - int error; - - dprintk("NFS: nfs_readpage (%p %ld@%lu)\n", -- page, PAGE_CACHE_SIZE, page->index); -+ page, PAGE_CACHE_SIZE, page_file_index(page)); - nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); - nfs_add_stats(inode, NFSIOS_READPAGES, 1); - -@@ -560,7 +560,7 @@ static int - readpage_async_filler(void *data, struct page *page) - { - struct nfs_readdesc *desc = (struct nfs_readdesc *)data; -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - struct nfs_page *new; - unsigned int len; - int error; ---- a/fs/nfs/write.c -+++ b/fs/nfs/write.c -@@ -123,7 +123,7 @@ static struct nfs_page *nfs_page_find_re - - static struct nfs_page *nfs_page_find_request(struct page *page) - { -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - struct nfs_page *req = NULL; - - spin_lock(&inode->i_lock); -@@ -135,16 +135,16 @@ static struct nfs_page *nfs_page_find_re - /* Adjust the file length if we're writing beyond the end */ - static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) - { -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - loff_t end, i_size; - pgoff_t end_index; - - spin_lock(&inode->i_lock); - i_size = i_size_read(inode); - end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; -- if (i_size > 0 && page->index < end_index) -+ if (i_size > 0 && page_file_index(page) < end_index) - goto out; -- end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count); -+ end = page_file_offset(page) + ((loff_t)offset+count); - if (i_size >= end) - goto out; - 
i_size_write(inode, end); -@@ -157,7 +157,7 @@ out: - static void nfs_set_pageerror(struct page *page) - { - SetPageError(page); -- nfs_zap_mapping(page->mapping->host, page->mapping); -+ nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); - } - - /* We can set the PG_uptodate flag if we see that a write request -@@ -198,7 +198,7 @@ static int nfs_set_page_writeback(struct - int ret = test_set_page_writeback(page); - - if (!ret) { -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - struct nfs_server *nfss = NFS_SERVER(inode); - - page_cache_get(page); -@@ -213,7 +213,7 @@ static int nfs_set_page_writeback(struct - - static void nfs_end_page_writeback(struct page *page) - { -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - struct nfs_server *nfss = NFS_SERVER(inode); - - end_page_writeback(page); -@@ -224,7 +224,7 @@ static void nfs_end_page_writeback(struc - - static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) - { -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - struct nfs_page *req; - int ret; - -@@ -285,13 +285,13 @@ out: - - static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) - { -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - int ret; - - nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); - nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); - -- nfs_pageio_cond_complete(pgio, page->index); -+ nfs_pageio_cond_complete(pgio, page_file_index(page)); - ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); - if (ret == -EAGAIN) { - redirty_page_for_writepage(wbc, page); -@@ -310,7 +310,8 @@ static int nfs_writepage_locked(struct p - struct nfs_pageio_descriptor pgio; - int err; - -- nfs_pageio_init_write(&pgio, page->mapping->host, 
wb_priority(wbc)); -+ nfs_pageio_init_write(&pgio, page_file_mapping(page)->host, -+ wb_priority(wbc)); - err = nfs_do_writepage(page, wbc, &pgio); - nfs_pageio_complete(&pgio); - if (err < 0) -@@ -455,7 +456,8 @@ nfs_mark_request_commit(struct nfs_page - nfsi->ncommit++; - spin_unlock(&inode->i_lock); - inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); -- inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); -+ inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, -+ BDI_RECLAIMABLE); - __mark_inode_dirty(inode, I_DIRTY_DATASYNC); - } - -@@ -466,7 +468,8 @@ nfs_clear_request_commit(struct nfs_page - - if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) { - dec_zone_page_state(page, NR_UNSTABLE_NFS); -- dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE); -+ dec_bdi_stat(page_file_mapping(page)->backing_dev_info, -+ BDI_RECLAIMABLE); - return 1; - } - return 0; -@@ -527,7 +530,7 @@ nfs_need_commit(struct nfs_inode *nfsi) - * nfs_scan_commit - Scan an inode for commit requests - * @inode: NFS inode to scan - * @dst: destination list -- * @idx_start: lower bound of page->index to scan. -+ * @idx_start: lower bound of page_file_index(page) to scan. - * @npages: idx_start + npages sets the upper bound to scan. - * - * Moves requests from the inode's 'commit' request list. 
-@@ -647,7 +650,7 @@ out_err: - static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx, - struct page *page, unsigned int offset, unsigned int bytes) - { -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - struct nfs_page *req; - int error; - -@@ -706,7 +709,7 @@ int nfs_flush_incompatible(struct file * - nfs_release_request(req); - if (!do_flush) - return 0; -- status = nfs_wb_page(page->mapping->host, page); -+ status = nfs_wb_page(page_file_mapping(page)->host, page); - } while (status == 0); - return status; - } -@@ -732,7 +735,7 @@ int nfs_updatepage(struct file *file, st - unsigned int offset, unsigned int count) - { - struct nfs_open_context *ctx = nfs_file_open_context(file); -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - int status = 0; - - nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); -@@ -740,7 +743,7 @@ int nfs_updatepage(struct file *file, st - dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n", - file->f_path.dentry->d_parent->d_name.name, - file->f_path.dentry->d_name.name, count, -- (long long)(page_offset(page) + offset)); -+ (long long)(page_file_offset(page) + offset)); - - /* If we're not using byte range locks, and we know the page - * is up to date, it may be more efficient to extend the write -@@ -1023,7 +1026,7 @@ static void nfs_writeback_release_partia - } - - if (nfs_write_need_commit(data)) { -- struct inode *inode = page->mapping->host; -+ struct inode *inode = page_file_mapping(page)->host; - - spin_lock(&inode->i_lock); - if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) { -@@ -1321,7 +1324,7 @@ nfs_commit_list(struct inode *inode, str - nfs_list_remove_request(req); - nfs_mark_request_commit(req); - dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); -- dec_bdi_stat(req->wb_page->mapping->backing_dev_info, -+ dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, - BDI_RECLAIMABLE); - 
nfs_clear_page_tag_locked(req); - } diff --git a/patches.suse/SoN-25a-nfs-swapcache.patch b/patches.suse/SoN-25a-nfs-swapcache.patch deleted file mode 100644 index 9283c6b..0000000 --- a/patches.suse/SoN-25a-nfs-swapcache.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Mel Gorman -Subject: [PATCH] nfs: Convert nfs_mark_request_dirty() to use page_file_mapping() -Patch-mainline: Not yet -References: bnc#677738 - -nfs_mark_request_dirty() uses page->mapping directly. If the page is a -PageSwapCache page, it triggers as oops as the mapping must be retrieved -from the swap info instead. This patch uses page_file_mapping() thus -preventing the oops. - -Signed-off-by: Mel Gorman -Reviewed-by: NeilBrown -Signed-off-by: Suresh Jayaraman ---- - fs/nfs/write.c | 3 ++- - 1 files changed, 2 insertions(+), 1 deletions(-) - -Index: linux-2.6.37-openSUSE-11.4/fs/nfs/write.c -=================================================================== ---- linux-2.6.37-openSUSE-11.4.orig/fs/nfs/write.c -+++ linux-2.6.37-openSUSE-11.4/fs/nfs/write.c -@@ -432,7 +432,8 @@ static void - nfs_mark_request_dirty(struct nfs_page *req) - { - __set_page_dirty_nobuffers(req->wb_page); -- __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC); -+ __mark_inode_dirty(page_file_mapping(req->wb_page)->host, -+ I_DIRTY_DATASYNC); - } - - #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) diff --git a/patches.suse/SoN-25b-nfs-swapcache.patch b/patches.suse/SoN-25b-nfs-swapcache.patch deleted file mode 100644 index bc252a8..0000000 --- a/patches.suse/SoN-25b-nfs-swapcache.patch +++ /dev/null @@ -1,29 +0,0 @@ -From: Mel Gorman -Date: Wed, 9 Mar 2011 19:36:49 +0000 -Subject: [PATCH] nfs: Use page_file_offset during page writeback -Patch-mainline: Not yet -References: bnc#677738 - -nfs_wb_page could conceivably be called for a PageSwapCache page so play -it safe and use page_file_offset() to lookup the correct index. 
- -Signed-off-by: Mel Gorman -Reviewed-by: NeilBrown -Signed-off-by: Suresh Jayaraman ---- - fs/nfs/write.c | 2 +- - 1 files changed, 1 insertions(+), 1 deletions(-) - -Index: linux-2.6.37-openSUSE-11.4/fs/nfs/write.c -=================================================================== ---- linux-2.6.37-openSUSE-11.4.orig/fs/nfs/write.c -+++ linux-2.6.37-openSUSE-11.4/fs/nfs/write.c -@@ -1520,7 +1520,7 @@ int nfs_wb_page_cancel(struct inode *ino - */ - int nfs_wb_page(struct inode *inode, struct page *page) - { -- loff_t range_start = page_offset(page); -+ loff_t range_start = page_file_offset(page); - loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL, diff --git a/patches.suse/SoN-26-nfs-swapper.patch b/patches.suse/SoN-26-nfs-swapper.patch deleted file mode 100644 index 6673192..0000000 --- a/patches.suse/SoN-26-nfs-swapper.patch +++ /dev/null @@ -1,164 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 26/31] nfs: disable data cache revalidation for swapfiles -Patch-mainline: Not yet - -Do as Trond suggested: - http://lkml.org/lkml/2006/8/25/348 - -Disable NFS data cache revalidation on swap files since it doesn't really -make sense to have other clients change the file while you are using it. - -Thereby we can stop setting PG_private on swap pages, since there ought to -be no further races with invalidate_inode_pages2() to deal with. - -And since we cannot set PG_private we cannot use page->private (which is -already used by PG_swapcache pages anyway) to store the nfs_page. Thus -augment the new nfs_page_find_request logic. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - fs/nfs/inode.c | 6 ++++ - fs/nfs/write.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++----------- - 2 files changed, 67 insertions(+), 14 deletions(-) - ---- a/fs/nfs/inode.c -+++ b/fs/nfs/inode.c -@@ -868,6 +868,12 @@ int nfs_revalidate_mapping(struct inode - struct nfs_inode *nfsi = NFS_I(inode); - int ret = 0; - -+ /* -+ * swapfiles are not supposed to be shared. -+ */ -+ if (IS_SWAPFILE(inode)) -+ goto out; -+ - if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) - || nfs_attribute_cache_expired(inode) - || NFS_STALE(inode)) { ---- a/fs/nfs/write.c -+++ b/fs/nfs/write.c -@@ -107,25 +107,64 @@ static void nfs_context_set_write_error( - set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); - } - --static struct nfs_page *nfs_page_find_request_locked(struct page *page) -+static struct nfs_page * -+__nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page, -+ int get) - { - struct nfs_page *req = NULL; - -- if (PagePrivate(page)) { -+ if (PagePrivate(page)) - req = (struct nfs_page *)page_private(page); -- if (req != NULL) -- kref_get(&req->wb_kref); -- } -+ else if (unlikely(PageSwapCache(page))) -+ req = radix_tree_lookup(&nfsi->nfs_page_tree, -+ page_file_index(page)); -+ -+ if (get && req) -+ kref_get(&req->wb_kref); -+ - return req; - } - -+static inline struct nfs_page * -+nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) -+{ -+ return __nfs_page_find_request_locked(nfsi, page, 1); -+} -+ -+static int __nfs_page_has_request(struct page *page) -+{ -+ struct inode *inode = page_file_mapping(page)->host; -+ struct nfs_page *req = NULL; -+ -+ spin_lock(&inode->i_lock); -+ req = __nfs_page_find_request_locked(NFS_I(inode), page, 0); -+ spin_unlock(&inode->i_lock); -+ -+ /* -+ * hole here plugged by the caller holding onto PG_locked -+ */ -+ -+ return req != NULL; -+} -+ -+static inline int nfs_page_has_request(struct page *page) -+{ -+ if 
(PagePrivate(page)) -+ return 1; -+ -+ if (unlikely(PageSwapCache(page))) -+ return __nfs_page_has_request(page); -+ -+ return 0; -+} -+ - static struct nfs_page *nfs_page_find_request(struct page *page) - { - struct inode *inode = page_file_mapping(page)->host; - struct nfs_page *req = NULL; - - spin_lock(&inode->i_lock); -- req = nfs_page_find_request_locked(page); -+ req = nfs_page_find_request_locked(NFS_I(inode), page); - spin_unlock(&inode->i_lock); - return req; - } -@@ -228,7 +267,7 @@ static struct nfs_page *nfs_find_and_loc - - spin_lock(&inode->i_lock); - for (;;) { -- req = nfs_page_find_request_locked(page); -+ req = nfs_page_find_request_locked(NFS_I(inode), page); - if (req == NULL) - break; - if (nfs_set_page_tag_locked(req)) -@@ -391,9 +430,15 @@ static int nfs_inode_add_request(struct - if (nfs_have_delegation(inode, FMODE_WRITE)) - nfsi->change_attr++; - } -- set_bit(PG_MAPPED, &req->wb_flags); -- SetPagePrivate(req->wb_page); -- set_page_private(req->wb_page, (unsigned long)req); -+ /* -+ * Swap-space should not get truncated. Hence no need to plug the race -+ * with invalidate/truncate. 
-+ */ -+ if (likely(!PageSwapCache(req->wb_page))) { -+ set_bit(PG_MAPPED, &req->wb_flags); -+ SetPagePrivate(req->wb_page); -+ set_page_private(req->wb_page, (unsigned long)req); -+ } - nfsi->npages++; - kref_get(&req->wb_kref); - radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, -@@ -415,9 +460,11 @@ static void nfs_inode_remove_request(str - BUG_ON (!NFS_WBACK_BUSY(req)); - - spin_lock(&inode->i_lock); -- set_page_private(req->wb_page, 0); -- ClearPagePrivate(req->wb_page); -- clear_bit(PG_MAPPED, &req->wb_flags); -+ if (likely(!PageSwapCache(req->wb_page))) { -+ set_page_private(req->wb_page, 0); -+ ClearPagePrivate(req->wb_page); -+ clear_bit(PG_MAPPED, &req->wb_flags); -+ } - radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); - nfsi->npages--; - if (!nfsi->npages) { -@@ -585,7 +632,7 @@ static struct nfs_page *nfs_try_to_updat - spin_lock(&inode->i_lock); - - for (;;) { -- req = nfs_page_find_request_locked(page); -+ req = nfs_page_find_request_locked(NFS_I(inode), page); - if (req == NULL) - goto out_unlock; - diff --git a/patches.suse/SoN-27-nfs-swap_ops.patch b/patches.suse/SoN-27-nfs-swap_ops.patch deleted file mode 100644 index 68f2a8a..0000000 --- a/patches.suse/SoN-27-nfs-swap_ops.patch +++ /dev/null @@ -1,318 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 27/31] nfs: enable swap on NFS -Patch-mainline: not yet - -Implement all the new swapfile a_ops for NFS. This will set the NFS socket to -SOCK_MEMALLOC and run socket reconnect under PF_MEMALLOC as well as reset -SOCK_MEMALLOC before engaging the protocol ->connect() method. - -PF_MEMALLOC should allow the allocation of struct socket and related objects -and the early (re)setting of SOCK_MEMALLOC should allow us to receive the -packets required for the TCP connection buildup. 
- -(swapping continues over a server reset during heavy network traffic) - -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - fs/nfs/Kconfig | 10 ++++++ - fs/nfs/file.c | 18 ++++++++++++ - fs/nfs/write.c | 22 ++++++++++++++ - include/linux/nfs_fs.h | 2 + - include/linux/sunrpc/xprt.h | 5 ++- - net/sunrpc/Kconfig | 5 +++ - net/sunrpc/clnt.c | 2 + - net/sunrpc/sched.c | 7 +++- - net/sunrpc/xprtsock.c | 65 ++++++++++++++++++++++++++++++++++++++++++++ - 9 files changed, 133 insertions(+), 3 deletions(-) - ---- a/fs/nfs/Kconfig -+++ b/fs/nfs/Kconfig -@@ -74,6 +74,16 @@ config NFS_V4 - - If unsure, say Y. - -+config NFS_SWAP -+ bool "Provide swap over NFS support" -+ default n -+ depends on NFS_FS -+ select SUNRPC_SWAP -+ help -+ This option enables swapon to work on files located on NFS mounts. -+ -+ For more details, see Documentation/network-swap.txt -+ - config NFS_V4_1 - bool "NFS client support for NFSv4.1 (EXPERIMENTAL)" - depends on NFS_FS && NFS_V4 && EXPERIMENTAL ---- a/fs/nfs/file.c -+++ b/fs/nfs/file.c -@@ -529,6 +529,18 @@ static int nfs_launder_page(struct page - return nfs_wb_page(inode, page); - } - -+#ifdef CONFIG_NFS_SWAP -+static int nfs_swapon(struct file *file) -+{ -+ return xs_swapper(NFS_CLIENT(file->f_mapping->host)->cl_xprt, 1); -+} -+ -+static int nfs_swapoff(struct file *file) -+{ -+ return xs_swapper(NFS_CLIENT(file->f_mapping->host)->cl_xprt, 0); -+} -+#endif -+ - const struct address_space_operations nfs_file_aops = { - .readpage = nfs_readpage, - .readpages = nfs_readpages, -@@ -543,6 +555,12 @@ const struct address_space_operations nf - .migratepage = nfs_migrate_page, - .launder_page = nfs_launder_page, - .error_remove_page = generic_error_remove_page, -+#ifdef CONFIG_NFS_SWAP -+ .swapon = nfs_swapon, -+ .swapoff = nfs_swapoff, -+ .swap_out = nfs_swap_out, -+ .swap_in = nfs_readpage, -+#endif - }; - - /* ---- a/fs/nfs/write.c -+++ b/fs/nfs/write.c -@@ -365,6 +365,28 @@ int nfs_writepage(struct page *page, str - return 
ret; - } - -+static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, -+ unsigned int offset, unsigned int count); -+ -+int nfs_swap_out(struct file *file, struct page *page, -+ struct writeback_control *wbc) -+{ -+ struct nfs_open_context *ctx = nfs_file_open_context(file); -+ int status; -+ -+ status = nfs_writepage_setup(ctx, page, 0, nfs_page_length(page)); -+ if (status < 0) { -+ nfs_set_pageerror(page); -+ goto out; -+ } -+ -+ status = nfs_writepage_locked(page, wbc); -+ -+out: -+ unlock_page(page); -+ return status; -+} -+ - static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data) - { - int ret; ---- a/include/linux/nfs_fs.h -+++ b/include/linux/nfs_fs.h -@@ -502,6 +502,8 @@ extern int nfs_writepages(struct addres - extern int nfs_flush_incompatible(struct file *file, struct page *page); - extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); - extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); -+extern int nfs_swap_out(struct file *file, struct page *page, -+ struct writeback_control *wbc); - - /* - * Try to write back everything synchronously (but check the ---- a/include/linux/sunrpc/xprt.h -+++ b/include/linux/sunrpc/xprt.h -@@ -172,7 +172,9 @@ struct rpc_xprt { - unsigned int max_reqs; /* total slots */ - unsigned long state; /* transport state */ - unsigned char shutdown : 1, /* being shut down */ -- resvport : 1; /* use a reserved port */ -+ resvport : 1, /* use a reserved port */ -+ swapper : 1; /* we're swapping over this -+ transport */ - unsigned int bind_index; /* bind function index */ - - /* -@@ -308,6 +310,7 @@ void xprt_release_rqst_cong(struct rpc - void xprt_disconnect_done(struct rpc_xprt *xprt); - void xprt_force_disconnect(struct rpc_xprt *xprt); - void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); -+int xs_swapper(struct rpc_xprt *xprt, int enable); - - /* - * Reserved bit positions in 
xprt->state ---- a/net/sunrpc/Kconfig -+++ b/net/sunrpc/Kconfig -@@ -17,6 +17,11 @@ config SUNRPC_XPRT_RDMA - - If unsure, say N. - -+config SUNRPC_SWAP -+ bool -+ depends on SUNRPC -+ select NETVM -+ - config RPCSEC_GSS_KRB5 - tristate - depends on SUNRPC && CRYPTO ---- a/net/sunrpc/clnt.c -+++ b/net/sunrpc/clnt.c -@@ -590,6 +590,8 @@ void rpc_task_set_client(struct rpc_task - atomic_inc(&clnt->cl_count); - if (clnt->cl_softrtry) - task->tk_flags |= RPC_TASK_SOFT; -+ if (task->tk_client->cl_xprt->swapper) -+ task->tk_flags |= RPC_TASK_SWAPPER; - /* Add to the client's list of all tasks */ - spin_lock(&clnt->cl_lock); - list_add_tail(&task->tk_task, &clnt->cl_tasks); ---- a/net/sunrpc/sched.c -+++ b/net/sunrpc/sched.c -@@ -728,7 +728,10 @@ static void rpc_async_schedule(struct wo - void *rpc_malloc(struct rpc_task *task, size_t size) - { - struct rpc_buffer *buf; -- gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT; -+ gfp_t gfp = GFP_NOWAIT; -+ -+ if (RPC_IS_SWAPPER(task)) -+ gfp |= __GFP_MEMALLOC; - - size += sizeof(struct rpc_buffer); - if (size <= RPC_BUFFER_MAXSIZE) -@@ -807,7 +810,7 @@ static void rpc_init_task(struct rpc_tas - static struct rpc_task * - rpc_alloc_task(void) - { -- return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); -+ return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO); - } - - /* ---- a/net/sunrpc/xprtsock.c -+++ b/net/sunrpc/xprtsock.c -@@ -1641,6 +1641,57 @@ out: - return ERR_PTR(err); - } - -+#ifdef CONFIG_SUNRPC_SWAP -+static void xs_set_memalloc(struct rpc_xprt *xprt) -+{ -+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, -+ xprt); -+ -+ if (xprt->swapper) -+ sk_set_memalloc(transport->inet); -+} -+ -+#define RPC_BUF_RESERVE_PAGES \ -+ kmalloc_estimate_objs(sizeof(struct rpc_rqst), GFP_KERNEL, RPC_MAX_SLOT_TABLE) -+#define RPC_RESERVE_PAGES (RPC_BUF_RESERVE_PAGES + TX_RESERVE_PAGES) -+ -+/** -+ * xs_swapper - Tag this transport as being used for swap. 
-+ * @xprt: transport to tag -+ * @enable: enable/disable -+ * -+ */ -+int xs_swapper(struct rpc_xprt *xprt, int enable) -+{ -+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, -+ xprt); -+ int err = 0; -+ -+ if (enable) { -+ /* -+ * keep one extra sock reference so the reserve won't dip -+ * when the socket gets reconnected. -+ */ -+ err = sk_adjust_memalloc(1, RPC_RESERVE_PAGES); -+ if (!err) { -+ xprt->swapper = 1; -+ xs_set_memalloc(xprt); -+ } -+ } else if (xprt->swapper) { -+ xprt->swapper = 0; -+ sk_clear_memalloc(transport->inet); -+ sk_adjust_memalloc(-1, -RPC_RESERVE_PAGES); -+ } -+ -+ return err; -+} -+EXPORT_SYMBOL_GPL(xs_swapper); -+#else -+static void xs_set_memalloc(struct rpc_xprt *xprt) -+{ -+} -+#endif -+ - static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) - { - struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); -@@ -1665,6 +1716,8 @@ static void xs_udp_finish_connecting(str - transport->sock = sock; - transport->inet = sk; - -+ xs_set_memalloc(xprt); -+ - write_unlock_bh(&sk->sk_callback_lock); - } - xs_udp_do_set_buffer_size(xprt); -@@ -1676,11 +1729,15 @@ static void xs_udp_setup_socket(struct w - container_of(work, struct sock_xprt, connect_worker.work); - struct rpc_xprt *xprt = &transport->xprt; - struct socket *sock = transport->sock; -+ unsigned long pflags = current->flags; - int status = -EIO; - - if (xprt->shutdown) - goto out; - -+ if (xprt->swapper) -+ current->flags |= PF_MEMALLOC; -+ - /* Start by resetting any existing state */ - xs_reset_transport(transport); - sock = xs_create_sock(xprt, transport, -@@ -1699,6 +1756,7 @@ static void xs_udp_setup_socket(struct w - out: - xprt_clear_connecting(xprt); - xprt_wake_pending_tasks(xprt, status); -+ tsk_restore_flags(current, pflags, PF_MEMALLOC); - } - - /* -@@ -1788,6 +1846,8 @@ static int xs_tcp_finish_connecting(stru - if (!xprt_bound(xprt)) - return -ENOTCONN; - -+ xs_set_memalloc(xprt); -+ - /* Tell the socket 
layer to start connecting... */ - xprt->stat.connect_count++; - xprt->stat.connect_start = jiffies; -@@ -1808,11 +1868,15 @@ static void xs_tcp_setup_socket(struct w - container_of(work, struct sock_xprt, connect_worker.work); - struct socket *sock = transport->sock; - struct rpc_xprt *xprt = &transport->xprt; -+ unsigned long pflags = current->flags; - int status = -EIO; - - if (xprt->shutdown) - goto out; - -+ if (xprt->swapper) -+ current->flags |= PF_MEMALLOC; -+ - if (!sock) { - clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); - sock = xs_create_sock(xprt, transport, -@@ -1874,6 +1938,7 @@ out_eagain: - out: - xprt_clear_connecting(xprt); - xprt_wake_pending_tasks(xprt, status); -+ tsk_restore_flags(current, pflags, PF_MEMALLOC); - } - - /** diff --git a/patches.suse/SoN-27a-nfs-swap_ops.patch b/patches.suse/SoN-27a-nfs-swap_ops.patch deleted file mode 100644 index bee43d5..0000000 --- a/patches.suse/SoN-27a-nfs-swap_ops.patch +++ /dev/null @@ -1,46 +0,0 @@ -From: Mel Gorman -Date: Wed, 9 Mar 2011 12:26:23 +0000 -Subject: [PATCH] netvm: Do not mark requests for swapfile writes as dirty or kswapd fails to free the page -Patch-mainline: Not yet -References: bnc#678472 - -When writing back NFS pages from kswapd, the inode and pages are getting -marked dirty before IO has even started. The expectation of kswapd is -that it calls clear_page_dirty_for_io(), submits IO and the filesystem -remarks the page dirty if necessary. Without this patch, the page always -comes back under writeback and still dirty. kswapd continually launders -but never frees leading to deadlock. 
- -Signed-off-by: Mel Gorman ---- - fs/nfs/write.c | 14 ++++++++++++-- - 1 files changed, 12 insertions(+), 2 deletions(-) - -diff --git a/fs/nfs/write.c b/fs/nfs/write.c -index 2375c7d..fe05d78 100644 ---- a/fs/nfs/write.c -+++ b/fs/nfs/write.c -@@ -744,11 +744,21 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, - req = nfs_setup_write_request(ctx, page, offset, count); - if (IS_ERR(req)) - return PTR_ERR(req); -- nfs_mark_request_dirty(req); -+ -+ /* -+ * There is no need to mark swapfile requests as dirty like normal -+ * writepage requests as page dirtying and cleaning is managed -+ * from the mm. If a PageSwapCache page is marked dirty like this, -+ * it will still be dirty after kswapd calls writepage and may -+ * never be released -+ */ -+ if (!PageSwapCache(page)) -+ nfs_mark_request_dirty(req); - /* Update file length */ - nfs_grow_file(page, offset, count); - nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); -- nfs_mark_request_dirty(req); -+ if (!PageSwapCache(page)) -+ nfs_mark_request_dirty(req); - nfs_clear_page_tag_locked(req); - return 0; - } diff --git a/patches.suse/SoN-28-nfs-alloc-recursions.patch b/patches.suse/SoN-28-nfs-alloc-recursions.patch deleted file mode 100644 index 523c3f5..0000000 --- a/patches.suse/SoN-28-nfs-alloc-recursions.patch +++ /dev/null @@ -1,59 +0,0 @@ -From: Peter Zijlstra -Subject: [PATCH 28/31] nfs: fix various memory recursions possible with swap over NFS. -Patch-mainline: not yet - -GFP_NOFS is _more_ permissive than GFP_NOIO in that it will initiate IO, -just not of any filesystem data. - -The problem is that previuosly NOFS was correct because that avoids -recursion into the NFS code, it now is not, because also IO (swap) can -lead to this recursion. 
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Suresh Jayaraman ---- - fs/nfs/pagelist.c | 2 +- - fs/nfs/write.c | 7 ++++--- - 2 files changed, 5 insertions(+), 4 deletions(-) - ---- a/fs/nfs/pagelist.c -+++ b/fs/nfs/pagelist.c -@@ -27,7 +27,7 @@ static inline struct nfs_page * - static inline struct nfs_page * - nfs_page_alloc(void) - { -- struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL); -+ struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO); - if (p) - INIT_LIST_HEAD(&p->wb_list); - return p; ---- a/fs/nfs/write.c -+++ b/fs/nfs/write.c -@@ -50,7 +50,7 @@ static mempool_t *nfs_commit_mempool; - - struct nfs_write_data *nfs_commitdata_alloc(void) - { -- struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS); -+ struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); - - if (p) { - memset(p, 0, sizeof(*p)); -@@ -69,7 +69,7 @@ void nfs_commit_free(struct nfs_write_da - - struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) - { -- struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); -+ struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO); - - if (p) { - memset(p, 0, sizeof(*p)); -@@ -79,7 +79,8 @@ struct nfs_write_data *nfs_writedata_all - if (pagecount <= ARRAY_SIZE(p->page_array)) - p->pagevec = p->page_array; - else { -- p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS); -+ p->pagevec = kcalloc(pagecount, sizeof(struct page *), -+ GFP_NOIO); - if (!p->pagevec) { - mempool_free(p, nfs_wdata_mempool); - p = NULL; diff --git a/patches.suse/SoN-29-fix-swap_sync_page-race b/patches.suse/SoN-29-fix-swap_sync_page-race deleted file mode 100644 index 2544dab..0000000 --- a/patches.suse/SoN-29-fix-swap_sync_page-race +++ /dev/null @@ -1,58 +0,0 @@ -From: NeilBrown -Subject: [PATCH 29/31] Cope with racy nature of sync_page in swap_sync_page -Patch-mainline: not yet - -sync_page is called without that PageLock held. 
This means that, -for example, PageSwapCache can be cleared at any time. -We need to be careful not to put much trust any any part of the page. - -So allow page_swap_info to return NULL of the page is no longer -in a SwapCache, and handle the NULL gracefully in swap_sync_page. - -No other calls need to handle the NULL as that all hold PageLock, -so PageSwapCache cannot be cleared by surprise. Add a WARN_ON to -document this fact and help find out if I am wrong. - -Acked-by: Miklos Szeredi -Signed-off-by: NeilBrown -Signed-off-by: Suresh Jayaraman ---- - mm/page_io.c | 5 +++++ - mm/swapfile.c | 8 +++++++- - 2 files changed, 12 insertions(+), 1 deletion(-) - ---- a/mm/page_io.c -+++ b/mm/page_io.c -@@ -128,10 +128,15 @@ out: - return ret; - } - -+/* this comment ensure the patch applies to swap_sync_page -+ * and not swap_set_page_dirty by mistake -+ */ - void swap_sync_page(struct page *page) - { - struct swap_info_struct *sis = page_swap_info(page); - -+ if (!sis) -+ return; - if (sis->flags & SWP_FILE) { - struct address_space *mapping = sis->swap_file->f_mapping; - ---- a/mm/swapfile.c -+++ b/mm/swapfile.c -@@ -2302,7 +2302,13 @@ int swapcache_prepare(swp_entry_t entry) - struct swap_info_struct *page_swap_info(struct page *page) - { - swp_entry_t swap = { .val = page_private(page) }; -- BUG_ON(!PageSwapCache(page)); -+ if (!PageSwapCache(page) || !swap.val) { -+ /* This should only happen from sync_page. -+ * In other cases the page should be locked and -+ * should be in a SwapCache -+ */ -+ return NULL; -+ } - return swap_info[swp_type(swap)]; - } - diff --git a/patches.suse/SoN-30-fix-uninitialized-var.patch b/patches.suse/SoN-30-fix-uninitialized-var.patch deleted file mode 100644 index a484ffd..0000000 --- a/patches.suse/SoN-30-fix-uninitialized-var.patch +++ /dev/null @@ -1,40 +0,0 @@ -From: Miklos Szeredi -Subject: [PATCH 30/31] Fix use of uninitialized variable in cache_grow() -Patch-mainline: not yet - -This fixes a bug in reserve-slub.patch. 
- -If cache_grow() was called with objp != NULL then the 'reserve' local -variable wasn't initialized. This resulted in ac->reserve being set to -a rubbish value. Due to this in some circumstances huge amounts of -slab pages were allocated (due to slab_force_alloc() returning true), -which caused atomic page allocation failures and slowdown of the -system. - -Signed-off-by: Miklos Szeredi -Signed-off-by: Suresh Jayaraman ---- - mm/slab.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - ---- a/mm/slab.c -+++ b/mm/slab.c -@@ -2841,7 +2841,7 @@ static int cache_grow(struct kmem_cache - size_t offset; - gfp_t local_flags; - struct kmem_list3 *l3; -- int reserve; -+ int reserve = -1; - - /* - * Be lazy and only check for valid flags here, keeping it out of the -@@ -2897,7 +2897,8 @@ static int cache_grow(struct kmem_cache - if (local_flags & __GFP_WAIT) - local_irq_disable(); - check_irq_off(); -- slab_set_reserve(cachep, reserve); -+ if (reserve != -1) -+ slab_set_reserve(cachep, reserve); - spin_lock(&l3->list_lock); - - /* Make slab active. */ diff --git a/patches.suse/SoN-31-fix-null-pointer-dereference b/patches.suse/SoN-31-fix-null-pointer-dereference deleted file mode 100644 index 0c88a66..0000000 --- a/patches.suse/SoN-31-fix-null-pointer-dereference +++ /dev/null @@ -1,37 +0,0 @@ -From: Xiaotian Feng -Subject: fix null pointer deref in swap_entry_free -Patch-mainline: Not yet - -Commit b3a27d uses p->bdev->bd_disk, this will lead a null pointer -deref with swap over nfs. 
- -Signed-off-by: Xiaotian Feng -Signed-off-by: Suresh Jayaraman --- -Index: linux-2.6.35-master/mm/swapfile.c -=================================================================== ---- linux-2.6.35-master.orig/mm/swapfile.c -+++ linux-2.6.35-master/mm/swapfile.c -@@ -574,7 +574,6 @@ static unsigned char swap_entry_free(str - - /* free if no reference */ - if (!usage) { -- struct gendisk *disk = p->bdev->bd_disk; - if (offset < p->lowest_bit) - p->lowest_bit = offset; - if (offset > p->highest_bit) -@@ -584,9 +583,11 @@ static unsigned char swap_entry_free(str - swap_list.next = p->type; - nr_swap_pages++; - p->inuse_pages--; -- if ((p->flags & SWP_BLKDEV) && -- disk->fops->swap_slot_free_notify) -- disk->fops->swap_slot_free_notify(p->bdev, offset); -+ if (p->flags & SWP_BLKDEV) { -+ struct gendisk *disk = p->bdev->bd_disk; -+ if (disk->fops->swap_slot_free_notify) -+ disk->fops->swap_slot_free_notify(p->bdev, offset); -+ } - } - - return usage; diff --git a/patches.suse/SoN-32-fix-kernel-bug-with-multiple-swapfiles b/patches.suse/SoN-32-fix-kernel-bug-with-multiple-swapfiles deleted file mode 100644 index a0f82de..0000000 --- a/patches.suse/SoN-32-fix-kernel-bug-with-multiple-swapfiles +++ /dev/null @@ -1,45 +0,0 @@ -From: Xiaotian Feng -Subject: fix mess up on swap with multi files from same nfs server -Patch-mainline: Not yet - -xs_swapper() will set xprt->swapper when swapon nfs files, unset xprt->swapper -when swapoff nfs files. This will lead a bug if we swapon multi files from -the same nfs server, they had the same xprt, then the reserved memory could -not be disconnected when we swapoff all files. 
- -Signed-off-by: Xiaotian Feng ---- -Index: linux-2.6.35-master/include/linux/sunrpc/xprt.h -=================================================================== ---- linux-2.6.35-master.orig/include/linux/sunrpc/xprt.h -+++ linux-2.6.35-master/include/linux/sunrpc/xprt.h -@@ -172,8 +172,8 @@ struct rpc_xprt { - unsigned int max_reqs; /* total slots */ - unsigned long state; /* transport state */ - unsigned char shutdown : 1, /* being shut down */ -- resvport : 1, /* use a reserved port */ -- swapper : 1; /* we're swapping over this -+ resvport : 1; /* use a reserved port */ -+ unsigned int swapper; /* we're swapping over this - transport */ - unsigned int bind_index; /* bind function index */ - -Index: linux-2.6.35-master/net/sunrpc/xprtsock.c -=================================================================== ---- linux-2.6.35-master.orig/net/sunrpc/xprtsock.c -+++ linux-2.6.35-master/net/sunrpc/xprtsock.c -@@ -1665,11 +1665,11 @@ int xs_swapper(struct rpc_xprt *xprt, in - */ - err = sk_adjust_memalloc(1, RPC_RESERVE_PAGES); - if (!err) { -- xprt->swapper = 1; -+ xprt->swapper++; - xs_set_memalloc(xprt); - } - } else if (xprt->swapper) { -- xprt->swapper = 0; -+ xprt->swapper--; - sk_clear_memalloc(transport->inet); - sk_adjust_memalloc(-1, -RPC_RESERVE_PAGES); - } diff --git a/patches.suse/SoN-33-slab-leak-fix.patch b/patches.suse/SoN-33-slab-leak-fix.patch deleted file mode 100644 index 4cb0e44..0000000 --- a/patches.suse/SoN-33-slab-leak-fix.patch +++ /dev/null @@ -1,45 +0,0 @@ -From: Nick Piggin -Subject: Fix almost-infinite slab cache growing -References: bnc#554081 -Patch-mainline: never - -If we get into cache_alloc_refill() with must_refill set, we end up in an -almost endless loop adding more and more pages to the slab. The loop can be -broken only by a failure to allocate more pages or an interrupt refilling the -slab's allocation cache. - -Fix the issue by jumping to a more appropriate place when the allocation cache -is not refilled by an interrupt. 
- -Signed-off-by: Nick Piggin - -Index: linux-2.6.38-master/mm/slab.c -=================================================================== ---- linux-2.6.38-master.orig/mm/slab.c -+++ linux-2.6.38-master/mm/slab.c -@@ -3104,11 +3104,11 @@ static void *cache_alloc_refill(struct k - struct array_cache *ac; - int node; - --retry: - check_irq_off(); - node = numa_slab_nid(cachep, flags); - if (unlikely(must_refill)) - goto force_grow; -+retry: - ac = cpu_cache_get(cachep); - batchcount = ac->batchcount; - if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { -@@ -3188,8 +3188,10 @@ force_grow: - if (!x && (ac->avail == 0 || must_refill)) - return NULL; - -- if (!ac->avail) /* objects refilled by interrupt? */ -+ if (!ac->avail) { /* objects refilled by interrupt? */ -+ node = numa_node_id(); - goto retry; -+ } - } - ac->touched = 1; - return ac->entry[--ac->avail]; diff --git a/patches.suse/SoN-fix b/patches.suse/SoN-fix deleted file mode 100644 index f75ae54..0000000 --- a/patches.suse/SoN-fix +++ /dev/null @@ -1,22 +0,0 @@ -From: Jeff Mahoney -Subject: SoN: wakeup_kswapd takes a zone index now -Patch-mainline: Depends on local patches - - This patch fixes the build with SoN applied. 
- -Signed-off-by: Jeff Mahoney ---- - mm/page_alloc.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -5054,7 +5054,7 @@ static int test_reserve_limits(void) - int node; - - for_each_zone(zone) -- wakeup_kswapd(zone, 0); -+ wakeup_kswapd(zone, 0, zone_idx(zone)); - - for_each_online_node(node) { - struct page *page = alloc_pages_node(node, GFP_KERNEL, 0); diff --git a/patches.suse/acpi-don-t-preempt-until-the-system-is-up b/patches.suse/acpi-don-t-preempt-until-the-system-is-up deleted file mode 100644 index 9e28565..0000000 --- a/patches.suse/acpi-don-t-preempt-until-the-system-is-up +++ /dev/null @@ -1,25 +0,0 @@ -From: Jeff Mahoney -Subject: acpi: don't preempt until the system is up -Patch-mainline: Probably never - - This is needed to avoid scheduling while atomic BUGs with the - DSDT in initramfs patches. - - -Signed-off-by: Jeff Mahoney ---- - drivers/acpi/acpica/psloop.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/acpi/acpica/psloop.c -+++ b/drivers/acpi/acpica/psloop.c -@@ -843,7 +843,8 @@ acpi_ps_complete_op(struct acpi_walk_sta - *op = NULL; - } - -- ACPI_PREEMPTION_POINT(); -+ if (system_state == SYSTEM_RUNNING) -+ ACPI_PREEMPTION_POINT(); - - return_ACPI_STATUS(AE_OK); - } diff --git a/patches.suse/acpi-dsdt-initrd-v0.9a-2.6.25.patch b/patches.suse/acpi-dsdt-initrd-v0.9a-2.6.25.patch deleted file mode 100644 index 325202c..0000000 --- a/patches.suse/acpi-dsdt-initrd-v0.9a-2.6.25.patch +++ /dev/null @@ -1,410 +0,0 @@ -From: Eric Piel -Subject: [PATCH 1/1] ACPI: initramfs DSDT override support -Patch-mainline: not yet - - -Permits to load of DSDT (the main ACPI table) from initramfs. In case this -option is selected, the initramfs is parsed at ACPI initialization (very early -boot time) to look for a file DSDT.aml . This aims at allowing users to -override the DSDT without recompiling the kernel. 
This is done by adding a new -feature to the initramfs parser so that one specific file can be directly -copied into memory. - -This is derived from the patch v0.8 from http://gaugusch.at/kernel.shtml but -with kernel inclusion in mind: some clean-up's in the documentation, default -set to No, a kernel parameter to disable it at runtime, and most important, a -different approach for reading the initramfs which avoids using the filesystem -infrastructure. - -It also contains a fix for compilation on non-ACPI platforms provided by Rene Rebe. - -Update 17 Sep 2009 jeffm@suse.com: -- 2.6.30 (or so) introduced very early ACPI initialization for proper SMP - detection. This caused crashes since things like the mm caches weren't - set up yet, so kmalloc would crash. This update delays overriding the DSDT - until the acpi_early_init() call that used to override it. Since there is - a DSDT already loaded, it is necessarily a bit hacky. - -Signed-off-by: Eric Piel -Signed-off-by: Thomas Renninger -Signed-off-by: Len Brown -Signed-off-by: Jeff Mahoney ---- - Documentation/acpi/dsdt-override.txt | 12 +++- - Documentation/acpi/initramfs-add-dsdt.sh | 43 +++++++++++++++ - Documentation/kernel-parameters.txt | 3 + - drivers/acpi/Kconfig | 11 ++++ - drivers/acpi/acpica/tbxface.c | 36 +++++++++++-- - drivers/acpi/osl.c | 28 +++++++++- - init/initramfs.c | 84 +++++++++++++++++++++++++++++++ - 7 files changed, 209 insertions(+), 8 deletions(-) - create mode 100644 Documentation/acpi/initramfs-add-dsdt.sh - ---- a/Documentation/acpi/dsdt-override.txt -+++ b/Documentation/acpi/dsdt-override.txt -@@ -1,7 +1,15 @@ --Linux supports a method of overriding the BIOS DSDT: -+Linux supports two methods of overriding the BIOS DSDT: - - CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel. - --When to use this method is described in detail on the -+CONFIG_ACPI_CUSTOM_DSDT_INITRD adds the image to the initrd. 
-+ -+When to use these methods is described in detail on the - Linux/ACPI home page: - http://www.lesswatts.org/projects/acpi/overridingDSDT.php -+ -+Note that if both options are used, the DSDT supplied -+by the INITRD method takes precedence. -+ -+Documentation/initramfs-add-dsdt.sh is provided for convenience -+for use with the CONFIG_ACPI_CUSTOM_DSDT_INITRD method. ---- /dev/null -+++ b/Documentation/acpi/initramfs-add-dsdt.sh -@@ -0,0 +1,43 @@ -+#!/bin/bash -+# Adds a DSDT file to the initrd (if it's an initramfs) -+# first argument is the name of archive -+# second argument is the name of the file to add -+# The file will be copied as /DSDT.aml -+ -+# 20060126: fix "Premature end of file" with some old cpio (Roland Robic) -+# 20060205: this time it should really work -+ -+# check the arguments -+if [ $# -ne 2 ]; then -+ program_name=$(basename $0) -+ echo "\ -+$program_name: too few arguments -+Usage: $program_name initrd-name.img DSDT-to-add.aml -+Adds a DSDT file to an initrd (in initramfs format) -+ -+ initrd-name.img: filename of the initrd in initramfs format -+ DSDT-to-add.aml: filename of the DSDT file to add -+ " 1>&2 -+ exit 1 -+fi -+ -+# we should check it's an initramfs -+ -+tempcpio=$(mktemp -d) -+# cleanup on exit, hangup, interrupt, quit, termination -+trap 'rm -rf $tempcpio' 0 1 2 3 15 -+ -+# extract the archive -+gunzip -c "$1" > "$tempcpio"/initramfs.cpio || exit 1 -+ -+# copy the DSDT file at the root of the directory so that we can call it "/DSDT.aml" -+cp -f "$2" "$tempcpio"/DSDT.aml -+ -+# add the file -+cd "$tempcpio" -+(echo DSDT.aml | cpio --quiet -H newc -o -A -O "$tempcpio"/initramfs.cpio) || exit 1 -+cd "$OLDPWD" -+ -+# re-compress the archive -+gzip -c "$tempcpio"/initramfs.cpio > "$1" -+ ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -217,6 +217,9 @@ and is between 256 and 4096 characters. 
- - acpi_no_auto_ssdt [HW,ACPI] Disable automatic loading of SSDT - -+ acpi_no_initrd_override [KNL,ACPI] -+ Disable loading custom ACPI tables from the initramfs -+ - acpi_os_name= [HW,ACPI] Tell ACPI BIOS the name of the OS - Format: To spoof as Windows 98: ="Microsoft Windows" - ---- a/drivers/acpi/Kconfig -+++ b/drivers/acpi/Kconfig -@@ -248,6 +248,17 @@ config ACPI_CUSTOM_DSDT - bool - default ACPI_CUSTOM_DSDT_FILE != "" - -+config ACPI_CUSTOM_DSDT_INITRD -+ bool "Read Custom DSDT from initramfs" -+ depends on BLK_DEV_INITRD -+ default n -+ help -+ This option supports a custom DSDT by optionally loading it from initrd. -+ See Documentation/acpi/dsdt-override.txt -+ -+ If you are not using this feature now, but may use it later, -+ it is safe to say Y here. -+ - config ACPI_BLACKLIST_YEAR - int "Disable ACPI for systems before Jan 1st this year" if X86_32 - default 0 ---- a/drivers/acpi/acpica/tbxface.c -+++ b/drivers/acpi/acpica/tbxface.c -@@ -484,6 +484,33 @@ acpi_get_table_by_index(u32 table_index, - - ACPI_EXPORT_SYMBOL(acpi_get_table_by_index) - -+static void -+acpi_dsdt_initrd_override(void) -+{ -+#if defined(CONFIG_ACPI_CUSTOM_DSDT_INITRD) -+ struct acpi_table_header *new = NULL; -+ struct acpi_table_desc *table; -+ acpi_status status; -+ -+ table = &acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT]; -+ status = acpi_os_table_override(table->pointer, &new); -+ if (ACPI_SUCCESS(status) && new) { -+ acpi_tb_delete_table(table); -+ -+ /* This is the top part of acpi_table_load */ -+ memset(table, 0, sizeof(*table)); -+ table->address = ACPI_PTR_TO_PHYSADDR(new); -+ table->pointer = new; -+ table->length = new->length; -+ table->flags |= ACPI_TABLE_ORIGIN_OVERRIDE; -+ table->flags |= ACPI_TABLE_ORIGIN_ALLOCATED; -+ memcpy(table->signature.ascii, new->signature, ACPI_NAME_SIZE); -+ acpi_tb_print_table_header(table->address, new); -+ } -+#endif -+} -+ -+ - /******************************************************************************* - * - * FUNCTION: 
acpi_tb_load_namespace -@@ -496,7 +523,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_by_ind - * the RSDT/XSDT. - * - ******************************************************************************/ --static acpi_status acpi_tb_load_namespace(void) -+static acpi_status __init acpi_tb_load_namespace(void) - { - acpi_status status; - u32 i; -@@ -522,6 +549,8 @@ static acpi_status acpi_tb_load_namespac - goto unlock_and_exit; - } - -+ acpi_dsdt_initrd_override(); -+ - /* A valid DSDT is required */ - - status = -@@ -590,7 +619,7 @@ static acpi_status acpi_tb_load_namespac - * - ******************************************************************************/ - --acpi_status acpi_load_tables(void) -+acpi_status __init acpi_load_tables(void) - { - acpi_status status; - -@@ -607,9 +636,6 @@ acpi_status acpi_load_tables(void) - return_ACPI_STATUS(status); - } - --ACPI_EXPORT_SYMBOL(acpi_load_tables) -- -- - /******************************************************************************* - * - * FUNCTION: acpi_install_table_handler ---- a/drivers/acpi/osl.c -+++ b/drivers/acpi/osl.c -@@ -98,6 +98,11 @@ static DEFINE_SPINLOCK(acpi_res_lock); - #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ - static char osi_additional_string[OSI_STRING_LENGTH_MAX]; - -+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD -+static int acpi_no_initrd_override; -+extern struct acpi_table_header *acpi_find_dsdt_initrd(void); -+#endif -+ - /* - * The story of _OSI(Linux) - * -@@ -352,7 +357,7 @@ acpi_os_predefined_override(const struct - return AE_OK; - } - --acpi_status -+acpi_status __init - acpi_os_table_override(struct acpi_table_header * existing_table, - struct acpi_table_header ** new_table) - { -@@ -365,6 +370,18 @@ acpi_os_table_override(struct acpi_table - if (strncmp(existing_table->signature, "DSDT", 4) == 0) - *new_table = (struct acpi_table_header *)AmlCode; - #endif -+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD -+ if ((strncmp(existing_table->signature, "DSDT", 4) == 0) && -+ !acpi_no_initrd_override && 
acpi_gbl_permanent_mmap) { -+ /* JDM: acpi_gbl_permanent_mmap means acpi_early_init() has -+ * been called so things like kmalloc are ok. */ -+ struct acpi_table_header *initrd_table; -+ -+ initrd_table = acpi_find_dsdt_initrd(); -+ if (initrd_table) -+ *new_table = initrd_table; -+ } -+#endif - if (*new_table != NULL) { - printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], " - "this is unsafe: tainting kernel\n", -@@ -375,6 +392,15 @@ acpi_os_table_override(struct acpi_table - return AE_OK; - } - -+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD -+static int __init acpi_no_initrd_override_setup(char *s) -+{ -+ acpi_no_initrd_override = 1; -+ return 1; -+} -+__setup("acpi_no_initrd_override", acpi_no_initrd_override_setup); -+#endif -+ - static irqreturn_t acpi_irq(int irq, void *dev_id) - { - u32 handled; ---- a/init/initramfs.c -+++ b/init/initramfs.c -@@ -8,6 +8,9 @@ - #include - #include - #include -+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD -+#include -+#endif - - static __initdata char *message; - static void __init error(char *x) -@@ -125,6 +128,12 @@ static __initdata unsigned long body_len - static __initdata uid_t uid; - static __initdata gid_t gid; - static __initdata unsigned rdev; -+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD -+static __initdata char *file_looked_for; -+static __initdata struct acpi_table_header *file_mem; -+#else -+const char *file_looked_for = NULL; -+#endif - - static void __init parse_header(char *s) - { -@@ -159,6 +168,7 @@ static __initdata enum state { - SkipIt, - GotName, - CopyFile, -+ CopyFileMem, - GotSymlink, - Reset - } state, next_state; -@@ -228,6 +238,11 @@ static int __init do_header(void) - parse_header(collected); - next_header = this_header + N_ALIGN(name_len) + body_len; - next_header = (next_header + 3) & ~3; -+ if (file_looked_for) { -+ read_into(name_buf, N_ALIGN(name_len), GotName); -+ return 0; -+ } -+ - state = SkipIt; - if (name_len <= 0 || name_len > PATH_MAX) - return 0; -@@ -298,6 +313,12 @@ static int __init do_name(void) - 
free_hash(); - return 0; - } -+ if (file_looked_for) { -+ if (S_ISREG(mode) && -+ (strcmp(collected, file_looked_for) == 0)) -+ state = CopyFileMem; -+ return 0; -+ } - clean_path(collected, mode); - if (S_ISREG(mode)) { - int ml = maybe_link(); -@@ -333,6 +354,40 @@ static int __init do_name(void) - return 0; - } - -+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD -+static int __init do_copy_mem(void) -+{ -+ static void *file_current; /* current position in the memory */ -+ if (file_mem == NULL) { -+ if (body_len < 4) { /* check especially against empty files */ -+ error("file is less than 4 bytes"); -+ return 1; -+ } -+ file_mem = kmalloc(body_len, GFP_ATOMIC); -+ if (!file_mem) { -+ error("failed to allocate enough memory"); -+ return 1; -+ } -+ file_current = file_mem; -+ } -+ if (count >= body_len) { -+ memcpy(file_current, victim, body_len); -+ eat(body_len); -+ file_looked_for = NULL; /* don't find files with same name */ -+ state = SkipIt; -+ return 0; -+ } else { -+ memcpy(file_current, victim, count); -+ file_current += count; -+ body_len -= count; -+ eat(count); -+ return 1; -+ } -+} -+#else -+#define do_copy_mem NULL -+#endif -+ - static int __init do_copy(void) - { - if (count >= body_len) { -@@ -370,6 +425,7 @@ static __initdata int (*actions[])(void) - [SkipIt] = do_skip, - [GotName] = do_name, - [CopyFile] = do_copy, -+ [CopyFileMem] = do_copy_mem, - [GotSymlink] = do_symlink, - [Reset] = do_reset, - }; -@@ -606,3 +662,31 @@ static int __init populate_rootfs(void) - return 0; - } - rootfs_initcall(populate_rootfs); -+ -+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD -+struct acpi_table_header * __init acpi_find_dsdt_initrd(void) -+{ -+ char *err, *ramfs_dsdt_name = "DSDT.aml"; -+ -+ printk(KERN_INFO "ACPI: Checking initramfs for custom DSDT\n"); -+ file_mem = NULL; -+ file_looked_for = ramfs_dsdt_name; -+ err = unpack_to_rootfs((char *)initrd_start, -+ initrd_end - initrd_start); -+ file_looked_for = NULL; -+ -+ if (err) { -+ /* -+ * Even if reading the DSDT file was 
successful, -+ * we give up if the initramfs cannot be entirely read. -+ */ -+ kfree(file_mem); -+ printk(KERN_ERR "ACPI: Aborted because %s.\n", err); -+ return NULL; -+ } -+ if (file_mem) -+ printk(KERN_INFO "ACPI: Found DSDT in %s.\n", ramfs_dsdt_name); -+ -+ return file_mem; -+} -+#endif diff --git a/patches.suse/acpi-generic-initramfs-table-override-support b/patches.suse/acpi-generic-initramfs-table-override-support deleted file mode 100644 index ee6d39b..0000000 --- a/patches.suse/acpi-generic-initramfs-table-override-support +++ /dev/null @@ -1,401 +0,0 @@ -From: Jeff Mahoney -Subject: ACPI: generic initramfs table override support -References: bnc#533555 -Patch-mainline: Probably never - -This patch allows the system administrator to override ACPI tables with -versions provided in an initramfs. - -This works by moving the initialization of populate_rootfs earlier -in the initialization so that we can use the VFS file system routines. -The system is initialized enough to support this by acpi_early_init(). -My understanding is that an early version of original patch posted at -http://gaugusch.at/kernel.shtml may have done something similar. - -This version provides the infrastructure to override any ACPI table, but -only provides support for overriding DSDT. If other tables are desired, -extending the support is trivial. - -During early ACPI initialization, when the initramfs is still loaded, -we go through a table of override entries which specify the name of the -table to override, the file name that contains it, and a pointer to the -data loaded from the file. The override tables and headers are kept in -memory so that they available to the ACPI subsystem after the __init -sections and the initramfs have been jettisoned. - -This patch is derived from the work by Éric Piel . - -13 Jan 2010 jeffm: Uses initramfs_{read,write} now to avoid - "scheduling while atomic" warnings. 
- -Signed-off-by: Jeff Mahoney ---- - - Documentation/acpi/dsdt-override.txt | 8 + - Documentation/acpi/initramfs-add-dsdt.sh | 43 +++++++ - Documentation/acpi/table-override.txt | 21 +++ - Documentation/kernel-parameters.txt | 4 - drivers/acpi/Kconfig | 13 ++ - drivers/acpi/bus.c | 7 + - drivers/acpi/osl.c | 171 +++++++++++++++++++++++++++++++ - include/acpi/acpiosxf.h | 3 - 8 files changed, 269 insertions(+), 1 deletion(-) - ---- a/Documentation/acpi/dsdt-override.txt -+++ b/Documentation/acpi/dsdt-override.txt -@@ -1,7 +1,13 @@ --Linux supports a method of overriding the BIOS DSDT: -+Linux supports two methods of overriding the BIOS DSDT: - - CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel. - -+CONFIG_ACPI_CUSTOM_OVERRIDE_INITRAMFS loads the image from -+the initramfs at boot-time. It is more flexible in that it -+does not need to be built into the kernel and tables other -+than DSDT can potentially be overridden. Please see -+Documentation/acpi/table-override.txt for more information. 
-+ - When to use this method is described in detail on the - Linux/ACPI home page: - http://www.lesswatts.org/projects/acpi/overridingDSDT.php ---- /dev/null -+++ b/Documentation/acpi/initramfs-add-dsdt.sh -@@ -0,0 +1,43 @@ -+#!/bin/bash -+# Adds a DSDT file to the initrd (if it's an initramfs) -+# first argument is the name of archive -+# second argument is the name of the file to add -+# The file will be copied as /DSDT.aml -+ -+# 20060126: fix "Premature end of file" with some old cpio (Roland Robic) -+# 20060205: this time it should really work -+ -+# check the arguments -+if [ $# -ne 2 ]; then -+ program_name=$(basename $0) -+ echo "\ -+$program_name: too few arguments -+Usage: $program_name initrd-name.img DSDT-to-add.aml -+Adds a DSDT file to an initrd (in initramfs format) -+ -+ initrd-name.img: filename of the initrd in initramfs format -+ DSDT-to-add.aml: filename of the DSDT file to add -+ " 1>&2 -+ exit 1 -+fi -+ -+# we should check it's an initramfs -+ -+tempcpio=$(mktemp -d) -+# cleanup on exit, hangup, interrupt, quit, termination -+trap 'rm -rf $tempcpio' 0 1 2 3 15 -+ -+# extract the archive -+gunzip -c "$1" > "$tempcpio"/initramfs.cpio || exit 1 -+ -+# copy the DSDT file at the root of the directory so that we can call it "/DSDT.aml" -+cp -f "$2" "$tempcpio"/DSDT.aml -+ -+# add the file -+cd "$tempcpio" -+(echo DSDT.aml | cpio --quiet -H newc -o -A -O "$tempcpio"/initramfs.cpio) || exit 1 -+cd "$OLDPWD" -+ -+# re-compress the archive -+gzip -c "$tempcpio"/initramfs.cpio > "$1" -+ ---- /dev/null -+++ b/Documentation/acpi/table-override.txt -@@ -0,0 +1,21 @@ -+CONFIG_ACPI_CUSTOM_OVERRIDE_INITRAMFS provides a mechanism for -+the user to add table images to the initramfs for loading at -+runtime. Tables used before expansion of the initramfs may not -+be replaced. Fortunately this list is small and the one most -+typically used, DSDT, is not one of them. 
-+ -+In order to override a table, the image must be placed in the root -+of the initramfs with a filename of .aml (e.g. DSDT.aml). -+ -+As the ACPI subsystem initializes, it will load the tables into memory -+and override them as the tables are needed. -+ -+This option takes precedence over the in-kernel method provided by -+the ACPI_CUSTOM_DSDT config option. -+ -+When to use these methods is described in detail on the -+Linux/ACPI home page: -+http://www.lesswatts.org/projects/acpi/overridingDSDT.php -+ -+Documentation/initramfs-add-dsdt.sh is provided for convenience -+for use with the CONFIG_ACPI_CUSTOM_OVERRIDE_INITRAMFS method. ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -220,6 +220,10 @@ and is between 256 and 4096 characters. - - acpi_no_auto_ssdt [HW,ACPI] Disable automatic loading of SSDT - -+ acpi_no_initrd_override [KNL,ACPI] -+ acpi_no_initramfs_override [KNL,ACPI] -+ Disable loading custom ACPI tables from the initramfs -+ - acpi_os_name= [HW,ACPI] Tell ACPI BIOS the name of the OS - Format: To spoof as Windows 98: ="Microsoft Windows" - ---- a/drivers/acpi/Kconfig -+++ b/drivers/acpi/Kconfig -@@ -271,6 +271,19 @@ config ACPI_CUSTOM_DSDT - bool - default ACPI_CUSTOM_DSDT_FILE != "" - -+config ACPI_CUSTOM_OVERRIDE_INITRAMFS -+ bool "Load ACPI override tables from initramfs" -+ depends on BLK_DEV_INITRD -+ default n -+ help -+ This option supports loading custom replacement tables by optionally -+ loading them from the initramfs. -+ -+ See Documentation/acpi/table-override.txt -+ -+ If you are not using this feature now, but may use it later, -+ it is safe to say Y here. 
-+ - config ACPI_BLACKLIST_YEAR - int "Disable ACPI for systems before Jan 1st this year" if X86_32 - default 0 ---- a/drivers/acpi/bus.c -+++ b/drivers/acpi/bus.c -@@ -866,6 +866,13 @@ void __init acpi_early_init(void) - goto error0; - } - -+ status = acpi_load_override_tables(); -+ if (ACPI_FAILURE(status)) { -+ printk(KERN_ERR PREFIX -+ "Unable to load Override Tables\n"); -+ goto error0; -+ } -+ - status = acpi_load_tables(); - if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX ---- a/drivers/acpi/osl.c -+++ b/drivers/acpi/osl.c -@@ -43,6 +43,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -51,6 +52,15 @@ - #include - #include - -+/* We need these to manipulate the global table array. The existing -+ * accessors in acpica/ only pass back the table header and we need -+ * the descriptor. */ -+#include "acpica/acconfig.h" -+#include "acpica/aclocal.h" -+#include "acpica/acglobal.h" -+#include "acpica/acutils.h" -+#include "acpica/actables.h" -+ - #define _COMPONENT ACPI_OS_SERVICES - ACPI_MODULE_NAME("osl"); - #define PREFIX "ACPI: " -@@ -98,6 +108,23 @@ static DEFINE_SPINLOCK(acpi_res_lock); - #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ - static char osi_additional_string[OSI_STRING_LENGTH_MAX]; - -+#ifdef CONFIG_ACPI_CUSTOM_OVERRIDE_INITRAMFS -+static int acpi_no_initrd_override; -+static int __init acpi_no_initrd_override_setup(char *s) -+{ -+ acpi_no_initrd_override = 1; -+ return 1; -+} -+ -+static int __init acpi_no_initramfs_override_setup(char *s) -+{ -+ return acpi_no_initrd_override_setup(s); -+} -+ -+__setup("acpi_no_initrd_override", acpi_no_initrd_override_setup); -+__setup("acpi_no_initramfs_override", acpi_no_initramfs_override_setup); -+#endif -+ - /* - * The story of _OSI(Linux) - * -@@ -326,6 +353,146 @@ acpi_os_predefined_override(const struct - return AE_OK; - } - -+#ifdef CONFIG_ACPI_CUSTOM_OVERRIDE_INITRAMFS -+struct acpi_override_table_entry -+{ -+ const char *name; -+ struct acpi_table_header *table; 
-+}; -+ -+static struct acpi_override_table_entry acpi_override_table_entries[] = { -+ { .name = "DSDT", }, -+ {} -+}; -+ -+ -+ssize_t initramfs_read(unsigned int fd, const char * buf, size_t count); -+acpi_status __init -+acpi_load_one_override_table(struct acpi_override_table_entry *entry) -+{ -+ int fd, ret; -+ acpi_status err = AE_OK; -+ char filename[10]; /* /DSDT.aml\0 */ -+ struct kstat stat; -+ -+ snprintf(filename, sizeof(filename), "/%.4s.aml", entry->name); -+ -+ fd = sys_open(filename, O_RDONLY, 0); -+ if (fd < 0) -+ return AE_NOT_FOUND; -+ -+ ret = vfs_fstat(fd, &stat); -+ if (ret < 0) { -+ printk(KERN_ERR "ACPI: fstat failed while trying to read %s\n", -+ filename); -+ err = AE_ERROR; -+ goto out; -+ } -+ -+ entry->table = kmalloc(stat.size, GFP_KERNEL); -+ if (!entry->table) { -+ printk(KERN_ERR "ACPI: Could not allocate memory to " -+ "override %s\n", entry->name); -+ err = AE_NO_MEMORY; -+ goto out; -+ } -+ -+ ret = initramfs_read(fd, (char *)entry->table, stat.size); -+ sys_close(fd); -+ if (ret != stat.size) { -+ printk(KERN_ERR "ACPI: Failed to read %s from initramfs\n", -+ entry->name); -+ err = AE_ERROR; -+ goto out; -+ } -+ -+out: -+ if (err != AE_OK) { -+ kfree(entry->table); -+ entry->table = NULL; -+ } -+ sys_close(fd); -+ return ret; -+} -+ -+static void __init -+acpi_replace_table(struct acpi_table_desc *table, struct acpi_table_header *new) -+{ -+ /* This is the top part of acpi_load_table */ -+ memset(table, 0, sizeof(*table)); -+ table->address = ACPI_PTR_TO_PHYSADDR(new); -+ table->pointer = new; -+ table->length = new->length; -+ table->flags |= ACPI_TABLE_ORIGIN_OVERRIDE; -+ table->flags |= ACPI_TABLE_ORIGIN_ALLOCATED; -+ memcpy(table->signature.ascii, new->signature, ACPI_NAME_SIZE); -+} -+ -+/* This replaces tables already opportunistically loaded, but not used. -+ * If the acpica code provided a table descriptor lookup then we wouldn't -+ * need to open code this. 
*/ -+static void __init -+acpi_override_tables(void) -+{ -+ struct acpi_table_header *new = NULL; -+ struct acpi_table_desc *table; -+ acpi_status status; -+ int i; -+ -+ /* This is early enough that we don't need the mutex yet */ -+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { -+ if (acpi_tb_is_table_loaded(i)) -+ continue; -+ -+ table = &acpi_gbl_root_table_list.tables[i]; -+ if (!table->pointer) -+ status = acpi_tb_verify_table(table); -+ -+ if (ACPI_FAILURE(status) || !table->pointer) -+ continue; -+ -+ status = acpi_os_table_override(table->pointer, &new); -+ if (ACPI_SUCCESS(status) && new) { -+ acpi_replace_table(table, new); -+ acpi_tb_print_table_header(table->address, new); -+ } -+ } -+} -+ -+acpi_status __init -+acpi_load_override_tables(void) -+{ -+ struct acpi_override_table_entry *entry = acpi_override_table_entries; -+ while (entry && entry->name) { -+ acpi_load_one_override_table(entry); -+ entry++; -+ } -+ -+ acpi_override_tables(); -+ return AE_OK; -+} -+ -+static struct acpi_table_header * -+acpi_get_override_table(const char *name) -+{ -+ struct acpi_override_table_entry *entry = acpi_override_table_entries; -+ -+ while (entry && entry->name) { -+ if (!memcmp(name, entry->name, ACPI_NAME_SIZE)) -+ return entry->table;; -+ entry++; -+ } -+ -+ return NULL; -+} -+#else -+acpi_status -+acpi_load_override_tables(void) -+{ -+ return AE_OK; -+} -+#endif -+ - acpi_status - acpi_os_table_override(struct acpi_table_header * existing_table, - struct acpi_table_header ** new_table) -@@ -339,6 +506,10 @@ acpi_os_table_override(struct acpi_table - if (strncmp(existing_table->signature, "DSDT", 4) == 0) - *new_table = (struct acpi_table_header *)AmlCode; - #endif -+#ifdef CONFIG_ACPI_CUSTOM_OVERRIDE_INITRAMFS -+ if (!acpi_no_initrd_override) -+ *new_table = acpi_get_override_table(existing_table->signature); -+#endif - if (*new_table != NULL) { - printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], " - "this is unsafe: tainting 
kernel\n", ---- a/include/acpi/acpiosxf.h -+++ b/include/acpi/acpiosxf.h -@@ -92,6 +92,9 @@ acpi_os_predefined_override(const struct - acpi_string * new_val); - - acpi_status -+acpi_load_override_tables(void); -+ -+acpi_status - acpi_os_table_override(struct acpi_table_header *existing_table, - struct acpi_table_header **new_table); - diff --git a/patches.suse/acpi_osi_sle11_ident.patch b/patches.suse/acpi_osi_sle11_ident.patch deleted file mode 100644 index 41731a8..0000000 --- a/patches.suse/acpi_osi_sle11_ident.patch +++ /dev/null @@ -1,29 +0,0 @@ -From: Thomas Renninger -Subject: Provide possibility for vendors to fix BIOS issues for SLE11 only -References: none -Patch-Mainline: never - -These BIOS issues generally should be solved in the mainine kernel. -But such ACPI interpreter fixes often are very intrusive and impossible to -add as a maintenance update. -This interface should only be used by vendors in emergency case, e.g. -for important pre-loads. Use cases could be: - - BIOS cannot generally be fixed because it's already validated against - Windows OSes, with this patch a SLE11 specific BIOS fix can be added - - Kernel cannot be fixed, because the fix would be too intrusive -In most cases both above scenarios would be valid. 
- ---- - drivers/acpi/acpica/uteval.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/acpi/acpica/uteval.c -+++ b/drivers/acpi/acpica/uteval.c -@@ -70,6 +70,7 @@ static char *acpi_interfaces_supported[] - "Windows 2001.1", /* Windows Server 2003 */ - "Windows 2001.1 SP1", /* Windows Server 2003 SP1 - Added 03/2006 */ - "Windows 2006", /* Windows Vista - Added 03/2006 */ -+ "SLE11", /* SLE11 identification */ - - /* Feature Group Strings */ - diff --git a/patches.suse/add-initramfs-file_read_write b/patches.suse/add-initramfs-file_read_write deleted file mode 100644 index 7caf883..0000000 --- a/patches.suse/add-initramfs-file_read_write +++ /dev/null @@ -1,207 +0,0 @@ -From: Jeff Mahoney -Subject: initramfs: add initramfs_{read,write} -References: bnc#568120 -Patch-mainline: Probably never - - This patch adds initramfs_read and initramfs_write, which will read and - write to the initramfs without traversing huge chunks of the VFS code. - A previous incarnation of the ACPI dynamic table patches ended up - causing "scheduling while atomic" warnings during boot, resulting in a - whole lot of bug reports. 
- -Signed-off-by: Jeff Mahoney ---- - init/initramfs.c | 152 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- - 1 file changed, 149 insertions(+), 3 deletions(-) - ---- a/init/initramfs.c -+++ b/init/initramfs.c -@@ -1,5 +1,6 @@ - #include - #include -+#include - #include - #include - #include -@@ -8,6 +9,8 @@ - #include - #include - #include -+#include -+#include - - static __initdata char *message; - static void __init error(char *x) -@@ -333,10 +336,153 @@ static int __init do_name(void) - return 0; - } - -+ssize_t initramfs_file_read(struct file *file, const char *buf, -+ size_t count, loff_t *ppos) -+{ -+ struct address_space *mapping = file->f_mapping; -+ struct iovec iov = { .iov_base = (void __user *) buf, -+ .iov_len = count }; -+ struct iov_iter i; -+ long status = 0; -+ loff_t pos = *ppos; -+ ssize_t read = 0; -+ -+ iov_iter_init(&i, &iov, 1, count, 0); -+ -+ do { -+ struct page *page; -+ pgoff_t index; -+ unsigned long offset; -+ unsigned long bytes; -+ char *data; -+ -+ offset = (pos & (PAGE_CACHE_SIZE - 1)); -+ index = pos >> PAGE_CACHE_SHIFT; -+ bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, -+ iov_iter_count(&i)); -+ -+ page = read_mapping_page(mapping, index, NULL); -+ if (IS_ERR(page)) { -+ status = PTR_ERR(page); -+ break; -+ } -+ -+ data = kmap_atomic(page, KM_USER0); -+ memcpy(i.iov->iov_base + i.iov_offset, data + offset, bytes); -+ kunmap_atomic(data, KM_USER0); -+ -+ iov_iter_advance(&i, bytes); -+ pos += bytes; -+ read += bytes; -+ } while (iov_iter_count(&i)); -+ -+ *ppos = pos; -+ -+ return read ? 
read : status; -+} -+ -+ssize_t initramfs_file_write(struct file *file, const char * __user buf, -+ size_t count, loff_t *ppos) -+{ -+ struct address_space *mapping = file->f_mapping; -+ struct iovec iov = { .iov_base = (void __user *) buf, -+ .iov_len = count }; -+ long status = 0; -+ ssize_t written = 0; -+ unsigned int flags = 0; -+ loff_t pos = *ppos; -+ struct iov_iter i; -+ -+ iov_iter_init(&i, &iov, 1, count, 0); -+ -+ /* -+ * Copies from kernel address space cannot fail (NFSD is a big user). -+ */ -+ if (segment_eq(get_fs(), KERNEL_DS)) -+ flags |= AOP_FLAG_UNINTERRUPTIBLE; -+ -+ mutex_lock(&mapping->host->i_mutex); -+ -+ do { -+ struct page *page; -+ pgoff_t index; /* Pagecache index for current page */ -+ unsigned long offset; /* Offset into pagecache page */ -+ unsigned long bytes; /* Bytes to write to page */ -+ size_t copied; /* Bytes copied from user */ -+ void *fsdata; -+ char *data; -+ -+ offset = (pos & (PAGE_CACHE_SIZE - 1)); -+ index = pos >> PAGE_CACHE_SHIFT; -+ bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, -+ iov_iter_count(&i)); -+ -+ status = simple_write_begin(file, mapping, pos, bytes, flags, -+ &page, &fsdata); -+ if (unlikely(status)) -+ break; -+ data = kmap_atomic(page, KM_USER0); -+ memcpy(data + offset, i.iov->iov_base + i.iov_offset, bytes); -+ kunmap_atomic(data, KM_USER0); -+ copied = bytes; -+ -+ status = simple_write_end(file, mapping, pos, bytes, copied, -+ page, fsdata); -+ if (unlikely(status < 0)) -+ break; -+ copied = status; -+ -+ iov_iter_advance(&i, copied); -+ pos += copied; -+ written += copied; -+ -+ } while (iov_iter_count(&i)); -+ -+ mutex_unlock(&mapping->host->i_mutex); -+ -+ *ppos = pos; -+ -+ return written ? 
written : status; -+} -+ -+ssize_t -+initramfs_read(unsigned int fd, const char * buf, size_t count) -+{ -+ struct file *file; -+ ssize_t ret = 0; -+ -+ file = fget(fd); -+ if (file) { -+ loff_t pos = file->f_pos; -+ ret = initramfs_file_read(file, buf, count, &pos); -+ file->f_pos = pos; -+ fput(file); -+ } -+ -+ return ret; -+} -+ -+ssize_t -+initramfs_write(unsigned int fd, const char * buf, size_t count) -+{ -+ struct file *file; -+ ssize_t ret = 0; -+ -+ file = fget(fd); -+ if (file) { -+ loff_t pos = file->f_pos; -+ ret = initramfs_file_write(file, buf, count, &pos); -+ file->f_pos = pos; -+ fput(file); -+ } -+ -+ return ret; -+} -+ - static int __init do_copy(void) - { - if (count >= body_len) { -- sys_write(wfd, victim, body_len); -+ initramfs_write(wfd, victim, body_len); - sys_close(wfd); - do_utime(vcollected, mtime); - kfree(vcollected); -@@ -344,7 +490,7 @@ static int __init do_copy(void) - state = SkipIt; - return 0; - } else { -- sys_write(wfd, victim, count); -+ initramfs_write(wfd, victim, count); - body_len -= count; - eat(count); - return 1; -@@ -592,7 +738,7 @@ static int __init populate_rootfs(void) - "; looks like an initrd\n", err); - fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700); - if (fd >= 0) { -- sys_write(fd, (char *)initrd_start, -+ initramfs_write(fd, (char *)initrd_start, - initrd_end - initrd_start); - sys_close(fd); - free_initrd(); diff --git a/patches.suse/audit-export-logging.patch b/patches.suse/audit-export-logging.patch deleted file mode 100644 index 96cc886..0000000 --- a/patches.suse/audit-export-logging.patch +++ /dev/null @@ -1,46 +0,0 @@ -From: Tony Jones -Subject: export audit logging symbols -Patch-mainline: not yet - -In SLE11 the following symbols were exported by patch 'apparmor-audit.diff' -With apparmor now being a built-in these exports were removed for SP1 but -NSS requires that audit_log_untrustedstring be exported. - -Re-export both symbols to be consistent with SLE11. 
- -Signed-Off-by: Tony Jones - ---- - include/linux/audit.h | 3 +++ - kernel/audit.c | 4 +++- - 2 files changed, 6 insertions(+), 1 deletion(-) - ---- a/include/linux/audit.h -+++ b/include/linux/audit.h -@@ -577,6 +577,9 @@ extern void audit_log(struct audit_ - __attribute__((format(printf,4,5))); - - extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); -+extern void audit_log_vformat(struct audit_buffer *ab, -+ const char *fmt, va_list args) -+ __attribute__((format(printf,2,0))); - extern void audit_log_format(struct audit_buffer *ab, - const char *fmt, ...) - __attribute__((format(printf,2,3))); ---- a/kernel/audit.c -+++ b/kernel/audit.c -@@ -1235,7 +1235,7 @@ static inline int audit_expand(struct au - * will be called a second time. Currently, we assume that a printk - * can't format message larger than 1024 bytes, so we don't either. - */ --static void audit_log_vformat(struct audit_buffer *ab, const char *fmt, -+void audit_log_vformat(struct audit_buffer *ab, const char *fmt, - va_list args) - { - int len, avail; -@@ -1511,3 +1511,5 @@ EXPORT_SYMBOL(audit_log_start); - EXPORT_SYMBOL(audit_log_end); - EXPORT_SYMBOL(audit_log_format); - EXPORT_SYMBOL(audit_log); -+EXPORT_SYMBOL_GPL(audit_log_vformat); -+EXPORT_SYMBOL_GPL(audit_log_untrustedstring); diff --git a/patches.suse/b43-missing-firmware-info.patch b/patches.suse/b43-missing-firmware-info.patch deleted file mode 100644 index ba32b2d..0000000 --- a/patches.suse/b43-missing-firmware-info.patch +++ /dev/null @@ -1,37 +0,0 @@ -Subject: b43: Change firmware missing message to refer to openSUSE script -From: Larry Finger -Patch-mainline: never - -The error message output by b43 contains instructions for obtaining -firmware; however, this naturally does not take account of the script -/usr/sbin/install_bcm43xx_firmware. Modify the messages to suggest use -of the script. 
- -Signed-off-by: Larry Finger -Modified-by: Jiri Benc -Signed-off-by: Jiri Benc - ---- - drivers/net/wireless/b43/main.c | 11 +++++++---- - 1 file changed, 7 insertions(+), 4 deletions(-) - ---- a/drivers/net/wireless/b43/main.c -+++ b/drivers/net/wireless/b43/main.c -@@ -1976,10 +1976,13 @@ static void b43_release_firmware(struct - static void b43_print_fw_helptext(struct b43_wl *wl, bool error) - { - const char text[] = -- "You must go to " \ -- "http://wireless.kernel.org/en/users/Drivers/b43#devicefirmware " \ -- "and download the correct firmware for this driver version. " \ -- "Please carefully read all instructions on this website.\n"; -+ "Please open a terminal and enter the command " \ -+ "\"sudo /usr/sbin/install_bcm43xx_firmware\" to download " \ -+ "the correct firmware for this driver version. " \ -+ "For an off-line installation, go to " \ -+ "http://en.opensuse.org/HCL/Network_Adapters_(Wireless)/" \ -+ "Broadcom_BCM43xx and follow the instructions in the " \ -+ "\"Installing firmware from RPM packages\" section.\n"; - - if (error) - b43err(wl, text); diff --git a/patches.suse/bootsplash b/patches.suse/bootsplash deleted file mode 100644 index fe90f66..0000000 --- a/patches.suse/bootsplash +++ /dev/null @@ -1,2872 +0,0 @@ -From: mls@suse.de -Subject: Bootsplash for current kernel -Patch-mainline: no -References: none - -Better support for other VTs. Don't change percent or silent status -when installing a new jpeg. Provide splash_set_percent function. 
- -Signed-off-by: mls@suse.de - ---- - drivers/tty/n_tty.c | 9 - drivers/tty/vt/keyboard.c | 9 - drivers/tty/vt/vt.c | 25 - drivers/video/Kconfig | 4 - drivers/video/Makefile | 1 - drivers/video/bootsplash/Kconfig | 17 - drivers/video/bootsplash/Makefile | 5 - drivers/video/bootsplash/bootsplash.c | 1017 ++++++++++++++++++++++++++++++++++ - drivers/video/bootsplash/bootsplash.h | 41 + - drivers/video/bootsplash/decode-jpg.c | 957 +++++++++++++++++++++++++++++++ - drivers/video/bootsplash/decode-jpg.h | 35 + - drivers/video/bootsplash/render.c | 328 ++++++++++ - drivers/video/console/bitblit.c | 39 + - drivers/video/console/fbcon.c | 53 + - drivers/video/console/fbcon.h | 28 - drivers/video/vesafb.c | 8 - include/linux/console_struct.h | 3 - include/linux/fb.h | 8 - kernel/panic.c | 13 - 19 files changed, 2598 insertions(+), 2 deletions(-) - ---- a/drivers/tty/n_tty.c -+++ b/drivers/tty/n_tty.c -@@ -1790,6 +1790,15 @@ do_it_again: - tty->minimum_to_wake = (minimum - (b - buf)); - - if (!input_available_p(tty, 0)) { -+#ifdef CONFIG_BOOTSPLASH -+ if (file->f_dentry->d_inode->i_rdev == MKDEV(TTY_MAJOR,0) || -+ file->f_dentry->d_inode->i_rdev == MKDEV(TTY_MAJOR,1) || -+ file->f_dentry->d_inode->i_rdev == MKDEV(TTYAUX_MAJOR,0) || -+ file->f_dentry->d_inode->i_rdev == MKDEV(TTYAUX_MAJOR,1)) { -+ extern int splash_verbose(void); -+ (void)splash_verbose(); -+ } -+#endif - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { - retval = -EIO; - break; ---- a/drivers/tty/vt/keyboard.c -+++ b/drivers/tty/vt/keyboard.c -@@ -1202,6 +1202,15 @@ static void kbd_keycode(unsigned int key - pr_warning("can't emulate rawmode for keycode %d\n", - keycode); - -+#ifdef CONFIG_BOOTSPLASH -+ /* This code has to be redone for some non-x86 platforms */ -+ if (down == 1 && (keycode == 0x3c || keycode == 0x01)) { /* F2 and ESC on PC keyboard */ -+ extern int splash_verbose(void); -+ if (splash_verbose()) -+ return; -+ } -+#endif -+ - #ifdef CONFIG_SPARC - if (keycode == KEY_A && sparc_l1_a_state) { - 
sparc_l1_a_state = false; ---- a/drivers/tty/vt/vt.c -+++ b/drivers/tty/vt/vt.c -@@ -4213,6 +4213,31 @@ void vcs_scr_updated(struct vc_data *vc) - notify_update(vc); - } - -+#ifdef CONFIG_BOOTSPLASH -+void con_remap_def_color(struct vc_data *vc, int new_color) -+{ -+ unsigned short *sbuf = vc->vc_screenbuf; -+ unsigned c, len = vc->vc_screenbuf_size >> 1; -+ int old_color; -+ -+ if (sbuf) { -+ old_color = vc->vc_def_color << 8; -+ new_color <<= 8; -+ while(len--) { -+ c = *sbuf; -+ if (((c ^ old_color) & 0xf000) == 0) -+ *sbuf ^= (old_color ^ new_color) & 0xf000; -+ if (((c ^ old_color) & 0x0f00) == 0) -+ *sbuf ^= (old_color ^ new_color) & 0x0f00; -+ sbuf++; -+ } -+ new_color >>= 8; -+ } -+ vc->vc_def_color = vc->vc_color = new_color; -+ update_attr(vc); -+} -+#endif -+ - /* - * Visible symbols for modules - */ ---- a/drivers/video/Kconfig -+++ b/drivers/video/Kconfig -@@ -2379,4 +2379,8 @@ if FB || SGI_NEWPORT_CONSOLE - source "drivers/video/logo/Kconfig" - endif - -+if FB -+ source "drivers/video/bootsplash/Kconfig" -+endif -+ - endmenu ---- a/drivers/video/Makefile -+++ b/drivers/video/Makefile -@@ -14,6 +14,7 @@ fb-objs := $(f - obj-$(CONFIG_VT) += console/ - obj-$(CONFIG_LOGO) += logo/ - obj-y += backlight/ display/ -+obj-$(CONFIG_BOOTSPLASH) += bootsplash/ - - obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o - obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o ---- /dev/null -+++ b/drivers/video/bootsplash/Kconfig -@@ -0,0 +1,17 @@ -+# -+# Bootsplash configuration -+# -+ -+menu "Bootsplash configuration" -+ -+config BOOTSPLASH -+ bool "Bootup splash screen" -+ depends on FRAMEBUFFER_CONSOLE && FB_VESA -+ default n -+ ---help--- -+ This option enables the Linux bootsplash screen. For more -+ information on the bootsplash screen have a look at -+ http://www.bootsplash.org/. 
-+ If you are unsure, say N -+endmenu -+ ---- /dev/null -+++ b/drivers/video/bootsplash/Makefile -@@ -0,0 +1,5 @@ -+# Makefile for the Linux bootsplash -+ -+obj-$(CONFIG_BOOTSPLASH) += bootsplash.o -+obj-$(CONFIG_BOOTSPLASH) += decode-jpg.o -+obj-$(CONFIG_BOOTSPLASH) += render.o ---- /dev/null -+++ b/drivers/video/bootsplash/bootsplash.c -@@ -0,0 +1,1017 @@ -+/* -+ * linux/drivers/video/bootsplash/bootsplash.c - -+ * splash screen handling functions. -+ * -+ * (w) 2001-2004 by Volker Poplawski, , -+ * Stefan Reinauer, , -+ * Steffen Winterfeldt, , -+ * Michael Schroeder -+ * -+ * Ideas & SuSE screen work by Ken Wimer, -+ * -+ * For more information on this code check http://www.bootsplash.org/ -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "../console/fbcon.h" -+#include "bootsplash.h" -+#include "decode-jpg.h" -+ -+extern struct fb_ops vesafb_ops; -+extern signed char con2fb_map[MAX_NR_CONSOLES]; -+ -+#define SPLASH_VERSION "3.1.6-2004/03/31" -+ -+/* These errors have to match fbcon-jpegdec.h */ -+static unsigned char *jpg_errors[] = { -+ "no SOI found", -+ "not 8 bit", -+ "height mismatch", -+ "width mismatch", -+ "bad width or height", -+ "too many COMPPs", -+ "illegal HV", -+ "quant table selector", -+ "picture is not YCBCR 221111", -+ "unknow CID in scan", -+ "dct not sequential", -+ "wrong marker", -+ "no EOI", -+ "bad tables", -+ "depth mismatch" -+}; -+ -+static struct jpeg_decdata *decdata = 0; /* private decoder data */ -+ -+static int splash_registered = 0; -+static int splash_usesilent = 0; /* shall we display the silentjpeg? 
*/ -+int splash_default = 0xf01; -+ -+static int splash_check_jpeg(unsigned char *jpeg, int width, int height, int depth); -+ -+static int __init splash_setup(char *options) -+{ -+ if(!strncmp("silent", options, 6)) { -+ printk(KERN_INFO "bootsplash: silent mode.\n"); -+ splash_usesilent = 1; -+ /* skip "silent," */ -+ if (strlen(options) == 6) -+ return 0; -+ options += 7; -+ } -+ if(!strncmp("verbose", options, 7)) { -+ printk(KERN_INFO "bootsplash: verbose mode.\n"); -+ splash_usesilent = 0; -+ return 0; -+ } -+ splash_default = simple_strtoul(options, NULL, 0); -+ return 0; -+} -+ -+__setup("splash=", splash_setup); -+ -+ -+static int splash_hasinter(unsigned char *buf, int num) -+{ -+ unsigned char *bufend = buf + num * 12; -+ while(buf < bufend) { -+ if (buf[1] > 127) /* inter? */ -+ return 1; -+ buf += buf[3] > 127 ? 24 : 12; /* blend? */ -+ } -+ return 0; -+} -+ -+static int boxextract(unsigned char *buf, unsigned short *dp, unsigned char *cols, int *blendp) -+{ -+ dp[0] = buf[0] | buf[1] << 8; -+ dp[1] = buf[2] | buf[3] << 8; -+ dp[2] = buf[4] | buf[5] << 8; -+ dp[3] = buf[6] | buf[7] << 8; -+ *(unsigned int *)(cols + 0) = -+ *(unsigned int *)(cols + 4) = -+ *(unsigned int *)(cols + 8) = -+ *(unsigned int *)(cols + 12) = *(unsigned int *)(buf + 8); -+ if (dp[1] > 32767) { -+ dp[1] = ~dp[1]; -+ *(unsigned int *)(cols + 4) = *(unsigned int *)(buf + 12); -+ *(unsigned int *)(cols + 8) = *(unsigned int *)(buf + 16); -+ *(unsigned int *)(cols + 12) = *(unsigned int *)(buf + 20); -+ *blendp = 1; -+ return 24; -+ } -+ return 12; -+} -+ -+static void boxit(unsigned char *pic, int bytes, unsigned char *buf, int num, int percent, int overpaint) -+{ -+ int x, y, i, p, doblend, r, g, b, a, add; -+ unsigned short data1[4]; -+ unsigned char cols1[16]; -+ unsigned short data2[4]; -+ unsigned char cols2[16]; -+ unsigned char *bufend; -+ unsigned short *picp; -+ unsigned int stipple[32], sti, stin, stinn, stixs, stixe, stiys, stiye; -+ int xs, xe, ys, ye, xo, yo; -+ -+ if 
(num == 0) -+ return; -+ bufend = buf + num * 12; -+ stipple[0] = 0xffffffff; -+ stin = 1; -+ stinn = 0; -+ stixs = stixe = 0; -+ stiys = stiye = 0; -+ while(buf < bufend) { -+ doblend = 0; -+ buf += boxextract(buf, data1, cols1, &doblend); -+ if (data1[0] == 32767 && data1[1] == 32767) { -+ /* box stipple */ -+ if (stinn == 32) -+ continue; -+ if (stinn == 0) { -+ stixs = data1[2]; -+ stixe = data1[3]; -+ stiys = stiye = 0; -+ } else if (stinn == 4) { -+ stiys = data1[2]; -+ stiye = data1[3]; -+ } -+ stipple[stinn++] = (cols1[ 0] << 24) | (cols1[ 1] << 16) | (cols1[ 2] << 8) | cols1[ 3] ; -+ stipple[stinn++] = (cols1[ 4] << 24) | (cols1[ 5] << 16) | (cols1[ 6] << 8) | cols1[ 7] ; -+ stipple[stinn++] = (cols1[ 8] << 24) | (cols1[ 9] << 16) | (cols1[10] << 8) | cols1[11] ; -+ stipple[stinn++] = (cols1[12] << 24) | (cols1[13] << 16) | (cols1[14] << 8) | cols1[15] ; -+ stin = stinn; -+ continue; -+ } -+ stinn = 0; -+ if (data1[0] > 32767) -+ buf += boxextract(buf, data2, cols2, &doblend); -+ if (data1[0] == 32767 && data1[1] == 32766) { -+ /* box copy */ -+ i = 12 * (short)data1[3]; -+ doblend = 0; -+ i += boxextract(buf + i, data1, cols1, &doblend); -+ if (data1[0] > 32767) -+ boxextract(buf + i, data2, cols2, &doblend); -+ } -+ if (data1[0] == 32767) -+ continue; -+ if (data1[2] > 32767) { -+ if (overpaint) -+ continue; -+ data1[2] = ~data1[2]; -+ } -+ if (data1[3] > 32767) { -+ if (percent == 65536) -+ continue; -+ data1[3] = ~data1[3]; -+ } -+ if (data1[0] > 32767) { -+ data1[0] = ~data1[0]; -+ for (i = 0; i < 4; i++) -+ data1[i] = (data1[i] * (65536 - percent) + data2[i] * percent) >> 16; -+ for (i = 0; i < 16; i++) -+ cols1[i] = (cols1[i] * (65536 - percent) + cols2[i] * percent) >> 16; -+ } -+ *(unsigned int *)cols2 = *(unsigned int *)cols1; -+ a = cols2[3]; -+ if (a == 0 && !doblend) -+ continue; -+ -+ if (stixs >= 32768) { -+ xo = xs = (stixs ^ 65535) + data1[0]; -+ xe = stixe ? stixe + data1[0] : data1[2]; -+ } else if (stixe >= 32768) { -+ xs = stixs ? 
data1[2] - stixs : data1[0]; -+ xe = data1[2] - (stixe ^ 65535); -+ xo = xe + 1; -+ } else { -+ xo = xs = stixs; -+ xe = stixe ? stixe : data1[2]; -+ } -+ if (stiys >= 32768) { -+ yo = ys = (stiys ^ 65535) + data1[1]; -+ ye = stiye ? stiye + data1[1] : data1[3]; -+ } else if (stiye >= 32768) { -+ ys = stiys ? data1[3] - stiys : data1[1]; -+ ye = data1[3] - (stiye ^ 65535); -+ yo = ye + 1; -+ } else { -+ yo = ys = stiys; -+ ye = stiye ? stiye : data1[3]; -+ } -+ xo = 32 - (xo & 31); -+ yo = stin - (yo % stin); -+ if (xs < data1[0]) -+ xs = data1[0]; -+ if (xe > data1[2]) -+ xe = data1[2]; -+ if (ys < data1[1]) -+ ys = data1[1]; -+ if (ye > data1[3]) -+ ye = data1[3]; -+ -+ for (y = ys; y <= ye; y++) { -+ sti = stipple[(y + yo) % stin]; -+ x = (xs + xo) & 31; -+ if (x) -+ sti = (sti << x) | (sti >> (32 - x)); -+ if (doblend) { -+ if ((p = data1[3] - data1[1]) != 0) -+ p = ((y - data1[1]) << 16) / p; -+ for (i = 0; i < 8; i++) -+ cols2[i + 8] = (cols1[i] * (65536 - p) + cols1[i + 8] * p) >> 16; -+ } -+ add = (xs & 1); -+ add ^= (add ^ y) & 1 ? 1 : 3; /* 2x2 ordered dithering */ -+ picp = (unsigned short *)(pic + xs * 2 + y * bytes); -+ for (x = xs; x <= xe; x++) { -+ if (!(sti & 0x80000000)) { -+ sti <<= 1; -+ picp++; -+ add ^= 3; -+ continue; -+ } -+ sti = (sti << 1) | 1; -+ if (doblend) { -+ if ((p = data1[2] - data1[0]) != 0) -+ p = ((x - data1[0]) << 16) / p; -+ for (i = 0; i < 4; i++) -+ cols2[i] = (cols2[i + 8] * (65536 - p) + cols2[i + 12] * p) >> 16; -+ a = cols2[3]; -+ } -+ r = cols2[0]; -+ g = cols2[1]; -+ b = cols2[2]; -+ if (a != 255) { -+ i = *picp; -+ r = ((i >> 8 & 0xf8) * (255 - a) + r * a) / 255; -+ g = ((i >> 3 & 0xfc) * (255 - a) + g * a) / 255; -+ b = ((i << 3 & 0xf8) * (255 - a) + b * a) / 255; -+ } -+ #define CLAMP(x) ((x) >= 256 ? 
255 : (x)) -+ i = ((CLAMP(r + add*2+1) & 0xf8) << 8) | -+ ((CLAMP(g + add ) & 0xfc) << 3) | -+ ((CLAMP(b + add*2+1) ) >> 3); -+ *picp++ = i; -+ add ^= 3; -+ } -+ } -+ } -+} -+ -+static int splash_check_jpeg(unsigned char *jpeg, int width, int height, int depth) -+{ -+ int size, err; -+ unsigned char *mem; -+ -+ size = ((width + 15) & ~15) * ((height + 15) & ~15) * (depth >> 3); -+ mem = vmalloc(size); -+ if (!mem) { -+ printk(KERN_INFO "bootsplash: no memory for decoded picture.\n"); -+ return -1; -+ } -+ if (!decdata) -+ decdata = vmalloc(sizeof(*decdata)); -+ if ((err = jpeg_decode(jpeg, mem, ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) -+ printk(KERN_INFO "bootsplash: error while decompressing picture: %s (%d)\n",jpg_errors[err - 1], err); -+ vfree(mem); -+ return err ? -1 : 0; -+} -+ -+static void splash_free(struct vc_data *vc, struct fb_info *info) -+{ -+ if (!vc->vc_splash_data) -+ return; -+ if (info->silent_screen_base) -+ info->screen_base = info->silent_screen_base; -+ info->silent_screen_base = 0; -+ if (vc->vc_splash_data->splash_silentjpeg) -+ vfree(vc->vc_splash_data->splash_sboxes); -+ vfree(vc->vc_splash_data); -+ vc->vc_splash_data = 0; -+ info->splash_data = 0; -+} -+ -+static int splash_mkpenguin(struct splash_data *data, int pxo, int pyo, int pwi, int phe, int pr, int pg, int pb) -+{ -+ unsigned char *buf; -+ int i; -+ -+ if (pwi ==0 || phe == 0) -+ return 0; -+ buf = (unsigned char *)data + sizeof(*data); -+ pwi += pxo - 1; -+ phe += pyo - 1; -+ *buf++ = pxo; -+ *buf++ = pxo >> 8; -+ *buf++ = pyo; -+ *buf++ = pyo >> 8; -+ *buf++ = pwi; -+ *buf++ = pwi >> 8; -+ *buf++ = phe; -+ *buf++ = phe >> 8; -+ *buf++ = pr; -+ *buf++ = pg; -+ *buf++ = pb; -+ *buf++ = 0; -+ for (i = 0; i < 12; i++, buf++) -+ *buf = buf[-12]; -+ buf[-24] ^= 0xff; -+ buf[-23] ^= 0xff; -+ buf[-1] = 0xff; -+ return 2; -+} -+ -+static const int splash_offsets[3][16] = { -+ /* len, unit, size, state, fgcol, col, xo, yo, wi, he -+ boxcnt, ssize, sboxcnt, percent, 
overok, palcnt */ -+ /* V1 */ -+ { 20, -1, 16, -1, -1, -1, 8, 10, 12, 14, -+ -1, -1, -1, -1, -1, -1 }, -+ /* V2 */ -+ { 35, 8, 12, 9, 10, 11, 16, 18, 20, 22, -+ -1, -1, -1, -1, -1, -1 }, -+ /* V3 */ -+ { 38, 8, 12, 9, 10, 11, 16, 18, 20, 22, -+ 24, 28, 32, 34, 36, 37 }, -+}; -+ -+#define SPLASH_OFF_LEN offsets[0] -+#define SPLASH_OFF_UNIT offsets[1] -+#define SPLASH_OFF_SIZE offsets[2] -+#define SPLASH_OFF_STATE offsets[3] -+#define SPLASH_OFF_FGCOL offsets[4] -+#define SPLASH_OFF_COL offsets[5] -+#define SPLASH_OFF_XO offsets[6] -+#define SPLASH_OFF_YO offsets[7] -+#define SPLASH_OFF_WI offsets[8] -+#define SPLASH_OFF_HE offsets[9] -+#define SPLASH_OFF_BOXCNT offsets[10] -+#define SPLASH_OFF_SSIZE offsets[11] -+#define SPLASH_OFF_SBOXCNT offsets[12] -+#define SPLASH_OFF_PERCENT offsets[13] -+#define SPLASH_OFF_OVEROK offsets[14] -+#define SPLASH_OFF_PALCNT offsets[15] -+ -+static inline int splash_getb(unsigned char *pos, int off) -+{ -+ return off == -1 ? 0 : pos[off]; -+} -+ -+static inline int splash_gets(unsigned char *pos, int off) -+{ -+ return off == -1 ? 0 : pos[off] | pos[off + 1] << 8; -+} -+ -+static inline int splash_geti(unsigned char *pos, int off) -+{ -+ return off == -1 ? 
0 : -+ pos[off] | pos[off + 1] << 8 | pos[off + 2] << 16 | pos[off + 3] << 24; -+} -+ -+static int splash_getraw(unsigned char *start, unsigned char *end, int *update) -+{ -+ unsigned char *ndata; -+ int version; -+ int splash_size; -+ int unit; -+ int width, height; -+ int silentsize; -+ int boxcnt; -+ int sboxcnt; -+ int palcnt; -+ int i, len; -+ const int *offsets; -+ struct vc_data *vc; -+ struct fb_info *info; -+ struct splash_data *sd; -+ int oldpercent, oldsilent; -+ -+ if (update) -+ *update = -1; -+ -+ if (!update || start[7] < '2' || start[7] > '3' || splash_geti(start, 12) != (int)0xffffffff) -+ printk(KERN_INFO "bootsplash %s: looking for picture...\n", SPLASH_VERSION); -+ -+ for (ndata = start; ndata < end; ndata++) { -+ if (ndata[0] != 'B' || ndata[1] != 'O' || ndata[2] != 'O' || ndata[3] != 'T') -+ continue; -+ if (ndata[4] != 'S' || ndata[5] != 'P' || ndata[6] != 'L' || ndata[7] < '1' || ndata[7] > '3') -+ continue; -+ version = ndata[7] - '0'; -+ offsets = splash_offsets[version - 1]; -+ len = SPLASH_OFF_LEN; -+ unit = splash_getb(ndata, SPLASH_OFF_UNIT); -+ if (unit >= MAX_NR_CONSOLES) -+ continue; -+ if (unit) { -+ vc_allocate(unit); -+ } -+ vc = vc_cons[unit].d; -+ info = registered_fb[(int)con2fb_map[unit]]; -+ width = info->var.xres; -+ height = info->var.yres; -+ splash_size = splash_geti(ndata, SPLASH_OFF_SIZE); -+ if (splash_size == (int)0xffffffff && version > 1) { -+ if ((sd = vc->vc_splash_data) != 0) { -+ int up = 0; -+ i = splash_getb(ndata, SPLASH_OFF_STATE); -+ if (i != 255) { -+ sd->splash_state = i; -+ up = -1; -+ } -+ i = splash_getb(ndata, SPLASH_OFF_FGCOL); -+ if (i != 255) { -+ sd->splash_fg_color = i; -+ up = -1; -+ } -+ i = splash_getb(ndata, SPLASH_OFF_COL); -+ if (i != 255) { -+ sd->splash_color = i; -+ up = -1; -+ } -+ boxcnt = sboxcnt = 0; -+ if (ndata + len <= end) { -+ boxcnt = splash_gets(ndata, SPLASH_OFF_BOXCNT); -+ sboxcnt = splash_gets(ndata, SPLASH_OFF_SBOXCNT); -+ } -+ if (boxcnt) { -+ i = splash_gets(ndata, 
len); -+ if (boxcnt + i <= sd->splash_boxcount && ndata + len + 2 + boxcnt * 12 <= end) { -+ -+ if (splash_geti(ndata, len + 2) != 0x7ffd7fff || !memcmp(ndata + len + 2, sd->splash_boxes + i * 12, 8)) { -+ -+ memcpy(sd->splash_boxes + i * 12, ndata + len + 2, boxcnt * 12); -+ up |= 1; -+ } -+ } -+ len += boxcnt * 12 + 2; -+ } -+ if (sboxcnt) { -+ i = splash_gets(ndata, len); -+ if (sboxcnt + i <= sd->splash_sboxcount && ndata + len + 2 + sboxcnt * 12 <= end) { -+ if (splash_geti(ndata, len + 2) != 0x7ffd7fff || !memcmp(ndata + len + 2, sd->splash_sboxes + i * 12, 8)) { -+ memcpy(sd->splash_sboxes + i * 12, ndata + len + 2, sboxcnt * 12); -+ up |= 2; -+ } -+ } -+ } -+ if (update) -+ *update = up; -+ } -+ return unit; -+ } -+ if (splash_size == 0) { -+ printk(KERN_INFO "bootsplash: ...found, freeing memory.\n"); -+ if (vc->vc_splash_data) -+ splash_free(vc, info); -+ return unit; -+ } -+ boxcnt = splash_gets(ndata, SPLASH_OFF_BOXCNT); -+ palcnt = 3 * splash_getb(ndata, SPLASH_OFF_PALCNT); -+ if (ndata + len + splash_size > end) { -+ printk(KERN_ERR "bootsplash: ...found, but truncated!\n"); -+ return -1; -+ } -+ if (!jpeg_check_size(ndata + len + boxcnt * 12 + palcnt, width, height)) { -+ ndata += len + splash_size - 1; -+ continue; -+ } -+ if (splash_check_jpeg(ndata + len + boxcnt * 12 + palcnt, width, height, info->var.bits_per_pixel)) -+ return -1; -+ silentsize = splash_geti(ndata, SPLASH_OFF_SSIZE); -+ if (silentsize) -+ printk(KERN_INFO "bootsplash: silentjpeg size %d bytes\n", silentsize); -+ if (silentsize >= splash_size) { -+ printk(KERN_ERR "bootsplash: bigger than splashsize!\n"); -+ return -1; -+ } -+ splash_size -= silentsize; -+ if (!splash_usesilent) -+ silentsize = 0; -+ else if (height * 2 * info->fix.line_length > info->fix.smem_len) { -+ printk(KERN_WARNING "bootsplash: does not fit into framebuffer.\n"); -+ silentsize = 0; -+ } -+ sboxcnt = splash_gets(ndata, SPLASH_OFF_SBOXCNT); -+ if (silentsize) { -+ unsigned char *simage = ndata + len + 
splash_size + 12 * sboxcnt; -+ if (!jpeg_check_size(simage, width, height) || -+ splash_check_jpeg(simage, width, height, info->var.bits_per_pixel)) { -+ printk(KERN_WARNING "bootsplash: error in silent jpeg.\n"); -+ silentsize = 0; -+ } -+ } -+ oldpercent = -1; -+ oldsilent = -1; -+ if (vc->vc_splash_data) { -+ oldpercent = vc->vc_splash_data->splash_percent; -+ oldsilent = vc->vc_splash_data->splash_dosilent; -+ splash_free(vc, info); -+ } -+ vc->vc_splash_data = sd = vmalloc(sizeof(*sd) + splash_size + (version < 3 ? 2 * 12 : 0)); -+ if (!sd) -+ break; -+ sd->splash_silentjpeg = 0; -+ sd->splash_sboxes = 0; -+ sd->splash_sboxcount = 0; -+ if (silentsize) { -+ sd->splash_silentjpeg = vmalloc(silentsize); -+ if (sd->splash_silentjpeg) { -+ memcpy(sd->splash_silentjpeg, ndata + len + splash_size, silentsize); -+ sd->splash_sboxes = vc->vc_splash_data->splash_silentjpeg; -+ sd->splash_silentjpeg += 12 * sboxcnt; -+ sd->splash_sboxcount = sboxcnt; -+ } -+ } -+ sd->splash_state = splash_getb(ndata, SPLASH_OFF_STATE); -+ sd->splash_fg_color = splash_getb(ndata, SPLASH_OFF_FGCOL); -+ sd->splash_color = splash_getb(ndata, SPLASH_OFF_COL); -+ sd->splash_overpaintok = splash_getb(ndata, SPLASH_OFF_OVEROK); -+ sd->splash_text_xo = splash_gets(ndata, SPLASH_OFF_XO); -+ sd->splash_text_yo = splash_gets(ndata, SPLASH_OFF_YO); -+ sd->splash_text_wi = splash_gets(ndata, SPLASH_OFF_WI); -+ sd->splash_text_he = splash_gets(ndata, SPLASH_OFF_HE); -+ sd->splash_percent = oldpercent == -1 ? 
splash_gets(ndata, SPLASH_OFF_PERCENT) : oldpercent; -+ if (version == 1) { -+ sd->splash_text_xo *= 8; -+ sd->splash_text_wi *= 8; -+ sd->splash_text_yo *= 16; -+ sd->splash_text_he *= 16; -+ sd->splash_color = (splash_default >> 8) & 0x0f; -+ sd->splash_fg_color = (splash_default >> 4) & 0x0f; -+ sd->splash_state = splash_default & 1; -+ } -+ if (sd->splash_text_xo + sd->splash_text_wi > width || sd->splash_text_yo + sd->splash_text_he > height) { -+ splash_free(vc, info); -+ printk(KERN_ERR "bootsplash: found, but has oversized text area!\n"); -+ return -1; -+ } -+ if (!vc_cons[unit].d || info->fbops->fb_imageblit != cfb_imageblit) { -+ splash_free(vc, info); -+ printk(KERN_ERR "bootsplash: found, but framebuffer can't handle it!\n"); -+ return -1; -+ } -+ printk(KERN_INFO "bootsplash: ...found (%dx%d, %d bytes, v%d).\n", width, height, splash_size, version); -+ if (version == 1) { -+ printk(KERN_WARNING "bootsplash: Using deprecated v1 header. Updating your splash utility recommended.\n"); -+ printk(KERN_INFO "bootsplash: Find the latest version at http://www.bootsplash.org/\n"); -+ } -+ -+ /* fake penguin box for older formats */ -+ if (version == 1) -+ boxcnt = splash_mkpenguin(sd, sd->splash_text_xo + 10, sd->splash_text_yo + 10, sd->splash_text_wi - 20, sd->splash_text_he - 20, 0xf0, 0xf0, 0xf0); -+ else if (version == 2) -+ boxcnt = splash_mkpenguin(sd, splash_gets(ndata, 24), splash_gets(ndata, 26), splash_gets(ndata, 28), splash_gets(ndata, 30), splash_getb(ndata, 32), splash_getb(ndata, 33), splash_getb(ndata, 34)); -+ -+ memcpy((char *)sd + sizeof(*sd) + (version < 3 ? boxcnt * 12 : 0), ndata + len, splash_size); -+ sd->splash_boxcount = boxcnt; -+ sd->splash_boxes = (unsigned char *)sd + sizeof(*sd); -+ sd->splash_palette = sd->splash_boxes + boxcnt * 12; -+ sd->splash_jpeg = sd->splash_palette + palcnt; -+ sd->splash_palcnt = palcnt / 3; -+ sd->splash_dosilent = sd->splash_silentjpeg != 0 ? (oldsilent == -1 ? 
1 : oldsilent) : 0; -+ return unit; -+ } -+ printk(KERN_ERR "bootsplash: ...no good signature found.\n"); -+ return -1; -+} -+ -+int splash_verbose(void) -+{ -+ struct vc_data *vc; -+ struct fb_info *info; -+ -+ if (!splash_usesilent) -+ return 0; -+ -+ vc = vc_cons[0].d; -+ -+ if (!vc || !vc->vc_splash_data || !vc->vc_splash_data->splash_state) -+ return 0; -+ if (fg_console != vc->vc_num) -+ return 0; -+ if (!vc->vc_splash_data->splash_silentjpeg || !vc->vc_splash_data->splash_dosilent) -+ return 0; -+ vc->vc_splash_data->splash_dosilent = 0; -+ info = registered_fb[(int)con2fb_map[0]]; -+ if (!info->silent_screen_base) -+ return 0; -+ splashcopy(info->silent_screen_base, info->screen_base, info->var.yres, info->var.xres, info->fix.line_length, info->fix.line_length); -+ info->screen_base = info->silent_screen_base; -+ info->silent_screen_base = 0; -+ return 1; -+} -+ -+static void splash_off(struct fb_info *info) -+{ -+ if (info->silent_screen_base) -+ info->screen_base = info->silent_screen_base; -+ info->silent_screen_base = 0; -+ info->splash_data = 0; -+ if (info->splash_pic) -+ vfree(info->splash_pic); -+ info->splash_pic = 0; -+ info->splash_pic_size = 0; -+} -+ -+int splash_prepare(struct vc_data *vc, struct fb_info *info) -+{ -+ int err; -+ int width, height, depth, size, sbytes; -+ -+ if (!vc->vc_splash_data || !vc->vc_splash_data->splash_state) { -+ if (decdata) -+ vfree(decdata); -+ decdata = 0; -+ splash_off(info); -+ return -1; -+ } -+ -+ width = info->var.xres; -+ height = info->var.yres; -+ depth = info->var.bits_per_pixel; -+ if (depth != 16) { /* Other targets might need fixing */ -+ splash_off(info); -+ return -2; -+ } -+ -+ sbytes = ((width + 15) & ~15) * (depth >> 3); -+ size = sbytes * ((height + 15) & ~15); -+ if (size != info->splash_pic_size) -+ splash_off(info); -+ if (!info->splash_pic) -+ info->splash_pic = vmalloc(size); -+ -+ if (!info->splash_pic) { -+ printk(KERN_INFO "bootsplash: not enough memory.\n"); -+ splash_off(info); -+ 
return -3; -+ } -+ -+ if (!decdata) -+ decdata = vmalloc(sizeof(*decdata)); -+ -+ if (vc->vc_splash_data->splash_silentjpeg && vc->vc_splash_data->splash_dosilent) { -+ /* fill area after framebuffer with other jpeg */ -+ if ((err = jpeg_decode(vc->vc_splash_data->splash_silentjpeg, info->splash_pic, -+ ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) { -+ printk(KERN_INFO "bootsplash: error while decompressing silent picture: %s (%d)\n", jpg_errors[err - 1], err); -+ if (info->silent_screen_base) -+ info->screen_base = info->silent_screen_base; -+ vc->vc_splash_data->splash_dosilent = 0; -+ } else { -+ if (vc->vc_splash_data->splash_sboxcount) -+ boxit(info->splash_pic, sbytes, vc->vc_splash_data->splash_sboxes, -+ vc->vc_splash_data->splash_sboxcount, vc->vc_splash_data->splash_percent, 0); -+ -+ if (!info->silent_screen_base) -+ info->silent_screen_base = info->screen_base; -+ splashcopy(info->silent_screen_base, info->splash_pic, info->var.yres, info->var.xres, info->fix.line_length, sbytes); -+ info->screen_base = info->silent_screen_base + info->fix.line_length * info->var.yres; -+ } -+ } else if (info->silent_screen_base) -+ info->screen_base = info->silent_screen_base; -+ -+ if ((err = jpeg_decode(vc->vc_splash_data->splash_jpeg, info->splash_pic, -+ ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) { -+ printk(KERN_INFO "bootsplash: error while decompressing picture: %s (%d) .\n", jpg_errors[err - 1], err); -+ splash_off(info); -+ return -4; -+ } -+ info->splash_pic_size = size; -+ info->splash_bytes = sbytes; -+ if (vc->vc_splash_data->splash_boxcount) -+ boxit(info->splash_pic, sbytes, vc->vc_splash_data->splash_boxes, vc->vc_splash_data->splash_boxcount, vc->vc_splash_data->splash_percent, 0); -+ if (vc->vc_splash_data->splash_state) -+ info->splash_data = vc->vc_splash_data; -+ else -+ splash_off(info); -+ return 0; -+} -+ -+ -+#ifdef CONFIG_PROC_FS -+ -+#include -+ -+static int splash_read_proc(char *buffer, char **start, 
off_t offset, int size, -+ int *eof, void *data); -+static int splash_write_proc(struct file *file, const char *buffer, -+ unsigned long count, void *data); -+static int splash_status(struct vc_data *vc); -+static int splash_recolor(struct vc_data *vc); -+static int splash_proc_register(void); -+ -+static struct proc_dir_entry *proc_splash; -+ -+static int splash_recolor(struct vc_data *vc) -+{ -+ if (!vc->vc_splash_data) -+ return -1; -+ if (!vc->vc_splash_data->splash_state) -+ return 0; -+ con_remap_def_color(vc, vc->vc_splash_data->splash_color << 4 | vc->vc_splash_data->splash_fg_color); -+ if (fg_console == vc->vc_num) { -+ update_region(vc, -+ vc->vc_origin + vc->vc_size_row * vc->vc_top, -+ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2); -+ } -+ return 0; -+} -+ -+static int splash_status(struct vc_data *vc) -+{ -+ struct fb_info *info; -+ printk(KERN_INFO "bootsplash: status on console %d changed to %s\n", vc->vc_num, vc->vc_splash_data && vc->vc_splash_data->splash_state ? 
"on" : "off"); -+ -+ info = registered_fb[(int) con2fb_map[vc->vc_num]]; -+ if (fg_console == vc->vc_num) -+ splash_prepare(vc, info); -+ if (vc->vc_splash_data && vc->vc_splash_data->splash_state) { -+ con_remap_def_color(vc, vc->vc_splash_data->splash_color << 4 | vc->vc_splash_data->splash_fg_color); -+ /* vc_resize also calls con_switch which resets yscroll */ -+ vc_resize(vc, vc->vc_splash_data->splash_text_wi / vc->vc_font.width, vc->vc_splash_data->splash_text_he / vc->vc_font.height); -+ if (fg_console == vc->vc_num) { -+ update_region(vc, -+ vc->vc_origin + vc->vc_size_row * vc->vc_top, -+ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2); -+ splash_clear_margins(vc->vc_splash_data, vc, info, 0); -+ } -+ } else { -+ /* Switch bootsplash off */ -+ con_remap_def_color(vc, 0x07); -+ vc_resize(vc, info->var.xres / vc->vc_font.width, info->var.yres / vc->vc_font.height); -+ } -+ return 0; -+} -+ -+static int splash_read_proc(char *buffer, char **start, off_t offset, int size, -+ int *eof, void *data) -+{ -+ int len = 0; -+ off_t begin = 0; -+ struct vc_data *vc = vc_cons[0].d; -+ struct fb_info *info = registered_fb[(int)con2fb_map[0]]; -+ int color = vc->vc_splash_data ? vc->vc_splash_data->splash_color << 4 | -+ vc->vc_splash_data->splash_fg_color : splash_default >> 4; -+ int status = vc->vc_splash_data ? vc->vc_splash_data->splash_state & 1 : 0; -+ len += sprintf(buffer + len, "Splash screen v%s (0x%02x, %dx%d%s): %s\n", -+ SPLASH_VERSION, color, info->var.xres, info->var.yres, -+ (vc->vc_splash_data ? vc->vc_splash_data->splash_dosilent : 0)? ", silent" : "", -+ status ? "on" : "off"); -+ if (offset >= begin + len) -+ return 0; -+ -+ *start = buffer + (begin - offset); -+ -+ return (size < begin + len - offset ? 
size : begin + len - offset); -+} -+ -+void splash_set_percent(struct vc_data *vc, int pe) -+{ -+ struct fb_info *info; -+ struct fbcon_ops *ops; -+ int oldpe; -+ -+ if (pe < 0) -+ pe = 0; -+ if (pe > 65535) -+ pe = 65535; -+ pe += pe > 32767;; -+ -+ if (!vc->vc_splash_data || vc->vc_splash_data->splash_percent == pe) -+ return; -+ -+ oldpe = vc->vc_splash_data->splash_percent; -+ vc->vc_splash_data->splash_percent = pe; -+ if (fg_console != vc->vc_num || !vc->vc_splash_data->splash_state) { -+ return; -+ } -+ info = registered_fb[(int) con2fb_map[vc->vc_num]]; -+ ops = info->fbcon_par; -+ if (ops->blank_state) -+ return; -+ if (!vc->vc_splash_data->splash_overpaintok || pe == 65536 || pe < oldpe) { -+ if (splash_hasinter(vc->vc_splash_data->splash_boxes, vc->vc_splash_data->splash_boxcount)) -+ splash_status(vc); -+ else -+ splash_prepare(vc, info); -+ } else { -+ if (vc->vc_splash_data->splash_silentjpeg && vc->vc_splash_data->splash_dosilent && info->silent_screen_base) -+ boxit(info->silent_screen_base, info->fix.line_length, vc->vc_splash_data->splash_sboxes, vc->vc_splash_data->splash_sboxcount, vc->vc_splash_data->splash_percent, 1); -+ boxit(info->screen_base, info->fix.line_length, vc->vc_splash_data->splash_boxes, vc->vc_splash_data->splash_boxcount, vc->vc_splash_data->splash_percent, 1); -+ } -+} -+ -+static int splash_write_proc(struct file *file, const char *buffer, -+ unsigned long count, void *data) -+{ -+ int new, unit; -+ struct vc_data *vc; -+ -+ if (!buffer || !splash_default) -+ return count; -+ -+ console_lock(); -+ unit = 0; -+ if (buffer[0] == '@' && buffer[1] >= '0' && buffer[1] <= '9') { -+ unit = buffer[1] - '0'; -+ buffer += 2; -+ if (*buffer >= '0' && *buffer <= '9') { -+ unit = unit * 10 + *buffer - '0'; -+ buffer++; -+ } -+ if (*buffer == ' ') -+ buffer++; -+ if (unit >= MAX_NR_CONSOLES || !vc_cons[unit].d) { -+ console_unlock(); -+ return count; -+ } -+ } -+ vc = vc_cons[unit].d; -+ if (!strncmp(buffer, "redraw", 6)) { -+ 
splash_status(vc); -+ console_unlock(); -+ return count; -+ } -+ if (!strncmp(buffer, "show", 4) || !strncmp(buffer, "hide", 4)) { -+ int pe; -+ -+ if (buffer[4] == ' ' && buffer[5] == 'p') -+ pe = 0; -+ else if (buffer[4] == '\n') -+ pe = 65535; -+ else -+ pe = simple_strtoul(buffer + 5, NULL, 0); -+ if (pe < 0) -+ pe = 0; -+ if (pe > 65535) -+ pe = 65535; -+ if (*buffer == 'h') -+ pe = 65535 - pe; -+ splash_set_percent(vc, pe); -+ console_unlock(); -+ return count; -+ } -+ if (!strncmp(buffer,"silent\n",7) || !strncmp(buffer,"verbose\n",8)) { -+ if (vc->vc_splash_data && vc->vc_splash_data->splash_silentjpeg) { -+ if (vc->vc_splash_data->splash_dosilent != (buffer[0] == 's')) { -+ vc->vc_splash_data->splash_dosilent = buffer[0] == 's'; -+ splash_status(vc); -+ } -+ } -+ console_unlock(); -+ return count; -+ } -+ if (!strncmp(buffer,"freesilent\n",11)) { -+ if (vc->vc_splash_data && vc->vc_splash_data->splash_silentjpeg) { -+ printk(KERN_INFO "bootsplash: freeing silent jpeg\n"); -+ vc->vc_splash_data->splash_silentjpeg = 0; -+ vfree(vc->vc_splash_data->splash_sboxes); -+ vc->vc_splash_data->splash_sboxes = 0; -+ vc->vc_splash_data->splash_sboxcount = 0; -+ if (vc->vc_splash_data->splash_dosilent) -+ splash_status(vc); -+ vc->vc_splash_data->splash_dosilent = 0; -+ } -+ console_unlock(); -+ return count; -+ } -+ -+ if (!strncmp(buffer, "BOOTSPL", 7)) { -+ int up = -1; -+ unit = splash_getraw((unsigned char *)buffer, (unsigned char *)buffer + count, &up); -+ if (unit >= 0) { -+ vc = vc_cons[unit].d; -+ if (up == -1) -+ splash_status(vc); -+ else { -+ struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]]; -+ struct fbcon_ops *ops = info->fbcon_par; -+ if (ops->blank_state) -+ up = 0; -+ if ((up & 2) != 0 && vc->vc_splash_data->splash_silentjpeg && vc->vc_splash_data->splash_dosilent && info->silent_screen_base) -+ boxit(info->silent_screen_base, info->fix.line_length, vc->vc_splash_data->splash_sboxes, vc->vc_splash_data->splash_sboxcount, 
vc->vc_splash_data->splash_percent, 1); -+ if ((up & 1) != 0) -+ boxit(info->screen_base, info->fix.line_length, vc->vc_splash_data->splash_boxes, vc->vc_splash_data->splash_boxcount, vc->vc_splash_data->splash_percent, 1); -+ } -+ } -+ console_unlock(); -+ return count; -+ } -+ if (!vc->vc_splash_data) { -+ console_unlock(); -+ return count; -+ } -+ if (buffer[0] == 't') { -+ vc->vc_splash_data->splash_state ^= 1; -+ splash_status(vc); -+ console_unlock(); -+ return count; -+ } -+ new = simple_strtoul(buffer, NULL, 0); -+ if (new > 1) { -+ /* expert user */ -+ vc->vc_splash_data->splash_color = new >> 8 & 0xff; -+ vc->vc_splash_data->splash_fg_color = new >> 4 & 0x0f; -+ } -+ if ((new & 1) == vc->vc_splash_data->splash_state) -+ splash_recolor(vc); -+ else { -+ vc->vc_splash_data->splash_state = new & 1; -+ splash_status(vc); -+ } -+ console_unlock(); -+ return count; -+} -+ -+static int splash_proc_register(void) -+{ -+ if ((proc_splash = create_proc_entry("splash", 0, 0))) { -+ proc_splash->read_proc = splash_read_proc; -+ proc_splash->write_proc = splash_write_proc; -+ return 0; -+ } -+ return 1; -+} -+ -+# if 0 -+static int splash_proc_unregister(void) -+{ -+ if (proc_splash) -+ remove_proc_entry("splash", 0); -+ return 0; -+} -+# endif -+#endif /* CONFIG_PROC_FS */ -+ -+void splash_init(void) -+{ -+ struct fb_info *info; -+ struct vc_data *vc; -+ int isramfs = 1; -+ int fd; -+ int len; -+ int max_len = 1024*1024*2; -+ char *mem; -+ -+ if (splash_registered) -+ return; -+ vc = vc_cons[0].d; -+ info = registered_fb[0]; -+ if (!vc || !info || info->var.bits_per_pixel != 16) -+ return; -+#ifdef CONFIG_PROC_FS -+ splash_proc_register(); -+#endif -+ splash_registered = 1; -+ if (vc->vc_splash_data) -+ return; -+ if ((fd = sys_open("/bootsplash", O_RDONLY, 0)) < 0) { -+ isramfs = 0; -+ fd = sys_open("/initrd.image", O_RDONLY, 0); -+ } -+ if (fd < 0) -+ return; -+ if ((len = (int)sys_lseek(fd, (off_t)0, 2)) <= 0) { -+ sys_close(fd); -+ return; -+ } -+ /* Don't look 
for more than the last 2MB */ -+ if (len > max_len) { -+ printk( KERN_INFO "bootsplash: scanning last %dMB of initrd for signature\n", -+ max_len>>20); -+ sys_lseek(fd, (off_t)(len - max_len), 0); -+ len = max_len; -+ } else { -+ sys_lseek(fd, (off_t)0, 0); -+ } -+ -+ mem = vmalloc(len); -+ if (mem) { -+ console_lock(); -+ if ((int)sys_read(fd, mem, len) == len && splash_getraw((unsigned char *)mem, (unsigned char *)mem + len, (int *)0) == 0 && vc->vc_splash_data) -+ vc->vc_splash_data->splash_state = splash_default & 1; -+ console_unlock(); -+ vfree(mem); -+ } -+ sys_close(fd); -+ if (isramfs) -+ sys_unlink("/bootsplash"); -+ return; -+} -+ ---- /dev/null -+++ b/drivers/video/bootsplash/bootsplash.h -@@ -0,0 +1,41 @@ -+/* -+ * linux/drivers/video/bootsplash/bootsplash.h - splash screen definition. -+ * -+ * (w) 2001-2003 by Volker Poplawski, -+ * Stefan Reinauer, -+ * -+ * -+ * idea and SuSE screen work by Ken Wimer, -+ */ -+ -+#ifndef __BOOTSPLASH_H -+#define __BOOTSPLASH_H -+ -+struct fb_info; -+ -+/* splash.c */ -+extern int splash_prepare(struct vc_data *, struct fb_info *); -+extern void splash_init(void); -+ -+/* splash_render.c */ -+extern void splash_putcs(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+ const unsigned short *s, int count, int ypos, int xpos); -+extern void splash_putc(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+ int c, int ypos, int xpos); -+extern void splashcopy(u8 *dst, u8 *src, int height, int width, int dstbytes, int srcbytes); -+extern void splash_clear(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int sy, -+ int sx, int height, int width); -+extern void splash_bmove(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int sy, -+ int sx, int dy, int dx, int height, int width); -+extern void splash_clear_margins(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+ int bottom_only); -+extern int splash_cursor(struct splash_data *sd, struct 
fb_info *info, struct fb_cursor *cursor); -+extern void splash_bmove_redraw(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+ int y, int sx, int dx, int width); -+extern void splash_blank(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+ int blank); -+ -+/* vt.c */ -+extern void con_remap_def_color(struct vc_data *vc, int new_color); -+ -+#endif ---- /dev/null -+++ b/drivers/video/bootsplash/decode-jpg.c -@@ -0,0 +1,957 @@ -+/* -+ * linux/drivers/video/bootsplash/decode-jpg.c - a tiny jpeg decoder. -+ * -+ * (w) August 2001 by Michael Schroeder, -+ * -+ */ -+ -+#include -+#include -+ -+#include "decode-jpg.h" -+ -+#define ISHIFT 11 -+ -+#define IFIX(a) ((int)((a) * (1 << ISHIFT) + .5)) -+#define IMULT(a, b) (((a) * (b)) >> ISHIFT) -+#define ITOINT(a) ((a) >> ISHIFT) -+ -+#ifndef __P -+# define __P(x) x -+#endif -+ -+/* special markers */ -+#define M_BADHUFF -1 -+#define M_EOF 0x80 -+ -+struct in { -+ unsigned char *p; -+ unsigned int bits; -+ int left; -+ int marker; -+ -+ int (*func) __P((void *)); -+ void *data; -+}; -+ -+/*********************************/ -+struct dec_hufftbl; -+struct enc_hufftbl; -+ -+union hufftblp { -+ struct dec_hufftbl *dhuff; -+ struct enc_hufftbl *ehuff; -+}; -+ -+struct scan { -+ int dc; /* old dc value */ -+ -+ union hufftblp hudc; -+ union hufftblp huac; -+ int next; /* when to switch to next scan */ -+ -+ int cid; /* component id */ -+ int hv; /* horiz/vert, copied from comp */ -+ int tq; /* quant tbl, copied from comp */ -+}; -+ -+/*********************************/ -+ -+#define DECBITS 10 /* seems to be the optimum */ -+ -+struct dec_hufftbl { -+ int maxcode[17]; -+ int valptr[16]; -+ unsigned char vals[256]; -+ unsigned int llvals[1 << DECBITS]; -+}; -+ -+static void decode_mcus __P((struct in *, int *, int, struct scan *, int *)); -+static int dec_readmarker __P((struct in *)); -+static void dec_makehuff __P((struct dec_hufftbl *, int *, unsigned char *)); -+ -+static void setinput 
__P((struct in *, unsigned char *)); -+/*********************************/ -+ -+#undef PREC -+#define PREC int -+ -+static void idctqtab __P((unsigned char *, PREC *)); -+static void idct __P((int *, int *, PREC *, PREC, int)); -+static void scaleidctqtab __P((PREC *, PREC)); -+ -+/*********************************/ -+ -+static void initcol __P((PREC[][64])); -+ -+static void col221111 __P((int *, unsigned char *, int)); -+static void col221111_16 __P((int *, unsigned char *, int)); -+ -+/*********************************/ -+ -+#define M_SOI 0xd8 -+#define M_APP0 0xe0 -+#define M_DQT 0xdb -+#define M_SOF0 0xc0 -+#define M_DHT 0xc4 -+#define M_DRI 0xdd -+#define M_SOS 0xda -+#define M_RST0 0xd0 -+#define M_EOI 0xd9 -+#define M_COM 0xfe -+ -+static unsigned char *datap; -+ -+static int getbyte(void) -+{ -+ return *datap++; -+} -+ -+static int getword(void) -+{ -+ int c1, c2; -+ c1 = *datap++; -+ c2 = *datap++; -+ return c1 << 8 | c2; -+} -+ -+struct comp { -+ int cid; -+ int hv; -+ int tq; -+}; -+ -+#define MAXCOMP 4 -+struct jpginfo { -+ int nc; /* number of components */ -+ int ns; /* number of scans */ -+ int dri; /* restart interval */ -+ int nm; /* mcus til next marker */ -+ int rm; /* next restart marker */ -+}; -+ -+static struct jpginfo info; -+static struct comp comps[MAXCOMP]; -+ -+static struct scan dscans[MAXCOMP]; -+ -+static unsigned char quant[4][64]; -+ -+static struct dec_hufftbl dhuff[4]; -+ -+#define dec_huffdc (dhuff + 0) -+#define dec_huffac (dhuff + 2) -+ -+static struct in in; -+ -+static int readtables(int till) -+{ -+ int m, l, i, j, lq, pq, tq; -+ int tc, th, tt; -+ -+ for (;;) { -+ if (getbyte() != 0xff) -+ return -1; -+ if ((m = getbyte()) == till) -+ break; -+ -+ switch (m) { -+ case 0xc2: -+ return 0; -+ -+ case M_DQT: -+ lq = getword(); -+ while (lq > 2) { -+ pq = getbyte(); -+ tq = pq & 15; -+ if (tq > 3) -+ return -1; -+ pq >>= 4; -+ if (pq != 0) -+ return -1; -+ for (i = 0; i < 64; i++) -+ quant[tq][i] = getbyte(); -+ lq -= 64 + 1; 
-+ } -+ break; -+ -+ case M_DHT: -+ l = getword(); -+ while (l > 2) { -+ int hufflen[16], k; -+ unsigned char huffvals[256]; -+ -+ tc = getbyte(); -+ th = tc & 15; -+ tc >>= 4; -+ tt = tc * 2 + th; -+ if (tc > 1 || th > 1) -+ return -1; -+ for (i = 0; i < 16; i++) -+ hufflen[i] = getbyte(); -+ l -= 1 + 16; -+ k = 0; -+ for (i = 0; i < 16; i++) { -+ for (j = 0; j < hufflen[i]; j++) -+ huffvals[k++] = getbyte(); -+ l -= hufflen[i]; -+ } -+ dec_makehuff(dhuff + tt, hufflen, -+ huffvals); -+ } -+ break; -+ -+ case M_DRI: -+ l = getword(); -+ info.dri = getword(); -+ break; -+ -+ default: -+ l = getword(); -+ while (l-- > 2) -+ getbyte(); -+ break; -+ } -+ } -+ return 0; -+} -+ -+static void dec_initscans(void) -+{ -+ int i; -+ -+ info.nm = info.dri + 1; -+ info.rm = M_RST0; -+ for (i = 0; i < info.ns; i++) -+ dscans[i].dc = 0; -+} -+ -+static int dec_checkmarker(void) -+{ -+ int i; -+ -+ if (dec_readmarker(&in) != info.rm) -+ return -1; -+ info.nm = info.dri; -+ info.rm = (info.rm + 1) & ~0x08; -+ for (i = 0; i < info.ns; i++) -+ dscans[i].dc = 0; -+ return 0; -+} -+ -+int jpeg_check_size(unsigned char *buf, int width, int height) -+{ -+ datap = buf; -+ getbyte(); -+ getbyte(); -+ readtables(M_SOF0); -+ getword(); -+ getbyte(); -+ if (height != getword() || width != getword()) -+ return 0; -+ return 1; -+} -+ -+int jpeg_decode(buf, pic, width, height, depth, decdata) -+unsigned char *buf, *pic; -+int width, height, depth; -+struct jpeg_decdata *decdata; -+{ -+ int i, j, m, tac, tdc; -+ int mcusx, mcusy, mx, my; -+ int max[6]; -+ -+ if (!decdata || !buf || !pic) -+ return -1; -+ datap = buf; -+ if (getbyte() != 0xff) -+ return ERR_NO_SOI; -+ if (getbyte() != M_SOI) -+ return ERR_NO_SOI; -+ if (readtables(M_SOF0)) -+ return ERR_BAD_TABLES; -+ getword(); -+ i = getbyte(); -+ if (i != 8) -+ return ERR_NOT_8BIT; -+ if (((getword() + 15) & ~15) != height) -+ return ERR_HEIGHT_MISMATCH; -+ if (((getword() + 15) & ~15) != width) -+ return ERR_WIDTH_MISMATCH; -+ if ((height & 
15) || (width & 15)) -+ return ERR_BAD_WIDTH_OR_HEIGHT; -+ info.nc = getbyte(); -+ if (info.nc > MAXCOMP) -+ return ERR_TOO_MANY_COMPPS; -+ for (i = 0; i < info.nc; i++) { -+ int h, v; -+ comps[i].cid = getbyte(); -+ comps[i].hv = getbyte(); -+ v = comps[i].hv & 15; -+ h = comps[i].hv >> 4; -+ comps[i].tq = getbyte(); -+ if (h > 3 || v > 3) -+ return ERR_ILLEGAL_HV; -+ if (comps[i].tq > 3) -+ return ERR_QUANT_TABLE_SELECTOR; -+ } -+ if (readtables(M_SOS)) -+ return ERR_BAD_TABLES; -+ getword(); -+ info.ns = getbyte(); -+ if (info.ns != 3) -+ return ERR_NOT_YCBCR_221111; -+ for (i = 0; i < 3; i++) { -+ dscans[i].cid = getbyte(); -+ tdc = getbyte(); -+ tac = tdc & 15; -+ tdc >>= 4; -+ if (tdc > 1 || tac > 1) -+ return ERR_QUANT_TABLE_SELECTOR; -+ for (j = 0; j < info.nc; j++) -+ if (comps[j].cid == dscans[i].cid) -+ break; -+ if (j == info.nc) -+ return ERR_UNKNOWN_CID_IN_SCAN; -+ dscans[i].hv = comps[j].hv; -+ dscans[i].tq = comps[j].tq; -+ dscans[i].hudc.dhuff = dec_huffdc + tdc; -+ dscans[i].huac.dhuff = dec_huffac + tac; -+ } -+ -+ i = getbyte(); -+ j = getbyte(); -+ m = getbyte(); -+ -+ if (i != 0 || j != 63 || m != 0) -+ return ERR_NOT_SEQUENTIAL_DCT; -+ -+ if (dscans[0].cid != 1 || dscans[1].cid != 2 || dscans[2].cid != 3) -+ return ERR_NOT_YCBCR_221111; -+ -+ if (dscans[0].hv != 0x22 || dscans[1].hv != 0x11 || dscans[2].hv != 0x11) -+ return ERR_NOT_YCBCR_221111; -+ -+ mcusx = width >> 4; -+ mcusy = height >> 4; -+ -+ -+ idctqtab(quant[dscans[0].tq], decdata->dquant[0]); -+ idctqtab(quant[dscans[1].tq], decdata->dquant[1]); -+ idctqtab(quant[dscans[2].tq], decdata->dquant[2]); -+ initcol(decdata->dquant); -+ setinput(&in, datap); -+ -+#if 0 -+ /* landing zone */ -+ img[len] = 0; -+ img[len + 1] = 0xff; -+ img[len + 2] = M_EOF; -+#endif -+ -+ dec_initscans(); -+ -+ dscans[0].next = 6 - 4; -+ dscans[1].next = 6 - 4 - 1; -+ dscans[2].next = 6 - 4 - 1 - 1; /* 411 encoding */ -+ for (my = 0; my < mcusy; my++) { -+ for (mx = 0; mx < mcusx; mx++) { -+ if (info.dri 
&& !--info.nm) -+ if (dec_checkmarker()) -+ return ERR_WRONG_MARKER; -+ -+ decode_mcus(&in, decdata->dcts, 6, dscans, max); -+ idct(decdata->dcts, decdata->out, decdata->dquant[0], IFIX(128.5), max[0]); -+ idct(decdata->dcts + 64, decdata->out + 64, decdata->dquant[0], IFIX(128.5), max[1]); -+ idct(decdata->dcts + 128, decdata->out + 128, decdata->dquant[0], IFIX(128.5), max[2]); -+ idct(decdata->dcts + 192, decdata->out + 192, decdata->dquant[0], IFIX(128.5), max[3]); -+ idct(decdata->dcts + 256, decdata->out + 256, decdata->dquant[1], IFIX(0.5), max[4]); -+ idct(decdata->dcts + 320, decdata->out + 320, decdata->dquant[2], IFIX(0.5), max[5]); -+ -+ switch (depth) { -+ case 24: -+ col221111(decdata->out, pic + (my * 16 * mcusx + mx) * 16 * 3, mcusx * 16 * 3); -+ break; -+ case 16: -+ col221111_16(decdata->out, pic + (my * 16 * mcusx + mx) * (16 * 2), mcusx * (16 * 2)); -+ break; -+ default: -+ return ERR_DEPTH_MISMATCH; -+ break; -+ } -+ } -+ } -+ -+ m = dec_readmarker(&in); -+ if (m != M_EOI) -+ return ERR_NO_EOI; -+ -+ return 0; -+} -+ -+/****************************************************************/ -+/************** huffman decoder ***************/ -+/****************************************************************/ -+ -+static int fillbits __P((struct in *, int, unsigned int)); -+static int dec_rec2 -+__P((struct in *, struct dec_hufftbl *, int *, int, int)); -+ -+static void setinput(in, p) -+struct in *in; -+unsigned char *p; -+{ -+ in->p = p; -+ in->left = 0; -+ in->bits = 0; -+ in->marker = 0; -+} -+ -+static int fillbits(in, le, bi) -+struct in *in; -+int le; -+unsigned int bi; -+{ -+ int b, m; -+ -+ if (in->marker) { -+ if (le <= 16) -+ in->bits = bi << 16, le += 16; -+ return le; -+ } -+ while (le <= 24) { -+ b = *in->p++; -+ if (b == 0xff && (m = *in->p++) != 0) { -+ if (m == M_EOF) { -+ if (in->func && (m = in->func(in->data)) == 0) -+ continue; -+ } -+ in->marker = m; -+ if (le <= 16) -+ bi = bi << 16, le += 16; -+ break; -+ } -+ bi = bi << 8 | b; 
-+ le += 8; -+ } -+ in->bits = bi; /* tmp... 2 return values needed */ -+ return le; -+} -+ -+static int dec_readmarker(in) -+struct in *in; -+{ -+ int m; -+ -+ in->left = fillbits(in, in->left, in->bits); -+ if ((m = in->marker) == 0) -+ return 0; -+ in->left = 0; -+ in->marker = 0; -+ return m; -+} -+ -+#define LEBI_DCL int le, bi -+#define LEBI_GET(in) (le = in->left, bi = in->bits) -+#define LEBI_PUT(in) (in->left = le, in->bits = bi) -+ -+#define GETBITS(in, n) ( \ -+ (le < (n) ? le = fillbits(in, le, bi), bi = in->bits : 0), \ -+ (le -= (n)), \ -+ bi >> le & ((1 << (n)) - 1) \ -+) -+ -+#define UNGETBITS(in, n) ( \ -+ le += (n) \ -+) -+ -+ -+static int dec_rec2(in, hu, runp, c, i) -+struct in *in; -+struct dec_hufftbl *hu; -+int *runp; -+int c, i; -+{ -+ LEBI_DCL; -+ -+ LEBI_GET(in); -+ if (i) { -+ UNGETBITS(in, i & 127); -+ *runp = i >> 8 & 15; -+ i >>= 16; -+ } else { -+ for (i = DECBITS; (c = ((c << 1) | GETBITS(in, 1))) >= (hu->maxcode[i]); i++); -+ if (i >= 16) { -+ in->marker = M_BADHUFF; -+ return 0; -+ } -+ i = hu->vals[hu->valptr[i] + c - hu->maxcode[i - 1] * 2]; -+ *runp = i >> 4; -+ i &= 15; -+ } -+ if (i == 0) { /* sigh, 0xf0 is 11 bit */ -+ LEBI_PUT(in); -+ return 0; -+ } -+ /* receive part */ -+ c = GETBITS(in, i); -+ if (c < (1 << (i - 1))) -+ c += (-1 << i) + 1; -+ LEBI_PUT(in); -+ return c; -+} -+ -+#define DEC_REC(in, hu, r, i) ( \ -+ r = GETBITS(in, DECBITS), \ -+ i = hu->llvals[r], \ -+ i & 128 ? 
\ -+ ( \ -+ UNGETBITS(in, i & 127), \ -+ r = i >> 8 & 15, \ -+ i >> 16 \ -+ ) \ -+ : \ -+ ( \ -+ LEBI_PUT(in), \ -+ i = dec_rec2(in, hu, &r, r, i), \ -+ LEBI_GET(in), \ -+ i \ -+ ) \ -+) -+ -+static void decode_mcus(in, dct, n, sc, maxp) -+struct in *in; -+int *dct; -+int n; -+struct scan *sc; -+int *maxp; -+{ -+ struct dec_hufftbl *hu; -+ int i, r, t; -+ LEBI_DCL; -+ -+ memset(dct, 0, n * 64 * sizeof(*dct)); -+ LEBI_GET(in); -+ while (n-- > 0) { -+ hu = sc->hudc.dhuff; -+ *dct++ = (sc->dc += DEC_REC(in, hu, r, t)); -+ -+ hu = sc->huac.dhuff; -+ i = 63; -+ while (i > 0) { -+ t = DEC_REC(in, hu, r, t); -+ if (t == 0 && r == 0) { -+ dct += i; -+ break; -+ } -+ dct += r; -+ *dct++ = t; -+ i -= r + 1; -+ } -+ *maxp++ = 64 - i; -+ if (n == sc->next) -+ sc++; -+ } -+ LEBI_PUT(in); -+} -+ -+static void dec_makehuff(hu, hufflen, huffvals) -+struct dec_hufftbl *hu; -+int *hufflen; -+unsigned char *huffvals; -+{ -+ int code, k, i, j, d, x, c, v; -+ for (i = 0; i < (1 << DECBITS); i++) -+ hu->llvals[i] = 0; -+ -+/* -+ * llvals layout: -+ * -+ * value v already known, run r, backup u bits: -+ * vvvvvvvvvvvvvvvv 0000 rrrr 1 uuuuuuu -+ * value unknown, size b bits, run r, backup u bits: -+ * 000000000000bbbb 0000 rrrr 0 uuuuuuu -+ * value and size unknown: -+ * 0000000000000000 0000 0000 0 0000000 -+ */ -+ code = 0; -+ k = 0; -+ for (i = 0; i < 16; i++, code <<= 1) { /* sizes */ -+ hu->valptr[i] = k; -+ for (j = 0; j < hufflen[i]; j++) { -+ hu->vals[k] = *huffvals++; -+ if (i < DECBITS) { -+ c = code << (DECBITS - 1 - i); -+ v = hu->vals[k] & 0x0f; /* size */ -+ for (d = 1 << (DECBITS - 1 - i); --d >= 0;) { -+ if (v + i < DECBITS) { /* both fit in table */ -+ x = d >> (DECBITS - 1 - v - -+ i); -+ if (v && x < (1 << (v - 1))) -+ x += (-1 << v) + 1; -+ x = x << 16 | (hu-> vals[k] & 0xf0) << 4 | -+ (DECBITS - (i + 1 + v)) | 128; -+ } else -+ x = v << 16 | (hu-> vals[k] & 0xf0) << 4 | -+ (DECBITS - (i + 1)); -+ hu->llvals[c | d] = x; -+ } -+ } -+ code++; -+ k++; -+ } -+ 
hu->maxcode[i] = code; -+ } -+ hu->maxcode[16] = 0x20000; /* always terminate decode */ -+} -+ -+/****************************************************************/ -+/************** idct ***************/ -+/****************************************************************/ -+ -+#define ONE ((PREC)IFIX(1.)) -+#define S2 ((PREC)IFIX(0.382683432)) -+#define C2 ((PREC)IFIX(0.923879532)) -+#define C4 ((PREC)IFIX(0.707106781)) -+ -+#define S22 ((PREC)IFIX(2 * 0.382683432)) -+#define C22 ((PREC)IFIX(2 * 0.923879532)) -+#define IC4 ((PREC)IFIX(1 / 0.707106781)) -+ -+#define C3IC1 ((PREC)IFIX(0.847759065)) /* c3/c1 */ -+#define C5IC1 ((PREC)IFIX(0.566454497)) /* c5/c1 */ -+#define C7IC1 ((PREC)IFIX(0.198912367)) /* c7/c1 */ -+ -+#define XPP(a,b) (t = a + b, b = a - b, a = t) -+#define XMP(a,b) (t = a - b, b = a + b, a = t) -+#define XPM(a,b) (t = a + b, b = b - a, a = t) -+ -+#define ROT(a,b,s,c) ( t = IMULT(a + b, s), \ -+ a = IMULT(a, c - s) + t, \ -+ b = IMULT(b, c + s) - t) -+ -+#define IDCT \ -+( \ -+ XPP(t0, t1), \ -+ XMP(t2, t3), \ -+ t2 = IMULT(t2, IC4) - t3, \ -+ XPP(t0, t3), \ -+ XPP(t1, t2), \ -+ XMP(t4, t7), \ -+ XPP(t5, t6), \ -+ XMP(t5, t7), \ -+ t5 = IMULT(t5, IC4), \ -+ ROT(t4, t6, S22, C22),\ -+ t6 -= t7, \ -+ t5 -= t6, \ -+ t4 -= t5, \ -+ XPP(t0, t7), \ -+ XPP(t1, t6), \ -+ XPP(t2, t5), \ -+ XPP(t3, t4) \ -+) -+ -+static unsigned char zig2[64] = { -+ 0, 2, 3, 9, 10, 20, 21, 35, -+ 14, 16, 25, 31, 39, 46, 50, 57, -+ 5, 7, 12, 18, 23, 33, 37, 48, -+ 27, 29, 41, 44, 52, 55, 59, 62, -+ 15, 26, 30, 40, 45, 51, 56, 58, -+ 1, 4, 8, 11, 19, 22, 34, 36, -+ 28, 42, 43, 53, 54, 60, 61, 63, -+ 6, 13, 17, 24, 32, 38, 47, 49 -+}; -+ -+void idct(in, out, quant, off, max) -+int *in; -+int *out; -+PREC *quant; -+PREC off; -+int max; -+{ -+ PREC t0, t1, t2, t3, t4, t5, t6, t7, t; -+ PREC tmp[64], *tmpp; -+ int i, j; -+ unsigned char *zig2p; -+ -+ t0 = off; -+ if (max == 1) { -+ t0 += in[0] * quant[0]; -+ for (i = 0; i < 64; i++) -+ out[i] = ITOINT(t0); -+ return; -+ } -+ 
zig2p = zig2; -+ tmpp = tmp; -+ for (i = 0; i < 8; i++) { -+ j = *zig2p++; -+ t0 += in[j] * quant[j]; -+ j = *zig2p++; -+ t5 = in[j] * quant[j]; -+ j = *zig2p++; -+ t2 = in[j] * quant[j]; -+ j = *zig2p++; -+ t7 = in[j] * quant[j]; -+ j = *zig2p++; -+ t1 = in[j] * quant[j]; -+ j = *zig2p++; -+ t4 = in[j] * quant[j]; -+ j = *zig2p++; -+ t3 = in[j] * quant[j]; -+ j = *zig2p++; -+ t6 = in[j] * quant[j]; -+ IDCT; -+ tmpp[0 * 8] = t0; -+ tmpp[1 * 8] = t1; -+ tmpp[2 * 8] = t2; -+ tmpp[3 * 8] = t3; -+ tmpp[4 * 8] = t4; -+ tmpp[5 * 8] = t5; -+ tmpp[6 * 8] = t6; -+ tmpp[7 * 8] = t7; -+ tmpp++; -+ t0 = 0; -+ } -+ for (i = 0; i < 8; i++) { -+ t0 = tmp[8 * i + 0]; -+ t1 = tmp[8 * i + 1]; -+ t2 = tmp[8 * i + 2]; -+ t3 = tmp[8 * i + 3]; -+ t4 = tmp[8 * i + 4]; -+ t5 = tmp[8 * i + 5]; -+ t6 = tmp[8 * i + 6]; -+ t7 = tmp[8 * i + 7]; -+ IDCT; -+ out[8 * i + 0] = ITOINT(t0); -+ out[8 * i + 1] = ITOINT(t1); -+ out[8 * i + 2] = ITOINT(t2); -+ out[8 * i + 3] = ITOINT(t3); -+ out[8 * i + 4] = ITOINT(t4); -+ out[8 * i + 5] = ITOINT(t5); -+ out[8 * i + 6] = ITOINT(t6); -+ out[8 * i + 7] = ITOINT(t7); -+ } -+} -+ -+static unsigned char zig[64] = { -+ 0, 1, 5, 6, 14, 15, 27, 28, -+ 2, 4, 7, 13, 16, 26, 29, 42, -+ 3, 8, 12, 17, 25, 30, 41, 43, -+ 9, 11, 18, 24, 31, 40, 44, 53, -+ 10, 19, 23, 32, 39, 45, 52, 54, -+ 20, 22, 33, 38, 46, 51, 55, 60, -+ 21, 34, 37, 47, 50, 56, 59, 61, -+ 35, 36, 48, 49, 57, 58, 62, 63 -+}; -+ -+static PREC aaidct[8] = { -+ IFIX(0.3535533906), IFIX(0.4903926402), -+ IFIX(0.4619397663), IFIX(0.4157348062), -+ IFIX(0.3535533906), IFIX(0.2777851165), -+ IFIX(0.1913417162), IFIX(0.0975451610) -+}; -+ -+ -+static void idctqtab(qin, qout) -+unsigned char *qin; -+PREC *qout; -+{ -+ int i, j; -+ -+ for (i = 0; i < 8; i++) -+ for (j = 0; j < 8; j++) -+ qout[zig[i * 8 + j]] = qin[zig[i * 8 + j]] * -+ IMULT(aaidct[i], aaidct[j]); -+} -+ -+static void scaleidctqtab(q, sc) -+PREC *q; -+PREC sc; -+{ -+ int i; -+ -+ for (i = 0; i < 64; i++) -+ q[i] = IMULT(q[i], sc); -+} -+ 
-+/****************************************************************/ -+/************** color decoder ***************/ -+/****************************************************************/ -+ -+#define ROUND -+ -+/* -+ * YCbCr Color transformation: -+ * -+ * y:0..255 Cb:-128..127 Cr:-128..127 -+ * -+ * R = Y + 1.40200 * Cr -+ * G = Y - 0.34414 * Cb - 0.71414 * Cr -+ * B = Y + 1.77200 * Cb -+ * -+ * => -+ * Cr *= 1.40200; -+ * Cb *= 1.77200; -+ * Cg = 0.19421 * Cb + .50937 * Cr; -+ * R = Y + Cr; -+ * G = Y - Cg; -+ * B = Y + Cb; -+ * -+ * => -+ * Cg = (50 * Cb + 130 * Cr + 128) >> 8; -+ */ -+ -+static void initcol(q) -+PREC q[][64]; -+{ -+ scaleidctqtab(q[1], IFIX(1.77200)); -+ scaleidctqtab(q[2], IFIX(1.40200)); -+} -+ -+/* This is optimized for the stupid sun SUNWspro compiler. */ -+#define STORECLAMP(a,x) \ -+( \ -+ (a) = (x), \ -+ (unsigned int)(x) >= 256 ? \ -+ ((a) = (x) < 0 ? 0 : 255) \ -+ : \ -+ 0 \ -+) -+ -+#define CLAMP(x) ((unsigned int)(x) >= 256 ? ((x) < 0 ? 0 : 255) : (x)) -+ -+#ifdef ROUND -+ -+#define CBCRCG(yin, xin) \ -+( \ -+ cb = outc[0 +yin*8+xin], \ -+ cr = outc[64+yin*8+xin], \ -+ cg = (50 * cb + 130 * cr + 128) >> 8 \ -+) -+ -+#else -+ -+#define CBCRCG(yin, xin) \ -+( \ -+ cb = outc[0 +yin*8+xin], \ -+ cr = outc[64+yin*8+xin], \ -+ cg = (3 * cb + 8 * cr) >> 4 \ -+) -+ -+#endif -+ -+#define PIC(yin, xin, p, xout) \ -+( \ -+ y = outy[(yin) * 8 + xin], \ -+ STORECLAMP(p[(xout) * 3 + 0], y + cr), \ -+ STORECLAMP(p[(xout) * 3 + 1], y - cg), \ -+ STORECLAMP(p[(xout) * 3 + 2], y + cb) \ -+) -+ -+#ifdef __LITTLE_ENDIAN -+#define PIC_16(yin, xin, p, xout, add) \ -+( \ -+ y = outy[(yin) * 8 + xin], \ -+ y = ((CLAMP(y + cr + add*2+1) & 0xf8) << 8) | \ -+ ((CLAMP(y - cg + add ) & 0xfc) << 3) | \ -+ ((CLAMP(y + cb + add*2+1) ) >> 3), \ -+ p[(xout) * 2 + 0] = y & 0xff, \ -+ p[(xout) * 2 + 1] = y >> 8 \ -+) -+#else -+#ifdef CONFIG_PPC -+#define PIC_16(yin, xin, p, xout, add) \ -+( \ -+ y = outy[(yin) * 8 + xin], \ -+ y = ((CLAMP(y + cr + add*2+1) & 0xf8) << 
7) | \ -+ ((CLAMP(y - cg + add*2+1) & 0xf8) << 2) | \ -+ ((CLAMP(y + cb + add*2+1) ) >> 3), \ -+ p[(xout) * 2 + 0] = y >> 8, \ -+ p[(xout) * 2 + 1] = y & 0xff \ -+) -+#else -+#define PIC_16(yin, xin, p, xout, add) \ -+( \ -+ y = outy[(yin) * 8 + xin], \ -+ y = ((CLAMP(y + cr + add*2+1) & 0xf8) << 8) | \ -+ ((CLAMP(y - cg + add ) & 0xfc) << 3) | \ -+ ((CLAMP(y + cb + add*2+1) ) >> 3), \ -+ p[(xout) * 2 + 0] = y >> 8, \ -+ p[(xout) * 2 + 1] = y & 0xff \ -+) -+#endif -+#endif -+ -+#define PIC221111(xin) \ -+( \ -+ CBCRCG(0, xin), \ -+ PIC(xin / 4 * 8 + 0, (xin & 3) * 2 + 0, pic0, xin * 2 + 0), \ -+ PIC(xin / 4 * 8 + 0, (xin & 3) * 2 + 1, pic0, xin * 2 + 1), \ -+ PIC(xin / 4 * 8 + 1, (xin & 3) * 2 + 0, pic1, xin * 2 + 0), \ -+ PIC(xin / 4 * 8 + 1, (xin & 3) * 2 + 1, pic1, xin * 2 + 1) \ -+) -+ -+#define PIC221111_16(xin) \ -+( \ -+ CBCRCG(0, xin), \ -+ PIC_16(xin / 4 * 8 + 0, (xin & 3) * 2 + 0, pic0, xin * 2 + 0, 3), \ -+ PIC_16(xin / 4 * 8 + 0, (xin & 3) * 2 + 1, pic0, xin * 2 + 1, 0), \ -+ PIC_16(xin / 4 * 8 + 1, (xin & 3) * 2 + 0, pic1, xin * 2 + 0, 1), \ -+ PIC_16(xin / 4 * 8 + 1, (xin & 3) * 2 + 1, pic1, xin * 2 + 1, 2) \ -+) -+ -+static void col221111(out, pic, width) -+int *out; -+unsigned char *pic; -+int width; -+{ -+ int i, j, k; -+ unsigned char *pic0, *pic1; -+ int *outy, *outc; -+ int cr, cg, cb, y; -+ -+ pic0 = pic; -+ pic1 = pic + width; -+ outy = out; -+ outc = out + 64 * 4; -+ for (i = 2; i > 0; i--) { -+ for (j = 4; j > 0; j--) { -+ for (k = 0; k < 8; k++) { -+ PIC221111(k); -+ } -+ outc += 8; -+ outy += 16; -+ pic0 += 2 * width; -+ pic1 += 2 * width; -+ } -+ outy += 64 * 2 - 16 * 4; -+ } -+} -+ -+static void col221111_16(out, pic, width) -+int *out; -+unsigned char *pic; -+int width; -+{ -+ int i, j, k; -+ unsigned char *pic0, *pic1; -+ int *outy, *outc; -+ int cr, cg, cb, y; -+ -+ pic0 = pic; -+ pic1 = pic + width; -+ outy = out; -+ outc = out + 64 * 4; -+ for (i = 2; i > 0; i--) { -+ for (j = 4; j > 0; j--) { -+ for (k = 0; k < 8; k++) { -+ 
PIC221111_16(k); -+ } -+ outc += 8; -+ outy += 16; -+ pic0 += 2 * width; -+ pic1 += 2 * width; -+ } -+ outy += 64 * 2 - 16 * 4; -+ } -+} ---- /dev/null -+++ b/drivers/video/bootsplash/decode-jpg.h -@@ -0,0 +1,35 @@ -+/* -+ * linux/drivers/video/bootsplash/decode-jpg.h - a tiny jpeg decoder. -+ * -+ * (w) August 2001 by Michael Schroeder, -+ */ -+ -+#ifndef __DECODE_JPG_H -+#define __DECODE_JPG_H -+ -+#define ERR_NO_SOI 1 -+#define ERR_NOT_8BIT 2 -+#define ERR_HEIGHT_MISMATCH 3 -+#define ERR_WIDTH_MISMATCH 4 -+#define ERR_BAD_WIDTH_OR_HEIGHT 5 -+#define ERR_TOO_MANY_COMPPS 6 -+#define ERR_ILLEGAL_HV 7 -+#define ERR_QUANT_TABLE_SELECTOR 8 -+#define ERR_NOT_YCBCR_221111 9 -+#define ERR_UNKNOWN_CID_IN_SCAN 10 -+#define ERR_NOT_SEQUENTIAL_DCT 11 -+#define ERR_WRONG_MARKER 12 -+#define ERR_NO_EOI 13 -+#define ERR_BAD_TABLES 14 -+#define ERR_DEPTH_MISMATCH 15 -+ -+struct jpeg_decdata { -+ int dcts[6 * 64 + 16]; -+ int out[64 * 6]; -+ int dquant[3][64]; -+}; -+ -+extern int jpeg_decode(unsigned char *, unsigned char *, int, int, int, struct jpeg_decdata *); -+extern int jpeg_check_size(unsigned char *, int, int); -+ -+#endif ---- /dev/null -+++ b/drivers/video/bootsplash/render.c -@@ -0,0 +1,328 @@ -+/* -+ * linux/drivers/video/bootsplash/render.c - splash screen render functions. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "../console/fbcon.h" -+#include "bootsplash.h" -+ -+void splash_putcs(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+ const unsigned short *s, int count, int ypos, int xpos) -+{ -+ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; -+ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; -+ int fgshift = (vc->vc_hi_font_mask) ? 
9 : 8; -+ u8 *src; -+ u8 *dst, *splashsrc; -+ unsigned int d, x, y; -+ u32 dd, fgx, bgx; -+ u16 c = scr_readw(s); -+ -+ int fg_color, bg_color, transparent; -+ if (console_blanked) -+ return; -+ fg_color = attr_fgcol(fgshift, c); -+ bg_color = attr_bgcol(bgshift, c); -+ transparent = sd->splash_color == bg_color; -+ xpos = xpos * vc->vc_font.width + sd->splash_text_xo; -+ ypos = ypos * vc->vc_font.height + sd->splash_text_yo; -+ splashsrc = (u8 *)(info->splash_pic + ypos * info->splash_bytes + xpos * 2); -+ dst = (u8 *)(info->screen_base + ypos * info->fix.line_length + xpos * 2); -+ -+ fgx = ((u32 *)info->pseudo_palette)[fg_color]; -+ if (transparent && sd->splash_color == 15) { -+ if (fgx == 0xffea) -+ fgx = 0xfe4a; -+ else if (fgx == 0x57ea) -+ fgx = 0x0540; -+ else if (fgx == 0xffff) -+ fgx = 0x52aa; -+ } -+ bgx = ((u32 *)info->pseudo_palette)[bg_color]; -+ d = 0; -+ -+ while (count--) { -+ c = scr_readw(s++); -+ src = vc->vc_font.data + (c & charmask) * vc->vc_font.height * ((vc->vc_font.width + 7) >> 3); -+ -+ for (y = 0; y < vc->vc_font.height; y++) { -+ for (x = 0; x < vc->vc_font.width; x += 2) { -+ if ((x & 7) == 0) -+ d = *src++; -+ if (d & 0x80) -+ dd = fgx; -+ else -+ dd = transparent ? *(u16 *)splashsrc : bgx; -+ splashsrc += 2; -+ if (d & 0x40) -+ dd |= fgx << 16; -+ else -+ dd |= (transparent ? 
*(u16 *)splashsrc : bgx) << 16; -+ splashsrc += 2; -+ d <<= 2; -+ fb_writel(dd, dst); -+ dst += 4; -+ } -+ dst += info->fix.line_length - vc->vc_font.width * 2; -+ splashsrc += info->splash_bytes - vc->vc_font.width * 2; -+ } -+ dst -= info->fix.line_length * vc->vc_font.height - vc->vc_font.width * 2; -+ splashsrc -= info->splash_bytes * vc->vc_font.height - vc->vc_font.width * 2; -+ } -+} -+ -+static void splash_renderc(struct splash_data *sd, struct fb_info *info, int fg_color, int bg_color, u8 *src, int ypos, int xpos, int height, int width) -+{ -+ int transparent = sd->splash_color == bg_color; -+ u32 dd, fgx, bgx; -+ u8 *dst, *splashsrc; -+ unsigned int d, x, y; -+ -+ if (console_blanked) -+ return; -+ splashsrc = (u8 *)(info->splash_pic + ypos * info->splash_bytes + xpos * 2); -+ dst = (u8 *)(info->screen_base + ypos * info->fix.line_length + xpos * 2); -+ fgx = ((u32 *)info->pseudo_palette)[fg_color]; -+ if (transparent && sd->splash_color == 15) { -+ if (fgx == 0xffea) -+ fgx = 0xfe4a; -+ else if (fgx == 0x57ea) -+ fgx = 0x0540; -+ else if (fgx == 0xffff) -+ fgx = 0x52aa; -+ } -+ bgx = ((u32 *)info->pseudo_palette)[bg_color]; -+ d = 0; -+ for (y = 0; y < height; y++) { -+ for (x = 0; x < width; x += 2) { -+ if ((x & 7) == 0) -+ d = *src++; -+ if (d & 0x80) -+ dd = fgx; -+ else -+ dd = transparent ? *(u16 *)splashsrc : bgx; -+ splashsrc += 2; -+ if (d & 0x40) -+ dd |= fgx << 16; -+ else -+ dd |= (transparent ? *(u16 *)splashsrc : bgx) << 16; -+ splashsrc += 2; -+ d <<= 2; -+ fb_writel(dd, dst); -+ dst += 4; -+ } -+ dst += info->fix.line_length - width * 2; -+ splashsrc += info->splash_bytes - width * 2; -+ } -+} -+ -+void splash_putc(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+ int c, int ypos, int xpos) -+{ -+ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; -+ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; -+ int fgshift = (vc->vc_hi_font_mask) ? 
9 : 8; -+ u8 *src = vc->vc_font.data + (c & charmask) * vc->vc_font.height * ((vc->vc_font.width + 7) >> 3); -+ xpos = xpos * vc->vc_font.width + sd->splash_text_xo; -+ ypos = ypos * vc->vc_font.height + sd->splash_text_yo; -+ splash_renderc(sd, info, attr_fgcol(fgshift, c), attr_bgcol(bgshift, c), src, ypos, xpos, vc->vc_font.height, vc->vc_font.width); -+} -+ -+void splashcopy(u8 *dst, u8 *src, int height, int width, int dstbytes, int srcbytes) -+{ -+ int i; -+ -+ while (height-- > 0) { -+ u32 *p = (u32 *)dst; -+ u32 *q = (u32 *)src; -+ for (i=0; i < width/4; i++) { -+ fb_writel(*q++,p++); -+ fb_writel(*q++,p++); -+ } -+ if (width & 2) -+ fb_writel(*q++,p++); -+ if (width & 1) -+ fb_writew(*(u16*)q,(u16*)p); -+ dst += dstbytes; -+ src += srcbytes; -+ } -+} -+ -+static void splashset(u8 *dst, int height, int width, int dstbytes, u32 bgx) { -+ int i; -+ -+ bgx |= bgx << 16; -+ while (height-- > 0) { -+ u32 *p = (u32 *)dst; -+ for (i=0; i < width/4; i++) { -+ fb_writel(bgx,p++); -+ fb_writel(bgx,p++); -+ } -+ if (width & 2) -+ fb_writel(bgx,p++); -+ if (width & 1) -+ fb_writew(bgx,(u16*)p); -+ dst += dstbytes; -+ } -+} -+ -+static void splashfill(struct fb_info *info, int sy, int sx, int height, int width) { -+ splashcopy((u8 *)(info->screen_base + sy * info->fix.line_length + sx * 2), (u8 *)(info->splash_pic + sy * info->splash_bytes + sx * 2), height, width, info->fix.line_length, info->splash_bytes); -+} -+ -+void splash_clear(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int sy, -+ int sx, int height, int width) -+{ -+ int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; -+ int bg_color = attr_bgcol_ec(bgshift, vc, info); -+ int transparent = sd->splash_color == bg_color; -+ u32 bgx; -+ u8 *dst; -+ -+ if (console_blanked) -+ return; -+ sy = sy * vc->vc_font.height + sd->splash_text_yo; -+ sx = sx * vc->vc_font.width + sd->splash_text_xo; -+ height *= vc->vc_font.height; -+ width *= vc->vc_font.width; -+ if (transparent) { -+ splashfill(info, sy, sx, height, width); -+ return; -+ } -+ dst = (u8 *)(info->screen_base + sy * info->fix.line_length + sx * 2); -+ bgx = ((u32 *)info->pseudo_palette)[bg_color]; -+ splashset(dst, height, width, info->fix.line_length, bgx); -+} -+ -+void splash_bmove(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int sy, -+ int sx, int dy, int dx, int height, int width) -+{ -+ struct fb_copyarea area; -+ -+ if (console_blanked) -+ return; -+ area.sx = sx * vc->vc_font.width; -+ area.sy = sy * vc->vc_font.height; -+ area.dx = dx * vc->vc_font.width; -+ area.dy = dy * vc->vc_font.height; -+ area.sx += sd->splash_text_xo; -+ area.sy += sd->splash_text_yo; -+ area.dx += sd->splash_text_xo; -+ area.dy += sd->splash_text_yo; -+ area.height = height * vc->vc_font.height; -+ area.width = width * vc->vc_font.width; -+ -+ info->fbops->fb_copyarea(info, &area); -+} -+ -+void splash_clear_margins(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, -+ int bottom_only) -+{ -+ unsigned int tw = vc->vc_cols*vc->vc_font.width; -+ unsigned int th = vc->vc_rows*vc->vc_font.height; -+ -+ if (console_blanked) -+ return; -+ if (!bottom_only) { -+ /* top margin */ -+ splashfill(info, 0, 0, sd->splash_text_yo, info->var.xres); -+ /* left margin */ -+ splashfill(info, sd->splash_text_yo, 0, th, sd->splash_text_xo); -+ /* right margin */ -+ splashfill(info, sd->splash_text_yo, sd->splash_text_xo + tw, th, info->var.xres - sd->splash_text_xo - tw); -+ -+ } -+ splashfill(info, sd->splash_text_yo + th, 0, info->var.yres - sd->splash_text_yo - th, info->var.xres); -+} -+ -+int splash_cursor(struct 
splash_data *sd, struct fb_info *info, struct fb_cursor *cursor) -+{ -+ int i; -+ unsigned int dsize, s_pitch; -+ -+ if (info->state != FBINFO_STATE_RUNNING) -+ return 0; -+ -+ s_pitch = (cursor->image.width + 7) >> 3; -+ dsize = s_pitch * cursor->image.height; -+ if (cursor->enable) { -+ switch (cursor->rop) { -+ case ROP_XOR: -+ for (i = 0; i < dsize; i++) -+ info->fb_cursordata[i] = cursor->image.data[i] ^ cursor->mask[i]; -+ break; -+ case ROP_COPY: -+ default: -+ for (i = 0; i < dsize; i++) -+ info->fb_cursordata[i] = cursor->image.data[i] & cursor->mask[i]; -+ break; -+ } -+ } else if (info->fb_cursordata != cursor->image.data) -+ memcpy(info->fb_cursordata, cursor->image.data, dsize); -+ cursor->image.data = info->fb_cursordata; -+ splash_renderc(sd, info, cursor->image.fg_color, cursor->image.bg_color, (u8 *)info->fb_cursordata, cursor->image.dy + sd->splash_text_yo, cursor->image.dx + sd->splash_text_xo, cursor->image.height, cursor->image.width); -+ return 0; -+} -+ -+void splash_bmove_redraw(struct splash_data *sd, struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) -+{ -+ unsigned short *d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * y + dx * 2); -+ unsigned short *s = d + (dx - sx); -+ unsigned short *start = d; -+ unsigned short *ls = d; -+ unsigned short *le = d + width; -+ unsigned short c; -+ int x = dx; -+ unsigned short attr = 1; -+ -+ if (console_blanked) -+ return; -+ do { -+ c = scr_readw(d); -+ if (attr != (c & 0xff00)) { -+ attr = c & 0xff00; -+ if (d > start) { -+ splash_putcs(sd, vc, info, start, d - start, y, x); -+ x += d - start; -+ start = d; -+ } -+ } -+ if (s >= ls && s < le && c == scr_readw(s)) { -+ if (d > start) { -+ splash_putcs(sd, vc, info, start, d - start, y, x); -+ x += d - start + 1; -+ start = d + 1; -+ } else { -+ x++; -+ start++; -+ } -+ } -+ s++; -+ d++; -+ } while (d < le); -+ if (d > start) -+ splash_putcs(sd, vc, info, start, d - start, y, x); -+} -+ -+void splash_blank(struct 
splash_data *sd, struct vc_data *vc, struct fb_info *info, int blank) -+{ -+ if (blank) { -+ if (info->silent_screen_base) -+ splashset((u8 *)info->silent_screen_base, info->var.yres, info->var.xres, info->fix.line_length, 0); -+ splashset((u8 *)info->screen_base, info->var.yres, info->var.xres, info->fix.line_length, 0); -+ } else { -+ if (info->silent_screen_base) -+ splash_prepare(vc, info); -+ splash_clear_margins(vc->vc_splash_data, vc, info, 0); -+ /* no longer needed, done in fbcon_blank */ -+ /* update_screen(vc->vc_num); */ -+ } -+} -+ ---- a/drivers/video/console/bitblit.c -+++ b/drivers/video/console/bitblit.c -@@ -18,6 +18,9 @@ - #include - #include - #include "fbcon.h" -+#ifdef CONFIG_BOOTSPLASH -+#include "../bootsplash/bootsplash.h" -+#endif - - /* - * Accelerated handlers. -@@ -48,6 +51,13 @@ static void bit_bmove(struct vc_data *vc - { - struct fb_copyarea area; - -+#ifdef CONFIG_BOOTSPLASH -+ if (info->splash_data) { -+ splash_bmove(info->splash_data, vc, info, -+ sy, sx, dy, dx, height, width); -+ return; -+ } -+#endif - area.sx = sx * vc->vc_font.width; - area.sy = sy * vc->vc_font.height; - area.dx = dx * vc->vc_font.width; -@@ -64,6 +74,13 @@ static void bit_clear(struct vc_data *vc - int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; - struct fb_fillrect region; - -+#ifdef CONFIG_BOOTSPLASH -+ if (info->splash_data) { -+ splash_clear(info->splash_data, vc, info, -+ sy, sx, height, width); -+ return; -+ } -+#endif - region.color = attr_bgcol_ec(bgshift, vc, info); - region.dx = sx * vc->vc_font.width; - region.dy = sy * vc->vc_font.height; -@@ -161,6 +178,13 @@ static void bit_putcs(struct vc_data *vc - image.height = vc->vc_font.height; - image.depth = 1; - -+#ifdef CONFIG_BOOTSPLASH -+ if (info->splash_data) { -+ splash_putcs(info->splash_data, vc, info, s, count, yy, xx); -+ return; -+ } -+#endif -+ - if (attribute) { - buf = kmalloc(cellsize, GFP_KERNEL); - if (!buf) -@@ -214,6 +238,13 @@ static void bit_clear_margins(struct vc_ - unsigned int bs = info->var.yres - bh; - struct fb_fillrect region; - -+#ifdef CONFIG_BOOTSPLASH -+ if (info->splash_data) { -+ splash_clear_margins(info->splash_data, vc, info, bottom_only); -+ return; -+ } -+#endif -+ - region.color = attr_bgcol_ec(bgshift, vc, info); - region.rop = ROP_COPY; - -@@ -380,6 +411,14 @@ static void bit_cursor(struct vc_data *v - cursor.image.depth = 1; - cursor.rop = ROP_XOR; - -+#ifdef CONFIG_BOOTSPLASH -+ if (info->splash_data) { -+ splash_cursor(info->splash_data, info, &cursor); -+ ops->cursor_reset = 0; -+ return; -+ } -+#endif -+ - if (info->fbops->fb_cursor) - err = info->fbops->fb_cursor(info, &cursor); - ---- a/drivers/video/console/fbcon.c -+++ b/drivers/video/console/fbcon.c -@@ -80,6 +80,9 @@ - #include - - #include "fbcon.h" -+#ifdef CONFIG_BOOTSPLASH -+#include "../bootsplash/bootsplash.h" -+#endif - - #ifdef FBCONDEBUG - # define DPRINTK(fmt, args...) 
printk(KERN_DEBUG "%s: " fmt, __func__ , ## args) -@@ -95,7 +98,11 @@ enum { - - static struct display fb_display[MAX_NR_CONSOLES]; - -+#ifdef CONFIG_BOOTSPLASH -+signed char con2fb_map[MAX_NR_CONSOLES]; -+#else - static signed char con2fb_map[MAX_NR_CONSOLES]; -+#endif - static signed char con2fb_map_boot[MAX_NR_CONSOLES]; - - static int logo_lines; -@@ -538,6 +545,10 @@ static int fbcon_takeover(int show_logo) - for (i = first_fb_vc; i <= last_fb_vc; i++) - con2fb_map[i] = info_idx; - -+#ifdef CONFIG_BOOTSPLASH -+ splash_init(); -+#endif -+ - err = take_over_console(&fb_con, first_fb_vc, last_fb_vc, - fbcon_is_default); - -@@ -1101,6 +1112,16 @@ static void fbcon_init(struct vc_data *v - new_cols /= vc->vc_font.width; - new_rows /= vc->vc_font.height; - -+#ifdef CONFIG_BOOTSPLASH -+ if (vc->vc_splash_data && vc->vc_splash_data->splash_state) { -+ new_cols = vc->vc_splash_data->splash_text_wi / vc->vc_font.width; -+ new_rows = vc->vc_splash_data->splash_text_he / vc->vc_font.height; -+ logo = 0; -+ con_remap_def_color(vc, vc->vc_splash_data->splash_color << 4 | vc->vc_splash_data->splash_fg_color); -+ } -+#endif -+ -+ - /* - * We must always set the mode. 
The mode of the previous console - * driver could be in the same resolution but we are using different -@@ -1802,6 +1823,10 @@ static int fbcon_scroll(struct vc_data * - fbcon_softback_note(vc, t, count); - if (logo_shown >= 0) - goto redraw_up; -+#ifdef CONFIG_BOOTSPLASH -+ if (info->splash_data) -+ goto redraw_up; -+#endif - switch (p->scrollmode) { - case SCROLL_MOVE: - fbcon_redraw_blit(vc, info, p, t, b - t - count, -@@ -1893,6 +1918,10 @@ static int fbcon_scroll(struct vc_data * - count = vc->vc_rows; - if (logo_shown >= 0) - goto redraw_down; -+#ifdef CONFIG_BOOTSPLASH -+ if (info->splash_data) -+ goto redraw_down; -+#endif - switch (p->scrollmode) { - case SCROLL_MOVE: - fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, -@@ -2041,6 +2070,14 @@ static void fbcon_bmove_rec(struct vc_da - } - return; - } -+ -+#ifdef CONFIG_BOOTSPLASH -+ if (info->splash_data && sy == dy && height == 1) { -+ /* must use slower redraw bmove to keep background pic intact */ -+ splash_bmove_redraw(info->splash_data, vc, info, sy, sx, dx, width); -+ return; -+ } -+#endif - ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, - height, width); - } -@@ -2149,6 +2186,10 @@ static int fbcon_switch(struct vc_data * - info = registered_fb[con2fb_map[vc->vc_num]]; - ops = info->fbcon_par; - -+#ifdef CONFIG_BOOTSPLASH -+ splash_prepare(vc, info); -+#endif -+ - if (softback_top) { - if (softback_lines) - fbcon_set_origin(vc); -@@ -2282,6 +2323,12 @@ static void fbcon_generic_blank(struct v - { - struct fb_event event; - -+#ifdef CONFIG_BOOTSPLASH -+ if (info->splash_data) { -+ splash_blank(info->splash_data, vc, info, blank); -+ return; -+ } -+#endif - if (blank) { - unsigned short charmask = vc->vc_hi_font_mask ? 
- 0x1ff : 0xff; -@@ -2507,6 +2554,12 @@ static int fbcon_do_set_font(struct vc_d - - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); -+#ifdef CONFIG_BOOTSPLASH -+ if (info->splash_data) { -+ cols = info->splash_data->splash_text_wi; -+ rows = info->splash_data->splash_text_he; -+ } -+#endif - cols /= w; - rows /= h; - vc_resize(vc, cols, rows); ---- a/drivers/video/console/fbcon.h -+++ b/drivers/video/console/fbcon.h -@@ -25,6 +25,34 @@ - * low-level frame buffer device - */ - -+#ifdef CONFIG_BOOTSPLASH -+struct splash_data { -+ int splash_state; /* show splash? */ -+ int splash_color; /* transparent color */ -+ int splash_fg_color; /* foreground color */ -+ int splash_width; /* width of image */ -+ int splash_height; /* height of image */ -+ int splash_text_xo; /* text area origin */ -+ int splash_text_yo; -+ int splash_text_wi; /* text area size */ -+ int splash_text_he; -+ int splash_showtext; /* silent/verbose mode */ -+ int splash_boxcount; -+ int splash_percent; -+ int splash_overpaintok; /* is it ok to overpaint boxes */ -+ int splash_palcnt; -+ char *oldscreen_base; /* pointer to top of virtual screen */ -+ unsigned char *splash_boxes; -+ unsigned char *splash_jpeg; /* jpeg */ -+ unsigned char *splash_palette; /* palette for 8-bit */ -+ -+ int splash_dosilent; /* show silent jpeg */ -+ unsigned char *splash_silentjpeg; -+ unsigned char *splash_sboxes; -+ int splash_sboxcount; -+}; -+#endif -+ - struct display { - /* Filled in by the low-level console driver */ - const u_char *fontdata; ---- a/drivers/video/vesafb.c -+++ b/drivers/video/vesafb.c -@@ -181,7 +181,10 @@ static void vesafb_destroy(struct fb_inf - framebuffer_release(info); - } - --static struct fb_ops vesafb_ops = { -+#ifndef CONFIG_BOOTSPLASH -+static -+#endif -+struct fb_ops vesafb_ops = { - .owner = THIS_MODULE, - .fb_destroy = vesafb_destroy, - .fb_setcolreg = vesafb_setcolreg, -@@ -266,6 +269,9 @@ static int 
__init vesafb_probe(struct pl - * option to simply use size_total as that - * wastes plenty of kernel address space. */ - size_remap = size_vmode * 2; -+#ifdef CONFIG_BOOTSPLASH -+ size_remap *= 2; /* some more for the images */ -+#endif - if (vram_remap) - size_remap = vram_remap * 1024 * 1024; - if (size_remap < size_vmode) ---- a/include/linux/console_struct.h -+++ b/include/linux/console_struct.h -@@ -106,6 +106,9 @@ struct vc_data { - struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ - unsigned long vc_uni_pagedir; - unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ -+#ifdef CONFIG_BOOTSPLASH -+ struct splash_data *vc_splash_data; -+#endif - bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */ - /* additional information is in vt_kern.h */ - }; ---- a/include/linux/fb.h -+++ b/include/linux/fb.h -@@ -875,6 +875,14 @@ struct fb_info { - void *fbcon_par; /* fbcon use-only private area */ - /* From here on everything is device dependent */ - void *par; -+#ifdef CONFIG_BOOTSPLASH -+ struct splash_data *splash_data; -+ unsigned char *splash_pic; -+ int splash_pic_size; -+ int splash_bytes; -+ char *silent_screen_base; /* real screen base */ -+ char fb_cursordata[64]; -+#endif - /* we need the PCI or similiar aperture base/size not - smem_start/size as smem_start may just be an object - allocated inside the aperture so may not actually overlap */ ---- a/kernel/panic.c -+++ b/kernel/panic.c -@@ -110,7 +110,12 @@ NORET_TYPE void panic(const char * fmt, - * We can't use the "normal" timers since we just panicked. 
- */ - printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout); -- -+#ifdef CONFIG_BOOTSPLASH -+ { -+ extern int splash_verbose(void); -+ (void)splash_verbose(); -+ } -+#endif - for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { - touch_nmi_watchdog(); - if (i >= i_next) { -@@ -143,6 +148,12 @@ NORET_TYPE void panic(const char * fmt, - } - #endif - local_irq_enable(); -+#ifdef CONFIG_BOOTSPLASH -+ { -+ extern int splash_verbose(void); -+ (void)splash_verbose(); -+ } -+#endif - for (i = 0; ; i += PANIC_TIMER_STEP) { - touch_softlockup_watchdog(); - if (i >= i_next) { diff --git a/patches.suse/bootsplash-console-fix b/patches.suse/bootsplash-console-fix deleted file mode 100644 index b7becf2..0000000 --- a/patches.suse/bootsplash-console-fix +++ /dev/null @@ -1,65 +0,0 @@ -From: Takashi Iwai -Subject: Fix rendering on linux console with bootsplash -Patch-mainline: Never -References: bnc#595657,bnc#594209 - -Fix a bug introduced by Cleanup-and-make-boot-splash-work-with-KMS.patch. -The position was wrongly calculated in splash_bmove_redraw(). - -Also, a few clean-ups of render codes. 
- -Signed-off-by: Takashi Iwai - ---- - drivers/video/bootsplash/render.c | 19 +++++-------------- - 1 file changed, 5 insertions(+), 14 deletions(-) - ---- a/drivers/video/bootsplash/render.c -+++ b/drivers/video/bootsplash/render.c -@@ -210,11 +210,7 @@ void splashcopy(u8 *dst, u8 *src, int he - union pt p, q; - p.ul = (u32 *)dst; - q.ul = (u32 *)src; -- for (i=0; i < width/8; i++) { -- fb_writel(*q.ul++,p.ul++); -- fb_writel(*q.ul++,p.ul++); -- } -- if (width & 4) -+ for (i = 0; i < width / 4; i++) - fb_writel(*q.ul++,p.ul++); - if (width & 2) - fb_writew(*q.us++,p.us++); -@@ -234,12 +230,8 @@ static void splashset(u8 *dst, int heigh - while (height-- > 0) { - union pt p; - p.ul = (u32 *)dst; -- if (octpp != 3) { -- for (i=0; i < width/8; i++) { -- fb_writel(bgx,p.ul++); -- fb_writel(bgx,p.ul++); -- } -- if (width & 4) -+ if (!(octpp & 1)) { -+ for (i = 0; i < width / 4; i++) - fb_writel(bgx,p.ul++); - if (width & 2) - fb_writew(bgx,p.us++); -@@ -248,7 +240,7 @@ static void splashset(u8 *dst, int heigh - dst += dstbytes; - } else { /* slow! 
*/ - for (i=0; i < width; i++) -- fb_writeb((bgx >> ((i & 0x3) * 8)) && 0xff,p.ub++); -+ fb_writeb((bgx >> ((i % 3) * 8)) && 0xff,p.ub++); - } - } - } -@@ -398,8 +390,7 @@ int splash_cursor(struct fb_info *info, - void splash_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) - { - struct splash_data *sd; -- int octpp = (info->var.bits_per_pixel + 1) >> 3; -- unsigned short *d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * y + dx * octpp); -+ unsigned short *d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * y + dx * 2); - unsigned short *s = d + (dx - sx); - unsigned short *start = d; - unsigned short *ls = d; diff --git a/patches.suse/bootsplash-keep-multiple-data b/patches.suse/bootsplash-keep-multiple-data deleted file mode 100644 index 30f8d6a..0000000 --- a/patches.suse/bootsplash-keep-multiple-data +++ /dev/null @@ -1,321 +0,0 @@ -From: Takashi Iwai -Subject: Keep multiple splash screens for KMS -Patch-mainline: Never -References: bnc#570082 - -Keep multiple splash screens for reloading splash again when KMS is -kicked off. 
- -Signed-off-by: Takashi Iwai - ---- - drivers/video/bootsplash/bootsplash.c | 148 ++++++++++++++++++++++------------ - drivers/video/bootsplash/decode-jpg.c | 7 - - drivers/video/bootsplash/decode-jpg.h | 2 - drivers/video/console/fbcon.c | 11 ++ - drivers/video/console/fbcon.h | 1 - 5 files changed, 115 insertions(+), 54 deletions(-) - ---- a/drivers/video/bootsplash/bootsplash.c -+++ b/drivers/video/bootsplash/bootsplash.c -@@ -331,12 +331,14 @@ static int splash_check_jpeg(unsigned ch - - static void splash_free(struct vc_data *vc, struct fb_info *info) - { -+ struct splash_data *sd; -+ struct splash_data *next; - SPLASH_DEBUG(); -- if (!vc->vc_splash_data) -- return; -- if (vc->vc_splash_data->splash_silentjpeg) -- vfree(vc->vc_splash_data->splash_sboxes); -- vfree(vc->vc_splash_data); -+ for (sd = vc->vc_splash_data; sd; sd = next) { -+ next = sd->next; -+ vfree(sd->splash_sboxes); -+ vfree(sd); -+ } - vc->vc_splash_data = 0; - info->splash_data = 0; - } -@@ -418,6 +420,32 @@ static inline int splash_geti(unsigned c - pos[off] | pos[off + 1] << 8 | pos[off + 2] << 16 | pos[off + 3] << 24; - } - -+/* move the given splash_data to the current one */ -+static void splash_pivot_current(struct vc_data *vc, struct splash_data *new) -+{ -+ struct splash_data *sd; -+ int state, percent, silent; -+ -+ sd = vc->vc_splash_data; -+ if (!sd || sd == new) -+ return; -+ state = sd->splash_state; -+ percent = sd->splash_percent; -+ silent = sd->splash_dosilent; -+ for (; sd->next; sd = sd->next) { -+ if (sd->next == new) { -+ sd->next = new->next; -+ new->next = vc->vc_splash_data; -+ vc->vc_splash_data = new; -+ /* copy the current states */ -+ new->splash_state = state; -+ new->splash_percent = percent; -+ new->splash_dosilent = silent; -+ return; -+ } -+ } -+} -+ - static int splash_getraw(unsigned char *start, unsigned char *end, int *update) - { - unsigned char *ndata; -@@ -434,6 +462,8 @@ static int splash_getraw(unsigned char * - struct vc_data *vc; - struct fb_info 
*info; - struct splash_data *sd; -+ struct splash_data *splash_found = NULL; -+ int unit_found = -1; - int oldpercent, oldsilent; - - if (update) -@@ -442,6 +472,8 @@ static int splash_getraw(unsigned char * - if (!update || start[7] < '2' || start[7] > '3' || splash_geti(start, 12) != (int)0xffffffff) - printk(KERN_INFO "bootsplash %s: looking for picture...\n", SPLASH_VERSION); - -+ oldpercent = -1; -+ oldsilent = -1; - for (ndata = start; ndata < end; ndata++) { - if (ndata[0] != 'B' || ndata[1] != 'O' || ndata[2] != 'O' || ndata[3] != 'T') - continue; -@@ -522,12 +554,6 @@ static int splash_getraw(unsigned char * - printk(KERN_ERR "bootsplash: ...found, but truncated!\n"); - return -1; - } -- if (!jpeg_check_size(ndata + len + boxcnt * 12 + palcnt, width, height)) { -- ndata += len + splash_size - 1; -- continue; -- } -- if (splash_check_jpeg(ndata + len + boxcnt * 12 + palcnt, width, height, info->var.bits_per_pixel)) -- return -1; - silentsize = splash_geti(ndata, SPLASH_OFF_SSIZE); - if (silentsize) - printk(KERN_INFO "bootsplash: silentjpeg size %d bytes\n", silentsize); -@@ -543,32 +569,21 @@ static int splash_getraw(unsigned char * - silentsize = 0; - } - sboxcnt = splash_gets(ndata, SPLASH_OFF_SBOXCNT); -- if (silentsize) { -- unsigned char *simage = ndata + len + splash_size + 12 * sboxcnt; -- if (!jpeg_check_size(simage, width, height) || -- splash_check_jpeg(simage, width, height, info->var.bits_per_pixel)) { -- printk(KERN_WARNING "bootsplash: error in silent jpeg.\n"); -- silentsize = 0; -- } -- } -- oldpercent = -1; -- oldsilent = -1; - if (vc->vc_splash_data) { - oldpercent = vc->vc_splash_data->splash_percent; - oldsilent = vc->vc_splash_data->splash_dosilent; -- splash_free(vc, info); - } -- vc->vc_splash_data = sd = vmalloc(sizeof(*sd) + splash_size + (version < 3 ? 2 * 12 : 0)); -+ sd = vmalloc(sizeof(*sd) + splash_size + (version < 3 ? 
2 * 12 : 0)); - if (!sd) - break; -- sd->splash_silentjpeg = 0; -- sd->splash_sboxes = 0; -- sd->splash_sboxcount = 0; -+ memset(sd, 0, sizeof(*sd)); -+ jpeg_get_size(ndata + len + boxcnt * 12 + palcnt, -+ &sd->splash_width, &sd->splash_height); - if (silentsize) { - sd->splash_silentjpeg = vmalloc(silentsize); - if (sd->splash_silentjpeg) { - memcpy(sd->splash_silentjpeg, ndata + len + splash_size, silentsize); -- sd->splash_sboxes = vc->vc_splash_data->splash_silentjpeg; -+ sd->splash_sboxes = sd->splash_silentjpeg; - sd->splash_silentjpeg += 12 * sboxcnt; - sd->splash_sboxcount = sboxcnt; - } -@@ -591,22 +606,6 @@ static int splash_getraw(unsigned char * - sd->splash_fg_color = (splash_default >> 4) & 0x0f; - sd->splash_state = splash_default & 1; - } -- if (sd->splash_text_xo + sd->splash_text_wi > width || sd->splash_text_yo + sd->splash_text_he > height) { -- splash_free(vc, info); -- printk(KERN_ERR "bootsplash: found, but has oversized text area!\n"); -- return -1; -- } -- if (!vc_cons[unit].d || info->fbops->fb_imageblit != cfb_imageblit) { -- splash_free(vc, info); -- printk(KERN_ERR "bootsplash: found, but framebuffer can't handle it!\n"); -- return -1; -- } -- printk(KERN_INFO "bootsplash: ...found (%dx%d, %d bytes, v%d).\n", width, height, splash_size, version); -- if (version == 1) { -- printk(KERN_WARNING "bootsplash: Using deprecated v1 header. Updating your splash utility recommended.\n"); -- printk(KERN_INFO "bootsplash: Find the latest version at http://www.bootsplash.org/\n"); -- } -- - /* fake penguin box for older formats */ - if (version == 1) - boxcnt = splash_mkpenguin(sd, sd->splash_text_xo + 10, sd->splash_text_yo + 10, sd->splash_text_wi - 20, sd->splash_text_he - 20, 0xf0, 0xf0, 0xf0); -@@ -620,8 +619,38 @@ static int splash_getraw(unsigned char * - sd->splash_jpeg = sd->splash_palette + palcnt; - sd->splash_palcnt = palcnt / 3; - sd->splash_dosilent = sd->splash_silentjpeg != 0 ? (oldsilent == -1 ? 
1 : oldsilent) : 0; -- return unit; -+ -+ sd->next = vc->vc_splash_data; -+ vc->vc_splash_data = sd; -+ -+ if (sd->splash_width != width || sd->splash_height != height) { -+ ndata += len + splash_size - 1; -+ continue; -+ } -+ if (splash_check_jpeg(ndata + len + boxcnt * 12 + palcnt, width, height, info->var.bits_per_pixel)) { -+ ndata += len + splash_size - 1; -+ continue; -+ } -+ if (!vc_cons[unit].d || info->fbops->fb_imageblit != cfb_imageblit) { -+ splash_free(vc, info); -+ printk(KERN_ERR "bootsplash: found, but framebuffer can't handle it!\n"); -+ return -1; -+ } -+ printk(KERN_INFO "bootsplash: ...found (%dx%d, %d bytes, v%d).\n", width, height, splash_size, version); -+ if (version == 1) { -+ printk(KERN_WARNING "bootsplash: Using deprecated v1 header. Updating your splash utility recommended.\n"); -+ printk(KERN_INFO "bootsplash: Find the latest version at http://www.bootsplash.org/\n"); -+ } -+ -+ splash_found = sd; -+ unit_found = unit; -+ } -+ -+ if (splash_found) { -+ splash_pivot_current(vc, splash_found); -+ return unit_found; - } -+ - printk(KERN_ERR "bootsplash: ...no good signature found.\n"); - return -1; - } -@@ -696,6 +725,20 @@ static void splash_off(struct fb_info *i - info->splash_pic_size = 0; - } - -+/* look for the splash with the matching size and set it as the current */ -+static int splash_look_for_jpeg(struct vc_data *vc, int width, int height) -+{ -+ struct splash_data *sd; -+ -+ for (sd = vc->vc_splash_data; sd; sd = sd->next) { -+ if (sd->splash_width == width && sd->splash_height == height) { -+ splash_pivot_current(vc, sd); -+ return 0; -+ } -+ } -+ return -1; -+} -+ - int splash_prepare(struct vc_data *vc, struct fb_info *info) - { - int err; -@@ -719,6 +762,12 @@ int splash_prepare(struct vc_data *vc, s - splash_off(info); - return -2; - } -+ if (splash_look_for_jpeg(vc, width, height) < 0) { -+ printk(KERN_INFO "bootsplash: no matching splash %dx%d\n", -+ width, height); -+ splash_off(info); -+ return -2; -+ } - - sbytes = 
((width + 15) & ~15) * octpp; - size = sbytes * ((height + 15) & ~15); -@@ -1007,11 +1056,14 @@ static int splash_write_proc(struct file - if (!strncmp(buffer,"freesilent\n",11)) { - SPLASH_DEBUG( " freesilent"); - if (vc->vc_splash_data && vc->vc_splash_data->splash_silentjpeg) { -+ struct splash_data *sd; - printk(KERN_INFO "bootsplash: freeing silent jpeg\n"); -- vc->vc_splash_data->splash_silentjpeg = 0; -- vfree(vc->vc_splash_data->splash_sboxes); -- vc->vc_splash_data->splash_sboxes = 0; -- vc->vc_splash_data->splash_sboxcount = 0; -+ for (sd = vc->vc_splash_data; sd; sd = sd->next) { -+ sd->splash_silentjpeg = 0; -+ vfree(sd->splash_sboxes); -+ sd->splash_sboxes = 0; -+ sd->splash_sboxcount = 0; -+ } - if (vc->vc_splash_data->splash_dosilent) { - splash_status(vc); - } ---- a/drivers/video/bootsplash/decode-jpg.c -+++ b/drivers/video/bootsplash/decode-jpg.c -@@ -240,7 +240,7 @@ static int dec_checkmarker(void) - return 0; - } - --int jpeg_check_size(unsigned char *buf, int width, int height) -+void jpeg_get_size(unsigned char *buf, int *width, int *height) - { - datap = buf; - getbyte(); -@@ -248,9 +248,8 @@ int jpeg_check_size(unsigned char *buf, - readtables(M_SOF0); - getword(); - getbyte(); -- if (height != getword() || width != getword()) -- return 0; -- return 1; -+ *height = getword(); -+ *width = getword(); - } - - int jpeg_decode(buf, pic, width, height, depth, decdata) ---- a/drivers/video/bootsplash/decode-jpg.h -+++ b/drivers/video/bootsplash/decode-jpg.h -@@ -30,6 +30,6 @@ struct jpeg_decdata { - }; - - extern int jpeg_decode(unsigned char *, unsigned char *, int, int, int, struct jpeg_decdata *); --extern int jpeg_check_size(unsigned char *, int, int); -+extern void jpeg_get_size(unsigned char *, int *, int *); - - #endif ---- a/drivers/video/console/fbcon.c -+++ b/drivers/video/console/fbcon.c -@@ -2187,7 +2187,16 @@ static int fbcon_switch(struct vc_data * - ops = info->fbcon_par; - - #ifdef CONFIG_BOOTSPLASH -- splash_prepare(vc, info); -+ { 
-+ struct splash_data *prev_sd = vc->vc_splash_data; -+ splash_prepare(vc, info); -+ if (vc->vc_splash_data && vc->vc_splash_data->splash_state && -+ vc->vc_splash_data != prev_sd) { -+ vc_resize(vc, vc->vc_splash_data->splash_text_wi / vc->vc_font.width, -+ vc->vc_splash_data->splash_text_he / vc->vc_font.height); -+ con_remap_def_color(vc, vc->vc_splash_data->splash_color << 4 | vc->vc_splash_data->splash_fg_color); -+ } -+ } - #endif - - if (softback_top) { ---- a/drivers/video/console/fbcon.h -+++ b/drivers/video/console/fbcon.h -@@ -50,6 +50,7 @@ struct splash_data { - unsigned char *splash_silentjpeg; - unsigned char *splash_sboxes; - int splash_sboxcount; -+ struct splash_data *next; - }; - #endif - diff --git a/patches.suse/bootsplash-scaler b/patches.suse/bootsplash-scaler deleted file mode 100644 index 9d249f0..0000000 --- a/patches.suse/bootsplash-scaler +++ /dev/null @@ -1,1282 +0,0 @@ -From: Egbert Eich -Subject: Add bootsplash image scaler -Patch-mainline: Never -References: bnc#570082 - -The initrd most often contains a single fixed size boot image which is of the -size of the VESA framebuffer used for boot. If the size of the framebuffer -changes when KMS is initialized for example the boot splash is turned off. -Takashi Iwai has provided a patch which allows to add multiple images to initrd -so that the kernel can select the appropriate size. This is only feasible for -mobile devices with a build in panel of a fixed resolution in which case one -would have to ship two images at the most: the one of the VESA resolution at -boot time and the one of the panel as used by KMS. -The attached patch adds a boot splash scaler which allows the down scaling of -bootsplash image which is bigger than the screen size. This way by supplying a -single image large enough to accomodate the largest screens possible all -resolutions can be derived from it. 
- -Acked-by: Michal Marek - ---- - drivers/video/bootsplash/bootsplash.c | 855 +++++++++++++++++++++++++++++++--- - drivers/video/bootsplash/decode-jpg.c | 4 - drivers/video/bootsplash/render.c | 16 - drivers/video/console/fbcon.h | 11 - include/linux/fb.h | 3 - 5 files changed, 807 insertions(+), 82 deletions(-) - ---- a/drivers/video/bootsplash/bootsplash.c -+++ b/drivers/video/bootsplash/bootsplash.c -@@ -6,6 +6,7 @@ - * Stefan Reinauer, , - * Steffen Winterfeldt, , - * Michael Schroeder -+ * 2009, 2010 Egbert Eich - * - * Ideas & SuSE screen work by Ken Wimer, - * -@@ -55,7 +56,9 @@ - "wrong marker", - "no EOI", - "bad tables", -- "depth mismatch" -+ "depth mismatch", -+ "scale error", -+ "out of memory" - }; - - static struct jpeg_decdata *decdata = 0; /* private decoder data */ -@@ -64,7 +67,9 @@ - static int splash_usesilent = 0; /* shall we display the silentjpeg? */ - int splash_default = 0xf01; - --static int splash_check_jpeg(unsigned char *jpeg, int width, int height, int depth); -+static int jpeg_get(unsigned char *buf, unsigned char *pic, int width, int height, int depth, -+ struct jpeg_decdata *decdata); -+static int splash_look_for_jpeg(struct vc_data *vc, int width, int height); - - static int __init splash_setup(char *options) - { -@@ -120,7 +125,8 @@ - return 12; - } - --static void boxit(unsigned char *pic, int bytes, unsigned char *buf, int num, int percent, int overpaint, int octpp) -+static void boxit(unsigned char *pic, int bytes, unsigned char *buf, int num, -+ int percent, int xoff, int yoff, int overpaint, int octpp) - { - int x, y, p, doblend, r, g, b, a, add; - unsigned int i = 0; -@@ -245,7 +251,7 @@ - } - add = (xs & 1); - add ^= (add ^ y) & 1 ? 
1 : 3; /* 2x2 ordered dithering */ -- picp.ub = (pic + xs * octpp + y * bytes); -+ picp.ub = (pic + (xs + xoff) * octpp + (y + yoff) * bytes); - for (x = xs; x <= xe; x++) { - if (!(sti & 0x80000000)) { - sti <<= 1; -@@ -310,19 +316,172 @@ - } - } - -+static void box_offsets(unsigned char *buf, int num, -+ int screen_w, int screen_h, int pic_w, int pic_h, -+ int *x_off, int *y_off) -+{ -+ int a, doblend; -+ int x_min = pic_w, x_max = 0; -+ int y_min = pic_h, y_max = 0; -+ unsigned int i = 0; -+ unsigned short data1[4]; -+ unsigned char cols1[16]; -+ unsigned short data2[4]; -+ unsigned char cols2[16]; -+ unsigned char *bufend; -+ unsigned int stin, stinn, stixs, stixe, stiys, stiye; -+ int xs, xe, ys, ye; -+ -+ SPLASH_DEBUG(); -+ -+ if ((screen_w == pic_w && screen_h == pic_h) || num == 0) -+ *x_off = *y_off = 0; -+ -+ bufend = buf + num * 12; -+ stin = 1; -+ stinn = 0; -+ stixs = stixe = 0; -+ stiys = stiye = 0; -+ -+ while(buf < bufend) { -+ doblend = 0; -+ buf += boxextract(buf, data1, cols1, &doblend); -+ if (data1[0] == 32767 && data1[1] == 32767) { -+ /* box stipple */ -+ if (stinn == 32) -+ continue; -+ if (stinn == 0) { -+ stixs = data1[2]; -+ stixe = data1[3]; -+ stiys = stiye = 0; -+ } else if (stinn == 4) { -+ stiys = data1[2]; -+ stiye = data1[3]; -+ } -+ stin = stinn; -+ continue; -+ } -+ stinn = 0; -+ if (data1[0] > 32767) -+ buf += boxextract(buf, data2, cols2, &doblend); -+ if (data1[0] == 32767 && data1[1] == 32766) { -+ /* box copy */ -+ i = 12 * (short)data1[3]; -+ doblend = 0; -+ i += boxextract(buf + i, data1, cols1, &doblend); -+ if (data1[0] > 32767) -+ boxextract(buf + i, data2, cols2, &doblend); -+ } -+ if (data1[0] == 32767) -+ continue; -+ if (data1[2] > 32767) { -+ data1[2] = ~data1[2]; -+ } -+ if (data1[3] > 32767) { -+ data1[3] = ~data1[3]; -+ } -+ if (data1[0] > 32767) { -+ data1[0] = ~data1[0]; -+ for (i = 0; i < 4; i++) -+ data1[i] = (data1[i] * (65536 - 1) + data2[i] * 1) >> 16; -+ } -+ *(unsigned int *)cols2 = *(unsigned int 
*)cols1; -+ a = cols2[3]; -+ if (a == 0 && !doblend) -+ continue; -+ -+ if (stixs >= 32768) { -+ xs = (stixs ^ 65535) + data1[0]; -+ xe = stixe ? stixe + data1[0] : data1[2]; -+ } else if (stixe >= 32768) { -+ xs = stixs ? data1[2] - stixs : data1[0]; -+ xe = data1[2] - (stixe ^ 65535); -+ } else { -+ xs = stixs; -+ xe = stixe ? stixe : data1[2]; -+ } -+ if (stiys >= 32768) { -+ ys = (stiys ^ 65535) + data1[1]; -+ ye = stiye ? stiye + data1[1] : data1[3]; -+ } else if (stiye >= 32768) { -+ ys = stiys ? data1[3] - stiys : data1[1]; -+ ye = data1[3] - (stiye ^ 65535); -+ } else { -+ ys = stiys; -+ ye = stiye ? stiye : data1[3]; -+ } -+ if (xs < data1[0]) -+ xs = data1[0]; -+ if (xe > data1[2]) -+ xe = data1[2]; -+ if (ys < data1[1]) -+ ys = data1[1]; -+ if (ye > data1[3]) -+ ye = data1[3]; -+ -+ if (xs < x_min) -+ x_min = xs; -+ if (xe > x_max) -+ x_max = xe; -+ if (ys < y_min) -+ y_min = ys; -+ if (ye > y_max) -+ y_max = ye; -+ } -+ { -+ int x_center = (x_min + x_max) / 2; -+ int y_center = (y_min + y_max) / 2; -+ -+ if (screen_w == pic_w) -+ *x_off = 0; -+ else { -+ if (x_center < (pic_w + pic_w / 10) >> 1 && x_center > (pic_w - pic_w / 10) >> 1) -+ *x_off = (screen_w - pic_w) >> 1; -+ else { -+ int x = x_center * screen_w / pic_w; -+ *x_off = x - x_center; -+ if (x_min + x_off > 0) -+ *x_off = 0; -+ if (x_max + *x_off > screen_w) -+ *x_off = screen_w - pic_w; -+ } -+ } -+ if (screen_h == pic_h) -+ *y_off = 0; -+ else { -+ if (y_center < (pic_h + pic_h / 10) >> 1 && y_center > (pic_h - pic_h / 10) >> 1) -+ *y_off = (screen_h - pic_h) >> 1; -+ else { -+ int x = y_center * screen_h / pic_h; -+ *y_off = x - y_center; -+ if (y_min + y_off > 0) -+ *y_off = 0; -+ if (y_max + *x_off > screen_h) -+ *y_off = screen_h - pic_h; -+ } -+ } -+ } -+} -+ - static int splash_check_jpeg(unsigned char *jpeg, int width, int height, int depth) - { - int size, err; - unsigned char *mem; - -- size = ((width + 15) & ~15) * ((height + 15) & ~15) * (depth >> 3); -+ size = ((width + 15) & 
~15) * ((height + 15) & ~15) * ((depth + 1) >> 3); - mem = vmalloc(size); - if (!mem) { - printk(KERN_INFO "bootsplash: no memory for decoded picture.\n"); - return -1; - } -- if (!decdata) -- decdata = vmalloc(sizeof(*decdata)); -+ if (!decdata) { -+ decdata = vmalloc(sizeof(*decdata)); -+ if (!decdata) { -+ printk(KERN_INFO "bootsplash: not enough memory.\n"); -+ vfree(mem); -+ return -1; -+ } -+ } - if ((err = jpeg_decode(jpeg, mem, ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) - printk(KERN_INFO "bootsplash: error while decompressing picture: %s (%d)\n",jpg_errors[err - 1], err); - vfree(mem); -@@ -337,6 +496,7 @@ - for (sd = vc->vc_splash_data; sd; sd = next) { - next = sd->next; - vfree(sd->splash_sboxes); -+ vfree(sd->splash_pic); - vfree(sd); - } - vc->vc_splash_data = 0; -@@ -432,6 +592,11 @@ - state = sd->splash_state; - percent = sd->splash_percent; - silent = sd->splash_dosilent; -+ vfree(sd->splash_pic); -+ sd->splash_pic_size = 0; -+ sd->splash_pic = NULL; -+ sd->splash_text_wi = sd->splash_jpg_text_wi; -+ sd->splash_text_he = sd->splash_jpg_text_he; - for (; sd->next; sd = sd->next) { - if (sd->next == new) { - sd->next = new->next; -@@ -441,6 +606,17 @@ - new->splash_state = state; - new->splash_percent = percent; - new->splash_dosilent = silent; -+ new->splash_text_wi = new->splash_jpg_text_wi; -+ new->splash_text_he = new->splash_jpg_text_he; -+ -+ vfree(new->splash_pic); -+ new->splash_pic = NULL; -+ new->splash_pic_size = 0; -+ -+ new->splash_boxes_xoff = 0; -+ new->splash_boxes_yoff = 0; -+ new->splash_sboxes_xoff = 0; -+ new->splash_sboxes_yoff = 0; - return; - } - } -@@ -459,7 +635,7 @@ - int palcnt; - int i, len; - const int *offsets; -- struct vc_data *vc; -+ struct vc_data *vc = NULL; - struct fb_info *info; - struct splash_data *sd; - struct splash_data *splash_found = NULL; -@@ -489,7 +665,16 @@ - vc_allocate(unit); - } - vc = vc_cons[unit].d; -+ if (!vc) -+ continue; -+ - info = registered_fb[(int)con2fb_map[unit]]; -+ 
-+ if (info->fbops->fb_imageblit != cfb_imageblit) { -+ splash_free(vc, info); -+ printk(KERN_ERR "bootsplash: found, but framebuffer can't handle it!\n"); -+ return -1; -+ } - width = info->var.xres; - height = info->var.yres; - splash_size = splash_geti(ndata, SPLASH_OFF_SIZE); -@@ -579,6 +764,12 @@ - memset(sd, 0, sizeof(*sd)); - jpeg_get_size(ndata + len + boxcnt * 12 + palcnt, - &sd->splash_width, &sd->splash_height); -+ if (splash_check_jpeg(ndata + len + boxcnt * 12 + palcnt, -+ sd->splash_width, sd->splash_height, info->var.bits_per_pixel)) { -+ ndata += len + splash_size - 1; -+ vfree(sd); -+ continue; -+ } - if (silentsize) { - sd->splash_silentjpeg = vmalloc(silentsize); - if (sd->splash_silentjpeg) { -@@ -596,6 +787,8 @@ - sd->splash_text_yo = splash_gets(ndata, SPLASH_OFF_YO); - sd->splash_text_wi = splash_gets(ndata, SPLASH_OFF_WI); - sd->splash_text_he = splash_gets(ndata, SPLASH_OFF_HE); -+ sd->splash_pic = NULL; -+ sd->splash_pic_size = 0; - sd->splash_percent = oldpercent == -1 ? 
splash_gets(ndata, SPLASH_OFF_PERCENT) : oldpercent; - if (version == 1) { - sd->splash_text_xo *= 8; -@@ -606,6 +799,9 @@ - sd->splash_fg_color = (splash_default >> 4) & 0x0f; - sd->splash_state = splash_default & 1; - } -+ sd->splash_jpg_text_wi = sd->splash_text_wi; -+ sd->splash_jpg_text_he = sd->splash_text_he; -+ - /* fake penguin box for older formats */ - if (version == 1) - boxcnt = splash_mkpenguin(sd, sd->splash_text_xo + 10, sd->splash_text_yo + 10, sd->splash_text_wi - 20, sd->splash_text_he - 20, 0xf0, 0xf0, 0xf0); -@@ -627,15 +823,6 @@ - ndata += len + splash_size - 1; - continue; - } -- if (splash_check_jpeg(ndata + len + boxcnt * 12 + palcnt, width, height, info->var.bits_per_pixel)) { -- ndata += len + splash_size - 1; -- continue; -- } -- if (!vc_cons[unit].d || info->fbops->fb_imageblit != cfb_imageblit) { -- splash_free(vc, info); -- printk(KERN_ERR "bootsplash: found, but framebuffer can't handle it!\n"); -- return -1; -- } - printk(KERN_INFO "bootsplash: ...found (%dx%d, %d bytes, v%d).\n", width, height, splash_size, version); - if (version == 1) { - printk(KERN_WARNING "bootsplash: Using deprecated v1 header. 
Updating your splash utility recommended.\n"); -@@ -649,6 +836,16 @@ - if (splash_found) { - splash_pivot_current(vc, splash_found); - return unit_found; -+ } else { -+ vc = vc_cons[0].d; -+ if (vc) { -+ info = registered_fb[(int)con2fb_map[0]]; -+ width = info->var.xres; -+ height = info->var.yres; -+ if (!splash_look_for_jpeg(vc, width, height)) -+ return -1; -+ return 0; -+ } - } - - printk(KERN_ERR "bootsplash: ...no good signature found.\n"); -@@ -715,27 +912,71 @@ - return 0; - } - --static void splash_off(struct fb_info *info) -+static void splash_off(struct vc_data *vc,struct fb_info *info) - { -+ int rows = info->var.xres / vc->vc_font.width; -+ int cols = info->var.yres / vc->vc_font.height; - SPLASH_DEBUG(); -+ - info->splash_data = 0; -- if (info->splash_pic) -- vfree(info->splash_pic); -- info->splash_pic = 0; -- info->splash_pic_size = 0; -+ if (rows != vc->vc_rows || cols != vc->vc_cols) -+ vc_resize(vc, rows, cols); -+ if (vc->vc_def_color != 0x07) -+ con_remap_def_color(vc, 0x07); - } - - /* look for the splash with the matching size and set it as the current */ - static int splash_look_for_jpeg(struct vc_data *vc, int width, int height) - { -- struct splash_data *sd; -+ struct splash_data *sd, *found = NULL; -+ int found_delta_x = INT_MAX, found_delta_y = INT_MAX; - - for (sd = vc->vc_splash_data; sd; sd = sd->next) { -- if (sd->splash_width == width && sd->splash_height == height) { -- splash_pivot_current(vc, sd); -- return 0; -+ int delta_x = abs(sd->splash_width - width) * height; -+ int delta_y = abs(sd->splash_height - height) * width; -+ if (!found || (found_delta_x + found_delta_y > delta_x + delta_y)) { -+ found = sd; -+ found_delta_x = delta_x; -+ found_delta_y = delta_y; - } - } -+ -+ if (found) { -+ SPLASH_DEBUG("bootsplash: scalable image found (%dx%d scaled to %dx%d).", -+ found->splash_width, found->splash_height, width, height); -+ -+ splash_pivot_current(vc, found); -+ -+ /* textarea margins are constant independent from image 
size */ -+ if (found->splash_height != height) -+ found->splash_text_he = height - (found->splash_height - found->splash_jpg_text_he); -+ else -+ found->splash_text_he = found->splash_jpg_text_he; -+ if (found->splash_width != width) -+ found->splash_text_wi = width - (found->splash_width - found->splash_jpg_text_wi); -+ else -+ found->splash_text_wi = found->splash_jpg_text_wi; -+ -+ if (found->splash_width != width || found->splash_height != height) { -+ box_offsets(found->splash_boxes, found->splash_boxcount, -+ width, height, found->splash_width, found->splash_height, -+ &found->splash_boxes_xoff, &found->splash_boxes_yoff); -+ SPLASH_DEBUG("bootsplash: offsets for boxes: x=%d y=%d", -+ found->splash_boxes_xoff,found->splash_boxes_yoff); -+ -+ if (found->splash_sboxes) { -+ box_offsets(found->splash_sboxes, found->splash_sboxcount, -+ width, height, found->splash_width, found->splash_height, -+ &found->splash_sboxes_xoff, &found->splash_sboxes_yoff); -+ SPLASH_DEBUG("bootsplash: offsets sboxes: x=%d y=%d", -+ found->splash_sboxes_xoff,found->splash_sboxes_yoff); -+ } -+ } else { -+ found->splash_sboxes_xoff = 0; -+ found->splash_sboxes_yoff = 0; -+ } -+ return 0; -+ } - return -1; - } - -@@ -743,13 +984,14 @@ - { - int err; - int width, height, depth, octpp, size, sbytes; -+ int pic_update = 0; - - SPLASH_DEBUG("vc_num: %i", vc->vc_num); - if (!vc->vc_splash_data || !vc->vc_splash_data->splash_state) { - if (decdata) - vfree(decdata); - decdata = 0; -- splash_off(info); -+ splash_off(vc,info); - return -1; - } - -@@ -759,52 +1001,62 @@ - octpp = (depth + 1) >> 3; - - if (depth == 24 || depth < 15) { /* Other targets might need fixing */ -- splash_off(info); -+ splash_off(vc,info); - return -2; - } - if (splash_look_for_jpeg(vc, width, height) < 0) { - printk(KERN_INFO "bootsplash: no matching splash %dx%d\n", - width, height); -- splash_off(info); -+ splash_off(vc,info); - return -2; - } - - sbytes = ((width + 15) & ~15) * octpp; - size = sbytes * ((height + 
15) & ~15); -- if (size != info->splash_pic_size) { -- vfree(info->splash_pic); -- info->splash_pic = NULL; -- } -- if (!info->splash_pic) -- info->splash_pic = vmalloc(size); - -- if (!info->splash_pic) { -+ if (size != vc->vc_splash_data->splash_pic_size) { -+ vfree(vc->vc_splash_data->splash_pic); -+ vc->vc_splash_data->splash_pic = NULL; -+ } -+ if (!vc->vc_splash_data->splash_pic) { -+ vc->vc_splash_data->splash_pic = vmalloc(size); -+ pic_update = 1; -+ } -+ if (!vc->vc_splash_data->splash_pic) { - printk(KERN_INFO "bootsplash: not enough memory.\n"); -- splash_off(info); -+ splash_off(vc,info); - return -3; - } - -- if (!decdata) -+ if (!decdata) { - decdata = vmalloc(sizeof(*decdata)); -+ if (!decdata) { -+ printk(KERN_INFO "bootsplash: not enough memory.\n"); -+ splash_off(vc,info); -+ return -3; -+ } -+ } - - if (vc->vc_splash_data->splash_silentjpeg && vc->vc_splash_data->splash_dosilent) { -- /* fill area after framebuffer with other jpeg */ -- if ((err = jpeg_decode(vc->vc_splash_data->splash_silentjpeg, info->splash_pic, -- ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) { -+ pic_update = 1; -+ if ((err = jpeg_get(vc->vc_splash_data->splash_silentjpeg, vc->vc_splash_data->splash_pic, -+ width, height, depth, decdata))) { - printk(KERN_INFO "bootsplash: error while decompressing silent picture: %s (%d)\n", - jpg_errors[err - 1], err); - vc->vc_splash_data->splash_dosilent = 0; - } else { - if (vc->vc_splash_data->splash_sboxcount) -- boxit(info->splash_pic, -+ boxit(vc->vc_splash_data->splash_pic, - sbytes, - vc->vc_splash_data->splash_sboxes, - vc->vc_splash_data->splash_sboxcount, - vc->vc_splash_data->splash_percent, -+ vc->vc_splash_data->splash_sboxes_xoff, -+ vc->vc_splash_data->splash_sboxes_yoff, - 0, - octpp); - splashcopy(info->screen_base, -- info->splash_pic, -+ vc->vc_splash_data->splash_pic, - info->var.yres, - info->var.xres, - info->fix.line_length, sbytes, -@@ -813,27 +1065,43 @@ - } else - 
vc->vc_splash_data->splash_dosilent = 0; - -- if ((err = jpeg_decode(vc->vc_splash_data->splash_jpeg, info->splash_pic, -- ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) { -- printk(KERN_INFO "bootsplash: error while decompressing picture: %s (%d) .\n", -- jpg_errors[err - 1], err); -- splash_off(info); -- return -4; -+ if (pic_update) { -+ if ((err = jpeg_get(vc->vc_splash_data->splash_jpeg, vc->vc_splash_data->splash_pic, -+ width, height, depth, decdata))) { -+ printk(KERN_INFO "bootsplash: error while decompressing picture: %s (%d) .\n", -+ jpg_errors[err - 1], err); -+ splash_off(vc,info); -+ return -4; -+ } - } -- info->splash_pic_size = size; -- info->splash_pic_stride = sbytes; -+ -+ vc->vc_splash_data->splash_pic_size = size; -+ vc->vc_splash_data->splash_pic_stride = sbytes; -+ - if (vc->vc_splash_data->splash_boxcount) -- boxit(info->splash_pic, -+ boxit(vc->vc_splash_data->splash_pic, - sbytes, - vc->vc_splash_data->splash_boxes, - vc->vc_splash_data->splash_boxcount, - vc->vc_splash_data->splash_percent, -+ vc->vc_splash_data->splash_boxes_xoff, -+ vc->vc_splash_data->splash_boxes_yoff, - 0, - octpp); -- if (vc->vc_splash_data->splash_state) -+ if (vc->vc_splash_data->splash_state) { -+ int cols = vc->vc_splash_data->splash_text_wi / vc->vc_font.width; -+ int rows = vc->vc_splash_data->splash_text_he / vc->vc_font.height; -+ int color = vc->vc_splash_data->splash_color << 4 | vc->vc_splash_data->splash_fg_color; - info->splash_data = vc->vc_splash_data; -- else { -- splash_off(info); -+ -+ /* vc_resize also calls con_switch which resets yscroll */ -+ if (rows != vc->vc_rows || cols != vc->vc_cols) -+ vc_resize(vc, cols, rows); -+ if (vc->vc_def_color != color) -+ con_remap_def_color(vc, color); -+ -+ } else { -+ splash_off(vc,info); - return -5; - } - return 0; -@@ -856,12 +1124,16 @@ - - static int splash_recolor(struct vc_data *vc) - { -+ int color; -+ - SPLASH_DEBUG(); - if (!vc->vc_splash_data) - return -1; - if 
(!vc->vc_splash_data->splash_state) - return 0; -- con_remap_def_color(vc, vc->vc_splash_data->splash_color << 4 | vc->vc_splash_data->splash_fg_color); -+ color = vc->vc_splash_data->splash_color << 4 | vc->vc_splash_data->splash_fg_color; -+ if (vc->vc_def_color != color) -+ con_remap_def_color(vc, color); - if (fg_console == vc->vc_num) { - update_region(vc, - vc->vc_origin + vc->vc_size_row * vc->vc_top, -@@ -884,10 +1156,6 @@ - splash_prepare(vc, info); - if (vc->vc_splash_data && vc->vc_splash_data->splash_state) { - if (info->splash_data) { -- con_remap_def_color(vc, info->splash_data->splash_color << 4 | info->splash_data->splash_fg_color); -- /* vc_resize also calls con_switch which resets yscroll */ -- vc_resize(vc, info->splash_data->splash_text_wi / vc->vc_font.width, -- info->splash_data->splash_text_he / vc->vc_font.height); - if (fg_console == vc->vc_num) { - update_region(vc, - vc->vc_origin + vc->vc_size_row * vc->vc_top, -@@ -895,11 +1163,9 @@ - splash_clear_margins(vc, info, 0); - } - } -- } else { -- /* Switch bootsplash off */ -- con_remap_def_color(vc, 0x07); -- vc_resize(vc, info->var.xres / vc->vc_font.width, info->var.yres / vc->vc_font.height); -- } -+ } else -+ splash_off(vc,info); -+ - return 0; - } - -@@ -956,10 +1222,9 @@ - || pe < oldpe) { - if (splash_hasinter(vc->vc_splash_data->splash_boxes, - vc->vc_splash_data->splash_boxcount)) { -- splash_status(vc); -- } -- else -- splash_prepare(vc, info); -+ splash_status(vc); -+ } else -+ splash_prepare(vc, info); - } else { - int octpp = (info->var.bits_per_pixel + 1) >> 3; - if (info->splash_data) { -@@ -970,6 +1235,8 @@ - info->splash_data->splash_sboxes, - info->splash_data->splash_sboxcount, - info->splash_data->splash_percent, -+ info->splash_data->splash_sboxes_xoff, -+ info->splash_data->splash_sboxes_yoff, - 1, - octpp); - #if 0 -@@ -979,6 +1246,8 @@ - info->splash_data->splash_boxes, - info->splash_data->splash_boxcount, - info->splash_data->splash_percent, -+ 
info->splash_data->splash_boxes_xoff, -+ info->splash_data->splash_boxes_yoff, - 1, - octpp); - #endif -@@ -1100,6 +1369,8 @@ - info->splash_data->splash_sboxes, - info->splash_data->splash_sboxcount, - info->splash_data->splash_percent, -+ info->splash_data->splash_sboxes_xoff, -+ info->splash_data->splash_sboxes_yoff, - 1, - octpp); - } else if ((up & 1) != 0) { -@@ -1108,6 +1379,8 @@ - info->splash_data->splash_boxes, - info->splash_data->splash_boxcount, - info->splash_data->splash_percent, -+ info->splash_data->splash_boxes_xoff, -+ info->splash_data->splash_boxes_yoff, - 1, - octpp); - } -@@ -1226,3 +1499,447 @@ - return; - } - -+#define SPLASH_ALIGN 15 -+ -+static u32 *do_coefficients(u32 from, u32 to, u32 *shift) -+{ -+ u32 *coefficients; -+ u32 left = to; -+ int n = 1; -+ u32 upper = 31; -+ int col_cnt = 0; -+ int row_cnt = 0; -+ int m; -+ u32 rnd = from >> 1; -+ -+ if (from > to) { -+ left = to; -+ rnd = from >> 1; -+ -+ while (upper > 0) { -+ if ((1 << upper) & from) -+ break; -+ upper--; -+ } -+ upper++; -+ -+ *shift = 32 - 8 - 1 - upper; -+ -+ coefficients = vmalloc(sizeof (u32) * (from / to + 2) * from + 1); -+ if (!coefficients) -+ return NULL; -+ -+ n = 1; -+ while (1) { -+ u32 sum = left; -+ col_cnt = 0; -+ m = n++; -+ while (sum < from) { -+ coefficients[n++] = ((left << *shift) + rnd) / from; -+ col_cnt++; -+ left = to; -+ sum += left; -+ } -+ left = sum - from; -+ coefficients[n++] = (((to - left) << *shift) + rnd) / from; -+ col_cnt++; -+ coefficients[m] = col_cnt; -+ row_cnt++; -+ if (!left) { -+ coefficients[0] = row_cnt; -+ return coefficients; -+ } -+ } -+ } else { -+ left = 0; -+ rnd = to >> 1; -+ -+ while (upper > 0) { -+ if ((1 << upper) & to) -+ break; -+ upper--; -+ } -+ upper++; -+ -+ *shift = 32 - 8 - 1 - upper; -+ -+ coefficients = vmalloc(sizeof (u32) * 3 * from + 1); -+ if (!coefficients) -+ return NULL; -+ -+ while (1) { -+ u32 diff; -+ u32 sum = left; -+ col_cnt = 0; -+ row_cnt++; -+ while (sum < to) { -+ col_cnt++; -+ sum += 
from; -+ } -+ left = sum - to; -+ diff = from - left; -+ if (!left) { -+ coefficients[n] = col_cnt; -+ coefficients[0] = row_cnt; -+ return coefficients; -+ } -+ coefficients[n++] = col_cnt - 1; -+ coefficients[n++] = ((diff << *shift) + rnd) / from; -+ coefficients[n++] = ((left << *shift) + rnd) / from; -+ } -+ } -+} -+ -+ -+struct pixel -+{ -+ u32 red; -+ u32 green; -+ u32 blue; -+}; -+ -+#define put_pixel(pix, buf, depth) \ -+ switch (depth) { \ -+ case 15: \ -+ *(u16 *)(buf) = (u16)((pix).red << 10 | (pix).green << 5 | (pix).blue); \ -+ (buf) += 2; \ -+ break; \ -+ case 16: \ -+ *(u16 *)(buf) = (u16)((pix).red << 11 | (pix).green << 5 | (pix).blue); \ -+ (buf) += 2; \ -+ break; \ -+ case 24: \ -+ *(u16 *)(buf) = (u16)((pix).red << 8 | (pix).green); \ -+ buf += 2; \ -+ *((buf)++) = (pix).blue; \ -+ break; \ -+ case 32: \ -+ *(u32 *)(buf) = (u32)((pix).red << 16 | (pix).green << 8 | (pix).blue); \ -+ (buf) += 4; \ -+ break; \ -+} -+ -+#define get_pixel(pix, buf, depth) \ -+ switch (depth) { \ -+case 15: \ -+ (pix).red = ((*(u16 *)(buf)) >> 10) & 0x1f; \ -+ (pix).green = ((*(u16 *)(buf)) >> 5) & 0x1f; \ -+ (pix).blue = (*(u16 *)(buf)) & 0x1f; \ -+ (buf) += 2; \ -+ break; \ -+case 16: \ -+ (pix).red = ((*(u16 *)(buf)) >> 11) & 0x1f; \ -+ (pix).green = ((*(u16 *)(buf)) >> 5) & 0x3f; \ -+ (pix).blue = (*(u16 *)(buf)) & 0x1f; \ -+ (buf) += 2; \ -+ break; \ -+case 24: \ -+ (pix).blue = *(((buf))++); \ -+ (pix).green = *(((buf))++); \ -+ (pix).red = *(((buf))++); \ -+ break; \ -+case 32: \ -+ (pix).blue = *(((buf))++); \ -+ (pix).green = *(((buf))++); \ -+ (pix).red = *(((buf))++); \ -+ (buf)++; \ -+ break; \ -+} -+ -+static inline void -+scale_x_down(int depth, int src_w, unsigned char **src_p, u32 *x_coeff, u32 x_shift, u32 y_coeff, struct pixel *row_buffer) -+{ -+ u32 curr_x_coeff = 1; -+ struct pixel curr_pixel, tmp_pixel; -+ u32 x_array_size = x_coeff[0]; -+ int x_column_num; -+ int i; -+ int l,m; -+ int k = 0; -+ u32 rnd = (1 << (x_shift - 1)); -+ -+ for (i = 0; 
i < src_w; ) { -+ curr_x_coeff = 1; -+ get_pixel(tmp_pixel, *src_p, depth); -+ i++; -+ for (l = 0; l < x_array_size; l++) { -+ x_column_num = x_coeff[curr_x_coeff++]; -+ curr_pixel.red = curr_pixel.green = curr_pixel.blue = 0; -+ for (m = 0; m < x_column_num - 1; m++) { -+ curr_pixel.red += tmp_pixel.red * x_coeff[curr_x_coeff]; -+ curr_pixel.green += tmp_pixel.green * x_coeff[curr_x_coeff]; -+ curr_pixel.blue += tmp_pixel.blue * x_coeff[curr_x_coeff]; -+ curr_x_coeff++; -+ get_pixel(tmp_pixel, *src_p, depth); -+ i++; -+ } -+ curr_pixel.red += tmp_pixel.red * x_coeff[curr_x_coeff]; -+ curr_pixel.green += tmp_pixel.green * x_coeff[curr_x_coeff]; -+ curr_pixel.blue += tmp_pixel.blue * x_coeff[curr_x_coeff]; -+ curr_x_coeff++; -+ curr_pixel.red = (curr_pixel.red + rnd) >> x_shift; -+ curr_pixel.green = (curr_pixel.green + rnd) >> x_shift; -+ curr_pixel.blue = (curr_pixel.blue + rnd) >> x_shift; -+ row_buffer[k].red += curr_pixel.red * y_coeff; -+ row_buffer[k].green += curr_pixel.green * y_coeff; -+ row_buffer[k].blue += curr_pixel.blue * y_coeff; -+ k++; -+ } -+ } -+} -+ -+static inline void -+scale_x_up(int depth, int src_w, unsigned char **src_p, u32 *x_coeff, u32 x_shift, u32 y_coeff, struct pixel *row_buffer) -+{ -+ u32 curr_x_coeff = 1; -+ struct pixel curr_pixel, tmp_pixel; -+ u32 x_array_size = x_coeff[0]; -+ int x_column_num; -+ int i; -+ int l,m; -+ int k = 0; -+ u32 rnd = (1 << (x_shift - 1)); -+ -+ for (i = 0; i < src_w;) { -+ curr_x_coeff = 1; -+ get_pixel(tmp_pixel, *src_p, depth); -+ i++; -+ for (l = 0; l < x_array_size - 1; l++) { -+ x_column_num = x_coeff[curr_x_coeff++]; -+ for (m = 0; m < x_column_num; m++) { -+ row_buffer[k].red += tmp_pixel.red * y_coeff; -+ row_buffer[k].green += tmp_pixel.green * y_coeff; -+ row_buffer[k].blue += tmp_pixel.blue * y_coeff; -+ k++; -+ } -+ curr_pixel.red = tmp_pixel.red * x_coeff[curr_x_coeff]; -+ curr_pixel.green = tmp_pixel.green * x_coeff[curr_x_coeff]; -+ curr_pixel.blue = tmp_pixel.blue * 
x_coeff[curr_x_coeff]; -+ curr_x_coeff++; -+ get_pixel(tmp_pixel, *src_p, depth); -+ i++; -+ row_buffer[k].red += ((curr_pixel.red + tmp_pixel.red * x_coeff[curr_x_coeff] + rnd) >> x_shift) * y_coeff; -+ row_buffer[k].green += ((curr_pixel.green + tmp_pixel.green * x_coeff[curr_x_coeff] + rnd) >> x_shift) * y_coeff; -+ row_buffer[k].blue += ((curr_pixel.blue + tmp_pixel.blue * x_coeff[curr_x_coeff] + rnd) >> x_shift) * y_coeff; -+ k++; -+ curr_x_coeff++; -+ } -+ for (m = 0; m < x_coeff[curr_x_coeff]; m++) { -+ row_buffer[k].red += tmp_pixel.red * y_coeff; -+ row_buffer[k].green += tmp_pixel.green * y_coeff; -+ row_buffer[k].blue += tmp_pixel.blue * y_coeff; -+ k++; -+ } -+ } -+} -+ -+static int scale_y_down(unsigned char *src, unsigned char *dst, int depth, int src_w, int src_h, int dst_w, int dst_h) -+{ -+ int octpp = (depth + 1) >> 3; -+ int src_x_bytes = octpp * ((src_w + SPLASH_ALIGN) & ~SPLASH_ALIGN); -+ int dst_x_bytes = octpp * ((dst_w + SPLASH_ALIGN) & ~SPLASH_ALIGN); -+ int j; -+ struct pixel *row_buffer; -+ u32 x_shift, y_shift; -+ u32 *x_coeff; -+ u32 *y_coeff; -+ u32 curr_y_coeff = 1; -+ unsigned char *src_p; -+ unsigned char *src_p_line = src; -+ char *dst_p_line; -+ int r,s; -+ int y_array_rows; -+ int y_column_num; -+ int k; -+ u32 rnd; -+ int xup; -+ -+ row_buffer = (struct pixel *)vmalloc(sizeof(struct pixel) * (dst_w + 1)); -+ x_coeff = do_coefficients(src_w, dst_w, &x_shift); -+ y_coeff = do_coefficients(src_h, dst_h, &y_shift); -+ if (!row_buffer || !x_coeff || !y_coeff) { -+ vfree(row_buffer); -+ vfree(x_coeff); -+ vfree(y_coeff); -+ return -ENOMEM; -+ } -+ y_array_rows = y_coeff[0]; -+ rnd = (1 << (y_shift - 1)); -+ xup = (src_w <= dst_w) ? 
1 : 0; -+ -+ dst_p_line = dst; -+ -+ for (j = 0; j < src_h;) { -+ curr_y_coeff = 1; -+ for (r = 0; r < y_array_rows; r++) { -+ y_column_num = y_coeff[curr_y_coeff++]; -+ for (k = 0; k < dst_w + 1; k++) -+ row_buffer[k].red = row_buffer[k].green = row_buffer[k].blue = 0; -+ src_p = src_p_line; -+ if (xup) -+ scale_x_up(depth, src_w, &src_p, x_coeff, x_shift, y_coeff[curr_y_coeff], row_buffer ); -+ else -+ scale_x_down(depth, src_w, &src_p, x_coeff, x_shift, y_coeff[curr_y_coeff], row_buffer ); -+ curr_y_coeff++; -+ for (s = 1; s < y_column_num; s++) { -+ src_p = src_p_line = src_p_line + src_x_bytes; -+ j++; -+ if (xup) -+ scale_x_up(depth, src_w, &src_p, x_coeff, x_shift, y_coeff[curr_y_coeff], row_buffer ); -+ else -+ scale_x_down(depth, src_w, &src_p, x_coeff, x_shift, y_coeff[curr_y_coeff], row_buffer ); -+ curr_y_coeff++; -+ } -+ for (k = 0; k < dst_w; k++) { -+ row_buffer[k].red = ( row_buffer[k].red + rnd) >> y_shift; -+ row_buffer[k].green = (row_buffer[k].green + rnd) >> y_shift; -+ row_buffer[k].blue = (row_buffer[k].blue + rnd) >> y_shift; -+ put_pixel (row_buffer[k], dst, depth); -+ } -+ dst = dst_p_line = dst_p_line + dst_x_bytes; -+ } -+ src_p_line = src_p_line + src_x_bytes; -+ j++; -+ } -+ vfree(row_buffer); -+ vfree(x_coeff); -+ vfree(y_coeff); -+ return 0; -+} -+ -+static int scale_y_up(unsigned char *src, unsigned char *dst, int depth, int src_w, int src_h, int dst_w, int dst_h) -+{ -+ int octpp = (depth + 1) >> 3; -+ int src_x_bytes = octpp * ((src_w + SPLASH_ALIGN) & ~SPLASH_ALIGN); -+ int dst_x_bytes = octpp * ((dst_w + SPLASH_ALIGN) & ~SPLASH_ALIGN); -+ int j; -+ u32 x_shift, y_shift; -+ u32 *x_coeff; -+ u32 *y_coeff; -+ struct pixel *row_buf_list[2]; -+ struct pixel *row_buffer; -+ u32 curr_y_coeff = 1; -+ unsigned char *src_p; -+ unsigned char *src_p_line = src; -+ char *dst_p_line; -+ int r,s; -+ int y_array_rows; -+ int y_column_num; -+ int k; -+ u32 rnd; -+ int bi; -+ int xup; -+ int writes; -+ -+ x_coeff = do_coefficients(src_w, dst_w, 
&x_shift); -+ y_coeff = do_coefficients(src_h, dst_h, &y_shift); -+ row_buf_list[0] = (struct pixel *)vmalloc(2 * sizeof(struct pixel) * (dst_w + 1)); -+ if (!row_buf_list[0] || !x_coeff || !y_coeff) { -+ vfree(row_buf_list[0]); -+ vfree(x_coeff); -+ vfree(y_coeff); -+ return -ENOMEM; -+ } -+ row_buf_list[1] = row_buf_list[0] + (dst_w + 1); -+ -+ y_array_rows = y_coeff[0]; -+ rnd = (1 << (y_shift - 1)); -+ bi = 1; -+ xup = (src_w <= dst_w) ? 1 : 0; -+ writes = 0; -+ -+ dst_p_line = dst; -+ src_p = src_p_line; -+ -+ row_buffer = row_buf_list[0]; -+ -+ for (j = 0; j < src_h;) { -+ memset(row_buf_list[0], 0, 2 * sizeof(struct pixel) * (dst_w + 1)); -+ curr_y_coeff = 1; -+ if (xup) -+ scale_x_up(depth, src_w, &src_p, x_coeff, x_shift, 1, row_buffer ); -+ else -+ scale_x_down(depth, src_w, &src_p, x_coeff, x_shift, 1, row_buffer ); -+ src_p = src_p_line = src_p_line + src_x_bytes; -+ j++; -+ for (r = 0; r < y_array_rows - 1; r++) { -+ struct pixel *old_row_buffer = row_buffer; -+ u32 prev_y_coeff_val; -+ -+ y_column_num = y_coeff[curr_y_coeff]; -+ for (s = 0; s < y_column_num; s++) { -+ for (k = 0; k < dst_w; k++) -+ put_pixel(row_buffer[k], dst, depth); -+ dst = dst_p_line = dst_p_line + dst_x_bytes; -+ writes++; -+ } -+ curr_y_coeff++; -+ row_buffer = row_buf_list[(bi++) % 2]; -+ prev_y_coeff_val = y_coeff[curr_y_coeff++]; -+ if (xup) -+ scale_x_up(depth, src_w, &src_p, x_coeff, x_shift, 1, row_buffer ); -+ else -+ scale_x_down(depth, src_w, &src_p, x_coeff, x_shift, 1, row_buffer ); -+ src_p = src_p_line = src_p_line + src_x_bytes; -+ j++; -+ for (k = 0; k > y_shift; -+ pix.green = (old_row_buffer[k].green * prev_y_coeff_val + row_buffer[k].green * y_coeff[curr_y_coeff] + rnd) >> y_shift; -+ pix.blue = (old_row_buffer[k].blue * prev_y_coeff_val + row_buffer[k].blue * y_coeff[curr_y_coeff] + rnd) >> y_shift; -+ old_row_buffer[k].red = old_row_buffer[k].green = old_row_buffer[k].blue = 0; -+ put_pixel(pix, dst, depth); -+ } -+ dst = dst_p_line = dst_p_line + 
dst_x_bytes; -+ writes++; -+ curr_y_coeff++; -+ } -+ for (r = 0; r < y_coeff[curr_y_coeff]; r++) { -+ for (k = 0; k < dst_w; k++) { -+ put_pixel(row_buffer[k], dst, depth); -+ } -+ dst = dst_p_line = dst_p_line + dst_x_bytes; -+ writes++; -+ } -+ } -+ vfree(row_buf_list[0]); -+ vfree(x_coeff); -+ vfree(y_coeff); -+ -+ return 0; -+} -+ -+static int jpeg_get(unsigned char *buf, unsigned char *pic, -+ int width, int height, int depth, -+ struct jpeg_decdata *decdata) -+{ -+ int my_width, my_height; -+ int err; -+ -+ jpeg_get_size(buf, &my_width, &my_height); -+ -+ if (my_height != height || my_width != width) { -+ int my_size = ((my_width + 15) & ~15) -+ * ((my_height + 15) & ~15) * ((depth + 1) >> 3); -+ unsigned char *mem = vmalloc(my_size); -+ if (!mem) -+ return 17; -+ if ((err = jpeg_decode(buf, mem, ((my_width + 15) & ~15), -+ ((my_height + 15) & ~15), depth, decdata))) { -+ vfree(mem); -+ return err; -+ } -+ printk(KERN_INFO "bootsplash: scaling image from %dx%d to %dx%d\n", my_width, my_height, width, height); -+ if (my_height <= height) -+ err = scale_y_up(mem, pic, depth, my_width, my_height, ((width + 15) & ~15), ((height + 15) & ~15)); -+ else -+ err = scale_y_down(mem, pic, depth, my_width, my_height, ((width + 15) & ~15), ((height + 15) & ~15)); -+ vfree(mem); -+ if (err < 0) -+ return 17; -+ } else { -+ if ((err = jpeg_decode(buf, pic, ((width + 15) & ~15), ((height + 15) & ~15), depth, decdata))) -+ return err; -+ } -+ return 0; -+} ---- a/drivers/video/bootsplash/decode-jpg.c -+++ b/drivers/video/bootsplash/decode-jpg.c -@@ -888,9 +888,9 @@ - #define PIC_32(yin, xin, p, xout) \ - ( \ - y = outy[(yin) * 8 + xin], \ -- STORECLAMP(p[(xout) * 4 + 0], y + cr), \ -+ STORECLAMP(p[(xout) * 4 + 0], y + cb), \ - STORECLAMP(p[(xout) * 4 + 1], y - cg), \ -- STORECLAMP(p[(xout) * 4 + 2], y + cb), \ -+ STORECLAMP(p[(xout) * 4 + 2], y + cr), \ - p[(xout) * 4 + 3] = 0 \ - ) - ---- a/drivers/video/bootsplash/render.c -+++ b/drivers/video/bootsplash/render.c -@@ -45,7 
+45,7 @@ - transparent = sd->splash_color == bg_color; - xpos = xpos * vc->vc_font.width + sd->splash_text_xo; - ypos = ypos * vc->vc_font.height + sd->splash_text_yo; -- splashsrc.ub = (u8 *)(info->splash_pic + ypos * info->splash_pic_stride + xpos * octpp); -+ splashsrc.ub = (u8 *)(sd->splash_pic + ypos * sd->splash_pic_stride + xpos * octpp); - dst.ub = (u8 *)(info->screen_base + ypos * info->fix.line_length + xpos * octpp); - fgx = ((u32 *)info->pseudo_palette)[fg_color]; - if (transparent && sd->splash_color == 15) { -@@ -109,10 +109,10 @@ - } - } - dst.ub += info->fix.line_length - vc->vc_font.width * octpp; -- splashsrc.ub += info->splash_pic_stride - vc->vc_font.width * octpp; -+ splashsrc.ub += sd->splash_pic_stride - vc->vc_font.width * octpp; - } - dst.ub -= info->fix.line_length * vc->vc_font.height - vc->vc_font.width * octpp; -- splashsrc.ub -= info->splash_pic_stride * vc->vc_font.height - vc->vc_font.width * octpp; -+ splashsrc.ub -= sd->splash_pic_stride * vc->vc_font.height - vc->vc_font.width * octpp; - } - } - -@@ -136,7 +136,7 @@ - sd = info->splash_data; - - transparent = sd->splash_color == bg_color; -- splashsrc.ub = (u8*)(info->splash_pic + ypos * info->splash_pic_stride + xpos * octpp); -+ splashsrc.ub = (u8*)(sd->splash_pic + ypos * sd->splash_pic_stride + xpos * octpp); - dst.ub = (u8*)(info->screen_base + ypos * info->fix.line_length + xpos * octpp); - fgx = ((u32 *)info->pseudo_palette)[fg_color]; - if (transparent && sd->splash_color == 15) { -@@ -197,7 +197,7 @@ - } - } - dst.ub += info->fix.line_length - width * octpp; -- splashsrc.ub += info->splash_pic_stride - width * octpp; -+ splashsrc.ub += sd->splash_pic_stride - width * octpp; - } - } - -@@ -255,10 +255,11 @@ - - static void splashfill(struct fb_info *info, int sy, int sx, int height, int width) { - int octpp = (info->var.bits_per_pixel + 1) >> 3; -+ struct splash_data *sd = info->splash_data; - - splashcopy((u8 *)(info->screen_base + sy * info->fix.line_length + sx * 
octpp), -- (u8 *)(info->splash_pic + sy * info->splash_pic_stride + sx * octpp), -- height, width, info->fix.line_length, info->splash_pic_stride, -+ (u8 *)(sd->splash_pic + sy * sd->splash_pic_stride + sx * octpp), -+ height, width, info->fix.line_length, sd->splash_pic_stride, - octpp); - } - -@@ -442,6 +443,7 @@ - void splash_blank(struct vc_data *vc, struct fb_info *info, int blank) - { - SPLASH_DEBUG(); -+ - if (blank) { - splashset((u8 *)info->screen_base, - info->var.yres, info->var.xres, ---- a/drivers/video/console/fbcon.h -+++ b/drivers/video/console/fbcon.h -@@ -34,8 +34,10 @@ - int splash_height; /* height of image */ - int splash_text_xo; /* text area origin */ - int splash_text_yo; -- int splash_text_wi; /* text area size */ -+ int splash_text_wi; /* text area size used*/ - int splash_text_he; -+ int splash_jpg_text_wi; /* text area size of jpeg*/ -+ int splash_jpg_text_he; - int splash_showtext; /* silent/verbose mode */ - int splash_boxcount; - int splash_percent; -@@ -45,12 +47,19 @@ - unsigned char *splash_boxes; - unsigned char *splash_jpeg; /* jpeg */ - unsigned char *splash_palette; /* palette for 8-bit */ -+ int splash_boxes_xoff; -+ int splash_boxes_yoff; - - int splash_dosilent; /* show silent jpeg */ - unsigned char *splash_silentjpeg; - unsigned char *splash_sboxes; - int splash_sboxcount; - struct splash_data *next; -+ int splash_sboxes_xoff; -+ int splash_sboxes_yoff; -+ int splash_pic_stride; -+ unsigned char *splash_pic; -+ int splash_pic_size; - }; - #endif - ---- a/include/linux/fb.h -+++ b/include/linux/fb.h -@@ -877,9 +877,6 @@ - void *par; - #ifdef CONFIG_BOOTSPLASH - struct splash_data *splash_data; -- unsigned char *splash_pic; -- int splash_pic_size; -- int splash_pic_stride; - char fb_cursordata[64]; - #endif - /* we need the PCI or similiar aperture base/size not diff --git a/patches.suse/connector-read-mostly b/patches.suse/connector-read-mostly deleted file mode 100644 index 1cefe96..0000000 --- 
a/patches.suse/connector-read-mostly +++ /dev/null @@ -1,23 +0,0 @@ -From: Chris Mason -Subject: Make proc_event_num_listeners __read_mostly -Patch-mainline: not yet - -This will lower the fast path costs of the userland connector code. - -Acked-by: Jeff Mahoney - ---- - drivers/connector/cn_proc.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/connector/cn_proc.c -+++ b/drivers/connector/cn_proc.c -@@ -35,7 +35,7 @@ - - #define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event)) - --static atomic_t proc_event_num_listeners = ATOMIC_INIT(0); -+static atomic_t proc_event_num_listeners __read_mostly = ATOMIC_INIT(0); - static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; - - /* proc_event_counts is used as the sequence number of the netlink message */ diff --git a/patches.suse/crasher-26.diff b/patches.suse/crasher-26.diff deleted file mode 100644 index 74572be..0000000 --- a/patches.suse/crasher-26.diff +++ /dev/null @@ -1,264 +0,0 @@ -From: Chris Mason -Subject: slab testing module -Patch-mainline: probably never - ---- - drivers/char/Kconfig | 5 + - drivers/char/Makefile | 1 - drivers/char/crasher.c | 228 +++++++++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 234 insertions(+) - ---- a/drivers/char/Kconfig -+++ b/drivers/char/Kconfig -@@ -1129,5 +1129,10 @@ config RAMOOPS - This enables panic and oops messages to be logged to a circular - buffer in RAM where it can be read back at some later point. - -+config CRASHER -+ tristate "Crasher Module" -+ help -+ Slab cache memory tester. 
Only use this as a module -+ - endmenu - ---- a/drivers/char/Makefile -+++ b/drivers/char/Makefile -@@ -108,6 +108,7 @@ obj-$(CONFIG_IPMI_HANDLER) += ipmi/ - - obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o - obj-$(CONFIG_TCG_TPM) += tpm/ -+obj-$(CONFIG_CRASHER) += crasher.o - - obj-$(CONFIG_PS3_FLASH) += ps3flash.o - obj-$(CONFIG_RAMOOPS) += ramoops.o ---- /dev/null -+++ b/drivers/char/crasher.c -@@ -0,0 +1,228 @@ -+/* -+ * crasher.c, it breaks things -+ */ -+ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static int module_exiting; -+static struct completion startup = COMPLETION_INITIALIZER(startup); -+static unsigned long rand_seed = 152L; -+static unsigned long seed = 152L; -+static int threads = 1; -+static int call_panic, call_bug, call_warn; -+static int trap_null, call_null, jump_null; -+static long trap_read, trap_write, call_bad, jump_bad; -+ -+module_param(seed, ulong, 0); -+module_param(call_panic, bool, 0); -+module_param(call_bug, bool, 0); -+module_param(call_warn, bool, 0); -+module_param(trap_null, bool, 0); -+module_param(trap_read, long, 0); -+module_param(trap_write, long, 0); -+module_param(call_null, bool, 0); -+module_param(call_bad, long, 0); -+module_param(jump_null, bool, 0); -+module_param(jump_bad, long, 0); -+module_param(threads, int, 0); -+MODULE_PARM_DESC(seed, "random seed for memory tests"); -+MODULE_PARM_DESC(call_panic, "test option. call panic() and render the system unusable."); -+MODULE_PARM_DESC(call_bug, "test option. call BUG() and render the system unusable."); -+MODULE_PARM_DESC(call_warn, "test option. call WARN() and leave the system usable."); -+MODULE_PARM_DESC(trap_null, "test option. dereference a NULL pointer to simulate a crash and render the system unusable."); -+MODULE_PARM_DESC(trap_read, "test option. read from an invalid address to simulate a crash and render the system unusable."); -+MODULE_PARM_DESC(trap_write, "test option. 
write to an invalid address to simulate a crash and render the system unusable."); -+MODULE_PARM_DESC(call_null, "test option. call a NULL pointer to simulate a crash and render the system unusable."); -+MODULE_PARM_DESC(call_bad, "test option. call an invalid address to simulate a crash and render the system unusable."); -+MODULE_PARM_DESC(jump_null, "test option. jump to a NULL pointer to simulate a crash and render the system unusable."); -+MODULE_PARM_DESC(jump_bad, "test option. jump to an invalid address to simulate a crash and render the system unusable."); -+MODULE_PARM_DESC(threads, "number of threads to run"); -+MODULE_LICENSE("GPL"); -+ -+#define NUM_ALLOC 24 -+#define NUM_SIZES 8 -+static int sizes[] = { 32, 64, 128, 192, 256, 1024, 2048, 4096 }; -+ -+struct mem_buf { -+ char *buf; -+ int size; -+}; -+ -+static unsigned long crasher_random(void) -+{ -+ rand_seed = rand_seed*69069L+1; -+ return rand_seed^jiffies; -+} -+ -+void crasher_srandom(unsigned long entropy) -+{ -+ rand_seed ^= entropy; -+ crasher_random(); -+} -+ -+static char *mem_alloc(int size) { -+ char *p = kmalloc(size, GFP_KERNEL); -+ int i; -+ if (!p) -+ return p; -+ for (i = 0 ; i < size; i++) -+ p[i] = (i % 119) + 8; -+ return p; -+} -+ -+static void mem_check(char *p, int size) { -+ int i; -+ if (!p) -+ return; -+ for (i = 0 ; i < size; i++) { -+ if (p[i] != ((i % 119) + 8)) { -+ printk(KERN_CRIT "verify error at %lX offset %d " -+ " wanted %d found %d size %d\n", -+ (unsigned long)(p + i), i, (i % 119) + 8, -+ p[i], size); -+ } -+ } -+ // try and trigger slab poisoning for people using this buffer -+ // wrong -+ memset(p, 0, size); -+} -+ -+static void mem_verify(void) { -+ struct mem_buf bufs[NUM_ALLOC]; -+ struct mem_buf *b; -+ int index; -+ int size; -+ unsigned long sleep; -+ memset(bufs, 0, sizeof(struct mem_buf) * NUM_ALLOC); -+ while(!module_exiting) { -+ index = crasher_random() % NUM_ALLOC; -+ b = bufs + index; -+ if (b->size) { -+ mem_check(b->buf, b->size); -+ 
kfree(b->buf); -+ b->buf = NULL; -+ b->size = 0; -+ } else { -+ size = crasher_random() % NUM_SIZES; -+ size = sizes[size]; -+ b->buf = mem_alloc(size); -+ b->size = size; -+ } -+ sleep = crasher_random() % (HZ / 10); -+ set_current_state(TASK_INTERRUPTIBLE); -+ schedule_timeout(sleep); -+ set_current_state(TASK_RUNNING); -+ } -+ for (index = 0 ; index < NUM_ALLOC ; index++) { -+ b = bufs + index; -+ if (b->size) { -+ mem_check(b->buf, b->size); -+ kfree(b->buf); -+ } -+ } -+} -+ -+static int crasher_thread(void *unused) -+{ -+ daemonize("crasher"); -+ complete(&startup); -+ mem_verify(); -+ complete(&startup); -+ return 0; -+} -+ -+static int __init crasher_init(void) -+{ -+ int i; -+ init_completion(&startup); -+ crasher_srandom(seed); -+ -+ if (call_panic) { -+ panic("test panic from crasher module. Good Luck.\n"); -+ return -EFAULT; -+ } -+ if (call_bug) { -+ printk("triggering BUG\n"); -+ BUG_ON(1); -+ return -EFAULT; -+ } -+ if (WARN(call_warn, "triggering WARN\n")) -+ return -EFAULT; -+ -+ if (trap_null) { -+ volatile char *p = NULL; -+ printk("dereferencing NULL pointer.\n"); -+ p[0] = '\n'; -+ return -EFAULT; -+ } -+ if (trap_read) { -+ const volatile char *p = (char *)trap_read; -+ printk("reading from invalid(?) address %p.\n", p); -+ return p[0] ? -EFAULT : -EACCES; -+ } -+ if (trap_write) { -+ volatile char *p = (char *)trap_write; -+ printk("writing to invalid(?) address %p.\n", p); -+ p[0] = ' '; -+ return -EFAULT; -+ } -+ -+ if (call_null) { -+ void(*f)(void) = NULL; -+ printk("calling NULL pointer.\n"); -+ f(); -+ return -EFAULT; -+ } -+ if (call_bad) { -+ void(*f)(void) = (void(*)(void))call_bad; -+ printk("calling invalid(?) address %p.\n", f); -+ f(); -+ return -EFAULT; -+ } -+ -+ /* These two depend on the compiler doing tail call optimization. */ -+ if (jump_null) { -+ int(*f)(void) = NULL; -+ printk("jumping to NULL.\n"); -+ return f(); -+ } -+ if (jump_bad) { -+ int(*f)(void) = (int(*)(void))jump_bad; -+ printk("jumping to invalid(?) 
address %p.\n", f); -+ return f(); -+ } -+ -+ printk("crasher module (%d threads). Testing sizes: ", threads); -+ for (i = 0 ; i < NUM_SIZES ; i++) -+ printk("%d ", sizes[i]); -+ printk("\n"); -+ -+ for (i = 0 ; i < threads ; i++) -+ kernel_thread(crasher_thread, crasher_thread, -+ CLONE_FS | CLONE_FILES); -+ for (i = 0 ; i < threads ; i++) -+ wait_for_completion(&startup); -+ return 0; -+} -+ -+static void __exit crasher_exit(void) -+{ -+ int i; -+ module_exiting = 1; -+ for (i = 0 ; i < threads ; i++) -+ wait_for_completion(&startup); -+ printk("all crasher threads done\n"); -+ return; -+} -+ -+module_init(crasher_init); -+module_exit(crasher_exit); diff --git a/patches.suse/dm-emulate-blkrrpart-ioctl b/patches.suse/dm-emulate-blkrrpart-ioctl deleted file mode 100644 index 395d483..0000000 --- a/patches.suse/dm-emulate-blkrrpart-ioctl +++ /dev/null @@ -1,51 +0,0 @@ -From: Hannes Reinecke -Subject: Emulate BLKRRPART on device-mapper -Patch-mainline: not yet - -Partitions on device-mapper devices are managed by kpartx (if at -all). So if we were just to send out a 'change' event if someone -called BLKRRPART on these devices, kpartx will be triggered via udev -and can manage the partitions accordingly. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/md/dm.c | 22 ++++++++++++++-------- - 1 file changed, 14 insertions(+), 8 deletions(-) - ---- a/drivers/md/dm.c -+++ b/drivers/md/dm.c -@@ -421,19 +421,25 @@ static int dm_blk_ioctl(struct block_dev - if (!map || !dm_table_get_size(map)) - goto out; - -- /* We only support devices that have a single target */ -- if (dm_table_get_num_targets(map) != 1) -- goto out; -- -- tgt = dm_table_get_target(map, 0); -- - if (dm_suspended_md(md)) { - r = -EAGAIN; - goto out; - } - -- if (tgt->type->ioctl) -- r = tgt->type->ioctl(tgt, cmd, arg); -+ if (cmd == BLKRRPART) { -+ /* Emulate Re-read partitions table */ -+ kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE); -+ r = 0; -+ } else { -+ /* We only support devices that have a single target */ -+ if (dm_table_get_num_targets(map) != 1) -+ goto out; -+ -+ tgt = dm_table_get_target(map, 0); -+ -+ if (tgt->type->ioctl) -+ r = tgt->type->ioctl(tgt, cmd, arg); -+ } - - out: - dm_table_put(map); diff --git a/patches.suse/dm-mpath-accept-failed-paths b/patches.suse/dm-mpath-accept-failed-paths deleted file mode 100644 index 58b6e63..0000000 --- a/patches.suse/dm-mpath-accept-failed-paths +++ /dev/null @@ -1,224 +0,0 @@ -From 950e1951d452d08a5bd95d82d4cad7fa97fa4464 Mon Sep 17 00:00:00 2001 -From: Hannes Reinecke -Date: Thu, 19 Nov 2009 13:54:56 +0100 -Subject: Accept failed paths for multipath maps -References: bnc#458037,bnc#458393 -Patch-mainline: Not yet - -The multipath kernel module is rejecting any map with an invalid -device. However, as the multipathd is processing the events serially -it will try to push a map with invalid devices if more than one -device failed at the same time. -So we can as well accept those maps and make sure to mark the -paths as down. 
- -Signed-off-by: Hannes Reinecke ---- ---- - drivers/md/dm-mpath.c | 71 ++++++++++++++++++++++++++++++++++++++++---------- - drivers/md/dm-mpath.h | 1 - drivers/md/dm-table.c | 7 +++- - 3 files changed, 64 insertions(+), 15 deletions(-) - ---- a/drivers/md/dm-mpath.c -+++ b/drivers/md/dm-mpath.c -@@ -146,7 +146,8 @@ static void deactivate_path(struct work_ - struct pgpath *pgpath = - container_of(work, struct pgpath, deactivate_path); - -- blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue); -+ if (pgpath->path.dev) -+ blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue); - } - - static struct priority_group *alloc_priority_group(void) -@@ -275,6 +276,11 @@ static int __choose_path_in_pg(struct mu - - m->current_pgpath = path_to_pgpath(path); - -+ if (!m->current_pgpath->path.dev) { -+ m->current_pgpath = NULL; -+ return -ENODEV; -+ } -+ - if (m->current_pg != pg) - __switch_pg(m, m->current_pgpath); - -@@ -593,6 +599,7 @@ static struct pgpath *parse_path(struct - { - int r; - struct pgpath *p; -+ char *path; - struct multipath *m = ti->private; - - /* we need at least a path arg */ -@@ -605,14 +612,37 @@ static struct pgpath *parse_path(struct - if (!p) - return ERR_PTR(-ENOMEM); - -- r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table), -+ path = shift(as); -+ r = dm_get_device(ti, path, dm_table_get_mode(ti->table), - &p->path.dev); - if (r) { -- ti->error = "error getting device"; -- goto bad; -+ unsigned major, minor; -+ -+ /* Try to add a failed device */ -+ if (r == -ENXIO && sscanf(path, "%u:%u", &major, &minor) == 2) { -+ dev_t dev; -+ -+ /* Extract the major/minor numbers */ -+ dev = MKDEV(major, minor); -+ if (MAJOR(dev) != major || MINOR(dev) != minor) { -+ /* Nice try, didn't work */ -+ DMWARN("Invalid device path %s", path); -+ ti->error = "error converting devnum"; -+ goto bad; -+ } -+ DMWARN("adding disabled device %d:%d", major, minor); -+ p->path.dev = NULL; -+ format_dev_t(p->path.pdev, dev); -+ p->is_active = 0; -+ } else { -+ 
ti->error = "error getting device"; -+ goto bad; -+ } -+ } else { -+ memcpy(p->path.pdev, p->path.dev->name, 16); - } - -- if (m->hw_handler_name) { -+ if (m->hw_handler_name && p->path.dev) { - struct request_queue *q = bdev_get_queue(p->path.dev->bdev); - - r = scsi_dh_attach(q, m->hw_handler_name); -@@ -649,6 +679,11 @@ static struct pgpath *parse_path(struct - goto bad; - } - -+ if (!p->is_active) { -+ ps->type->fail_path(ps, &p->path); -+ p->fail_count++; -+ m->nr_valid_paths--; -+ } - return p; - - bad: -@@ -978,7 +1013,7 @@ static int fail_path(struct pgpath *pgpa - if (!pgpath->is_active) - goto out; - -- DMWARN("Failing path %s.", pgpath->path.dev->name); -+ DMWARN("Failing path %s.", pgpath->path.pdev); - - pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); - pgpath->is_active = 0; -@@ -990,7 +1025,7 @@ static int fail_path(struct pgpath *pgpa - m->current_pgpath = NULL; - - dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti, -- pgpath->path.dev->name, m->nr_valid_paths); -+ pgpath->path.pdev, m->nr_valid_paths); - - schedule_work(&m->trigger_event); - queue_work(kmultipathd, &pgpath->deactivate_path); -@@ -1015,6 +1050,12 @@ static int reinstate_path(struct pgpath - if (pgpath->is_active) - goto out; - -+ if (!pgpath->path.dev) { -+ DMWARN("Cannot reinstate disabled path %s", pgpath->path.pdev); -+ r = -ENODEV; -+ goto out; -+ } -+ - if (!pgpath->pg->ps.type->reinstate_path) { - DMWARN("Reinstate path not supported by path selector %s", - pgpath->pg->ps.type->name); -@@ -1037,7 +1078,7 @@ static int reinstate_path(struct pgpath - } - - dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, -- pgpath->path.dev->name, m->nr_valid_paths); -+ pgpath->path.pdev, m->nr_valid_paths); - - schedule_work(&m->trigger_event); - -@@ -1057,6 +1098,9 @@ static int action_dev(struct multipath * - struct pgpath *pgpath; - struct priority_group *pg; - -+ if (!dev) -+ return 0; -+ - list_for_each_entry(pg, &m->priority_groups, list) { - list_for_each_entry(pgpath, 
&pg->pgpaths, list) { - if (pgpath->path.dev == dev) -@@ -1241,8 +1285,9 @@ static void activate_path(struct work_st - struct pgpath *pgpath = - container_of(work, struct pgpath, activate_path); - -- scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), -- pg_init_done, pgpath); -+ if (pgpath->path.dev) -+ scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), -+ pg_init_done, pgpath); - } - - /* -@@ -1426,7 +1471,7 @@ static int multipath_status(struct dm_ta - pg->ps.type->info_args); - - list_for_each_entry(p, &pg->pgpaths, list) { -- DMEMIT("%s %s %u ", p->path.dev->name, -+ DMEMIT("%s %s %u ", p->path.pdev, - p->is_active ? "A" : "F", - p->fail_count); - if (pg->ps.type->status) -@@ -1452,7 +1497,7 @@ static int multipath_status(struct dm_ta - pg->ps.type->table_args); - - list_for_each_entry(p, &pg->pgpaths, list) { -- DMEMIT("%s ", p->path.dev->name); -+ DMEMIT("%s ", p->path.pdev); - if (pg->ps.type->status) - sz += pg->ps.type->status(&pg->ps, - &p->path, type, result + sz, -@@ -1544,7 +1589,7 @@ static int multipath_ioctl(struct dm_tar - if (!m->current_pgpath) - __choose_pgpath(m, 0); - -- if (m->current_pgpath) { -+ if (m->current_pgpath && m->current_pgpath->path.dev) { - bdev = m->current_pgpath->path.dev->bdev; - mode = m->current_pgpath->path.dev->mode; - } ---- a/drivers/md/dm-mpath.h -+++ b/drivers/md/dm-mpath.h -@@ -12,6 +12,7 @@ - struct dm_dev; - - struct dm_path { -+ char pdev[16]; /* Requested physical device */ - struct dm_dev *dev; /* Read-only */ - void *pscontext; /* For path-selector use */ - }; ---- a/drivers/md/dm-table.c -+++ b/drivers/md/dm-table.c -@@ -541,9 +541,12 @@ int dm_get_device(struct dm_target *ti, - */ - void dm_put_device(struct dm_target *ti, struct dm_dev *d) - { -- struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal, -- dm_dev); -+ struct dm_dev_internal *dd; - -+ if (!d) -+ return; -+ -+ dd = container_of(d, struct dm_dev_internal, dm_dev); - if (atomic_dec_and_test(&dd->count)) { - close_dev(dd, 
ti->table->md); - list_del(&dd->list); diff --git a/patches.suse/dm-mpath-detach-existing-hardware-handler b/patches.suse/dm-mpath-detach-existing-hardware-handler deleted file mode 100644 index 1429f05..0000000 --- a/patches.suse/dm-mpath-detach-existing-hardware-handler +++ /dev/null @@ -1,59 +0,0 @@ -From 27d169318cbd6c8647f689e8dcff08920040408a Mon Sep 17 00:00:00 2001 -From: Hannes Reinecke -Date: Thu, 19 Nov 2009 14:39:24 +0100 -Subject: [PATCH] multipath: detach existing hardware handler if none was specified -Patch-mainline: not yet - -When no hardware handler was specified in the multipath configuration -we should be detaching any existing ones. Otherwise unpredictable -results will happen. - -Signed-off-by: Hannes Reinecke ---- - drivers/md/dm-mpath.c | 32 ++++++++++++++++++-------------- - 1 file changed, 18 insertions(+), 14 deletions(-) - ---- a/drivers/md/dm-mpath.c -+++ b/drivers/md/dm-mpath.c -@@ -642,23 +642,27 @@ static struct pgpath *parse_path(struct - memcpy(p->path.pdev, p->path.dev->name, 16); - } - -- if (m->hw_handler_name && p->path.dev) { -+ if (p->path.dev) { - struct request_queue *q = bdev_get_queue(p->path.dev->bdev); - -- r = scsi_dh_attach(q, m->hw_handler_name); -- if (r == -EBUSY) { -- /* -- * Already attached to different hw_handler, -- * try to reattach with correct one. -- */ -- scsi_dh_detach(q); -+ if (m->hw_handler_name) { - r = scsi_dh_attach(q, m->hw_handler_name); -- } -- -- if (r < 0) { -- ti->error = "error attaching hardware handler"; -- dm_put_device(ti, p->path.dev); -- goto bad; -+ if (r == -EBUSY) { -+ /* -+ * Already attached to different hw_handler, -+ * try to reattach with correct one. 
-+ */ -+ scsi_dh_detach(q); -+ r = scsi_dh_attach(q, m->hw_handler_name); -+ } -+ if (r < 0) { -+ ti->error = "error attaching hardware handler"; -+ dm_put_device(ti, p->path.dev); -+ goto bad; -+ } -+ } else { -+ /* Play safe and detach hardware handler */ -+ scsi_dh_detach(q); - } - - if (m->hw_handler_params) { diff --git a/patches.suse/dm-mpath-evaluate-request-result-and-sense b/patches.suse/dm-mpath-evaluate-request-result-and-sense deleted file mode 100644 index 9d91a57..0000000 --- a/patches.suse/dm-mpath-evaluate-request-result-and-sense +++ /dev/null @@ -1,158 +0,0 @@ -From: Hannes Reinecke -Subject: multipath: Evaluate request result and sense code -References: FATE#303695,bnc#433920,bnc#442001 -Patch-mainline: not yet - -Currently we're updating the request result upon completion -only for BLK_PC requests. This makes it impossible for the -upper layers to reliable detect the real cause for an -I/O failure. By attaching the result and the sense to all -requests we can update multipathing to make some more elaborate -choices on how to handle I/O errors. -This also solves a potential data corruption with multipathing -and persistent reservations. When queue_if_no_path is active -multipath will queue any I/O failure (including those failed -with RESERVATION CONFLICT) until the reservation status changes. -But by then I/O might have been ongoing on the other paths, -thus the delayed submission will severely corrupt your data. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/md/dm-mpath.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++++ - drivers/scsi/scsi_lib.c | 28 +++++++++++--------------- - 2 files changed, 63 insertions(+), 16 deletions(-) - ---- a/drivers/md/dm-mpath.c -+++ b/drivers/md/dm-mpath.c -@@ -19,6 +19,7 @@ - #include - #include - #include -+#include - #include - - #define DM_MSG_PREFIX "multipath" -@@ -104,6 +105,7 @@ struct multipath { - struct dm_mpath_io { - struct pgpath *pgpath; - size_t nr_bytes; -+ char sense[SCSI_SENSE_BUFFERSIZE]; - }; - - typedef int (*action_fn) (struct pgpath *pgpath); -@@ -997,6 +999,9 @@ static int multipath_map(struct dm_targe - - map_context->ptr = mpio; - clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; -+ /* Always attach a sense buffer */ -+ if (!clone->sense) -+ clone->sense = mpio->sense; - r = map_io(m, clone, mpio, 0); - if (r < 0 || r == DM_MAPIO_REQUEUE) - mempool_free(mpio, m->mpio_pool); -@@ -1295,6 +1300,44 @@ static void activate_path(struct work_st - } - - /* -+ * Evaluate scsi return code -+ */ -+static int eval_scsi_error(int result, char *sense, int sense_len) -+{ -+ struct scsi_sense_hdr sshdr; -+ int r = DM_ENDIO_REQUEUE; -+ -+ if (host_byte(result) != DID_OK) -+ return r; -+ -+ if (msg_byte(result) != COMMAND_COMPLETE) -+ return r; -+ -+ if (status_byte(result) == RESERVATION_CONFLICT) -+ /* Do not retry here, possible data corruption */ -+ return -EIO; -+ -+#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE) -+ if (status_byte(result) == CHECK_CONDITION && -+ !scsi_normalize_sense(sense, sense_len, &sshdr)) { -+ -+ switch (sshdr.sense_key) { -+ case MEDIUM_ERROR: -+ case DATA_PROTECT: -+ case BLANK_CHECK: -+ case COPY_ABORTED: -+ case VOLUME_OVERFLOW: -+ case MISCOMPARE: -+ r = -EIO; -+ break; -+ } -+ } -+#endif -+ -+ return r; -+} -+ -+/* - * end_io handling - */ - static int do_end_io(struct multipath *m, struct request *clone, -@@ -1320,6 +1363,10 @@ static int do_end_io(struct multipath *m - if (error == 
-EOPNOTSUPP) - return error; - -+ r = eval_scsi_error(clone->errors, clone->sense, clone->sense_len); -+ if (r != DM_ENDIO_REQUEUE) -+ return r; -+ - if (clone->cmd_flags & REQ_DISCARD) - /* - * Pass all discard request failures up. -@@ -1355,6 +1402,10 @@ static int multipath_end_io(struct dm_ta - if (ps->type->end_io) - ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); - } -+ if (clone->sense == mpio->sense) { -+ clone->sense = NULL; -+ clone->sense_len = 0; -+ } - mempool_free(mpio, m->mpio_pool); - - return r; ---- a/drivers/scsi/scsi_lib.c -+++ b/drivers/scsi/scsi_lib.c -@@ -722,23 +722,19 @@ void scsi_io_completion(struct scsi_cmnd - sense_deferred = scsi_sense_is_deferred(&sshdr); - } - -- if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ -- req->errors = result; -- if (result) { -- if (sense_valid && req->sense) { -- /* -- * SG_IO wants current and deferred errors -- */ -- int len = 8 + cmd->sense_buffer[7]; -+ req->errors = result; -+ if (sense_valid && req->sense) { -+ int len = 8 + cmd->sense_buffer[7]; - -- if (len > SCSI_SENSE_BUFFERSIZE) -- len = SCSI_SENSE_BUFFERSIZE; -- memcpy(req->sense, cmd->sense_buffer, len); -- req->sense_len = len; -- } -- if (!sense_deferred) -- error = -EIO; -- } -+ if (len > SCSI_SENSE_BUFFERSIZE) -+ len = SCSI_SENSE_BUFFERSIZE; -+ memcpy(req->sense, cmd->sense_buffer, len); -+ req->sense_len = len; -+ } -+ -+ if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ -+ if ((result) && (!sense_deferred)) -+ error = -EIO; - - req->resid_len = scsi_get_resid(cmd); - diff --git a/patches.suse/dm-mpath-leastpending-path-update b/patches.suse/dm-mpath-leastpending-path-update deleted file mode 100644 index 88d84e4..0000000 --- a/patches.suse/dm-mpath-leastpending-path-update +++ /dev/null @@ -1,301 +0,0 @@ -Subject: Update least-pending-IO dynamic load balancer -From: Hannes Reinecke -Date: Wed Jan 7 09:26:30 2009 +0100: -References: bnc#444199 -Patch-mainline: not yet - -Attached 
patch provides "Least pending IO" dynamic load balancing policy for -bio based device mapper multipath. This load balancing policy considers the -number of unserviced requests pending on a path and selects the path with least -count for pending service request. - -We find this policy more useful especially when the SAN environment has -heterogeneous components. Ex, when there is one 8GB HBA and one 2GB HBA -connected to the same server, 8GB HBA could be utilized better with this -algorithm. - -This patch includes the update as posted in the bugzilla, -based on the review comments received in the dm-devel mailing list. - -Signed-off-by: Sakshi Chaitanya Veni -Signed-off-by: Vijayakumar Balasubramanian -Signed-off-by: Senthil Kumar V -Signed-off-by: Hannes Reinecke - ---- - drivers/md/Makefile | 2 - drivers/md/dm-least-pending.c | 258 ++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 259 insertions(+), 1 deletion(-) - ---- a/drivers/md/Makefile -+++ b/drivers/md/Makefile -@@ -29,7 +29,7 @@ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o - obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o - obj-$(CONFIG_DM_CRYPT) += dm-crypt.o - obj-$(CONFIG_DM_DELAY) += dm-delay.o --obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o -+obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o dm-least-pending.o - obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o - obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o - obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o ---- /dev/null -+++ b/drivers/md/dm-least-pending.c -@@ -0,0 +1,258 @@ -+/* -+ * (C) Copyright 2008 Hewlett-Packard Development Company, L.P -+ * -+ * This file is released under the GPL. 
-+ */ -+ -+#include "dm-path-selector.h" -+ -+#include -+ -+#define DM_MSG_PREFIX "multipath least-pending" -+ -+/*----------------------------------------------------------------- -+* Path-handling code, paths are held in lists -+*---------------------------------------------------------------*/ -+struct path_info { -+ struct list_head list; -+ struct dm_path *path; -+ unsigned repeat_count; -+ atomic_t io_count; -+}; -+ -+static void free_paths(struct list_head *paths) -+{ -+ struct path_info *pi, *next; -+ -+ list_for_each_entry_safe(pi, next, paths, list) { -+ list_del(&pi->list); -+ kfree(pi); -+ } -+} -+ -+/*----------------------------------------------------------------- -+ * Least-pending selector -+ *---------------------------------------------------------------*/ -+ -+#define LPP_MIN_IO 1 -+ -+struct selector { -+ struct list_head valid_paths; -+ struct list_head invalid_paths; -+}; -+ -+static struct selector *alloc_selector(void) -+{ -+ struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL); -+ -+ if (s) { -+ INIT_LIST_HEAD(&s->valid_paths); -+ INIT_LIST_HEAD(&s->invalid_paths); -+ } -+ -+ return s; -+} -+ -+static int lpp_create(struct path_selector *ps, unsigned argc, char **argv) -+{ -+ struct selector *s; -+ -+ s = alloc_selector(); -+ if (!s) -+ return -ENOMEM; -+ -+ ps->context = s; -+ return 0; -+} -+ -+static void lpp_destroy(struct path_selector *ps) -+{ -+ struct selector *s = ps->context; -+ -+ free_paths(&s->valid_paths); -+ free_paths(&s->invalid_paths); -+ kfree(s); -+ ps->context = NULL; -+} -+ -+static int lpp_status(struct path_selector *ps, struct dm_path *path, -+ status_type_t type, char *result, unsigned int maxlen) -+{ -+ struct path_info *pi; -+ int sz = 0; -+ -+ if (!path) -+ switch (type) { -+ case STATUSTYPE_INFO: -+ DMEMIT("1 "); -+ break; -+ case STATUSTYPE_TABLE: -+ DMEMIT("0 "); -+ break; -+ } -+ else { -+ pi = path->pscontext; -+ switch (type) { -+ case STATUSTYPE_INFO: -+ DMEMIT("%u:%u ", pi->repeat_count, -+ 
atomic_read(&pi->io_count)); -+ break; -+ case STATUSTYPE_TABLE: -+ break; -+ } -+ } -+ -+ return sz; -+} -+ -+/* -+ * Called during initialisation to register each path with an -+ * optional repeat_count. -+ */ -+static int lpp_add_path(struct path_selector *ps, struct dm_path *path, -+ int argc, char **argv, char **error) -+{ -+ struct selector *s = ps->context; -+ struct path_info *pi; -+ unsigned repeat_count = LPP_MIN_IO; -+ -+ if (argc > 1) { -+ *error = "least-pending ps: incorrect number of arguments"; -+ return -EINVAL; -+ } -+ -+ /* First path argument is number of I/Os before switching path */ -+ if ((argc == 1) && (sscanf(argv[0], "%u", &repeat_count) != 1)) { -+ *error = "least-pending ps: invalid repeat count"; -+ return -EINVAL; -+ } -+ -+ /* allocate the path */ -+ pi = kmalloc(sizeof(*pi), GFP_KERNEL); -+ if (!pi) { -+ *error = "least-pending ps: Error allocating path context"; -+ return -ENOMEM; -+ } -+ -+ pi->path = path; -+ pi->repeat_count = repeat_count; -+ atomic_set(&pi->io_count, 0); -+ -+ path->pscontext = pi; -+ -+ list_add(&pi->list, &s->valid_paths); -+ -+ return 0; -+} -+ -+static void lpp_fail_path(struct path_selector *ps, struct dm_path *p) -+{ -+ struct selector *s = ps->context; -+ struct path_info *pi = p->pscontext; -+ -+ if (!pi) -+ return; -+ -+ atomic_set(&pi->io_count, 0); -+ -+ list_move(&pi->list, &s->invalid_paths); -+} -+ -+static int lpp_reinstate_path(struct path_selector *ps, struct dm_path *p) -+{ -+ struct selector *s = ps->context; -+ struct path_info *pi = p->pscontext; -+ -+ if (!pi) -+ return 1; -+ -+ list_move(&pi->list, &s->valid_paths); -+ -+ return 0; -+} -+ -+static struct dm_path *lpp_select_path(struct path_selector *ps, -+ unsigned *repeat_count, -+ size_t nr_bytes) -+{ -+ struct selector *s = ps->context; -+ struct path_info *pi, *next, *least_io_path = NULL; -+ struct list_head *paths; -+ -+ if (list_empty(&s->valid_paths)) -+ return NULL; -+ -+ paths = &s->valid_paths; -+ -+ 
list_for_each_entry_safe(pi, next, paths, list) { -+ if (!least_io_path || atomic_read(&least_io_path->io_count) < atomic_read(&pi->io_count)) -+ least_io_path = pi; -+ if (!atomic_read(&least_io_path->io_count)) -+ break; -+ } -+ -+ if (!least_io_path) -+ return NULL; -+ -+ atomic_inc(&least_io_path->io_count); -+ *repeat_count = least_io_path->repeat_count; -+ -+ return least_io_path->path; -+} -+ -+static int lpp_end_io(struct path_selector *ps, struct dm_path *path, -+ size_t nr_bytes) -+{ -+ struct path_info *pi = NULL; -+ -+ pi = path->pscontext; -+ if (!pi) -+ return 1; -+ -+ atomic_dec(&pi->io_count); -+ -+ return 0; -+} -+ -+static struct path_selector_type lpp_ps = { -+ .name = "least-pending", -+ .module = THIS_MODULE, -+ .table_args = 1, -+ .info_args = 0, -+ .create = lpp_create, -+ .destroy = lpp_destroy, -+ .status = lpp_status, -+ .add_path = lpp_add_path, -+ .fail_path = lpp_fail_path, -+ .reinstate_path = lpp_reinstate_path, -+ .select_path = lpp_select_path, -+ .end_io = lpp_end_io, -+}; -+ -+static int __init dm_lpp_init(void) -+{ -+ int r = dm_register_path_selector(&lpp_ps); -+ -+ if (r < 0) -+ DMERR("register failed %d", r); -+ -+ DMINFO("version 1.0.0 loaded"); -+ -+ return r; -+} -+ -+static void __exit dm_lpp_exit(void) -+{ -+ int r = dm_unregister_path_selector(&lpp_ps); -+ -+ if (r < 0) -+ DMERR("unregister failed %d", r); -+} -+ -+module_init(dm_lpp_init); -+module_exit(dm_lpp_exit); -+ -+MODULE_DESCRIPTION(DM_NAME " least-pending multipath path selector"); -+MODULE_AUTHOR("Sakshi Chaitanya Veni "); -+MODULE_LICENSE("GPL"); -+ diff --git a/patches.suse/dm-mpath-no-activate-for-offlined-paths b/patches.suse/dm-mpath-no-activate-for-offlined-paths deleted file mode 100644 index cc885dd..0000000 --- a/patches.suse/dm-mpath-no-activate-for-offlined-paths +++ /dev/null @@ -1,85 +0,0 @@ -From: Hannes Reinecke -Subject: DM-MPIO fails to tresspass LUNs on CLARiiON arrays -Reference: bnc#484529 -Patch-mainline: Not yet - -On Clariion arrays we 
fail to send the trespass command correctly. -We're trying to send the trespass command to via an disabled path, -causing the device handler to loop trying to send the command on -an invalid path. - -Signed-off-by: Hannes Reinecke - ---- - drivers/md/dm-mpath.c | 12 +++++++++--- - drivers/md/dm-table.c | 12 ++++++++---- - 2 files changed, 17 insertions(+), 7 deletions(-) - ---- a/drivers/md/dm-mpath.c -+++ b/drivers/md/dm-mpath.c -@@ -1230,8 +1230,9 @@ static void pg_init_done(void *data, int - errors = 0; - break; - } -- DMERR("Could not failover the device: Handler scsi_dh_%s " -- "Error %d.", m->hw_handler_name, errors); -+ DMERR("Count not failover device %s: Handler scsi_dh_%s " -+ "was not loaded.", pgpath->path.pdev, -+ m->hw_handler_name); - /* - * Fail path for now, so we do not ping pong - */ -@@ -1244,6 +1245,10 @@ static void pg_init_done(void *data, int - */ - bypass_pg(m, pg, 1); - break; -+ case SCSI_DH_DEV_OFFLINED: -+ DMWARN("Device %s offlined.", pgpath->path.pdev); -+ errors = 0; -+ break; - case SCSI_DH_RETRY: - /* Wait before retrying. */ - delay_retry = 1; -@@ -1264,7 +1269,8 @@ static void pg_init_done(void *data, int - spin_lock_irqsave(&m->lock, flags); - if (errors) { - if (pgpath == m->current_pgpath) { -- DMERR("Could not failover device. 
Error %d.", errors); -+ DMERR("Could not failover device %s, error %d.", -+ pgpath->path.pdev, errors); - m->current_pgpath = NULL; - m->current_pg = NULL; - } ---- a/drivers/md/dm-table.c -+++ b/drivers/md/dm-table.c -@@ -414,14 +414,18 @@ static int upgrade_mode(struct dm_dev_in - - dd_new = dd_old = *dd; - -- dd_new.dm_dev.mode |= new_mode; -+ dd_new.dm_dev.mode = new_mode; - dd_new.dm_dev.bdev = NULL; - - r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md); -- if (r) -+ if (r == -EROFS) { -+ dd_new.dm_dev.mode &= ~FMODE_WRITE; -+ r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md); -+ } -+ if (!r) - return r; - -- dd->dm_dev.mode |= new_mode; -+ dd->dm_dev.mode = new_mode; - close_dev(&dd_old, md); - - return 0; -@@ -483,7 +487,7 @@ static int __table_get_device(struct dm_ - atomic_set(&dd->count, 0); - list_add(&dd->list, &t->devices); - -- } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) { -+ } else if (dd->dm_dev.mode != mode) { - r = upgrade_mode(dd, mode, t->md); - if (r) - return r; diff --git a/patches.suse/dm-mpath-no-partitions-feature b/patches.suse/dm-mpath-no-partitions-feature deleted file mode 100644 index a12a696..0000000 --- a/patches.suse/dm-mpath-no-partitions-feature +++ /dev/null @@ -1,67 +0,0 @@ -From: Hannes Reinecke -Subject: Disable partitions scan for multipathed devices -References: bnc#402922,bnc#514767 -Patch-mainline: not yet - -When multipath devices are being used as disks for VM Guests -any partition scanning / setup should be done within the VM Guest, -not from host. So we need to switch off partitions scanning via -kpartx there. -For this I've implemented a new feature 'no_partitions' which -just serves as a notifier to kpartx to _not_ create partitions -on these devices. - -Patch ported to SLES11. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/md/dm-mpath.c | 12 +++++++++++- - 1 file changed, 11 insertions(+), 1 deletion(-) - ---- a/drivers/md/dm-mpath.c -+++ b/drivers/md/dm-mpath.c -@@ -57,6 +57,8 @@ struct priority_group { - struct list_head pgpaths; - }; - -+#define FEATURE_NO_PARTITIONS 1 -+ - /* Multipath context */ - struct multipath { - struct list_head list; -@@ -83,6 +85,7 @@ struct multipath { - unsigned pg_init_retries; /* Number of times to retry pg_init */ - unsigned pg_init_count; /* Number of times pg_init called */ - unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ -+ unsigned features; /* Additional selected features */ - - struct work_struct process_queued_ios; - struct list_head queued_ios; -@@ -852,6 +855,10 @@ static int parse_features(struct arg_set - continue; - } - -+ if (!strnicmp(param_name, MESG_STR("no_partitions"))) { -+ m->features |= FEATURE_NO_PARTITIONS; -+ continue; -+ } - if (!strnicmp(param_name, MESG_STR("pg_init_retries")) && - (argc >= 1)) { - r = read_param(_params + 1, shift(as), -@@ -1486,11 +1493,14 @@ static int multipath_status(struct dm_ta - else { - DMEMIT("%u ", m->queue_if_no_path + - (m->pg_init_retries > 0) * 2 + -- (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2); -+ (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 + -+ (m->features & FEATURE_NO_PARTITIONS)); - if (m->queue_if_no_path) - DMEMIT("queue_if_no_path "); - if (m->pg_init_retries) - DMEMIT("pg_init_retries %u ", m->pg_init_retries); -+ if (m->features & FEATURE_NO_PARTITIONS) -+ DMEMIT("no_partitions "); - if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) - DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); - } diff --git a/patches.suse/dm-mpath-null-pgs b/patches.suse/dm-mpath-null-pgs deleted file mode 100644 index 09902d0..0000000 --- a/patches.suse/dm-mpath-null-pgs +++ /dev/null @@ -1,27 +0,0 @@ -From: Hannes Reinecke -Subject: Allow zero paths for multipath priority groups 
-References: bnc#372684 -Patch-mainline: not yet - -For correct handling of the all-paths-down scenario we have to -allow zero paths as a valid argument for priority groups. - -Signed-off-by: Hannes Reinecke - ---- - drivers/md/dm-mpath.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/md/dm-mpath.c -+++ b/drivers/md/dm-mpath.c -@@ -870,8 +870,8 @@ static int multipath_ctr(struct dm_targe - { - /* target parameters */ - static struct param _params[] = { -- {1, 1024, "invalid number of priority groups"}, -- {1, 1024, "invalid initial priority group number"}, -+ {0, 1024, "invalid number of priority groups"}, -+ {0, 1024, "invalid initial priority group number"}, - }; - - int r; diff --git a/patches.suse/dm-raid45-26-Nov-2009.patch b/patches.suse/dm-raid45-26-Nov-2009.patch deleted file mode 100644 index fe0af4f..0000000 --- a/patches.suse/dm-raid45-26-Nov-2009.patch +++ /dev/null @@ -1,5286 +0,0 @@ -From: Heinz Mauelshagen -Subject: DMRAID45 module -Patch-mainline: No. https://www.redhat.com/archives/dm-devel/2009-November/msg00270.html -References: bnc#615906 - bnc#565962 - -X-URL: http://people.redhat.com/~heinzm/sw/dm/dm-raid45/ -Patch-mainline: No. https://www.redhat.com/archives/dm-devel/2009-November/msg00270.html -Date: Thu, 26 Nov 2009 14:45:12 +0100 - - DM-RAID 45 module. - - This driver is used for "Fake RAID" devices. - -Signed-off-by: Heinz Mauelshagen -Signed-off-by: Nikanth Karthikesan - ---- - drivers/md/Kconfig | 9 - drivers/md/Makefile | 1 - drivers/md/dm-memcache.c | 302 ++ - drivers/md/dm-memcache.h | 68 - drivers/md/dm-raid45.c | 4721 +++++++++++++++++++++++++++++++++++++++++ - drivers/md/dm-raid45.h | 30 - drivers/md/dm-region-hash.c | 21 - drivers/md/dm.c | 1 - include/linux/dm-region-hash.h | 4 - 9 files changed, 5154 insertions(+), 3 deletions(-) - ---- a/drivers/md/Kconfig -+++ b/drivers/md/Kconfig -@@ -321,6 +321,15 @@ config DM_DELAY - - If unsure, say N. 
- -+config DM_RAID45 -+ tristate "RAID 4/5 target (EXPERIMENTAL)" -+ depends on BLK_DEV_DM && EXPERIMENTAL -+ select ASYNC_XOR -+ ---help--- -+ A target that supports RAID4 and RAID5 mappings. -+ -+ If unsure, say N. -+ - config DM_UEVENT - bool "DM uevents (EXPERIMENTAL)" - depends on BLK_DEV_DM && EXPERIMENTAL ---- a/drivers/md/Makefile -+++ b/drivers/md/Makefile -@@ -37,6 +37,7 @@ obj-$(CONFIG_DM_MIRROR) += dm-mirror.o - obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o - obj-$(CONFIG_DM_ZERO) += dm-zero.o - obj-$(CONFIG_DM_RAID) += dm-raid.o -+obj-$(CONFIG_DM_RAID45) += dm-raid45.o dm-log.o dm-memcache.o - - ifeq ($(CONFIG_DM_UEVENT),y) - dm-mod-objs += dm-uevent.o ---- /dev/null -+++ b/drivers/md/dm-memcache.c -@@ -0,0 +1,302 @@ -+/* -+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. -+ * -+ * Module Author: Heinz Mauelshagen -+ * -+ * Device-mapper memory object handling: -+ * -+ * o allocate/free total_pages in a per client page pool. -+ * -+ * o allocate/free memory objects with chunks (1..n) of -+ * pages_per_chunk pages hanging off. -+ * -+ * This file is released under the GPL. -+ */ -+ -+#define DM_MEM_CACHE_VERSION "0.2" -+ -+#include "dm.h" -+#include "dm-memcache.h" -+#include -+#include -+ -+struct dm_mem_cache_client { -+ spinlock_t lock; -+ mempool_t *objs_pool; -+ struct page_list *free_list; -+ unsigned objects; -+ unsigned chunks; -+ unsigned pages_per_chunk; -+ unsigned free_pages; -+ unsigned total_pages; -+}; -+ -+/* -+ * Free pages and page_list elements of client. -+ */ -+static void free_cache_pages(struct page_list *list) -+{ -+ while (list) { -+ struct page_list *pl = list; -+ -+ list = pl->next; -+ BUG_ON(!pl->page); -+ __free_page(pl->page); -+ kfree(pl); -+ } -+} -+ -+/* -+ * Alloc number of pages and page_list elements as required by client. 
-+ */ -+static struct page_list *alloc_cache_pages(unsigned pages) -+{ -+ struct page_list *pl, *ret = NULL; -+ struct page *page; -+ -+ while (pages--) { -+ page = alloc_page(GFP_NOIO); -+ if (!page) -+ goto err; -+ -+ pl = kmalloc(sizeof(*pl), GFP_NOIO); -+ if (!pl) { -+ __free_page(page); -+ goto err; -+ } -+ -+ pl->page = page; -+ pl->next = ret; -+ ret = pl; -+ } -+ -+ return ret; -+ -+err: -+ free_cache_pages(ret); -+ return NULL; -+} -+ -+/* -+ * Allocate page_list elements from the pool to chunks of the memory object. -+ */ -+static void alloc_chunks(struct dm_mem_cache_client *cl, -+ struct dm_mem_cache_object *obj) -+{ -+ unsigned chunks = cl->chunks; -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ local_irq_disable(); -+ while (chunks--) { -+ unsigned p = cl->pages_per_chunk; -+ -+ obj[chunks].pl = NULL; -+ -+ while (p--) { -+ struct page_list *pl; -+ -+ /* Take next element from free list */ -+ spin_lock(&cl->lock); -+ pl = cl->free_list; -+ BUG_ON(!pl); -+ cl->free_list = pl->next; -+ spin_unlock(&cl->lock); -+ -+ pl->next = obj[chunks].pl; -+ obj[chunks].pl = pl; -+ } -+ } -+ -+ local_irq_restore(flags); -+} -+ -+/* -+ * Free page_list elements putting them back onto free list -+ */ -+static void free_chunks(struct dm_mem_cache_client *cl, -+ struct dm_mem_cache_object *obj) -+{ -+ unsigned chunks = cl->chunks; -+ unsigned long flags; -+ struct page_list *next, *pl; -+ -+ local_irq_save(flags); -+ local_irq_disable(); -+ while (chunks--) { -+ for (pl = obj[chunks].pl; pl; pl = next) { -+ next = pl->next; -+ -+ spin_lock(&cl->lock); -+ pl->next = cl->free_list; -+ cl->free_list = pl; -+ cl->free_pages++; -+ spin_unlock(&cl->lock); -+ } -+ } -+ -+ local_irq_restore(flags); -+} -+ -+/* -+ * Create/destroy dm memory cache client resources. 
-+ */ -+struct dm_mem_cache_client * -+dm_mem_cache_client_create(unsigned objects, unsigned chunks, -+ unsigned pages_per_chunk) -+{ -+ unsigned total_pages = objects * chunks * pages_per_chunk; -+ struct dm_mem_cache_client *client; -+ -+ BUG_ON(!total_pages); -+ client = kzalloc(sizeof(*client), GFP_KERNEL); -+ if (!client) -+ return ERR_PTR(-ENOMEM); -+ -+ client->objs_pool = mempool_create_kmalloc_pool(objects, -+ chunks * sizeof(struct dm_mem_cache_object)); -+ if (!client->objs_pool) -+ goto err; -+ -+ client->free_list = alloc_cache_pages(total_pages); -+ if (!client->free_list) -+ goto err1; -+ -+ spin_lock_init(&client->lock); -+ client->objects = objects; -+ client->chunks = chunks; -+ client->pages_per_chunk = pages_per_chunk; -+ client->free_pages = client->total_pages = total_pages; -+ return client; -+ -+err1: -+ mempool_destroy(client->objs_pool); -+err: -+ kfree(client); -+ return ERR_PTR(-ENOMEM); -+} -+EXPORT_SYMBOL(dm_mem_cache_client_create); -+ -+void dm_mem_cache_client_destroy(struct dm_mem_cache_client *cl) -+{ -+ BUG_ON(cl->free_pages != cl->total_pages); -+ free_cache_pages(cl->free_list); -+ mempool_destroy(cl->objs_pool); -+ kfree(cl); -+} -+EXPORT_SYMBOL(dm_mem_cache_client_destroy); -+ -+/* -+ * Grow a clients cache by an amount of pages. -+ * -+ * Don't call from interrupt context! 
-+ */ -+int dm_mem_cache_grow(struct dm_mem_cache_client *cl, unsigned objects) -+{ -+ unsigned pages = objects * cl->chunks * cl->pages_per_chunk; -+ struct page_list *pl, *last; -+ -+ BUG_ON(!pages); -+ pl = alloc_cache_pages(pages); -+ if (!pl) -+ return -ENOMEM; -+ -+ last = pl; -+ while (last->next) -+ last = last->next; -+ -+ spin_lock_irq(&cl->lock); -+ last->next = cl->free_list; -+ cl->free_list = pl; -+ cl->free_pages += pages; -+ cl->total_pages += pages; -+ cl->objects += objects; -+ spin_unlock_irq(&cl->lock); -+ -+ mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO); -+ return 0; -+} -+EXPORT_SYMBOL(dm_mem_cache_grow); -+ -+/* Shrink a clients cache by an amount of pages */ -+int dm_mem_cache_shrink(struct dm_mem_cache_client *cl, unsigned objects) -+{ -+ int r; -+ unsigned pages = objects * cl->chunks * cl->pages_per_chunk, p = pages; -+ unsigned long flags; -+ struct page_list *last = NULL, *pl, *pos; -+ -+ BUG_ON(!pages); -+ -+ spin_lock_irqsave(&cl->lock, flags); -+ pl = pos = cl->free_list; -+ while (p-- && pos->next) { -+ last = pos; -+ pos = pos->next; -+ } -+ -+ if (++p) -+ r = -ENOMEM; -+ else { -+ r = 0; -+ cl->free_list = pos; -+ cl->free_pages -= pages; -+ cl->total_pages -= pages; -+ cl->objects -= objects; -+ last->next = NULL; -+ } -+ spin_unlock_irqrestore(&cl->lock, flags); -+ -+ if (!r) { -+ free_cache_pages(pl); -+ mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO); -+ } -+ -+ return r; -+} -+EXPORT_SYMBOL(dm_mem_cache_shrink); -+ -+/* -+ * Allocate/free a memory object -+ * -+ * Can be called from interrupt context -+ */ -+struct dm_mem_cache_object *dm_mem_cache_alloc(struct dm_mem_cache_client *cl) -+{ -+ int r = 0; -+ unsigned pages = cl->chunks * cl->pages_per_chunk; -+ unsigned long flags; -+ struct dm_mem_cache_object *obj; -+ -+ obj = mempool_alloc(cl->objs_pool, GFP_NOIO); -+ if (!obj) -+ return ERR_PTR(-ENOMEM); -+ -+ spin_lock_irqsave(&cl->lock, flags); -+ if (pages > cl->free_pages) -+ r = -ENOMEM; -+ else -+ 
cl->free_pages -= pages; -+ spin_unlock_irqrestore(&cl->lock, flags); -+ -+ if (r) { -+ mempool_free(obj, cl->objs_pool); -+ return ERR_PTR(r); -+ } -+ -+ alloc_chunks(cl, obj); -+ return obj; -+} -+EXPORT_SYMBOL(dm_mem_cache_alloc); -+ -+void dm_mem_cache_free(struct dm_mem_cache_client *cl, -+ struct dm_mem_cache_object *obj) -+{ -+ free_chunks(cl, obj); -+ mempool_free(obj, cl->objs_pool); -+} -+EXPORT_SYMBOL(dm_mem_cache_free); -+ -+MODULE_DESCRIPTION(DM_NAME " dm memory cache"); -+MODULE_AUTHOR("Heinz Mauelshagen "); -+MODULE_LICENSE("GPL"); ---- /dev/null -+++ b/drivers/md/dm-memcache.h -@@ -0,0 +1,68 @@ -+/* -+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. -+ * -+ * Module Author: Heinz Mauelshagen -+ * -+ * Device-mapper memory object handling: -+ * -+ * o allocate/free total_pages in a per client page pool. -+ * -+ * o allocate/free memory objects with chunks (1..n) of -+ * pages_per_chunk pages hanging off. -+ * -+ * This file is released under the GPL. -+ */ -+ -+#ifndef _DM_MEM_CACHE_H -+#define _DM_MEM_CACHE_H -+ -+#define DM_MEM_CACHE_H_VERSION "0.1" -+ -+#include "dm.h" -+#include -+ -+static inline struct page_list *pl_elem(struct page_list *pl, unsigned p) -+{ -+ while (pl && p--) -+ pl = pl->next; -+ -+ return pl; -+} -+ -+struct dm_mem_cache_object { -+ struct page_list *pl; /* Dynamically allocated array */ -+ void *private; /* Caller context reference */ -+}; -+ -+struct dm_mem_cache_client; -+ -+/* -+ * Create/destroy dm memory cache client resources. -+ * -+ * On creation, a number of @objects with @chunks of -+ * @pages_per_chunk pages will be allocated. -+ */ -+struct dm_mem_cache_client * -+dm_mem_cache_client_create(unsigned objects, unsigned chunks, -+ unsigned pages_per_chunk); -+void dm_mem_cache_client_destroy(struct dm_mem_cache_client *client); -+ -+/* -+ * Grow/shrink a dm memory cache client resources -+ * by @objetcs amount of objects. 
-+ */ -+int dm_mem_cache_grow(struct dm_mem_cache_client *client, unsigned objects); -+int dm_mem_cache_shrink(struct dm_mem_cache_client *client, unsigned objects); -+ -+/* -+ * Allocate/free a memory object -+ * -+ * On allocation one object with an amount of chunks and -+ * an amount of pages per chunk will be returned on success. -+ */ -+struct dm_mem_cache_object * -+dm_mem_cache_alloc(struct dm_mem_cache_client *client); -+void dm_mem_cache_free(struct dm_mem_cache_client *client, -+ struct dm_mem_cache_object *object); -+ -+#endif ---- /dev/null -+++ b/drivers/md/dm-raid45.c -@@ -0,0 +1,4721 @@ -+/* -+ * Copyright (C) 2005-2009 Red Hat, Inc. All rights reserved. -+ * -+ * Module Author: Heinz Mauelshagen -+ * -+ * This file is released under the GPL. -+ * -+ * -+ * Linux 2.6 Device Mapper RAID4 and RAID5 target. -+ * -+ * Tested-by: Intel; Marcin.Labun@intel.com, krzysztof.wojcik@intel.com -+ * -+ * -+ * Supports the following ATARAID vendor solutions (and SNIA DDF): -+ * -+ * Adaptec HostRAID ASR -+ * SNIA DDF1 -+ * Hiphpoint 37x -+ * Hiphpoint 45x -+ * Intel IMSM -+ * Jmicron ATARAID -+ * LSI Logic MegaRAID -+ * NVidia RAID -+ * Promise FastTrack -+ * Silicon Image Medley -+ * VIA Software RAID -+ * -+ * via the dmraid application. -+ * -+ * -+ * Features: -+ * -+ * o RAID4 with dedicated and selectable parity device -+ * o RAID5 with rotating parity (left+right, symmetric+asymmetric) -+ * o recovery of out of sync device for initial -+ * RAID set creation or after dead drive replacement -+ * o run time optimization of xor algorithm used to calculate parity -+ * -+ * -+ * Thanks to MD for: -+ * o the raid address calculation algorithm -+ * o the base of the biovec <-> page list copier. 
-+ * -+ * -+ * Uses region hash to keep track of how many writes are in flight to -+ * regions in order to use dirty log to keep state of regions to recover: -+ * -+ * o clean regions (those which are synchronized -+ * and don't have write io in flight) -+ * o dirty regions (those with write io in flight) -+ * -+ * -+ * On startup, any dirty regions are migrated to the -+ * 'nosync' state and are subject to recovery by the daemon. -+ * -+ * See raid_ctr() for table definition. -+ * -+ * ANALYZEME: recovery bandwidth -+ */ -+ -+static const char *version = "v0.2597k"; -+ -+#include "dm.h" -+#include "dm-memcache.h" -+#include "dm-raid45.h" -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+ -+/* -+ * Configurable parameters -+ */ -+ -+/* Minimum/maximum and default # of selectable stripes. */ -+#define STRIPES_MIN 8 -+#define STRIPES_MAX 16384 -+#define STRIPES_DEFAULT 80 -+ -+/* Maximum and default chunk size in sectors if not set in constructor. */ -+#define CHUNK_SIZE_MIN 8 -+#define CHUNK_SIZE_MAX 16384 -+#define CHUNK_SIZE_DEFAULT 64 -+ -+/* Default io size in sectors if not set in constructor. */ -+#define IO_SIZE_MIN CHUNK_SIZE_MIN -+#define IO_SIZE_DEFAULT IO_SIZE_MIN -+ -+/* Recover io size default in sectors. */ -+#define RECOVER_IO_SIZE_MIN 64 -+#define RECOVER_IO_SIZE_DEFAULT 256 -+ -+/* Default, minimum and maximum percentage of recover io bandwidth. */ -+#define BANDWIDTH_DEFAULT 10 -+#define BANDWIDTH_MIN 1 -+#define BANDWIDTH_MAX 100 -+ -+/* # of parallel recovered regions */ -+#define RECOVERY_STRIPES_MIN 1 -+#define RECOVERY_STRIPES_MAX 64 -+#define RECOVERY_STRIPES_DEFAULT RECOVERY_STRIPES_MIN -+/* -+ * END Configurable parameters -+ */ -+ -+#define TARGET "dm-raid45" -+#define DAEMON "kraid45d" -+#define DM_MSG_PREFIX TARGET -+ -+#define SECTORS_PER_PAGE (PAGE_SIZE >> SECTOR_SHIFT) -+ -+/* Amount/size for __xor(). */ -+#define XOR_SIZE PAGE_SIZE -+ -+/* Ticks to run xor_speed() test for. 
*/ -+#define XOR_SPEED_TICKS 5 -+ -+/* Check value in range. */ -+#define range_ok(i, min, max) (i >= min && i <= max) -+ -+/* Structure access macros. */ -+/* Derive raid_set from stripe_cache pointer. */ -+#define RS(x) container_of(x, struct raid_set, sc) -+ -+/* Page reference. */ -+#define PAGE(stripe, p) ((stripe)->obj[p].pl->page) -+ -+/* Stripe chunk reference. */ -+#define CHUNK(stripe, p) ((stripe)->chunk + p) -+ -+/* Bio list reference. */ -+#define BL(stripe, p, rw) (stripe->chunk[p].bl + rw) -+#define BL_CHUNK(chunk, rw) (chunk->bl + rw) -+ -+/* Page list reference. */ -+#define PL(stripe, p) (stripe->obj[p].pl) -+/* END: structure access macros. */ -+ -+/* Factor out to dm-bio-list.h */ -+static inline void bio_list_push(struct bio_list *bl, struct bio *bio) -+{ -+ bio->bi_next = bl->head; -+ bl->head = bio; -+ -+ if (!bl->tail) -+ bl->tail = bio; -+} -+ -+/* Factor out to dm.h */ -+#define TI_ERR_RET(str, ret) \ -+ do { ti->error = str; return ret; } while (0); -+#define TI_ERR(str) TI_ERR_RET(str, -EINVAL) -+ -+/* Macro to define access IO flags access inline functions. 
*/ -+#define BITOPS(name, what, var, flag) \ -+static inline int TestClear ## name ## what(struct var *v) \ -+{ return test_and_clear_bit(flag, &v->io.flags); } \ -+static inline int TestSet ## name ## what(struct var *v) \ -+{ return test_and_set_bit(flag, &v->io.flags); } \ -+static inline void Clear ## name ## what(struct var *v) \ -+{ clear_bit(flag, &v->io.flags); } \ -+static inline void Set ## name ## what(struct var *v) \ -+{ set_bit(flag, &v->io.flags); } \ -+static inline int name ## what(struct var *v) \ -+{ return test_bit(flag, &v->io.flags); } -+ -+/*----------------------------------------------------------------- -+ * Stripe cache -+ * -+ * Cache for all reads and writes to raid sets (operational or degraded) -+ * -+ * We need to run all data to and from a RAID set through this cache, -+ * because parity chunks need to get calculated from data chunks -+ * or, in the degraded/resynchronization case, missing chunks need -+ * to be reconstructed using the other chunks of the stripe. -+ *---------------------------------------------------------------*/ -+/* Unique kmem cache name suffix # counter. */ -+static atomic_t _stripe_sc_nr = ATOMIC_INIT(-1); /* kmem cache # counter. */ -+ -+/* A chunk within a stripe (holds bios hanging off). */ -+/* IO status flags for chunks of a stripe. */ -+enum chunk_flags { -+ CHUNK_DIRTY, /* Pages of chunk dirty; need writing. */ -+ CHUNK_ERROR, /* IO error on any chunk page. */ -+ CHUNK_IO, /* Allow/prohibit IO on chunk pages. */ -+ CHUNK_LOCKED, /* Chunk pages locked during IO. */ -+ CHUNK_MUST_IO, /* Chunk must io. */ -+ CHUNK_UNLOCK, /* Enforce chunk unlock. */ -+ CHUNK_UPTODATE, /* Chunk pages are uptodate. */ -+}; -+ -+#if READ != 0 || WRITE != 1 -+#error dm-raid45: READ/WRITE != 0/1 used as index!!! -+#endif -+ -+enum bl_type { -+ WRITE_QUEUED = WRITE + 1, -+ WRITE_MERGED, -+ NR_BL_TYPES, /* Must be last one! */ -+}; -+struct stripe_chunk { -+ atomic_t cnt; /* Reference count. 
*/ -+ struct stripe *stripe; /* Backpointer to stripe for endio(). */ -+ /* Bio lists for reads, writes, and writes merged. */ -+ struct bio_list bl[NR_BL_TYPES]; -+ struct { -+ unsigned long flags; /* IO status flags. */ -+ } io; -+}; -+ -+/* Define chunk bit operations. */ -+BITOPS(Chunk, Dirty, stripe_chunk, CHUNK_DIRTY) -+BITOPS(Chunk, Error, stripe_chunk, CHUNK_ERROR) -+BITOPS(Chunk, Io, stripe_chunk, CHUNK_IO) -+BITOPS(Chunk, Locked, stripe_chunk, CHUNK_LOCKED) -+BITOPS(Chunk, MustIo, stripe_chunk, CHUNK_MUST_IO) -+BITOPS(Chunk, Unlock, stripe_chunk, CHUNK_UNLOCK) -+BITOPS(Chunk, Uptodate, stripe_chunk, CHUNK_UPTODATE) -+ -+/* -+ * Stripe linked list indexes. Keep order, because the stripe -+ * and the stripe cache rely on the first 3! -+ */ -+enum list_types { -+ LIST_FLUSH, /* Stripes to flush for io. */ -+ LIST_ENDIO, /* Stripes to endio. */ -+ LIST_LRU, /* Least recently used stripes. */ -+ SC_NR_LISTS, /* # of lists in stripe cache. */ -+ LIST_HASH = SC_NR_LISTS, /* Hashed stripes. */ -+ LIST_RECOVER = LIST_HASH, /* For recovery type stripes only. */ -+ STRIPE_NR_LISTS,/* To size array in struct stripe. */ -+}; -+ -+/* Adressing region recovery. */ -+struct recover_addr { -+ struct dm_region *reg; /* Actual region to recover. */ -+ sector_t pos; /* Position within region to recover. */ -+ sector_t end; /* End of region to recover. */ -+}; -+ -+/* A stripe: the io object to handle all reads and writes to a RAID set. */ -+struct stripe { -+ atomic_t cnt; /* Reference count. */ -+ struct stripe_cache *sc; /* Backpointer to stripe cache. */ -+ -+ /* -+ * 4 linked lists: -+ * o io list to flush io -+ * o endio list -+ * o LRU list to put stripes w/o reference count on -+ * o stripe cache hash -+ */ -+ struct list_head lists[STRIPE_NR_LISTS]; -+ -+ sector_t key; /* Hash key. */ -+ region_t region; /* Region stripe is mapped to. */ -+ -+ struct { -+ unsigned long flags; /* Stripe state flags (see below). 
*/ -+ -+ /* -+ * Pending ios in flight: -+ * -+ * used to control move of stripe to endio list -+ */ -+ atomic_t pending; -+ -+ /* Sectors to read and write for multi page stripe sets. */ -+ unsigned size; -+ } io; -+ -+ /* Address region recovery. */ -+ struct recover_addr *recover; -+ -+ /* Lock on stripe (Future: for clustering). */ -+ void *lock; -+ -+ struct { -+ unsigned short parity; /* Parity chunk index. */ -+ short recover; /* Recovery chunk index. */ -+ } idx; -+ -+ /* -+ * This stripe's memory cache object (dm-mem-cache); -+ * i.e. the io chunk pages. -+ */ -+ struct dm_mem_cache_object *obj; -+ -+ /* Array of stripe sets (dynamically allocated). */ -+ struct stripe_chunk chunk[0]; -+}; -+ -+/* States stripes can be in (flags field). */ -+enum stripe_states { -+ STRIPE_ERROR, /* io error on stripe. */ -+ STRIPE_MERGED, /* Writes got merged to be written. */ -+ STRIPE_RBW, /* Read-before-write stripe. */ -+ STRIPE_RECONSTRUCT, /* Reconstruct of a missing chunk required. */ -+ STRIPE_RECONSTRUCTED, /* Reconstructed of a missing chunk. */ -+ STRIPE_RECOVER, /* Stripe used for RAID set recovery. */ -+}; -+ -+/* Define stripe bit operations. */ -+BITOPS(Stripe, Error, stripe, STRIPE_ERROR) -+BITOPS(Stripe, Merged, stripe, STRIPE_MERGED) -+BITOPS(Stripe, RBW, stripe, STRIPE_RBW) -+BITOPS(Stripe, Reconstruct, stripe, STRIPE_RECONSTRUCT) -+BITOPS(Stripe, Reconstructed, stripe, STRIPE_RECONSTRUCTED) -+BITOPS(Stripe, Recover, stripe, STRIPE_RECOVER) -+ -+/* A stripe hash. */ -+struct stripe_hash { -+ struct list_head *hash; -+ unsigned buckets; -+ unsigned mask; -+ unsigned prime; -+ unsigned shift; -+}; -+ -+enum sc_lock_types { -+ LOCK_ENDIO, /* Protect endio list. */ -+ NR_LOCKS, /* To size array in struct stripe_cache. */ -+}; -+ -+/* A stripe cache. */ -+struct stripe_cache { -+ /* Stripe hash. */ -+ struct stripe_hash hash; -+ -+ spinlock_t locks[NR_LOCKS]; /* Locks to protect lists. */ -+ -+ /* Stripes with io to flush, stripes to endio and LRU lists. 
*/ -+ struct list_head lists[SC_NR_LISTS]; -+ -+ /* Slab cache to allocate stripes from. */ -+ struct { -+ struct kmem_cache *cache; /* Cache itself. */ -+ char name[32]; /* Unique name. */ -+ } kc; -+ -+ struct dm_io_client *dm_io_client; /* dm-io client resource context. */ -+ -+ /* dm-mem-cache client resource context. */ -+ struct dm_mem_cache_client *mem_cache_client; -+ -+ int stripes_parm; /* # stripes parameter from constructor. */ -+ atomic_t stripes; /* actual # of stripes in cache. */ -+ atomic_t stripes_to_set; /* # of stripes to resize cache to. */ -+ atomic_t stripes_last; /* last # of stripes in cache. */ -+ atomic_t active_stripes; /* actual # of active stripes in cache. */ -+ -+ /* REMOVEME: */ -+ atomic_t active_stripes_max; /* actual # of active stripes in cache. */ -+}; -+ -+/* Flag specs for raid_dev */ ; -+enum raid_dev_flags { -+ DEV_FAILED, /* Device failed. */ -+ DEV_IO_QUEUED, /* Io got queued to device. */ -+}; -+ -+/* The raid device in a set. */ -+struct raid_dev { -+ struct dm_dev *dev; -+ sector_t start; /* Offset to map to. */ -+ struct { /* Using struct to be able to BITOPS(). */ -+ unsigned long flags; /* raid_dev_flags. */ -+ } io; -+}; -+ -+BITOPS(Dev, Failed, raid_dev, DEV_FAILED) -+BITOPS(Dev, IoQueued, raid_dev, DEV_IO_QUEUED) -+ -+/* Flags spec for raid_set. */ -+enum raid_set_flags { -+ RS_CHECK_OVERWRITE, /* Check for chunk overwrites. */ -+ RS_DEAD, /* RAID set inoperational. */ -+ RS_DEAD_ENDIO_MESSAGE, /* RAID set dead endio one-off message. */ -+ RS_DEGRADED, /* Io errors on RAID device. */ -+ RS_DEVEL_STATS, /* REMOVEME: display status information. */ -+ RS_ENFORCE_PARITY_CREATION,/* Enforce parity creation. */ -+ RS_PROHIBIT_WRITES, /* Prohibit writes on device failure. */ -+ RS_RECOVER, /* Do recovery. */ -+ RS_RECOVERY_BANDWIDTH, /* Allow recovery bandwidth (delayed bios). */ -+ RS_SC_BUSY, /* Stripe cache busy -> send an event. */ -+ RS_SUSPEND, /* Suspend RAID set. */ -+}; -+ -+/* REMOVEME: devel stats counters. 
*/ -+enum stats_types { -+ S_BIOS_READ, -+ S_BIOS_ADDED_READ, -+ S_BIOS_ENDIO_READ, -+ S_BIOS_WRITE, -+ S_BIOS_ADDED_WRITE, -+ S_BIOS_ENDIO_WRITE, -+ S_CAN_MERGE, -+ S_CANT_MERGE, -+ S_CONGESTED, -+ S_DM_IO_READ, -+ S_DM_IO_WRITE, -+ S_BANDWIDTH, -+ S_BARRIER, -+ S_BIO_COPY_PL_NEXT, -+ S_DEGRADED, -+ S_DELAYED_BIOS, -+ S_FLUSHS, -+ S_HITS_1ST, -+ S_IOS_POST, -+ S_INSCACHE, -+ S_MAX_LOOKUP, -+ S_CHUNK_LOCKED, -+ S_NO_BANDWIDTH, -+ S_NOT_CONGESTED, -+ S_NO_RW, -+ S_NOSYNC, -+ S_OVERWRITE, -+ S_PROHIBITCHUNKIO, -+ S_RECONSTRUCT_EI, -+ S_RECONSTRUCT_DEV, -+ S_RECONSTRUCT_SET, -+ S_RECONSTRUCTED, -+ S_REQUEUE, -+ S_STRIPE_ERROR, -+ S_SUM_DELAYED_BIOS, -+ S_XORS, -+ S_NR_STATS, /* # of stats counters. Must be last! */ -+}; -+ -+/* Status type -> string mappings. */ -+struct stats_map { -+ const enum stats_types type; -+ const char *str; -+}; -+ -+static struct stats_map stats_map[] = { -+ { S_BIOS_READ, "r=" }, -+ { S_BIOS_ADDED_READ, "/" }, -+ { S_BIOS_ENDIO_READ, "/" }, -+ { S_BIOS_WRITE, " w=" }, -+ { S_BIOS_ADDED_WRITE, "/" }, -+ { S_BIOS_ENDIO_WRITE, "/" }, -+ { S_DM_IO_READ, " rc=" }, -+ { S_DM_IO_WRITE, " wc=" }, -+ { S_BANDWIDTH, "\nbw=" }, -+ { S_NO_BANDWIDTH, " no_bw=" }, -+ { S_BARRIER, "\nbarrier=" }, -+ { S_BIO_COPY_PL_NEXT, "\nbio_cp_next=" }, -+ { S_CAN_MERGE, "\nmerge=" }, -+ { S_CANT_MERGE, "/no_merge=" }, -+ { S_CHUNK_LOCKED, "\nchunk_locked=" }, -+ { S_CONGESTED, "\ncgst=" }, -+ { S_NOT_CONGESTED, "/not_cgst=" }, -+ { S_DEGRADED, "\ndegraded=" }, -+ { S_DELAYED_BIOS, "\ndel_bios=" }, -+ { S_SUM_DELAYED_BIOS, "/sum_del_bios=" }, -+ { S_FLUSHS, "\nflushs=" }, -+ { S_HITS_1ST, "\nhits_1st=" }, -+ { S_IOS_POST, " ios_post=" }, -+ { S_INSCACHE, " inscache=" }, -+ { S_MAX_LOOKUP, " maxlookup=" }, -+ { S_NO_RW, "\nno_rw=" }, -+ { S_NOSYNC, " nosync=" }, -+ { S_OVERWRITE, " ovr=" }, -+ { S_PROHIBITCHUNKIO, " prhbt_io=" }, -+ { S_RECONSTRUCT_EI, "\nrec_ei=" }, -+ { S_RECONSTRUCT_DEV, " rec_dev=" }, -+ { S_RECONSTRUCT_SET, " rec_set=" }, -+ { S_RECONSTRUCTED, " 
rec=" }, -+ { S_REQUEUE, " requeue=" }, -+ { S_STRIPE_ERROR, " stripe_err=" }, -+ { S_XORS, " xors=" }, -+}; -+ -+/* -+ * A RAID set. -+ */ -+#define dm_rh_client dm_region_hash -+enum count_type { IO_WORK = 0, IO_RECOVER, IO_NR_COUNT }; -+typedef void (*xor_function_t)(unsigned count, unsigned long **data); -+struct raid_set { -+ struct dm_target *ti; /* Target pointer. */ -+ -+ struct { -+ unsigned long flags; /* State flags. */ -+ struct mutex in_lock; /* Protects central input list below. */ -+ struct mutex xor_lock; /* Protects xor algorithm set. */ -+ struct bio_list in; /* Pending ios (central input list). */ -+ struct bio_list work; /* ios work set. */ -+ wait_queue_head_t suspendq; /* suspend synchronization. */ -+ atomic_t in_process; /* counter of queued bios (suspendq). */ -+ atomic_t in_process_max;/* counter of queued bios max. */ -+ -+ /* io work. */ -+ struct workqueue_struct *wq; -+ struct delayed_work dws_do_raid; /* For main worker. */ -+ struct work_struct ws_do_table_event; /* For event worker. */ -+ } io; -+ -+ /* Stripe locking abstraction. */ -+ struct dm_raid45_locking_type *locking; -+ -+ struct stripe_cache sc; /* Stripe cache for this set. */ -+ -+ /* Xor optimization. */ -+ struct { -+ struct xor_func *f; -+ unsigned chunks; -+ unsigned speed; -+ } xor; -+ -+ /* Recovery parameters. */ -+ struct recover { -+ struct dm_dirty_log *dl; /* Dirty log. */ -+ struct dm_rh_client *rh; /* Region hash. */ -+ -+ struct dm_io_client *dm_io_client; /* recovery dm-io client. */ -+ /* dm-mem-cache client resource context for recovery stripes. */ -+ struct dm_mem_cache_client *mem_cache_client; -+ -+ struct list_head stripes; /* List of recovery stripes. */ -+ -+ region_t nr_regions; -+ region_t nr_regions_to_recover; -+ region_t nr_regions_recovered; -+ unsigned long start_jiffies; -+ unsigned long end_jiffies; -+ -+ unsigned bandwidth; /* Recovery bandwidth [%]. */ -+ unsigned bandwidth_work; /* Recovery bandwidth [factor]. 
*/ -+ unsigned bandwidth_parm; /* " constructor parm. */ -+ unsigned io_size; /* recovery io size <= region size. */ -+ unsigned io_size_parm; /* recovery io size ctr parameter. */ -+ unsigned recovery; /* Recovery allowed/prohibited. */ -+ unsigned recovery_stripes; /* # of parallel recovery stripes. */ -+ -+ /* recovery io throttling. */ -+ atomic_t io_count[IO_NR_COUNT]; /* counter recover/regular io.*/ -+ unsigned long last_jiffies; -+ } recover; -+ -+ /* RAID set parameters. */ -+ struct { -+ struct raid_type *raid_type; /* RAID type (eg, RAID4). */ -+ unsigned raid_parms; /* # variable raid parameters. */ -+ -+ unsigned chunk_size; /* Sectors per chunk. */ -+ unsigned chunk_size_parm; -+ unsigned chunk_shift; /* rsector chunk size shift. */ -+ -+ unsigned io_size; /* Sectors per io. */ -+ unsigned io_size_parm; -+ unsigned io_mask; /* Mask for bio_copy_page_list(). */ -+ unsigned io_inv_mask; /* Mask for raid_address(). */ -+ -+ sector_t sectors_per_dev; /* Sectors per device. */ -+ -+ atomic_t failed_devs; /* Amount of devices failed. */ -+ -+ /* Index of device to initialize. */ -+ int dev_to_init; -+ int dev_to_init_parm; -+ -+ /* Raid devices dynamically allocated. */ -+ unsigned raid_devs; /* # of RAID devices below. */ -+ unsigned data_devs; /* # of RAID data devices. */ -+ -+ int ei; /* index of failed RAID device. */ -+ -+ /* Index of dedicated parity device (i.e. RAID4). */ -+ int pi; -+ int pi_parm; /* constructor parm for status output. */ -+ } set; -+ -+ /* REMOVEME: devel stats counters. */ -+ atomic_t stats[S_NR_STATS]; -+ -+ /* Dynamically allocated temporary pointers for xor(). */ -+ unsigned long **data; -+ -+ /* Dynamically allocated RAID devices. Alignment? */ -+ struct raid_dev dev[0]; -+}; -+ -+/* Define RAID set bit operations. 
*/ -+BITOPS(RS, Bandwidth, raid_set, RS_RECOVERY_BANDWIDTH) -+BITOPS(RS, CheckOverwrite, raid_set, RS_CHECK_OVERWRITE) -+BITOPS(RS, Dead, raid_set, RS_DEAD) -+BITOPS(RS, DeadEndioMessage, raid_set, RS_DEAD_ENDIO_MESSAGE) -+BITOPS(RS, Degraded, raid_set, RS_DEGRADED) -+BITOPS(RS, DevelStats, raid_set, RS_DEVEL_STATS) -+BITOPS(RS, EnforceParityCreation, raid_set, RS_ENFORCE_PARITY_CREATION) -+BITOPS(RS, ProhibitWrites, raid_set, RS_PROHIBIT_WRITES) -+BITOPS(RS, Recover, raid_set, RS_RECOVER) -+BITOPS(RS, ScBusy, raid_set, RS_SC_BUSY) -+BITOPS(RS, Suspend, raid_set, RS_SUSPEND) -+#undef BITOPS -+ -+/*----------------------------------------------------------------- -+ * Raid-4/5 set structures. -+ *---------------------------------------------------------------*/ -+/* RAID level definitions. */ -+enum raid_level { -+ raid4, -+ raid5, -+}; -+ -+/* Symmetric/Asymmetric, Left/Right parity rotating algorithms. */ -+enum raid_algorithm { -+ none, -+ left_asym, -+ right_asym, -+ left_sym, -+ right_sym, -+}; -+ -+struct raid_type { -+ const char *name; /* RAID algorithm. */ -+ const char *descr; /* Descriptor text for logging. */ -+ const unsigned parity_devs; /* # of parity devices. */ -+ const unsigned minimal_devs; /* minimal # of devices in set. */ -+ const enum raid_level level; /* RAID level. */ -+ const enum raid_algorithm algorithm; /* RAID algorithm. */ -+}; -+ -+/* Supported raid types and properties. */ -+static struct raid_type raid_types[] = { -+ {"raid4", "RAID4 (dedicated parity disk)", 1, 3, raid4, none}, -+ {"raid5_la", "RAID5 (left asymmetric)", 1, 3, raid5, left_asym}, -+ {"raid5_ra", "RAID5 (right asymmetric)", 1, 3, raid5, right_asym}, -+ {"raid5_ls", "RAID5 (left symmetric)", 1, 3, raid5, left_sym}, -+ {"raid5_rs", "RAID5 (right symmetric)", 1, 3, raid5, right_sym}, -+}; -+ -+/* Address as calculated by raid_address(). */ -+struct raid_address { -+ sector_t key; /* Hash key (address of stripe % chunk_size). 
*/ -+ unsigned di, pi; /* Data and parity disks index. */ -+}; -+ -+/* REMOVEME: reset statistics counters. */ -+static void stats_reset(struct raid_set *rs) -+{ -+ unsigned s = S_NR_STATS; -+ -+ while (s--) -+ atomic_set(rs->stats + s, 0); -+} -+ -+/*---------------------------------------------------------------- -+ * RAID set management routines. -+ *--------------------------------------------------------------*/ -+/* -+ * Begin small helper functions. -+ */ -+/* No need to be called from region hash indirectly at dm_rh_dec(). */ -+static void wake_dummy(void *context) {} -+ -+/* Return # of io reference. */ -+static int io_ref(struct raid_set *rs) -+{ -+ return atomic_read(&rs->io.in_process); -+} -+ -+/* Get an io reference. */ -+static void io_get(struct raid_set *rs) -+{ -+ int p = atomic_inc_return(&rs->io.in_process); -+ -+ if (p > atomic_read(&rs->io.in_process_max)) -+ atomic_set(&rs->io.in_process_max, p); /* REMOVEME: max. */ -+} -+ -+/* Put the io reference and conditionally wake io waiters. */ -+static void io_put(struct raid_set *rs) -+{ -+ /* Intel: rebuild data corrupter? */ -+ if (atomic_dec_and_test(&rs->io.in_process)) -+ wake_up(&rs->io.suspendq); -+ else -+ BUG_ON(io_ref(rs) < 0); -+} -+ -+/* Wait until all io has been processed. */ -+static void wait_ios(struct raid_set *rs) -+{ -+ wait_event(rs->io.suspendq, !io_ref(rs)); -+} -+ -+/* Queue (optionally delayed) io work. */ -+static void wake_do_raid_delayed(struct raid_set *rs, unsigned long delay) -+{ -+ queue_delayed_work(rs->io.wq, &rs->io.dws_do_raid, delay); -+} -+ -+/* Queue io work immediately (called from region hash too). */ -+static void wake_do_raid(void *context) -+{ -+ struct raid_set *rs = context; -+ -+ queue_work(rs->io.wq, &rs->io.dws_do_raid.work); -+} -+ -+/* Calculate device sector offset. 
*/ -+static sector_t _sector(struct raid_set *rs, struct bio *bio) -+{ -+ sector_t sector = bio->bi_sector; -+ -+ sector_div(sector, rs->set.data_devs); -+ return sector; -+} -+ -+/* Return # of active stripes in stripe cache. */ -+static int sc_active(struct stripe_cache *sc) -+{ -+ return atomic_read(&sc->active_stripes); -+} -+ -+/* Stripe cache busy indicator. */ -+static int sc_busy(struct raid_set *rs) -+{ -+ return sc_active(&rs->sc) > -+ atomic_read(&rs->sc.stripes) - (STRIPES_MIN / 2); -+} -+ -+/* Set chunks states. */ -+enum chunk_dirty_type { CLEAN, DIRTY, ERROR }; -+static void chunk_set(struct stripe_chunk *chunk, enum chunk_dirty_type type) -+{ -+ switch (type) { -+ case CLEAN: -+ ClearChunkDirty(chunk); -+ break; -+ case DIRTY: -+ SetChunkDirty(chunk); -+ break; -+ case ERROR: -+ SetChunkError(chunk); -+ SetStripeError(chunk->stripe); -+ return; -+ default: -+ BUG(); -+ } -+ -+ SetChunkUptodate(chunk); -+ SetChunkIo(chunk); -+ ClearChunkError(chunk); -+} -+ -+/* Return region state for a sector. */ -+static int region_state(struct raid_set *rs, sector_t sector, -+ enum dm_rh_region_states state) -+{ -+ struct dm_rh_client *rh = rs->recover.rh; -+ region_t region = dm_rh_sector_to_region(rh, sector); -+ -+ return !!(dm_rh_get_state(rh, region, 1) & state); -+} -+ -+/* -+ * Return true in case a chunk should be read/written -+ * -+ * Conditions to read/write: -+ * o chunk not uptodate -+ * o chunk dirty -+ * -+ * Conditios to avoid io: -+ * o io already ongoing on chunk -+ * o io explitely prohibited -+ */ -+static int chunk_io(struct stripe_chunk *chunk) -+{ -+ /* 2nd run optimization (flag set below on first run). */ -+ if (TestClearChunkMustIo(chunk)) -+ return 1; -+ -+ /* Avoid io if prohibited or a locked chunk. */ -+ if (!ChunkIo(chunk) || ChunkLocked(chunk)) -+ return 0; -+ -+ if (!ChunkUptodate(chunk) || ChunkDirty(chunk)) { -+ SetChunkMustIo(chunk); /* 2nd run optimization. 
*/ -+ return 1; -+ } -+ -+ return 0; -+} -+ -+/* Call a function on each chunk needing io unless device failed. */ -+static unsigned for_each_io_dev(struct stripe *stripe, -+ void (*f_io)(struct stripe *stripe, unsigned p)) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ unsigned p, r = 0; -+ -+ for (p = 0; p < rs->set.raid_devs; p++) { -+ if (chunk_io(CHUNK(stripe, p)) && !DevFailed(rs->dev + p)) { -+ f_io(stripe, p); -+ r++; -+ } -+ } -+ -+ return r; -+} -+ -+/* -+ * Index of device to calculate parity on. -+ * -+ * Either the parity device index *or* the selected -+ * device to init after a spare replacement. -+ */ -+static int dev_for_parity(struct stripe *stripe, int *sync) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ int r = region_state(rs, stripe->key, DM_RH_NOSYNC | DM_RH_RECOVERING); -+ -+ *sync = !r; -+ -+ /* Reconstruct a particular device ?. */ -+ if (r && rs->set.dev_to_init > -1) -+ return rs->set.dev_to_init; -+ else if (rs->set.raid_type->level == raid4) -+ return rs->set.pi; -+ else if (!StripeRecover(stripe)) -+ return stripe->idx.parity; -+ else -+ return -1; -+} -+ -+/* RAID set congested function. */ -+static int rs_congested(void *congested_data, int bdi_bits) -+{ -+ int r; -+ unsigned p; -+ struct raid_set *rs = congested_data; -+ -+ if (sc_busy(rs) || RSSuspend(rs) || RSProhibitWrites(rs)) -+ r = 1; -+ else for (r = 0, p = rs->set.raid_devs; !r && p--; ) { -+ /* If any of our component devices are overloaded. */ -+ struct request_queue *q = bdev_get_queue(rs->dev[p].dev->bdev); -+ -+ r |= bdi_congested(&q->backing_dev_info, bdi_bits); -+ } -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + (r ? S_CONGESTED : S_NOT_CONGESTED)); -+ return r; -+} -+ -+/* RAID device degrade check. */ -+static void rs_check_degrade_dev(struct raid_set *rs, -+ struct stripe *stripe, unsigned p) -+{ -+ if (TestSetDevFailed(rs->dev + p)) -+ return; -+ -+ /* Through an event in case of member device errors. 
*/ -+ if ((atomic_inc_return(&rs->set.failed_devs) > -+ rs->set.raid_type->parity_devs) && -+ !TestSetRSDead(rs)) { -+ /* Display RAID set dead message once. */ -+ unsigned p; -+ char buf[BDEVNAME_SIZE]; -+ -+ DMERR("FATAL: too many devices failed -> RAID set broken"); -+ for (p = 0; p < rs->set.raid_devs; p++) { -+ if (DevFailed(rs->dev + p)) -+ DMERR("device /dev/%s failed", -+ bdevname(rs->dev[p].dev->bdev, buf)); -+ } -+ } -+ -+ /* Only log the first member error. */ -+ if (!TestSetRSDegraded(rs)) { -+ char buf[BDEVNAME_SIZE]; -+ -+ /* Store index for recovery. */ -+ rs->set.ei = p; -+ DMERR("CRITICAL: %sio error on device /dev/%s " -+ "in region=%llu; DEGRADING RAID set\n", -+ stripe ? "" : "FAKED ", -+ bdevname(rs->dev[p].dev->bdev, buf), -+ (unsigned long long) (stripe ? stripe->key : 0)); -+ DMERR("further device error messages suppressed"); -+ } -+ -+ /* Prohibit further writes to allow for userpace to update metadata. */ -+ SetRSProhibitWrites(rs); -+ schedule_work(&rs->io.ws_do_table_event); -+} -+ -+/* RAID set degrade check. */ -+static void rs_check_degrade(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ unsigned p = rs->set.raid_devs; -+ -+ while (p--) { -+ if (ChunkError(CHUNK(stripe, p))) -+ rs_check_degrade_dev(rs, stripe, p); -+ } -+} -+ -+/* Lookup a RAID device by name or by major:minor number. */ -+static int raid_dev_lookup(struct raid_set *rs, struct raid_dev *dev_lookup) -+{ -+ unsigned p; -+ struct raid_dev *dev; -+ -+ /* -+ * Must be an incremental loop, because the device array -+ * can have empty slots still on calls from raid_ctr() -+ */ -+ for (dev = rs->dev, p = 0; -+ dev->dev && p < rs->set.raid_devs; -+ dev++, p++) { -+ if (dev_lookup->dev->bdev->bd_dev == dev->dev->bdev->bd_dev) -+ return p; -+ } -+ -+ return -ENODEV; -+} -+/* -+ * End small helper functions. -+ */ -+ -+/* -+ * Stripe hash functions -+ */ -+/* Initialize/destroy stripe hash. 
*/ -+static int hash_init(struct stripe_hash *hash, unsigned stripes) -+{ -+ unsigned buckets = roundup_pow_of_two(stripes >> 1); -+ static unsigned hash_primes[] = { -+ /* Table of primes for hash_fn/table size optimization. */ -+ 1, 2, 3, 7, 13, 27, 53, 97, 193, 389, 769, -+ 1543, 3079, 6151, 12289, 24593, 49157, 98317, -+ }; -+ -+ /* Allocate stripe hash buckets. */ -+ hash->hash = vmalloc(buckets * sizeof(*hash->hash)); -+ if (!hash->hash) -+ return -ENOMEM; -+ -+ hash->buckets = buckets; -+ hash->mask = buckets - 1; -+ hash->shift = ffs(buckets); -+ if (hash->shift > ARRAY_SIZE(hash_primes)) -+ hash->shift = ARRAY_SIZE(hash_primes) - 1; -+ -+ BUG_ON(hash->shift < 2); -+ hash->prime = hash_primes[hash->shift]; -+ -+ /* Initialize buckets. */ -+ while (buckets--) -+ INIT_LIST_HEAD(hash->hash + buckets); -+ return 0; -+} -+ -+static void hash_exit(struct stripe_hash *hash) -+{ -+ if (hash->hash) { -+ vfree(hash->hash); -+ hash->hash = NULL; -+ } -+} -+ -+static unsigned hash_fn(struct stripe_hash *hash, sector_t key) -+{ -+ return (unsigned) (((key * hash->prime) >> hash->shift) & hash->mask); -+} -+ -+static struct list_head *hash_bucket(struct stripe_hash *hash, sector_t key) -+{ -+ return hash->hash + hash_fn(hash, key); -+} -+ -+/* Insert an entry into a hash. */ -+static void stripe_insert(struct stripe_hash *hash, struct stripe *stripe) -+{ -+ list_add(stripe->lists + LIST_HASH, hash_bucket(hash, stripe->key)); -+} -+ -+/* Lookup an entry in the stripe hash. */ -+static struct stripe *stripe_lookup(struct stripe_cache *sc, sector_t key) -+{ -+ unsigned look = 0; -+ struct stripe *stripe; -+ struct list_head *bucket = hash_bucket(&sc->hash, key); -+ -+ list_for_each_entry(stripe, bucket, lists[LIST_HASH]) { -+ look++; -+ -+ if (stripe->key == key) { -+ /* REMOVEME: statisics. 
*/ -+ if (look > atomic_read(RS(sc)->stats + S_MAX_LOOKUP)) -+ atomic_set(RS(sc)->stats + S_MAX_LOOKUP, look); -+ return stripe; -+ } -+ } -+ -+ return NULL; -+} -+ -+/* Resize the stripe cache hash on size changes. */ -+static int sc_hash_resize(struct stripe_cache *sc) -+{ -+ /* Resize indicated ? */ -+ if (atomic_read(&sc->stripes) != atomic_read(&sc->stripes_last)) { -+ int r; -+ struct stripe_hash hash; -+ -+ r = hash_init(&hash, atomic_read(&sc->stripes)); -+ if (r) -+ return r; -+ -+ if (sc->hash.hash) { -+ unsigned b = sc->hash.buckets; -+ struct list_head *pos, *tmp; -+ -+ /* Walk old buckets and insert into new. */ -+ while (b--) { -+ list_for_each_safe(pos, tmp, sc->hash.hash + b) -+ stripe_insert(&hash, -+ list_entry(pos, struct stripe, -+ lists[LIST_HASH])); -+ } -+ -+ } -+ -+ hash_exit(&sc->hash); -+ memcpy(&sc->hash, &hash, sizeof(sc->hash)); -+ atomic_set(&sc->stripes_last, atomic_read(&sc->stripes)); -+ } -+ -+ return 0; -+} -+/* End hash stripe hash function. */ -+ -+/* List add, delete, push and pop functions. */ -+/* Add stripe to flush list. */ -+#define DEL_LIST(lh) \ -+ if (!list_empty(lh)) \ -+ list_del_init(lh); -+ -+/* Delete stripe from hash. */ -+static void stripe_hash_del(struct stripe *stripe) -+{ -+ DEL_LIST(stripe->lists + LIST_HASH); -+} -+ -+/* Return stripe reference count. */ -+static inline int stripe_ref(struct stripe *stripe) -+{ -+ return atomic_read(&stripe->cnt); -+} -+ -+static void stripe_flush_add(struct stripe *stripe) -+{ -+ struct stripe_cache *sc = stripe->sc; -+ struct list_head *lh = stripe->lists + LIST_FLUSH; -+ -+ if (!StripeReconstruct(stripe) && list_empty(lh)) -+ list_add_tail(lh, sc->lists + LIST_FLUSH); -+} -+ -+/* -+ * Add stripe to LRU (inactive) list. -+ * -+ * Need lock, because of concurrent access from message interface. 
-+ */ -+static void stripe_lru_add(struct stripe *stripe) -+{ -+ if (!StripeRecover(stripe)) { -+ struct list_head *lh = stripe->lists + LIST_LRU; -+ -+ if (list_empty(lh)) -+ list_add_tail(lh, stripe->sc->lists + LIST_LRU); -+ } -+} -+ -+#define POP_LIST(list) \ -+ do { \ -+ if (list_empty(sc->lists + (list))) \ -+ stripe = NULL; \ -+ else { \ -+ stripe = list_first_entry(sc->lists + (list), \ -+ struct stripe, \ -+ lists[(list)]); \ -+ list_del_init(stripe->lists + (list)); \ -+ } \ -+ } while (0); -+ -+/* Pop an available stripe off the LRU list. */ -+static struct stripe *stripe_lru_pop(struct stripe_cache *sc) -+{ -+ struct stripe *stripe; -+ -+ POP_LIST(LIST_LRU); -+ return stripe; -+} -+ -+/* Pop an available stripe off the io list. */ -+static struct stripe *stripe_io_pop(struct stripe_cache *sc) -+{ -+ struct stripe *stripe; -+ -+ POP_LIST(LIST_FLUSH); -+ return stripe; -+} -+ -+/* Push a stripe safely onto the endio list to be handled by do_endios(). */ -+static void stripe_endio_push(struct stripe *stripe) -+{ -+ unsigned long flags; -+ struct stripe_cache *sc = stripe->sc; -+ struct list_head *stripe_list = stripe->lists + LIST_ENDIO, -+ *sc_list = sc->lists + LIST_ENDIO; -+ spinlock_t *lock = sc->locks + LOCK_ENDIO; -+ -+ /* This runs in parallel with do_endios(). */ -+ spin_lock_irqsave(lock, flags); -+ if (list_empty(stripe_list)) -+ list_add_tail(stripe_list, sc_list); -+ spin_unlock_irqrestore(lock, flags); -+ -+ wake_do_raid(RS(sc)); /* Wake myself. */ -+} -+ -+/* Pop a stripe off safely off the endio list. */ -+static struct stripe *stripe_endio_pop(struct stripe_cache *sc) -+{ -+ struct stripe *stripe; -+ spinlock_t *lock = sc->locks + LOCK_ENDIO; -+ -+ /* This runs in parallel with endio(). */ -+ spin_lock_irq(lock); -+ POP_LIST(LIST_ENDIO) -+ spin_unlock_irq(lock); -+ return stripe; -+} -+#undef POP_LIST -+ -+/* -+ * Stripe cache locking functions -+ */ -+/* Dummy lock function for single host RAID4+5. 
*/ -+static void *no_lock(sector_t key, enum dm_lock_type type) -+{ -+ return &no_lock; -+} -+ -+/* Dummy unlock function for single host RAID4+5. */ -+static void no_unlock(void *lock_handle) -+{ -+} -+ -+/* No locking (for single host RAID 4+5). */ -+static struct dm_raid45_locking_type locking_none = { -+ .lock = no_lock, -+ .unlock = no_unlock, -+}; -+ -+/* Lock a stripe (for clustering). */ -+static int -+stripe_lock(struct stripe *stripe, int rw, sector_t key) -+{ -+ stripe->lock = RS(stripe->sc)->locking->lock(key, rw == READ ? DM_RAID45_SHARED : DM_RAID45_EX); -+ return stripe->lock ? 0 : -EPERM; -+} -+ -+/* Unlock a stripe (for clustering). */ -+static void stripe_unlock(struct stripe *stripe) -+{ -+ RS(stripe->sc)->locking->unlock(stripe->lock); -+ stripe->lock = NULL; -+} -+ -+/* Test io pending on stripe. */ -+static int stripe_io_ref(struct stripe *stripe) -+{ -+ return atomic_read(&stripe->io.pending); -+} -+ -+static void stripe_io_get(struct stripe *stripe) -+{ -+ if (atomic_inc_return(&stripe->io.pending) == 1) -+ /* REMOVEME: statistics */ -+ atomic_inc(&stripe->sc->active_stripes); -+ else -+ BUG_ON(stripe_io_ref(stripe) < 0); -+} -+ -+static void stripe_io_put(struct stripe *stripe) -+{ -+ if (atomic_dec_and_test(&stripe->io.pending)) { -+ if (unlikely(StripeRecover(stripe))) -+ /* Don't put recovery stripe on endio list. */ -+ wake_do_raid(RS(stripe->sc)); -+ else -+ /* Add regular stripe to endio list and wake daemon. */ -+ stripe_endio_push(stripe); -+ -+ /* REMOVEME: statistics */ -+ atomic_dec(&stripe->sc->active_stripes); -+ } else -+ BUG_ON(stripe_io_ref(stripe) < 0); -+} -+ -+/* Take stripe reference out. */ -+static int stripe_get(struct stripe *stripe) -+{ -+ int r; -+ struct list_head *lh = stripe->lists + LIST_LRU; -+ -+ /* Delete stripe from LRU (inactive) list if on. */ -+ DEL_LIST(lh); -+ BUG_ON(stripe_ref(stripe) < 0); -+ -+ /* Lock stripe on first reference */ -+ r = (atomic_inc_return(&stripe->cnt) == 1) ? 
-+ stripe_lock(stripe, WRITE, stripe->key) : 0; -+ -+ return r; -+} -+#undef DEL_LIST -+ -+/* Return references on a chunk. */ -+static int chunk_ref(struct stripe_chunk *chunk) -+{ -+ return atomic_read(&chunk->cnt); -+} -+ -+/* Take out reference on a chunk. */ -+static int chunk_get(struct stripe_chunk *chunk) -+{ -+ return atomic_inc_return(&chunk->cnt); -+} -+ -+/* Drop reference on a chunk. */ -+static void chunk_put(struct stripe_chunk *chunk) -+{ -+ BUG_ON(atomic_dec_return(&chunk->cnt) < 0); -+} -+ -+/* -+ * Drop reference on a stripe. -+ * -+ * Move it to list of LRU stripes if zero. -+ */ -+static void stripe_put(struct stripe *stripe) -+{ -+ if (atomic_dec_and_test(&stripe->cnt)) { -+ BUG_ON(stripe_io_ref(stripe)); -+ stripe_unlock(stripe); -+ } else -+ BUG_ON(stripe_ref(stripe) < 0); -+} -+ -+/* Helper needed by for_each_io_dev(). */ -+static void stripe_get_references(struct stripe *stripe, unsigned p) -+{ -+ -+ /* -+ * Another one to reference the stripe in -+ * order to protect vs. LRU list moves. -+ */ -+ io_get(RS(stripe->sc)); /* Global io references. */ -+ stripe_get(stripe); -+ stripe_io_get(stripe); /* One for each chunk io. */ -+} -+ -+/* Helper for endio() to put all take references. */ -+static void stripe_put_references(struct stripe *stripe) -+{ -+ stripe_io_put(stripe); /* One for each chunk io. */ -+ stripe_put(stripe); -+ io_put(RS(stripe->sc)); -+} -+ -+/* -+ * Stripe cache functions. -+ */ -+/* -+ * Invalidate all chunks (i.e. their pages) of a stripe. -+ * -+ * I only keep state for the whole chunk. -+ */ -+static inline void stripe_chunk_invalidate(struct stripe_chunk *chunk) -+{ -+ chunk->io.flags = 0; -+} -+ -+static void -+stripe_chunks_invalidate(struct stripe *stripe) -+{ -+ unsigned p = RS(stripe->sc)->set.raid_devs; -+ -+ while (p--) -+ stripe_chunk_invalidate(CHUNK(stripe, p)); -+} -+ -+/* Prepare stripe for (re)use. 
*/ -+static void stripe_invalidate(struct stripe *stripe) -+{ -+ stripe->io.flags = 0; -+ stripe->idx.parity = stripe->idx.recover = -1; -+ stripe_chunks_invalidate(stripe); -+} -+ -+/* -+ * Allow io on all chunks of a stripe. -+ * If not set, IO will not occur; i.e. it's prohibited. -+ * -+ * Actual IO submission for allowed chunks depends -+ * on their !uptodate or dirty state. -+ */ -+static void stripe_allow_io(struct stripe *stripe) -+{ -+ unsigned p = RS(stripe->sc)->set.raid_devs; -+ -+ while (p--) -+ SetChunkIo(CHUNK(stripe, p)); -+} -+ -+/* Initialize a stripe. */ -+static void stripe_init(struct stripe_cache *sc, struct stripe *stripe) -+{ -+ unsigned i, p = RS(sc)->set.raid_devs; -+ -+ /* Work all io chunks. */ -+ while (p--) { -+ struct stripe_chunk *chunk = CHUNK(stripe, p); -+ -+ atomic_set(&chunk->cnt, 0); -+ chunk->stripe = stripe; -+ i = ARRAY_SIZE(chunk->bl); -+ while (i--) -+ bio_list_init(chunk->bl + i); -+ } -+ -+ stripe->sc = sc; -+ -+ i = ARRAY_SIZE(stripe->lists); -+ while (i--) -+ INIT_LIST_HEAD(stripe->lists + i); -+ -+ stripe->io.size = RS(sc)->set.io_size; -+ atomic_set(&stripe->cnt, 0); -+ atomic_set(&stripe->io.pending, 0); -+ stripe_invalidate(stripe); -+} -+ -+/* Number of pages per chunk. */ -+static inline unsigned chunk_pages(unsigned sectors) -+{ -+ return dm_div_up(sectors, SECTORS_PER_PAGE); -+} -+ -+/* Number of pages per stripe. */ -+static inline unsigned stripe_pages(struct raid_set *rs, unsigned io_size) -+{ -+ return chunk_pages(io_size) * rs->set.raid_devs; -+} -+ -+/* Initialize part of page_list (recovery). */ -+static void stripe_zero_pl_part(struct stripe *stripe, int p, -+ unsigned start, unsigned count) -+{ -+ unsigned o = start / SECTORS_PER_PAGE, pages = chunk_pages(count); -+ /* Get offset into the page_list. 
*/ -+ struct page_list *pl = pl_elem(PL(stripe, p), o); -+ -+ BUG_ON(!pl); -+ while (pl && pages--) { -+ BUG_ON(!pl->page); -+ memset(page_address(pl->page), 0, PAGE_SIZE); -+ pl = pl->next; -+ } -+} -+ -+/* Initialize parity chunk of stripe. */ -+static void stripe_zero_chunk(struct stripe *stripe, int p) -+{ -+ if (p > -1) -+ stripe_zero_pl_part(stripe, p, 0, stripe->io.size); -+} -+ -+/* Return dynamic stripe structure size. */ -+static size_t stripe_size(struct raid_set *rs) -+{ -+ return sizeof(struct stripe) + -+ rs->set.raid_devs * sizeof(struct stripe_chunk); -+} -+ -+/* Allocate a stripe and its memory object. */ -+/* XXX adjust to cope with stripe cache and recovery stripe caches. */ -+enum grow { SC_GROW, SC_KEEP }; -+static struct stripe *stripe_alloc(struct stripe_cache *sc, -+ struct dm_mem_cache_client *mc, -+ enum grow grow) -+{ -+ int r; -+ struct stripe *stripe; -+ -+ stripe = kmem_cache_zalloc(sc->kc.cache, GFP_KERNEL); -+ if (stripe) { -+ /* Grow the dm-mem-cache by one object. */ -+ if (grow == SC_GROW) { -+ r = dm_mem_cache_grow(mc, 1); -+ if (r) -+ goto err_free; -+ } -+ -+ stripe->obj = dm_mem_cache_alloc(mc); -+ if (IS_ERR(stripe->obj)) -+ goto err_shrink; -+ -+ stripe_init(sc, stripe); -+ } -+ -+ return stripe; -+ -+err_shrink: -+ if (grow == SC_GROW) -+ dm_mem_cache_shrink(mc, 1); -+err_free: -+ kmem_cache_free(sc->kc.cache, stripe); -+ return NULL; -+} -+ -+/* -+ * Free a stripes memory object, shrink the -+ * memory cache and free the stripe itself. -+ */ -+static void stripe_free(struct stripe *stripe, struct dm_mem_cache_client *mc) -+{ -+ dm_mem_cache_free(mc, stripe->obj); -+ dm_mem_cache_shrink(mc, 1); -+ kmem_cache_free(stripe->sc->kc.cache, stripe); -+} -+ -+/* Free the recovery stripe. 
*/ -+static void stripe_recover_free(struct raid_set *rs) -+{ -+ struct recover *rec = &rs->recover; -+ struct dm_mem_cache_client *mc; -+ -+ mc = rec->mem_cache_client; -+ rec->mem_cache_client = NULL; -+ if (mc) { -+ struct stripe *stripe; -+ -+ while (!list_empty(&rec->stripes)) { -+ stripe = list_first_entry(&rec->stripes, struct stripe, -+ lists[LIST_RECOVER]); -+ list_del(stripe->lists + LIST_RECOVER); -+ kfree(stripe->recover); -+ stripe_free(stripe, mc); -+ } -+ -+ dm_mem_cache_client_destroy(mc); -+ dm_io_client_destroy(rec->dm_io_client); -+ rec->dm_io_client = NULL; -+ } -+} -+ -+/* Grow stripe cache. */ -+static int sc_grow(struct stripe_cache *sc, unsigned stripes, enum grow grow) -+{ -+ int r = 0; -+ -+ /* Try to allocate this many (additional) stripes. */ -+ while (stripes--) { -+ struct stripe *stripe = -+ stripe_alloc(sc, sc->mem_cache_client, grow); -+ -+ if (likely(stripe)) { -+ stripe_lru_add(stripe); -+ atomic_inc(&sc->stripes); -+ } else { -+ r = -ENOMEM; -+ break; -+ } -+ } -+ -+ return r ? r : sc_hash_resize(sc); -+} -+ -+/* Shrink stripe cache. */ -+static int sc_shrink(struct stripe_cache *sc, unsigned stripes) -+{ -+ int r = 0; -+ -+ /* Try to get unused stripe from LRU list. */ -+ while (stripes--) { -+ struct stripe *stripe; -+ -+ stripe = stripe_lru_pop(sc); -+ if (stripe) { -+ /* An LRU stripe may never have ios pending! */ -+ BUG_ON(stripe_io_ref(stripe)); -+ BUG_ON(stripe_ref(stripe)); -+ atomic_dec(&sc->stripes); -+ /* Remove from hash if on before deletion. */ -+ stripe_hash_del(stripe); -+ stripe_free(stripe, sc->mem_cache_client); -+ } else { -+ r = -ENOENT; -+ break; -+ } -+ } -+ -+ /* Check if stats are still sane. */ -+ if (atomic_read(&sc->active_stripes_max) > -+ atomic_read(&sc->stripes)) -+ atomic_set(&sc->active_stripes_max, 0); -+ -+ if (r) -+ return r; -+ -+ return atomic_read(&sc->stripes) ? sc_hash_resize(sc) : 0; -+} -+ -+/* Create stripe cache and recovery. 
*/ -+static int sc_init(struct raid_set *rs, unsigned stripes) -+{ -+ unsigned i, r, rstripes; -+ struct stripe_cache *sc = &rs->sc; -+ struct stripe *stripe; -+ struct recover *rec = &rs->recover; -+ struct mapped_device *md; -+ struct gendisk *disk; -+ -+ -+ /* Initialize lists and locks. */ -+ i = ARRAY_SIZE(sc->lists); -+ while (i--) -+ INIT_LIST_HEAD(sc->lists + i); -+ -+ INIT_LIST_HEAD(&rec->stripes); -+ -+ /* Initialize endio and LRU list locks. */ -+ i = NR_LOCKS; -+ while (i--) -+ spin_lock_init(sc->locks + i); -+ -+ /* Initialize atomic variables. */ -+ atomic_set(&sc->stripes, 0); -+ atomic_set(&sc->stripes_to_set, 0); -+ atomic_set(&sc->active_stripes, 0); -+ atomic_set(&sc->active_stripes_max, 0); /* REMOVEME: statistics. */ -+ -+ /* -+ * We need a runtime unique # to suffix the kmem cache name -+ * because we'll have one for each active RAID set. -+ */ -+ md = dm_table_get_md(rs->ti->table); -+ disk = dm_disk(md); -+ snprintf(sc->kc.name, sizeof(sc->kc.name), "%s-%d.%d", TARGET, -+ disk->first_minor, atomic_inc_return(&_stripe_sc_nr)); -+ dm_put(md); -+ sc->kc.cache = kmem_cache_create(sc->kc.name, stripe_size(rs), -+ 0, 0, NULL); -+ if (!sc->kc.cache) -+ return -ENOMEM; -+ -+ /* Create memory cache client context for RAID stripe cache. */ -+ sc->mem_cache_client = -+ dm_mem_cache_client_create(stripes, rs->set.raid_devs, -+ chunk_pages(rs->set.io_size)); -+ if (IS_ERR(sc->mem_cache_client)) -+ return PTR_ERR(sc->mem_cache_client); -+ -+ /* Create memory cache client context for RAID recovery stripe(s). */ -+ rstripes = rec->recovery_stripes; -+ rec->mem_cache_client = -+ dm_mem_cache_client_create(rstripes, rs->set.raid_devs, -+ chunk_pages(rec->io_size)); -+ if (IS_ERR(rec->mem_cache_client)) -+ return PTR_ERR(rec->mem_cache_client); -+ -+ /* Create dm-io client context for IO stripes. */ -+ sc->dm_io_client = -+ dm_io_client_create((stripes > 32 ? 
32 : stripes) * -+ rs->set.raid_devs * -+ chunk_pages(rs->set.io_size)); -+ if (IS_ERR(sc->dm_io_client)) -+ return PTR_ERR(sc->dm_io_client); -+ -+ /* FIXME: intermingeled with stripe cache initialization. */ -+ /* Create dm-io client context for recovery stripes. */ -+ rec->dm_io_client = -+ dm_io_client_create(rstripes * rs->set.raid_devs * -+ chunk_pages(rec->io_size)); -+ if (IS_ERR(rec->dm_io_client)) -+ return PTR_ERR(rec->dm_io_client); -+ -+ /* Allocate stripes for set recovery. */ -+ while (rstripes--) { -+ stripe = stripe_alloc(sc, rec->mem_cache_client, SC_KEEP); -+ if (!stripe) -+ return -ENOMEM; -+ -+ stripe->recover = kzalloc(sizeof(*stripe->recover), GFP_KERNEL); -+ if (!stripe->recover) { -+ stripe_free(stripe, rec->mem_cache_client); -+ return -ENOMEM; -+ } -+ -+ SetStripeRecover(stripe); -+ stripe->io.size = rec->io_size; -+ list_add_tail(stripe->lists + LIST_RECOVER, &rec->stripes); -+ /* Don't add recovery stripes to LRU list! */ -+ } -+ -+ /* -+ * Allocate the stripe objetcs from the -+ * cache and add them to the LRU list. -+ */ -+ r = sc_grow(sc, stripes, SC_KEEP); -+ if (!r) -+ atomic_set(&sc->stripes_last, stripes); -+ -+ return r; -+} -+ -+/* Destroy the stripe cache. */ -+static void sc_exit(struct stripe_cache *sc) -+{ -+ struct raid_set *rs = RS(sc); -+ -+ if (sc->kc.cache) { -+ stripe_recover_free(rs); -+ BUG_ON(sc_shrink(sc, atomic_read(&sc->stripes))); -+ kmem_cache_destroy(sc->kc.cache); -+ sc->kc.cache = NULL; -+ -+ if (sc->mem_cache_client && !IS_ERR(sc->mem_cache_client)) -+ dm_mem_cache_client_destroy(sc->mem_cache_client); -+ -+ if (sc->dm_io_client && !IS_ERR(sc->dm_io_client)) -+ dm_io_client_destroy(sc->dm_io_client); -+ -+ hash_exit(&sc->hash); -+ } -+} -+ -+/* -+ * Calculate RAID address -+ * -+ * Delivers tuple with the index of the data disk holding the chunk -+ * in the set, the parity disks index and the start of the stripe -+ * within the address space of the set (used as the stripe cache hash key). 
-+ */ -+/* thx MD. */ -+static struct raid_address *raid_address(struct raid_set *rs, sector_t sector, -+ struct raid_address *addr) -+{ -+ sector_t stripe, tmp; -+ -+ /* -+ * chunk_number = sector / chunk_size -+ * stripe_number = chunk_number / data_devs -+ * di = stripe % data_devs; -+ */ -+ stripe = sector >> rs->set.chunk_shift; -+ addr->di = sector_div(stripe, rs->set.data_devs); -+ -+ switch (rs->set.raid_type->level) { -+ case raid4: -+ addr->pi = rs->set.pi; -+ goto check_shift_di; -+ case raid5: -+ tmp = stripe; -+ addr->pi = sector_div(tmp, rs->set.raid_devs); -+ -+ switch (rs->set.raid_type->algorithm) { -+ case left_asym: /* Left asymmetric. */ -+ addr->pi = rs->set.data_devs - addr->pi; -+ case right_asym: /* Right asymmetric. */ -+check_shift_di: -+ if (addr->di >= addr->pi) -+ addr->di++; -+ break; -+ case left_sym: /* Left symmetric. */ -+ addr->pi = rs->set.data_devs - addr->pi; -+ case right_sym: /* Right symmetric. */ -+ addr->di = (addr->pi + addr->di + 1) % -+ rs->set.raid_devs; -+ break; -+ case none: /* Ain't happen: RAID4 algorithm placeholder. */ -+ BUG(); -+ } -+ } -+ -+ /* -+ * Start offset of the stripes chunk on any single device of the RAID -+ * set, adjusted in case io size differs from chunk size. -+ */ -+ addr->key = (stripe << rs->set.chunk_shift) + -+ (sector & rs->set.io_inv_mask); -+ return addr; -+} -+ -+/* -+ * Copy data across between stripe pages and bio vectors. -+ * -+ * Pay attention to data alignment in stripe and bio pages. -+ */ -+static void bio_copy_page_list(int rw, struct stripe *stripe, -+ struct page_list *pl, struct bio *bio) -+{ -+ unsigned i, page_offset; -+ void *page_addr; -+ struct raid_set *rs = RS(stripe->sc); -+ struct bio_vec *bv; -+ -+ /* Get start page in page list for this sector. 
*/ -+ i = (bio->bi_sector & rs->set.io_mask) / SECTORS_PER_PAGE; -+ pl = pl_elem(pl, i); -+ BUG_ON(!pl); -+ BUG_ON(!pl->page); -+ -+ page_addr = page_address(pl->page); -+ page_offset = to_bytes(bio->bi_sector & (SECTORS_PER_PAGE - 1)); -+ -+ /* Walk all segments and copy data across between bio_vecs and pages. */ -+ bio_for_each_segment(bv, bio, i) { -+ int len = bv->bv_len, size; -+ unsigned bio_offset = 0; -+ void *bio_addr = __bio_kmap_atomic(bio, i, KM_USER0); -+redo: -+ size = (page_offset + len > PAGE_SIZE) ? -+ PAGE_SIZE - page_offset : len; -+ -+ if (rw == READ) -+ memcpy(bio_addr + bio_offset, -+ page_addr + page_offset, size); -+ else -+ memcpy(page_addr + page_offset, -+ bio_addr + bio_offset, size); -+ -+ page_offset += size; -+ if (page_offset == PAGE_SIZE) { -+ /* -+ * We reached the end of the chunk page -> -+ * need to refer to the next one to copy more data. -+ */ -+ len -= size; -+ if (len) { -+ /* Get next page. */ -+ pl = pl->next; -+ BUG_ON(!pl); -+ BUG_ON(!pl->page); -+ page_addr = page_address(pl->page); -+ page_offset = 0; -+ bio_offset += size; -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_BIO_COPY_PL_NEXT); -+ goto redo; -+ } -+ } -+ -+ __bio_kunmap_atomic(bio_addr, KM_USER0); -+ } -+} -+ -+/* -+ * Xor optimization macros. -+ */ -+/* Xor data pointer declaration and initialization macros. */ -+#define DECLARE_2 unsigned long *d0 = data[0], *d1 = data[1] -+#define DECLARE_3 DECLARE_2, *d2 = data[2] -+#define DECLARE_4 DECLARE_3, *d3 = data[3] -+#define DECLARE_5 DECLARE_4, *d4 = data[4] -+#define DECLARE_6 DECLARE_5, *d5 = data[5] -+#define DECLARE_7 DECLARE_6, *d6 = data[6] -+#define DECLARE_8 DECLARE_7, *d7 = data[7] -+ -+/* Xor unrole macros. 
*/ -+#define D2(n) d0[n] = d0[n] ^ d1[n] -+#define D3(n) D2(n) ^ d2[n] -+#define D4(n) D3(n) ^ d3[n] -+#define D5(n) D4(n) ^ d4[n] -+#define D6(n) D5(n) ^ d5[n] -+#define D7(n) D6(n) ^ d6[n] -+#define D8(n) D7(n) ^ d7[n] -+ -+#define X_2(macro, offset) macro(offset); macro(offset + 1); -+#define X_4(macro, offset) X_2(macro, offset); X_2(macro, offset + 2); -+#define X_8(macro, offset) X_4(macro, offset); X_4(macro, offset + 4); -+#define X_16(macro, offset) X_8(macro, offset); X_8(macro, offset + 8); -+#define X_32(macro, offset) X_16(macro, offset); X_16(macro, offset + 16); -+#define X_64(macro, offset) X_32(macro, offset); X_32(macro, offset + 32); -+ -+/* Define a _xor_#chunks_#xors_per_run() function. */ -+#define _XOR(chunks, xors_per_run) \ -+static void _xor ## chunks ## _ ## xors_per_run(unsigned long **data) \ -+{ \ -+ unsigned end = XOR_SIZE / sizeof(data[0]), i; \ -+ DECLARE_ ## chunks; \ -+\ -+ for (i = 0; i < end; i += xors_per_run) { \ -+ X_ ## xors_per_run(D ## chunks, i); \ -+ } \ -+} -+ -+/* Define xor functions for 2 - 8 chunks and xors per run. */ -+#define MAKE_XOR_PER_RUN(xors_per_run) \ -+ _XOR(2, xors_per_run); _XOR(3, xors_per_run); \ -+ _XOR(4, xors_per_run); _XOR(5, xors_per_run); \ -+ _XOR(6, xors_per_run); _XOR(7, xors_per_run); \ -+ _XOR(8, xors_per_run); -+ -+MAKE_XOR_PER_RUN(8) /* Define _xor_*_8() functions. */ -+MAKE_XOR_PER_RUN(16) /* Define _xor_*_16() functions. */ -+MAKE_XOR_PER_RUN(32) /* Define _xor_*_32() functions. */ -+MAKE_XOR_PER_RUN(64) /* Define _xor_*_64() functions. */ -+ -+#define MAKE_XOR(xors_per_run) \ -+struct { \ -+ void (*f)(unsigned long **); \ -+} static xor_funcs ## xors_per_run[] = { \ -+ { NULL }, /* NULL pointers to optimize indexing in xor(). 
*/ \ -+ { NULL }, \ -+ { _xor2_ ## xors_per_run }, \ -+ { _xor3_ ## xors_per_run }, \ -+ { _xor4_ ## xors_per_run }, \ -+ { _xor5_ ## xors_per_run }, \ -+ { _xor6_ ## xors_per_run }, \ -+ { _xor7_ ## xors_per_run }, \ -+ { _xor8_ ## xors_per_run }, \ -+}; \ -+\ -+static void xor_ ## xors_per_run(unsigned n, unsigned long **data) \ -+{ \ -+ /* Call respective function for amount of chunks. */ \ -+ xor_funcs ## xors_per_run[n].f(data); \ -+} -+ -+/* Define xor_8() - xor_64 functions. */ -+MAKE_XOR(8) -+MAKE_XOR(16) -+MAKE_XOR(32) -+MAKE_XOR(64) -+/* -+ * END xor optimization macros. -+ */ -+ -+/* Maximum number of chunks, which can be xor'ed in one go. */ -+#define XOR_CHUNKS_MAX (ARRAY_SIZE(xor_funcs8) - 1) -+ -+/* xor_blocks wrapper to allow for using that crypto library function. */ -+static void xor_blocks_wrapper(unsigned n, unsigned long **data) -+{ -+ BUG_ON(n < 2 || n > MAX_XOR_BLOCKS + 1); -+ xor_blocks(n - 1, XOR_SIZE, (void *) data[0], (void **) data + 1); -+} -+ -+struct xor_func { -+ xor_function_t f; -+ const char *name; -+} static xor_funcs[] = { -+ { xor_64, "xor_64" }, -+ { xor_32, "xor_32" }, -+ { xor_16, "xor_16" }, -+ { xor_8, "xor_8" }, -+ { xor_blocks_wrapper, "xor_blocks" }, -+}; -+ -+/* -+ * Check, if chunk has to be xored in/out: -+ * -+ * o if writes are queued -+ * o if writes are merged -+ * o if stripe is to be reconstructed -+ * o if recovery stripe -+ */ -+static inline int chunk_must_xor(struct stripe_chunk *chunk) -+{ -+ if (ChunkUptodate(chunk)) { -+ BUG_ON(!bio_list_empty(BL_CHUNK(chunk, WRITE_QUEUED)) && -+ !bio_list_empty(BL_CHUNK(chunk, WRITE_MERGED))); -+ -+ if (!bio_list_empty(BL_CHUNK(chunk, WRITE_QUEUED)) || -+ !bio_list_empty(BL_CHUNK(chunk, WRITE_MERGED))) -+ return 1; -+ -+ if (StripeReconstruct(chunk->stripe) || -+ StripeRecover(chunk->stripe)) -+ return 1; -+ } -+ -+ return 0; -+} -+ -+/* -+ * Calculate crc. -+ * -+ * This indexes into the chunks of a stripe and their pages. 
-+ * -+ * All chunks will be xored into the indexed (@pi) -+ * chunk in maximum groups of xor.chunks. -+ * -+ */ -+static void xor(struct stripe *stripe, unsigned pi, unsigned sector) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ unsigned max_chunks = rs->xor.chunks, n = 1, -+ o = sector / SECTORS_PER_PAGE, /* Offset into the page_list. */ -+ p = rs->set.raid_devs; -+ unsigned long **d = rs->data; -+ xor_function_t xor_f = rs->xor.f->f; -+ -+ BUG_ON(sector > stripe->io.size); -+ -+ /* Address of parity page to xor into. */ -+ d[0] = page_address(pl_elem(PL(stripe, pi), o)->page); -+ -+ while (p--) { -+ /* Preset pointers to data pages. */ -+ if (p != pi && chunk_must_xor(CHUNK(stripe, p))) -+ d[n++] = page_address(pl_elem(PL(stripe, p), o)->page); -+ -+ /* If max chunks -> xor. */ -+ if (n == max_chunks) { -+ mutex_lock(&rs->io.xor_lock); -+ xor_f(n, d); -+ mutex_unlock(&rs->io.xor_lock); -+ n = 1; -+ } -+ } -+ -+ /* If chunks -> xor. */ -+ if (n > 1) { -+ mutex_lock(&rs->io.xor_lock); -+ xor_f(n, d); -+ mutex_unlock(&rs->io.xor_lock); -+ } -+} -+ -+/* Common xor loop through all stripe page lists. */ -+static void common_xor(struct stripe *stripe, sector_t count, -+ unsigned off, unsigned pi) -+{ -+ unsigned sector; -+ -+ BUG_ON(!count); -+ for (sector = off; sector < count; sector += SECTORS_PER_PAGE) -+ xor(stripe, pi, sector); -+ -+ /* Set parity page uptodate and clean. */ -+ chunk_set(CHUNK(stripe, pi), CLEAN); -+ atomic_inc(RS(stripe->sc)->stats + S_XORS); /* REMOVEME: statistics. */ -+} -+ -+/* -+ * Calculate parity sectors on intact stripes. -+ * -+ * Need to calculate raid address for recover stripe, because its -+ * chunk sizes differs and is typically larger than io chunk size. -+ */ -+static void parity_xor(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ int size_differs = stripe->io.size != rs->set.io_size; -+ unsigned chunk_size = rs->set.chunk_size, io_size = stripe->io.size, -+ xor_size = chunk_size > io_size ? 
io_size : chunk_size; -+ sector_t off; -+ -+ /* This can be the recover stripe with a larger io size. */ -+ for (off = 0; off < io_size; off += xor_size) { -+ /* -+ * Recover stripe is likely bigger than regular io -+ * ones and has no precalculated parity disk index -> -+ * need to calculate RAID address. -+ */ -+ if (unlikely(size_differs)) { -+ struct raid_address addr; -+ -+ raid_address(rs, (stripe->key + off) * -+ rs->set.data_devs, &addr); -+ stripe->idx.parity = addr.pi; -+ stripe_zero_pl_part(stripe, addr.pi, off, xor_size); -+ } -+ -+ common_xor(stripe, xor_size, off, stripe->idx.parity); -+ chunk_set(CHUNK(stripe, stripe->idx.parity), DIRTY); -+ } -+} -+ -+/* Reconstruct missing chunk. */ -+static void stripe_reconstruct(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ int p = rs->set.raid_devs, pr = stripe->idx.recover; -+ -+ BUG_ON(pr < 0); -+ -+ /* Check if all but the chunk to be reconstructed are uptodate. */ -+ while (p--) -+ BUG_ON(p != pr && !ChunkUptodate(CHUNK(stripe, p))); -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + (RSDegraded(rs) ? S_RECONSTRUCT_EI : -+ S_RECONSTRUCT_DEV)); -+ /* Zero chunk to be reconstructed. */ -+ stripe_zero_chunk(stripe, pr); -+ common_xor(stripe, stripe->io.size, 0, pr); -+} -+ -+/* -+ * Recovery io throttling -+ */ -+/* Conditionally reset io counters. */ -+static int recover_io_reset(struct raid_set *rs) -+{ -+ unsigned long j = jiffies; -+ -+ /* Pay attention to jiffies overflows. */ -+ if (j > rs->recover.last_jiffies + HZ || -+ j < rs->recover.last_jiffies) { -+ atomic_set(rs->recover.io_count + IO_WORK, 0); -+ atomic_set(rs->recover.io_count + IO_RECOVER, 0); -+ rs->recover.last_jiffies = j; -+ return 1; -+ } -+ -+ return 0; -+} -+ -+/* Count ios. */ -+static void recover_io_count(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ -+ atomic_inc(rs->recover.io_count + -+ (StripeRecover(stripe) ? 
IO_RECOVER : IO_WORK)); -+} -+ -+/* Try getting a stripe either from the hash or from the LRU list. */ -+static struct stripe *stripe_find(struct raid_set *rs, -+ struct raid_address *addr) -+{ -+ int r; -+ struct stripe_cache *sc = &rs->sc; -+ struct stripe *stripe; -+ -+ /* Try stripe from hash. */ -+ stripe = stripe_lookup(sc, addr->key); -+ if (stripe) { -+ r = stripe_get(stripe); -+ if (r) -+ goto get_lock_failed; -+ -+ atomic_inc(rs->stats + S_HITS_1ST); /* REMOVEME: statistics. */ -+ } else { -+ /* Not in hash -> try to get an LRU stripe. */ -+ stripe = stripe_lru_pop(sc); -+ if (stripe) { -+ /* -+ * An LRU stripe may not be referenced -+ * and may never have ios pending! -+ */ -+ BUG_ON(stripe_ref(stripe)); -+ BUG_ON(stripe_io_ref(stripe)); -+ -+ /* Remove from hash if on before reuse. */ -+ stripe_hash_del(stripe); -+ -+ /* Invalidate before reinserting with changed key. */ -+ stripe_invalidate(stripe); -+ -+ stripe->key = addr->key; -+ stripe->region = dm_rh_sector_to_region(rs->recover.rh, -+ addr->key); -+ stripe->idx.parity = addr->pi; -+ r = stripe_get(stripe); -+ if (r) -+ goto get_lock_failed; -+ -+ /* Insert stripe into the stripe hash. */ -+ stripe_insert(&sc->hash, stripe); -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_INSCACHE); -+ } -+ } -+ -+ return stripe; -+ -+get_lock_failed: -+ stripe_put(stripe); -+ return NULL; -+} -+ -+/* -+ * Process end io -+ * -+ * I need to do it here because I can't in interrupt -+ */ -+/* End io all bios on a bio list. */ -+static void bio_list_endio(struct stripe *stripe, struct bio_list *bl, -+ int p, int error) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ struct bio *bio; -+ struct page_list *pl = PL(stripe, p); -+ struct stripe_chunk *chunk = CHUNK(stripe, p); -+ -+ /* Update region counters. */ -+ while ((bio = bio_list_pop(bl))) { -+ if (bio_data_dir(bio) == WRITE) -+ /* Drop io pending count for any writes. 
*/ -+ dm_rh_dec(rs->recover.rh, stripe->region); -+ else if (!error) -+ /* Copy data accross. */ -+ bio_copy_page_list(READ, stripe, pl, bio); -+ -+ bio_endio(bio, error); -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + (bio_data_dir(bio) == READ ? -+ S_BIOS_ENDIO_READ : S_BIOS_ENDIO_WRITE)); -+ -+ chunk_put(chunk); -+ stripe_put(stripe); -+ io_put(rs); /* Wake any suspend waiters on last bio. */ -+ } -+} -+ -+/* -+ * End io all reads/writes on a stripe copying -+ * read data accross from stripe to bios and -+ * decrementing region counters for writes. -+ * -+ * Processing of ios depeding on state: -+ * o no chunk error -> endio ok -+ * o degraded: -+ * - chunk error and read -> ignore to be requeued -+ * - chunk error and write -> endio ok -+ * o dead (more than parity_devs failed) and chunk_error-> endio failed -+ */ -+static void stripe_endio(int rw, struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ unsigned p = rs->set.raid_devs; -+ int write = (rw != READ); -+ -+ while (p--) { -+ struct stripe_chunk *chunk = CHUNK(stripe, p); -+ struct bio_list *bl; -+ -+ BUG_ON(ChunkLocked(chunk)); -+ -+ bl = BL_CHUNK(chunk, rw); -+ if (bio_list_empty(bl)) -+ continue; -+ -+ if (unlikely(ChunkError(chunk) || !ChunkUptodate(chunk))) { -+ /* RAID set dead. */ -+ if (unlikely(RSDead(rs))) -+ bio_list_endio(stripe, bl, p, -EIO); -+ /* RAID set degraded. */ -+ else if (write) -+ bio_list_endio(stripe, bl, p, 0); -+ } else { -+ BUG_ON(!RSDegraded(rs) && ChunkDirty(chunk)); -+ bio_list_endio(stripe, bl, p, 0); -+ } -+ } -+} -+ -+/* Fail all ios hanging off all bio lists of a stripe. */ -+static void stripe_fail_io(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ unsigned p = rs->set.raid_devs; -+ -+ while (p--) { -+ struct stripe_chunk *chunk = CHUNK(stripe, p); -+ int i = ARRAY_SIZE(chunk->bl); -+ -+ /* Fail all bios on all bio lists of the stripe. 
*/ -+ while (i--) { -+ struct bio_list *bl = chunk->bl + i; -+ -+ if (!bio_list_empty(bl)) -+ bio_list_endio(stripe, bl, p, -EIO); -+ } -+ } -+ -+ /* Put stripe on LRU list. */ -+ BUG_ON(stripe_io_ref(stripe)); -+ BUG_ON(stripe_ref(stripe)); -+} -+ -+/* Unlock all required chunks. */ -+static void stripe_chunks_unlock(struct stripe *stripe) -+{ -+ unsigned p = RS(stripe->sc)->set.raid_devs; -+ struct stripe_chunk *chunk; -+ -+ while (p--) { -+ chunk = CHUNK(stripe, p); -+ -+ if (TestClearChunkUnlock(chunk)) -+ ClearChunkLocked(chunk); -+ } -+} -+ -+/* -+ * Queue reads and writes to a stripe by hanging -+ * their bios off the stripesets read/write lists. -+ */ -+static int stripe_queue_bio(struct raid_set *rs, struct bio *bio, -+ struct bio_list *reject) -+{ -+ struct raid_address addr; -+ struct stripe *stripe; -+ -+ stripe = stripe_find(rs, raid_address(rs, bio->bi_sector, &addr)); -+ if (stripe) { -+ int r = 0, rw = bio_data_dir(bio); -+ -+ /* Distinguish reads and writes. */ -+ bio_list_add(BL(stripe, addr.di, rw), bio); -+ -+ if (rw == READ) -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_BIOS_ADDED_READ); -+ else { -+ /* Inrement pending write count on region. */ -+ dm_rh_inc(rs->recover.rh, stripe->region); -+ r = 1; -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_BIOS_ADDED_WRITE); -+ } -+ -+ /* -+ * Put on io (flush) list in case of -+ * initial bio queued to chunk. -+ */ -+ if (chunk_get(CHUNK(stripe, addr.di)) == 1) -+ stripe_flush_add(stripe); -+ -+ return r; -+ } -+ -+ /* Got no stripe from cache or failed to lock it -> reject bio. */ -+ bio_list_add(reject, bio); -+ atomic_inc(rs->stats + S_IOS_POST); /* REMOVEME: statistics. */ -+ return 0; -+} -+ -+/* -+ * Handle all stripes by handing them to the daemon, because we can't -+ * map their chunk pages to copy the data in interrupt context. -+ * -+ * We don't want to handle them here either, while interrupts are disabled. 
-+ */ -+ -+/* Read/write endio function for dm-io (interrupt context). */ -+static void endio(unsigned long error, void *context) -+{ -+ struct stripe_chunk *chunk = context; -+ -+ if (unlikely(error)) { -+ chunk_set(chunk, ERROR); -+ /* REMOVEME: statistics. */ -+ atomic_inc(RS(chunk->stripe->sc)->stats + S_STRIPE_ERROR); -+ } else -+ chunk_set(chunk, CLEAN); -+ -+ /* -+ * For recovery stripes, I need to reset locked locked -+ * here, because those aren't processed in do_endios(). -+ */ -+ if (unlikely(StripeRecover(chunk->stripe))) -+ ClearChunkLocked(chunk); -+ else -+ SetChunkUnlock(chunk); -+ -+ /* Indirectly puts stripe on cache's endio list via stripe_io_put(). */ -+ stripe_put_references(chunk->stripe); -+} -+ -+/* Read/Write a chunk asynchronously. */ -+static void stripe_chunk_rw(struct stripe *stripe, unsigned p) -+{ -+ struct stripe_cache *sc = stripe->sc; -+ struct raid_set *rs = RS(sc); -+ struct dm_mem_cache_object *obj = stripe->obj + p; -+ struct page_list *pl = obj->pl; -+ struct stripe_chunk *chunk = CHUNK(stripe, p); -+ struct raid_dev *dev = rs->dev + p; -+ struct dm_io_region io = { -+ .bdev = dev->dev->bdev, -+ .sector = stripe->key, -+ .count = stripe->io.size, -+ }; -+ struct dm_io_request control = { -+ .bi_rw = ChunkDirty(chunk) ? WRITE : READ, -+ .mem = { -+ .type = DM_IO_PAGE_LIST, -+ .ptr.pl = pl, -+ .offset = 0, -+ }, -+ .notify = { -+ .fn = endio, -+ .context = chunk, -+ }, -+ .client = StripeRecover(stripe) ? rs->recover.dm_io_client : -+ sc->dm_io_client, -+ }; -+ -+ BUG_ON(ChunkLocked(chunk)); -+ BUG_ON(!ChunkUptodate(chunk) && ChunkDirty(chunk)); -+ BUG_ON(ChunkUptodate(chunk) && !ChunkDirty(chunk)); -+ -+ /* -+ * Don't rw past end of device, which can happen, because -+ * typically sectors_per_dev isn't divisible by io_size. -+ */ -+ if (unlikely(io.sector + io.count > rs->set.sectors_per_dev)) -+ io.count = rs->set.sectors_per_dev - io.sector; -+ -+ BUG_ON(!io.count); -+ io.sector += dev->start; /* Add . 
*/ -+ if (RSRecover(rs)) -+ recover_io_count(stripe); /* Recovery io accounting. */ -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + (ChunkDirty(chunk) ? S_DM_IO_WRITE : -+ S_DM_IO_READ)); -+ SetChunkLocked(chunk); -+ SetDevIoQueued(dev); -+ BUG_ON(dm_io(&control, 1, &io, NULL)); -+} -+ -+/* -+ * Write dirty or read not uptodate page lists of a stripe. -+ */ -+static int stripe_chunks_rw(struct stripe *stripe) -+{ -+ int r; -+ struct raid_set *rs = RS(stripe->sc); -+ -+ /* -+ * Increment the pending count on the stripe -+ * first, so that we don't race in endio(). -+ * -+ * An inc (IO) is needed for any chunk unless !ChunkIo(chunk): -+ * -+ * o not uptodate -+ * o dirtied by writes merged -+ * o dirtied by parity calculations -+ */ -+ r = for_each_io_dev(stripe, stripe_get_references); -+ if (r) { -+ /* Io needed: chunks are either not uptodate or dirty. */ -+ int max; /* REMOVEME: */ -+ struct stripe_cache *sc = &rs->sc; -+ -+ /* Submit actual io. */ -+ for_each_io_dev(stripe, stripe_chunk_rw); -+ -+ /* REMOVEME: statistics */ -+ max = sc_active(sc); -+ if (atomic_read(&sc->active_stripes_max) < max) -+ atomic_set(&sc->active_stripes_max, max); -+ -+ atomic_inc(rs->stats + S_FLUSHS); -+ /* END REMOVEME: statistics */ -+ } -+ -+ return r; -+} -+ -+/* Merge in all writes hence dirtying respective chunks. */ -+static void stripe_merge_writes(struct stripe *stripe) -+{ -+ unsigned p = RS(stripe->sc)->set.raid_devs; -+ -+ while (p--) { -+ struct stripe_chunk *chunk = CHUNK(stripe, p); -+ struct bio_list *write = BL_CHUNK(chunk, WRITE_QUEUED); -+ -+ if (!bio_list_empty(write)) { -+ struct bio *bio; -+ struct page_list *pl = stripe->obj[p].pl; -+ -+ /* -+ * We can play with the lists without holding a lock, -+ * because it is just us accessing them anyway. 
-+ */ -+ bio_list_for_each(bio, write) -+ bio_copy_page_list(WRITE, stripe, pl, bio); -+ -+ bio_list_merge(BL_CHUNK(chunk, WRITE_MERGED), write); -+ bio_list_init(write); -+ chunk_set(chunk, DIRTY); -+ } -+ } -+} -+ -+/* Queue all writes to get merged. */ -+static int stripe_queue_writes(struct stripe *stripe) -+{ -+ int r = 0; -+ unsigned p = RS(stripe->sc)->set.raid_devs; -+ -+ while (p--) { -+ struct stripe_chunk *chunk = CHUNK(stripe, p); -+ struct bio_list *write = BL_CHUNK(chunk, WRITE); -+ -+ if (!bio_list_empty(write)) { -+ bio_list_merge(BL_CHUNK(chunk, WRITE_QUEUED), write); -+ bio_list_init(write); -+SetChunkIo(chunk); -+ r = 1; -+ } -+ } -+ -+ return r; -+} -+ -+ -+/* Check, if a chunk gets completely overwritten. */ -+static int stripe_check_chunk_overwrite(struct stripe *stripe, unsigned p) -+{ -+ unsigned sectors = 0; -+ struct bio *bio; -+ struct bio_list *bl = BL(stripe, p, WRITE_QUEUED); -+ -+ bio_list_for_each(bio, bl) -+ sectors += bio_sectors(bio); -+ -+ BUG_ON(sectors > RS(stripe->sc)->set.io_size); -+ return sectors == RS(stripe->sc)->set.io_size; -+} -+ -+/* -+ * Avoid io on broken/reconstructed drive in order to -+ * reconstruct date on endio. -+ * -+ * (*1*) We set StripeReconstruct() in here, so that _do_endios() -+ * will trigger a reconstruct call before resetting it. -+ */ -+static int stripe_chunk_set_io_flags(struct stripe *stripe, int pr) -+{ -+ struct stripe_chunk *chunk = CHUNK(stripe, pr); -+ -+ /* -+ * Allow io on all chunks but the indexed one, -+ * because we're either degraded or prohibit it -+ * on the one for later reconstruction. -+ */ -+ /* Includes ClearChunkIo(), ClearChunkUptodate(). */ -+ stripe_chunk_invalidate(chunk); -+ stripe->idx.recover = pr; -+ SetStripeReconstruct(stripe); -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(RS(stripe->sc)->stats + S_PROHIBITCHUNKIO); -+ return -EPERM; -+} -+ -+/* Chunk locked/uptodate and device failed tests. 
*/ -+static struct stripe_chunk * -+stripe_chunk_check(struct stripe *stripe, unsigned p, unsigned *chunks_uptodate) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ struct stripe_chunk *chunk = CHUNK(stripe, p); -+ -+ /* Can't access active chunks. */ -+ if (ChunkLocked(chunk)) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_CHUNK_LOCKED); -+ return NULL; -+ } -+ -+ /* Can't access broken devive. */ -+ if (ChunkError(chunk) || DevFailed(rs->dev + p)) -+ return NULL; -+ -+ /* Can access uptodate chunks. */ -+ if (ChunkUptodate(chunk)) { -+ (*chunks_uptodate)++; -+ return NULL; -+ } -+ -+ return chunk; -+} -+ -+/* -+ * Degraded/reconstruction mode. -+ * -+ * Check stripe state to figure which chunks don't need IO. -+ * -+ * Returns 0 for fully operational, -EPERM for degraded/resynchronizing. -+ */ -+static int stripe_check_reconstruct(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ -+ if (RSDead(rs)) { -+ ClearStripeReconstruct(stripe); -+ ClearStripeReconstructed(stripe); -+ stripe_allow_io(stripe); -+ return 0; -+ } -+ -+ /* Avoid further reconstruction setting, when already set. */ -+ if (StripeReconstruct(stripe)) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_RECONSTRUCT_SET); -+ return -EBUSY; -+ } -+ -+ /* Initially allow io on all chunks. */ -+ stripe_allow_io(stripe); -+ -+ /* Return if stripe is already reconstructed. */ -+ if (StripeReconstructed(stripe)) { -+ atomic_inc(rs->stats + S_RECONSTRUCTED); -+ return 0; -+ } -+ -+ /* -+ * Degraded/reconstruction mode (device failed) -> -+ * avoid io on the failed device. -+ */ -+ if (unlikely(RSDegraded(rs))) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_DEGRADED); -+ /* Allow IO on all devices but the dead one. */ -+ BUG_ON(rs->set.ei < 0); -+ return stripe_chunk_set_io_flags(stripe, rs->set.ei); -+ } else { -+ int sync, pi = dev_for_parity(stripe, &sync); -+ -+ /* -+ * Reconstruction mode (ie. 
a particular (replaced) device or -+ * some (rotating) parity chunk is being resynchronized) -> -+ * o make sure all needed chunks are read in -+ * o cope with 3/4 disk array special case where it -+ * doesn't make a difference to read in parity -+ * to xor data in/out -+ */ -+ if (RSEnforceParityCreation(rs) || !sync) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_NOSYNC); -+ /* Allow IO on all devs but the one to reconstruct. */ -+ return stripe_chunk_set_io_flags(stripe, pi); -+ } -+ } -+ -+ return 0; -+} -+ -+/* -+ * Check, if stripe is ready to merge writes. -+ * I.e. if all chunks present to allow to merge bios. -+ * -+ * We prohibit io on: -+ * -+ * o chunks without bios -+ * o chunks which get completely written over -+ */ -+static int stripe_merge_possible(struct stripe *stripe, int nosync) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ unsigned chunks_overwrite = 0, chunks_prohibited = 0, -+ chunks_uptodate = 0, p = rs->set.raid_devs; -+ -+ /* Walk all chunks. */ -+ while (p--) { -+ struct stripe_chunk *chunk; -+ -+ /* Prohibit io on broken devices. */ -+ if (DevFailed(rs->dev + p)) { -+ chunk = CHUNK(stripe, p); -+ goto prohibit_io; -+ } -+ -+ /* We can't optimize any further if no chunk. */ -+ chunk = stripe_chunk_check(stripe, p, &chunks_uptodate); -+ if (!chunk || nosync) -+ continue; -+ -+ /* -+ * We have a chunk, which is not uptodate. -+ * -+ * If this is not parity and we don't have -+ * reads queued, we can optimize further. -+ */ -+ if (p != stripe->idx.parity && -+ bio_list_empty(BL_CHUNK(chunk, READ)) && -+ bio_list_empty(BL_CHUNK(chunk, WRITE_MERGED))) { -+ if (bio_list_empty(BL_CHUNK(chunk, WRITE_QUEUED))) -+ goto prohibit_io; -+ else if (RSCheckOverwrite(rs) && -+ stripe_check_chunk_overwrite(stripe, p)) -+ /* Completely overwritten chunk. */ -+ chunks_overwrite++; -+ } -+ -+ /* Allow io for chunks with bios and overwritten ones. 
*/ -+ SetChunkIo(chunk); -+ continue; -+ -+prohibit_io: -+ /* No io for broken devices or for chunks w/o bios. */ -+ ClearChunkIo(chunk); -+ chunks_prohibited++; -+ /* REMOVEME: statistics. */ -+ atomic_inc(RS(stripe->sc)->stats + S_PROHIBITCHUNKIO); -+ } -+ -+ /* All data chunks will get written over. */ -+ if (chunks_overwrite == rs->set.data_devs) -+ atomic_inc(rs->stats + S_OVERWRITE); /* REMOVEME: statistics.*/ -+ else if (chunks_uptodate + chunks_prohibited < rs->set.raid_devs) { -+ /* We don't have enough chunks to merge. */ -+ atomic_inc(rs->stats + S_CANT_MERGE); /* REMOVEME: statistics.*/ -+ return -EPERM; -+ } -+ -+ /* -+ * If we have all chunks up to date or overwrite them, we -+ * just zero the parity chunk and let stripe_rw() recreate it. -+ */ -+ if (chunks_uptodate == rs->set.raid_devs || -+ chunks_overwrite == rs->set.data_devs) { -+ stripe_zero_chunk(stripe, stripe->idx.parity); -+ BUG_ON(StripeReconstruct(stripe)); -+ SetStripeReconstruct(stripe); /* Enforce xor in caller. */ -+ } else { -+ /* -+ * With less chunks, we xor parity out. -+ * -+ * (*4*) We rely on !StripeReconstruct() in chunk_must_xor(), -+ * so that only chunks with queued or merged writes -+ * are being xored. -+ */ -+ parity_xor(stripe); -+ } -+ -+ /* -+ * We do have enough chunks to merge. -+ * All chunks are uptodate or get written over. -+ */ -+ atomic_inc(rs->stats + S_CAN_MERGE); /* REMOVEME: statistics. */ -+ return 0; -+} -+ -+/* -+ * Avoid reading chunks in case we're fully operational. -+ * -+ * We prohibit io on any chunks without bios but the parity chunk. -+ */ -+static void stripe_avoid_reads(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ unsigned dummy = 0, p = rs->set.raid_devs; -+ -+ /* Walk all chunks. */ -+ while (p--) { -+ struct stripe_chunk *chunk = -+ stripe_chunk_check(stripe, p, &dummy); -+ -+ if (!chunk) -+ continue; -+ -+ /* If parity or any bios pending -> allow io. 
*/ -+ if (chunk_ref(chunk) || p == stripe->idx.parity) -+ SetChunkIo(chunk); -+ else { -+ ClearChunkIo(chunk); -+ /* REMOVEME: statistics. */ -+ atomic_inc(RS(stripe->sc)->stats + S_PROHIBITCHUNKIO); -+ } -+ } -+} -+ -+/* -+ * Read/write a stripe. -+ * -+ * All stripe read/write activity goes through this function -+ * unless recovery, which has to call stripe_chunk_rw() directly. -+ * -+ * Make sure we don't try already merged stripes in order -+ * to avoid data corruption. -+ * -+ * Check the state of the RAID set and if degraded (or -+ * resynchronizing for reads), read in all other chunks but -+ * the one on the dead/resynchronizing device in order to be -+ * able to reconstruct the missing one in _do_endios(). -+ * -+ * Can be called on active stripes in order -+ * to dispatch new io on inactive chunks. -+ * -+ * States to cover: -+ * o stripe to read and/or write -+ * o stripe with error to reconstruct -+ */ -+static int stripe_rw(struct stripe *stripe) -+{ -+ int nosync, r; -+ struct raid_set *rs = RS(stripe->sc); -+ -+ /* -+ * Check, if a chunk needs to be reconstructed -+ * because of a degraded set or a region out of sync. -+ */ -+ nosync = stripe_check_reconstruct(stripe); -+ switch (nosync) { -+ case -EBUSY: -+ return 0; /* Wait for stripe reconstruction to finish. */ -+ case -EPERM: -+ goto io; -+ } -+ -+ /* -+ * If we don't have merged writes pending, we can schedule -+ * queued writes to be merged next without corrupting data. -+ */ -+ if (!StripeMerged(stripe)) { -+ r = stripe_queue_writes(stripe); -+ if (r) -+ /* Writes got queued -> flag RBW. */ -+ SetStripeRBW(stripe); -+ } -+ -+ /* -+ * Merge all writes hanging off uptodate/overwritten -+ * chunks of the stripe. -+ */ -+ if (StripeRBW(stripe)) { -+ r = stripe_merge_possible(stripe, nosync); -+ if (!r) { /* Merge possible. */ -+ struct stripe_chunk *chunk; -+ -+ /* -+ * I rely on valid parity in order -+ * to xor a fraction of chunks out -+ * of parity and back in. 
-+ */ -+ stripe_merge_writes(stripe); /* Merge writes in. */ -+ parity_xor(stripe); /* Update parity. */ -+ ClearStripeReconstruct(stripe); /* Reset xor enforce. */ -+ SetStripeMerged(stripe); /* Writes merged. */ -+ ClearStripeRBW(stripe); /* Disable RBW. */ -+ -+ /* -+ * REMOVEME: sanity check on parity chunk -+ * states after writes got merged. -+ */ -+ chunk = CHUNK(stripe, stripe->idx.parity); -+ BUG_ON(ChunkLocked(chunk)); -+ BUG_ON(!ChunkUptodate(chunk)); -+ BUG_ON(!ChunkDirty(chunk)); -+ BUG_ON(!ChunkIo(chunk)); -+ } -+ } else if (!nosync && !StripeMerged(stripe)) -+ /* Read avoidance if not degraded/resynchronizing/merged. */ -+ stripe_avoid_reads(stripe); -+ -+io: -+ /* Now submit any reads/writes for non-uptodate or dirty chunks. */ -+ r = stripe_chunks_rw(stripe); -+ if (!r) { -+ /* -+ * No io submitted because of chunk io -+ * prohibited or locked chunks/failed devices -+ * -> push to end io list for processing. -+ */ -+ stripe_endio_push(stripe); -+ atomic_inc(rs->stats + S_NO_RW); /* REMOVEME: statistics. */ -+ } -+ -+ return r; -+} -+ -+/* -+ * Recovery functions -+ */ -+/* Read a stripe off a raid set for recovery. */ -+static int stripe_recover_read(struct stripe *stripe, int pi) -+{ -+ BUG_ON(stripe_io_ref(stripe)); -+ -+ /* Invalidate all chunks so that they get read in. */ -+ stripe_chunks_invalidate(stripe); -+ stripe_allow_io(stripe); /* Allow io on all recovery chunks. */ -+ -+ /* -+ * If we are reconstructing a perticular device, we can avoid -+ * reading the respective chunk in, because we're going to -+ * reconstruct it anyway. -+ * -+ * We can't do that for resynchronization of rotating parity, -+ * because the recovery stripe chunk size is typically larger -+ * than the sets chunk size. -+ */ -+ if (pi > -1) -+ ClearChunkIo(CHUNK(stripe, pi)); -+ -+ return stripe_chunks_rw(stripe); -+} -+ -+/* Write a stripe to a raid set for recovery. 
*/ -+static int stripe_recover_write(struct stripe *stripe, int pi) -+{ -+ BUG_ON(stripe_io_ref(stripe)); -+ -+ /* -+ * If this is a reconstruct of a particular device, then -+ * reconstruct the respective chunk, else create parity chunk. -+ */ -+ if (pi > -1) { -+ stripe_zero_chunk(stripe, pi); -+ common_xor(stripe, stripe->io.size, 0, pi); -+ chunk_set(CHUNK(stripe, pi), DIRTY); -+ } else -+ parity_xor(stripe); -+ -+ return stripe_chunks_rw(stripe); -+} -+ -+/* Read/write a recovery stripe. */ -+static int stripe_recover_rw(struct stripe *stripe) -+{ -+ int r = 0, sync = 0; -+ -+ /* Read/write flip-flop. */ -+ if (TestClearStripeRBW(stripe)) { -+ SetStripeMerged(stripe); -+ stripe->key = stripe->recover->pos; -+ r = stripe_recover_read(stripe, dev_for_parity(stripe, &sync)); -+ BUG_ON(!r); -+ } else if (TestClearStripeMerged(stripe)) { -+ r = stripe_recover_write(stripe, dev_for_parity(stripe, &sync)); -+ BUG_ON(!r); -+ } -+ -+ BUG_ON(sync); -+ return r; -+} -+ -+/* Recover bandwidth available ?. */ -+static int recover_bandwidth(struct raid_set *rs) -+{ -+ int r, work; -+ -+ /* On reset or when bios delayed -> allow recovery. */ -+ r = recover_io_reset(rs); -+ if (r || RSBandwidth(rs)) -+ goto out; -+ -+ work = atomic_read(rs->recover.io_count + IO_WORK); -+ if (work) { -+ /* Pay attention to larger recover stripe size. */ -+ int recover = atomic_read(rs->recover.io_count + IO_RECOVER) * -+ rs->recover.io_size / rs->set.io_size; -+ -+ /* -+ * Don't use more than given bandwidth -+ * of the work io for recovery. -+ */ -+ if (recover > work / rs->recover.bandwidth_work) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_NO_BANDWIDTH); -+ return 0; -+ } -+ } -+ -+out: -+ atomic_inc(rs->stats + S_BANDWIDTH); /* REMOVEME: statistics. */ -+ return 1; -+} -+ -+/* Try to get a region to recover. 
*/ -+static int stripe_recover_get_region(struct stripe *stripe) -+{ -+ struct raid_set *rs = RS(stripe->sc); -+ struct recover *rec = &rs->recover; -+ struct recover_addr *addr = stripe->recover; -+ struct dm_dirty_log *dl = rec->dl; -+ struct dm_rh_client *rh = rec->rh; -+ -+ BUG_ON(!dl); -+ BUG_ON(!rh); -+ -+ /* Return, that we have region first to finish it during suspension. */ -+ if (addr->reg) -+ return 1; -+ -+ if (RSSuspend(rs)) -+ return -EPERM; -+ -+ if (dl->type->get_sync_count(dl) >= rec->nr_regions) -+ return -ENOENT; -+ -+ /* If we don't have enough bandwidth, we don't proceed recovering. */ -+ if (!recover_bandwidth(rs)) -+ return -EAGAIN; -+ -+ /* Start quiescing a region. */ -+ dm_rh_recovery_prepare(rh); -+ addr->reg = dm_rh_recovery_start(rh); -+ if (!addr->reg) -+ return -EAGAIN; -+ -+ addr->pos = dm_rh_region_to_sector(rh, dm_rh_get_region_key(addr->reg)); -+ addr->end = addr->pos + dm_rh_get_region_size(rh); -+ -+ /* -+ * Take one global io reference out for the -+ * whole region, which is going to be released -+ * when the region is completely done with. -+ */ -+ io_get(rs); -+ return 0; -+} -+ -+/* Update region hash state. */ -+enum recover_type { REC_FAILURE = 0, REC_SUCCESS = 1 }; -+static void recover_rh_update(struct stripe *stripe, enum recover_type success) -+{ -+ struct recover_addr *addr = stripe->recover; -+ struct raid_set *rs = RS(stripe->sc); -+ struct recover *rec = &rs->recover; -+ -+ if (!addr->reg) { -+ DMERR("%s- Called w/o region", __func__); -+ return; -+ } -+ -+ dm_rh_recovery_end(addr->reg, success); -+ if (success) -+ rec->nr_regions_recovered++; -+ -+ addr->reg = NULL; -+ -+ /* -+ * Completely done with this region -> -+ * release the 1st io reference. -+ */ -+ io_put(rs); -+} -+ -+/* Set start of recovery state. */ -+static void set_start_recovery(struct raid_set *rs) -+{ -+ /* Initialize recovery. */ -+ rs->recover.start_jiffies = jiffies; -+ rs->recover.end_jiffies = 0; -+} -+ -+/* Set end of recovery state. 
*/ -+static void set_end_recovery(struct raid_set *rs) -+{ -+ ClearRSRecover(rs); -+/* Achtung: nicht mehr zurück setzten -> 'i' belibt in status output und userpace könnte sich darauf verlassen, das es verschiwndet!!!! */ -+ rs->set.dev_to_init = -1; -+ -+ /* Check for jiffies overrun. */ -+ rs->recover.end_jiffies = jiffies; -+ if (rs->recover.end_jiffies < rs->recover.start_jiffies) -+ rs->recover.end_jiffies = ~0; -+} -+ -+/* Handle recovery on one recovery stripe. */ -+static int _do_recovery(struct stripe *stripe) -+{ -+ int r; -+ struct raid_set *rs = RS(stripe->sc); -+ struct recover_addr *addr = stripe->recover; -+ -+ /* If recovery is active -> return. */ -+ if (stripe_io_ref(stripe)) -+ return 1; -+ -+ /* IO error is fatal for recovery -> stop it. */ -+ if (unlikely(StripeError(stripe))) -+ goto err; -+ -+ /* Recovery end required. */ -+ if (unlikely(RSDegraded(rs))) -+ goto err; -+ -+ /* Get a region to recover. */ -+ r = stripe_recover_get_region(stripe); -+ switch (r) { -+ case 0: /* Got a new region: flag initial read before write. */ -+ SetStripeRBW(stripe); -+ case 1: /* Have a region in the works. */ -+ break; -+ case -EAGAIN: -+ /* No bandwidth/quiesced region yet, try later. */ -+ if (!io_ref(rs)) -+ wake_do_raid_delayed(rs, HZ / 4); -+ case -EPERM: -+ /* Suspend. */ -+ return 1; -+ case -ENOENT: /* No more regions to recover. */ -+ schedule_work(&rs->io.ws_do_table_event); -+ return 0; -+ default: -+ BUG(); -+ } -+ -+ /* Read/write a recover stripe. */ -+ r = stripe_recover_rw(stripe); -+ if (r) -+ /* IO initiated. */ -+ return 1; -+ -+ /* Read and write finished-> update recovery position within region. */ -+ addr->pos += stripe->io.size; -+ -+ /* If we're at end of region, update region hash. */ -+ if (addr->pos >= addr->end || -+ addr->pos >= rs->set.sectors_per_dev) -+ recover_rh_update(stripe, REC_SUCCESS); -+ else -+ /* Prepare to read next region segment. */ -+ SetStripeRBW(stripe); -+ -+ /* Schedule myself for another round... 
*/ -+ wake_do_raid(rs); -+ return 1; -+ -+err: -+ /* FIXME: rather try recovering other regions on error? */ -+ rs_check_degrade(stripe); -+ recover_rh_update(stripe, REC_FAILURE); -+ -+ /* Check state of partially recovered array. */ -+ if (RSDegraded(rs) && !RSDead(rs) && -+ rs->set.dev_to_init != -1 && -+ rs->set.ei != rs->set.dev_to_init) { -+ /* Broken drive != drive to recover -> FATAL. */ -+ SetRSDead(rs); -+ DMERR("FATAL: failed device != device to initialize -> " -+ "RAID set broken"); -+ } -+ -+ if (StripeError(stripe) || RSDegraded(rs)) { -+ char buf[BDEVNAME_SIZE]; -+ -+ DMERR("stopping recovery due to " -+ "ERROR on /dev/%s, stripe at offset %llu", -+ bdevname(rs->dev[rs->set.ei].dev->bdev, buf), -+ (unsigned long long) stripe->key); -+ -+ } -+ -+ /* Make sure, that all quiesced regions get released. */ -+ while (addr->reg) { -+ dm_rh_recovery_end(addr->reg, -EIO); -+ addr->reg = dm_rh_recovery_start(rs->recover.rh); -+ } -+ -+ return 0; -+} -+ -+/* Called by main io daemon to recover regions. */ -+static int do_recovery(struct raid_set *rs) -+{ -+ if (RSRecover(rs)) { -+ int r = 0; -+ struct stripe *stripe; -+ -+ list_for_each_entry(stripe, &rs->recover.stripes, -+ lists[LIST_RECOVER]) -+ r += _do_recovery(stripe); -+ -+ if (r) -+ return r; -+ -+ set_end_recovery(rs); -+ stripe_recover_free(rs); -+ } -+ -+ return 0; -+} -+ -+/* -+ * END recovery functions -+ */ -+ -+/* End io process all stripes handed in by endio() callback. */ -+static void _do_endios(struct raid_set *rs, struct stripe *stripe, -+ struct list_head *flush_list) -+{ -+ /* First unlock all required chunks. */ -+ stripe_chunks_unlock(stripe); -+ -+ /* -+ * If an io error on a stripe occured, degrade the RAID set -+ * and try to endio as many bios as possible. If any bios can't -+ * be endio processed, requeue the stripe (stripe_ref() != 0). 
-+ */ -+ if (TestClearStripeError(stripe)) { -+ /* -+ * FIXME: if read, rewrite the failed chunk after reconstruction -+ * in order to trigger disk bad sector relocation. -+ */ -+ rs_check_degrade(stripe); /* Resets ChunkError(). */ -+ ClearStripeReconstruct(stripe); -+ ClearStripeReconstructed(stripe); -+ -+ /* -+ * FIXME: if write, don't endio writes in flight and don't -+ * allow for new writes until userspace has updated -+ * its metadata. -+ */ -+ } -+ -+ /* Got to reconstruct a missing chunk. */ -+ if (StripeReconstruct(stripe)) { -+ /* -+ * (*2*) We use StripeReconstruct() to allow for -+ * all chunks to be xored into the reconstructed -+ * one (see chunk_must_xor()). -+ */ -+ stripe_reconstruct(stripe); -+ -+ /* -+ * (*3*) Now we reset StripeReconstruct() and flag -+ * StripeReconstructed() to show to stripe_rw(), -+ * that we have reconstructed a missing chunk. -+ */ -+ ClearStripeReconstruct(stripe); -+ SetStripeReconstructed(stripe); -+ -+ /* FIXME: reschedule to be written in case of read. */ -+ /* if (!RSDead && RSDegraded(rs) !StripeRBW(stripe)) { -+ chunk_set(CHUNK(stripe, stripe->idx.recover), DIRTY); -+ stripe_chunks_rw(stripe); -+ } */ -+ -+ stripe->idx.recover = -1; -+ } -+ -+ /* -+ * Now that we eventually got a complete stripe, we -+ * can process the rest of the end ios on reads. -+ */ -+ stripe_endio(READ, stripe); -+ -+ /* End io all merged writes if not prohibited. */ -+ if (!RSProhibitWrites(rs) && StripeMerged(stripe)) { -+ ClearStripeMerged(stripe); -+ stripe_endio(WRITE_MERGED, stripe); -+ } -+ -+ /* If RAID set is dead -> fail any ios to dead drives. */ -+ if (RSDead(rs)) { -+ if (!TestSetRSDeadEndioMessage(rs)) -+ DMERR("RAID set dead: failing ios to dead devices"); -+ -+ stripe_fail_io(stripe); -+ } -+ -+ /* -+ * We have stripe references still, -+ * beacuse of read before writes or IO errors -> -+ * got to put on flush list for processing. 
-+ */ -+ if (stripe_ref(stripe)) { -+ BUG_ON(!list_empty(stripe->lists + LIST_LRU)); -+ list_add_tail(stripe->lists + LIST_FLUSH, flush_list); -+ atomic_inc(rs->stats + S_REQUEUE); /* REMOVEME: statistics. */ -+ } else -+ stripe_lru_add(stripe); -+} -+ -+/* Pop any endio stripes off of the endio list and belabour them. */ -+static void do_endios(struct raid_set *rs) -+{ -+ struct stripe_cache *sc = &rs->sc; -+ struct stripe *stripe; -+ /* IO flush list for sorted requeued stripes. */ -+ struct list_head flush_list; -+ -+ INIT_LIST_HEAD(&flush_list); -+ -+ while ((stripe = stripe_endio_pop(sc))) { -+ /* Avoid endio on stripes with newly io'ed chunks. */ -+ if (!stripe_io_ref(stripe)) -+ _do_endios(rs, stripe, &flush_list); -+ } -+ -+ /* -+ * Insert any requeued stripes in the proper -+ * order at the beginning of the io (flush) list. -+ */ -+ list_splice(&flush_list, sc->lists + LIST_FLUSH); -+} -+ -+/* Flush any stripes on the io list. */ -+static int do_flush(struct raid_set *rs) -+{ -+ int r = 0; -+ struct stripe *stripe; -+ -+ while ((stripe = stripe_io_pop(&rs->sc))) -+ r += stripe_rw(stripe); /* Read/write stripe. */ -+ -+ return r; -+} -+ -+/* Stripe cache resizing. */ -+static void do_sc_resize(struct raid_set *rs) -+{ -+ unsigned set = atomic_read(&rs->sc.stripes_to_set); -+ -+ if (set) { -+ unsigned cur = atomic_read(&rs->sc.stripes); -+ int r = (set > cur) ? sc_grow(&rs->sc, set - cur, SC_GROW) : -+ sc_shrink(&rs->sc, cur - set); -+ -+ /* Flag end of resizeing if ok. */ -+ if (!r) -+ atomic_set(&rs->sc.stripes_to_set, 0); -+ } -+} -+ -+/* -+ * Process all ios -+ * -+ * We do different things with the io depending -+ * on the state of the region that it is in: -+ * -+ * o reads: hang off stripe cache or postpone if full -+ * -+ * o writes: -+ * -+ * CLEAN/DIRTY/NOSYNC: increment pending and hang io off stripe's stripe set. -+ * In case stripe cache is full or busy, postpone the io. -+ * -+ * RECOVERING: delay the io until recovery of the region completes. 
-+ * -+ */ -+static void do_ios(struct raid_set *rs, struct bio_list *ios) -+{ -+ int r; -+ unsigned flush = 0, delay = 0; -+ sector_t sector; -+ struct dm_rh_client *rh = rs->recover.rh; -+ struct bio *bio; -+ struct bio_list reject; -+ -+ bio_list_init(&reject); -+ -+ /* -+ * Classify each io: -+ * o delay writes to recovering regions (let reads go through) -+ * o queue io to all other regions -+ */ -+ while ((bio = bio_list_pop(ios))) { -+ /* -+ * In case we get a barrier bio, push it back onto -+ * the input queue unless all work queues are empty -+ * and the stripe cache is inactive. -+ */ -+ if (unlikely(bio_empty_barrier(bio))) { -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + S_BARRIER); -+ if (delay || -+ !list_empty(rs->sc.lists + LIST_FLUSH) || -+ !bio_list_empty(&reject) || -+ sc_active(&rs->sc)) { -+ bio_list_push(ios, bio); -+ break; -+ } -+ } -+ -+ /* If writes prohibited because of failures -> postpone. */ -+ if (RSProhibitWrites(rs) && bio_data_dir(bio) == WRITE) { -+ bio_list_add(&reject, bio); -+ continue; -+ } -+ -+ /* Check for recovering regions. */ -+ sector = _sector(rs, bio); -+ r = region_state(rs, sector, DM_RH_RECOVERING); -+ if (unlikely(r)) { -+ delay++; -+ /* Wait writing to recovering regions. */ -+ dm_rh_delay_by_region(rh, bio, -+ dm_rh_sector_to_region(rh, -+ sector)); -+ /* REMOVEME: statistics.*/ -+ atomic_inc(rs->stats + S_DELAYED_BIOS); -+ atomic_inc(rs->stats + S_SUM_DELAYED_BIOS); -+ -+ /* Force bandwidth tests in recovery. */ -+ SetRSBandwidth(rs); -+ } else { -+ /* -+ * Process ios to non-recovering regions by queueing -+ * them to stripes (does dm_rh_inc()) for writes). -+ */ -+ flush += stripe_queue_bio(rs, bio, &reject); -+ } -+ } -+ -+ if (flush) { -+ /* FIXME: better error handling. */ -+ r = dm_rh_flush(rh); /* Writes got queued -> flush dirty log. */ -+ if (r) -+ DMERR_LIMIT("dirty log flush"); -+ } -+ -+ /* Merge any rejected bios back to the head of the input list. 
*/ -+ bio_list_merge_head(ios, &reject); -+} -+ -+/* Unplug: let any queued io role on the sets devices. */ -+static void do_unplug(struct raid_set *rs) -+{ -+ struct raid_dev *dev = rs->dev + rs->set.raid_devs; -+ -+ while (dev-- > rs->dev) { -+ /* Only call any device unplug function, if io got queued. */ -+ if (TestClearDevIoQueued(dev)) -+ blk_unplug(bdev_get_queue(dev->dev->bdev)); -+ } -+} -+ -+/* Send an event in case we're getting too busy. */ -+static void do_busy_event(struct raid_set *rs) -+{ -+ if (sc_busy(rs)) { -+ if (!TestSetRSScBusy(rs)) -+ schedule_work(&rs->io.ws_do_table_event); -+ } else -+ ClearRSScBusy(rs); -+} -+ -+/* Throw an event. */ -+static void do_table_event(struct work_struct *ws) -+{ -+ struct raid_set *rs = container_of(ws, struct raid_set, -+ io.ws_do_table_event); -+ dm_table_event(rs->ti->table); -+} -+ -+ -+/*----------------------------------------------------------------- -+ * RAID daemon -+ *---------------------------------------------------------------*/ -+/* -+ * o belabour all end ios -+ * o update the region hash states -+ * o optionally shrink the stripe cache -+ * o optionally do recovery -+ * o unplug any component raid devices with queued bios -+ * o grab the input queue -+ * o work an all requeued or new ios and perform stripe cache flushs -+ * o unplug any component raid devices with queued bios -+ * o check, if the stripe cache gets too busy and throw an event if so -+ */ -+static void do_raid(struct work_struct *ws) -+{ -+ int r; -+ struct raid_set *rs = container_of(ws, struct raid_set, -+ io.dws_do_raid.work); -+ struct bio_list *ios = &rs->io.work, *ios_in = &rs->io.in; -+ -+ /* -+ * We always need to end io, so that ios can get errored in -+ * case the set failed and the region counters get decremented -+ * before we update region hash states and go any further. 
-+ */ -+ do_endios(rs); -+ dm_rh_update_states(rs->recover.rh, 1); -+ -+ /* -+ * Now that we've end io'd, which may have put stripes on the LRU list -+ * to allow for shrinking, we resize the stripe cache if requested. -+ */ -+ do_sc_resize(rs); -+ -+ /* Try to recover regions. */ -+ r = do_recovery(rs); -+ if (r) -+ do_unplug(rs); /* Unplug the sets device queues. */ -+ -+ /* Quickly grab all new ios queued and add them to the work list. */ -+ mutex_lock(&rs->io.in_lock); -+ bio_list_merge(ios, ios_in); -+ bio_list_init(ios_in); -+ mutex_unlock(&rs->io.in_lock); -+ -+ if (!bio_list_empty(ios)) -+ do_ios(rs, ios); /* Got ios to work into the cache. */ -+ -+ r = do_flush(rs); /* Flush any stripes on io list. */ -+ if (r) -+ do_unplug(rs); /* Unplug the sets device queues. */ -+ -+ do_busy_event(rs); /* Check if we got too busy. */ -+} -+ -+/* -+ * Callback for region hash to dispatch -+ * delayed bios queued to recovered regions -+ * (gets called via dm_rh_update_states()). -+ */ -+static void dispatch_delayed_bios(void *context, struct bio_list *bl) -+{ -+ struct raid_set *rs = context; -+ struct bio *bio; -+ -+ /* REMOVEME: statistics; decrement pending delayed bios counter. */ -+ bio_list_for_each(bio, bl) -+ atomic_dec(rs->stats + S_DELAYED_BIOS); -+ -+ /* Merge region hash private list to work list. */ -+ bio_list_merge_head(&rs->io.work, bl); -+ bio_list_init(bl); -+ ClearRSBandwidth(rs); -+} -+ -+/************************************************************* -+ * Constructor helpers -+ *************************************************************/ -+/* Calculate MB/sec. */ -+static unsigned mbpers(struct raid_set *rs, unsigned io_size) -+{ -+ return to_bytes((rs->xor.speed * rs->set.data_devs * -+ io_size * HZ / XOR_SPEED_TICKS) >> 10) >> 10; -+} -+ -+/* -+ * Discover fastest xor algorithm and # of chunks combination. -+ */ -+/* Calculate speed of particular algorithm and # of chunks. 
*/ -+static unsigned xor_speed(struct stripe *stripe) -+{ -+ int ticks = XOR_SPEED_TICKS; -+ unsigned p = RS(stripe->sc)->set.raid_devs, r = 0; -+ unsigned long j; -+ -+ /* Set uptodate so that common_xor()->xor() will belabour chunks. */ -+ while (p--) -+ SetChunkUptodate(CHUNK(stripe, p)); -+ -+ /* Wait for next tick. */ -+ for (j = jiffies; j == jiffies; ); -+ -+ /* Do xors for a few ticks. */ -+ while (ticks--) { -+ unsigned xors = 0; -+ -+ for (j = jiffies; j == jiffies; ) { -+ mb(); -+ common_xor(stripe, stripe->io.size, 0, 0); -+ mb(); -+ xors++; -+ mb(); -+ } -+ -+ if (xors > r) -+ r = xors; -+ } -+ -+ return r; -+} -+ -+/* Define for xor multi recovery stripe optimization runs. */ -+#define DMRAID45_XOR_TEST -+ -+/* Optimize xor algorithm for this RAID set. */ -+static unsigned xor_optimize(struct raid_set *rs) -+{ -+ unsigned chunks_max = 2, speed_max = 0; -+ struct xor_func *f = ARRAY_END(xor_funcs), *f_max = NULL; -+ struct stripe *stripe; -+ unsigned io_size = 0, speed_hm = 0, speed_min = ~0, speed_xor_blocks = 0; -+ -+ BUG_ON(list_empty(&rs->recover.stripes)); -+#ifndef DMRAID45_XOR_TEST -+ stripe = list_first_entry(&rs->recover.stripes, struct stripe, -+ lists[LIST_RECOVER]); -+#endif -+ -+ /* Try all xor functions. */ -+ while (f-- > xor_funcs) { -+ unsigned speed; -+ -+#ifdef DMRAID45_XOR_TEST -+ list_for_each_entry(stripe, &rs->recover.stripes, -+ lists[LIST_RECOVER]) { -+ io_size = stripe->io.size; -+#endif -+ -+ /* Set actual xor function for common_xor(). */ -+ rs->xor.f = f; -+ rs->xor.chunks = (f->f == xor_blocks_wrapper ? 
-+ (MAX_XOR_BLOCKS + 1) : -+ XOR_CHUNKS_MAX); -+ if (rs->xor.chunks > rs->set.raid_devs) -+ rs->xor.chunks = rs->set.raid_devs; -+ -+ for ( ; rs->xor.chunks > 1; rs->xor.chunks--) { -+ speed = xor_speed(stripe); -+ -+#ifdef DMRAID45_XOR_TEST -+ if (f->f == xor_blocks_wrapper) { -+ if (speed > speed_xor_blocks) -+ speed_xor_blocks = speed; -+ } else if (speed > speed_hm) -+ speed_hm = speed; -+ -+ if (speed < speed_min) -+ speed_min = speed; -+#endif -+ -+ if (speed > speed_max) { -+ speed_max = speed; -+ chunks_max = rs->xor.chunks; -+ f_max = f; -+ } -+ } -+#ifdef DMRAID45_XOR_TEST -+ } -+#endif -+ } -+ -+ /* Memorize optimal parameters. */ -+ rs->xor.f = f_max; -+ rs->xor.chunks = chunks_max; -+#ifdef DMRAID45_XOR_TEST -+ DMINFO("%s stripes=%u/size=%u min=%u xor_blocks=%u hm=%u max=%u", -+ speed_max == speed_hm ? "HM" : "NB", -+ rs->recover.recovery_stripes, io_size, speed_min, -+ speed_xor_blocks, speed_hm, speed_max); -+#endif -+ return speed_max; -+} -+ -+/* -+ * Allocate a RAID context (a RAID set) -+ */ -+/* Structure for variable RAID parameters. 
*/ -+struct variable_parms { -+ int bandwidth; -+ int bandwidth_parm; -+ int chunk_size; -+ int chunk_size_parm; -+ int io_size; -+ int io_size_parm; -+ int stripes; -+ int stripes_parm; -+ int recover_io_size; -+ int recover_io_size_parm; -+ int raid_parms; -+ int recovery; -+ int recovery_stripes; -+ int recovery_stripes_parm; -+}; -+ -+static struct raid_set * -+context_alloc(struct raid_type *raid_type, struct variable_parms *p, -+ unsigned raid_devs, sector_t sectors_per_dev, -+ struct dm_target *ti, unsigned dl_parms, char **argv) -+{ -+ int r; -+ size_t len; -+ sector_t region_size, ti_len; -+ struct raid_set *rs = NULL; -+ struct dm_dirty_log *dl; -+ struct recover *rec; -+ -+ /* -+ * Create the dirty log -+ * -+ * We need to change length for the dirty log constructor, -+ * because we want an amount of regions for all stripes derived -+ * from the single device size, so that we can keep region -+ * size = 2^^n independant of the number of devices -+ */ -+ ti_len = ti->len; -+ ti->len = sectors_per_dev; -+ dl = dm_dirty_log_create(argv[0], ti, dl_parms, argv + 2); -+ ti->len = ti_len; -+ if (!dl) -+ goto bad_dirty_log; -+ -+ /* Chunk size *must* be smaller than region size. */ -+ region_size = dl->type->get_region_size(dl); -+ if (p->chunk_size > region_size) -+ goto bad_chunk_size; -+ -+ /* Recover io size *must* be smaller than region size as well. */ -+ if (p->recover_io_size > region_size) -+ goto bad_recover_io_size; -+ -+ /* Size and allocate the RAID set structure. */ -+ len = sizeof(*rs->data) + sizeof(*rs->dev); -+ if (dm_array_too_big(sizeof(*rs), len, raid_devs)) -+ goto bad_array; -+ -+ len = sizeof(*rs) + raid_devs * len; -+ rs = kzalloc(len, GFP_KERNEL); -+ if (!rs) -+ goto bad_alloc; -+ -+ rec = &rs->recover; -+ atomic_set(&rs->io.in_process, 0); -+ atomic_set(&rs->io.in_process_max, 0); -+ rec->io_size = p->recover_io_size; -+ -+ /* Pointer to data array. 
*/ -+ rs->data = (unsigned long **) -+ ((void *) rs->dev + raid_devs * sizeof(*rs->dev)); -+ rec->dl = dl; -+ rs->set.raid_devs = raid_devs; -+ rs->set.data_devs = raid_devs - raid_type->parity_devs; -+ rs->set.raid_type = raid_type; -+ -+ rs->set.raid_parms = p->raid_parms; -+ rs->set.chunk_size_parm = p->chunk_size_parm; -+ rs->set.io_size_parm = p->io_size_parm; -+ rs->sc.stripes_parm = p->stripes_parm; -+ rec->io_size_parm = p->recover_io_size_parm; -+ rec->bandwidth_parm = p->bandwidth_parm; -+ rec->recovery = p->recovery; -+ rec->recovery_stripes = p->recovery_stripes; -+ -+ /* -+ * Set chunk and io size and respective shifts -+ * (used to avoid divisions) -+ */ -+ rs->set.chunk_size = p->chunk_size; -+ rs->set.chunk_shift = ffs(p->chunk_size) - 1; -+ -+ rs->set.io_size = p->io_size; -+ rs->set.io_mask = p->io_size - 1; -+ /* Mask to adjust address key in case io_size != chunk_size. */ -+ rs->set.io_inv_mask = (p->chunk_size - 1) & ~rs->set.io_mask; -+ -+ rs->set.sectors_per_dev = sectors_per_dev; -+ -+ rs->set.ei = -1; /* Indicate no failed device. */ -+ atomic_set(&rs->set.failed_devs, 0); -+ -+ rs->ti = ti; -+ -+ atomic_set(rec->io_count + IO_WORK, 0); -+ atomic_set(rec->io_count + IO_RECOVER, 0); -+ -+ /* Initialize io lock and queues. */ -+ mutex_init(&rs->io.in_lock); -+ mutex_init(&rs->io.xor_lock); -+ bio_list_init(&rs->io.in); -+ bio_list_init(&rs->io.work); -+ -+ init_waitqueue_head(&rs->io.suspendq); /* Suspend waiters (dm-io). */ -+ -+ rec->nr_regions = dm_sector_div_up(sectors_per_dev, region_size); -+ rec->rh = dm_region_hash_create(rs, dispatch_delayed_bios, -+ wake_dummy, wake_do_raid, 0, p->recovery_stripes, -+ dl, region_size, rec->nr_regions); -+ if (IS_ERR(rec->rh)) -+ goto bad_rh; -+ -+ /* Initialize stripe cache. */ -+ r = sc_init(rs, p->stripes); -+ if (r) -+ goto bad_sc; -+ -+ /* REMOVEME: statistics. */ -+ stats_reset(rs); -+ ClearRSDevelStats(rs); /* Disnable development status. 
*/ -+ return rs; -+ -+bad_dirty_log: -+ TI_ERR_RET("Error creating dirty log", ERR_PTR(-ENOMEM)); -+ -+bad_chunk_size: -+ dm_dirty_log_destroy(dl); -+ TI_ERR_RET("Chunk size larger than region size", ERR_PTR(-EINVAL)); -+ -+bad_recover_io_size: -+ dm_dirty_log_destroy(dl); -+ TI_ERR_RET("Recover stripe io size larger than region size", -+ ERR_PTR(-EINVAL)); -+ -+bad_array: -+ dm_dirty_log_destroy(dl); -+ TI_ERR_RET("Arry too big", ERR_PTR(-EINVAL)); -+ -+bad_alloc: -+ dm_dirty_log_destroy(dl); -+ TI_ERR_RET("Cannot allocate raid context", ERR_PTR(-ENOMEM)); -+ -+bad_rh: -+ dm_dirty_log_destroy(dl); -+ ti->error = DM_MSG_PREFIX "Error creating dirty region hash"; -+ goto free_rs; -+ -+bad_sc: -+ dm_region_hash_destroy(rec->rh); /* Destroys dirty log too. */ -+ sc_exit(&rs->sc); -+ ti->error = DM_MSG_PREFIX "Error creating stripe cache"; -+free_rs: -+ kfree(rs); -+ return ERR_PTR(-ENOMEM); -+} -+ -+/* Free a RAID context (a RAID set). */ -+static void context_free(struct raid_set *rs, unsigned p) -+{ -+ while (p--) -+ dm_put_device(rs->ti, rs->dev[p].dev); -+ -+ sc_exit(&rs->sc); -+ dm_region_hash_destroy(rs->recover.rh); /* Destroys dirty log too. */ -+ kfree(rs); -+} -+ -+/* Create work queue and initialize delayed work. */ -+static int rs_workqueue_init(struct raid_set *rs) -+{ -+ struct dm_target *ti = rs->ti; -+ -+ rs->io.wq = create_singlethread_workqueue(DAEMON); -+ if (!rs->io.wq) -+ TI_ERR_RET("failed to create " DAEMON, -ENOMEM); -+ -+ INIT_DELAYED_WORK(&rs->io.dws_do_raid, do_raid); -+ INIT_WORK(&rs->io.ws_do_table_event, do_table_event); -+ return 0; -+} -+ -+/* Return pointer to raid_type structure for raid name. */ -+static struct raid_type *get_raid_type(char *name) -+{ -+ struct raid_type *r = ARRAY_END(raid_types); -+ -+ while (r-- > raid_types) { -+ if (!strcmp(r->name, name)) -+ return r; -+ } -+ -+ return NULL; -+} -+ -+/* FIXME: factor out to dm core. 
*/ -+static int multiple(sector_t a, sector_t b, sector_t *n) -+{ -+ sector_t r = a; -+ -+ sector_div(r, b); -+ *n = r; -+ return a == r * b; -+} -+ -+/* Log RAID set information to kernel log. */ -+static void rs_log(struct raid_set *rs, unsigned io_size) -+{ -+ unsigned p; -+ char buf[BDEVNAME_SIZE]; -+ -+ for (p = 0; p < rs->set.raid_devs; p++) -+ DMINFO("/dev/%s is raid disk %u%s", -+ bdevname(rs->dev[p].dev->bdev, buf), p, -+ (p == rs->set.pi) ? " (parity)" : ""); -+ -+ DMINFO("%d/%d/%d sectors chunk/io/recovery size, %u stripes\n" -+ "algorithm \"%s\", %u chunks with %uMB/s\n" -+ "%s set with net %u/%u devices", -+ rs->set.chunk_size, rs->set.io_size, rs->recover.io_size, -+ atomic_read(&rs->sc.stripes), -+ rs->xor.f->name, rs->xor.chunks, mbpers(rs, io_size), -+ rs->set.raid_type->descr, rs->set.data_devs, rs->set.raid_devs); -+} -+ -+/* Get all devices and offsets. */ -+static int dev_parms(struct raid_set *rs, char **argv, int *p) -+{ -+ struct dm_target *ti = rs->ti; -+ -+DMINFO("rs->set.sectors_per_dev=%llu", (unsigned long long) rs->set.sectors_per_dev); -+ for (*p = 0; *p < rs->set.raid_devs; (*p)++, argv += 2) { -+ int r; -+ unsigned long long tmp; -+ struct raid_dev *dev = rs->dev + *p; -+ -+ /* Get offset and device. */ -+ if (sscanf(argv[1], "%llu", &tmp) != 1 || -+ tmp > rs->set.sectors_per_dev) -+ TI_ERR("Invalid RAID device offset parameter"); -+ -+ dev->start = tmp; -+ r = dm_get_device(ti, argv[0], dev->start, -+ rs->set.sectors_per_dev, -+ dm_table_get_mode(ti->table), &dev->dev); -+ if (r) -+ TI_ERR_RET("RAID device lookup failure", r); -+ -+ r = raid_dev_lookup(rs, dev); -+ if (r != -ENODEV && r < *p) { -+ (*p)++; /* Ensure dm_put_device() on actual device. */ -+ TI_ERR_RET("Duplicate RAID device", -ENXIO); -+ } -+ } -+ -+ return 0; -+} -+ -+/* Set recovery bandwidth. 
*/ -+static void -+recover_set_bandwidth(struct raid_set *rs, unsigned bandwidth) -+{ -+ rs->recover.bandwidth = bandwidth; -+ rs->recover.bandwidth_work = 100 / bandwidth; -+} -+ -+/* Handle variable number of RAID parameters. */ -+static int get_raid_variable_parms(struct dm_target *ti, char **argv, -+ struct variable_parms *vp) -+{ -+ int p, value; -+ struct { -+ int action; /* -1: skip, 0: no power2 check, 1: power2 check */ -+ char *errmsg; -+ int min, max; -+ int *var, *var2, *var3; -+ } argctr[] = { -+ { 1, -+ "Invalid chunk size; must be -1 or 2^^n and <= 16384", -+ IO_SIZE_MIN, CHUNK_SIZE_MAX, -+ &vp->chunk_size_parm, &vp->chunk_size, &vp->io_size }, -+ { 0, -+ "Invalid number of stripes: must be -1 or >= 8 and <= 16384", -+ STRIPES_MIN, STRIPES_MAX, -+ &vp->stripes_parm, &vp->stripes, NULL }, -+ { 1, -+ "Invalid io size; must -1 or >= 8, 2^^n and less equal " -+ "min(BIO_MAX_SECTORS/2, chunk size)", -+ IO_SIZE_MIN, 0, /* Needs to be updated in loop below. */ -+ &vp->io_size_parm, &vp->io_size, NULL }, -+ { 1, -+ "Invalid recovery io size; must be -1 or " -+ "2^^n and less equal BIO_MAX_SECTORS/2", -+ RECOVER_IO_SIZE_MIN, BIO_MAX_SECTORS / 2, -+ &vp->recover_io_size_parm, &vp->recover_io_size, NULL }, -+ { 0, -+ "Invalid recovery bandwidth percentage; " -+ "must be -1 or > 0 and <= 100", -+ BANDWIDTH_MIN, BANDWIDTH_MAX, -+ &vp->bandwidth_parm, &vp->bandwidth, NULL }, -+ /* Handle sync argument seperately in loop. */ -+ { -1, -+ "Invalid recovery switch; must be \"sync\" or \"nosync\"" }, -+ { 0, -+ "Invalid number of recovery stripes;" -+ "must be -1, > 0 and <= 64", -+ RECOVERY_STRIPES_MIN, RECOVERY_STRIPES_MAX, -+ &vp->recovery_stripes_parm, &vp->recovery_stripes, NULL }, -+ }, *varp; -+ -+ /* Fetch # of variable raid parameters. */ -+ if (sscanf(*(argv++), "%d", &vp->raid_parms) != 1 || -+ !range_ok(vp->raid_parms, 0, 7)) -+ TI_ERR("Bad variable raid parameters number"); -+ -+ /* Preset variable RAID parameters. 
*/ -+ vp->chunk_size = CHUNK_SIZE_DEFAULT; -+ vp->io_size = IO_SIZE_DEFAULT; -+ vp->stripes = STRIPES_DEFAULT; -+ vp->recover_io_size = RECOVER_IO_SIZE_DEFAULT; -+ vp->bandwidth = BANDWIDTH_DEFAULT; -+ vp->recovery = 1; -+ vp->recovery_stripes = RECOVERY_STRIPES_DEFAULT; -+ -+ /* Walk the array of argument constraints for all given ones. */ -+ for (p = 0, varp = argctr; p < vp->raid_parms; p++, varp++) { -+ BUG_ON(varp >= ARRAY_END(argctr)); -+ -+ /* Special case for "[no]sync" string argument. */ -+ if (varp->action < 0) { -+ if (!strcmp(*argv, "sync")) -+ ; -+ else if (!strcmp(*argv, "nosync")) -+ vp->recovery = 0; -+ else -+ TI_ERR(varp->errmsg); -+ -+ argv++; -+ continue; -+ } -+ -+ /* -+ * Special case for io_size depending -+ * on previously set chunk size. -+ */ -+ if (p == 2) -+ varp->max = min(BIO_MAX_SECTORS / 2, vp->chunk_size); -+ -+ if (sscanf(*(argv++), "%d", &value) != 1 || -+ (value != -1 && -+ ((varp->action && !is_power_of_2(value)) || -+ !range_ok(value, varp->min, varp->max)))) -+ TI_ERR(varp->errmsg); -+ -+ *varp->var = value; -+ if (value != -1) { -+ if (varp->var2) -+ *varp->var2 = value; -+ if (varp->var3) -+ *varp->var3 = value; -+ } -+ } -+ -+ return 0; -+} -+ -+/* Parse optional locking parameters. */ -+static int get_raid_locking_parms(struct dm_target *ti, char **argv, -+ int *locking_parms, -+ struct dm_raid45_locking_type **locking_type) -+{ -+ if (!strnicmp(argv[0], "locking", strlen(argv[0]))) { -+ char *lckstr = argv[1]; -+ size_t lcksz = strlen(lckstr); -+ -+ if (!strnicmp(lckstr, "none", lcksz)) { -+ *locking_type = &locking_none; -+ *locking_parms = 2; -+ } else if (!strnicmp(lckstr, "cluster", lcksz)) { -+ DMERR("locking type \"%s\" not yet implemented", -+ lckstr); -+ return -EINVAL; -+ } else { -+ DMERR("unknown locking type \"%s\"", lckstr); -+ return -EINVAL; -+ } -+ } -+ -+ *locking_parms = 0; -+ *locking_type = &locking_none; -+ return 0; -+} -+ -+/* Set backing device read ahead properties of RAID set. 
*/ -+static void rs_set_read_ahead(struct raid_set *rs, -+ unsigned sectors, unsigned stripes) -+{ -+ unsigned ra_pages = dm_div_up(sectors, SECTORS_PER_PAGE); -+ struct mapped_device *md = dm_table_get_md(rs->ti->table); -+ struct backing_dev_info *bdi = &dm_disk(md)->queue->backing_dev_info; -+ -+ /* Set read-ahead for the RAID set and the component devices. */ -+ if (ra_pages) { -+ unsigned p = rs->set.raid_devs; -+ -+ bdi->ra_pages = stripes * ra_pages * rs->set.data_devs; -+ -+ while (p--) { -+ struct request_queue *q = -+ bdev_get_queue(rs->dev[p].dev->bdev); -+ -+ q->backing_dev_info.ra_pages = ra_pages; -+ } -+ } -+ -+ dm_put(md); -+} -+ -+/* Set congested function. */ -+static void rs_set_congested_fn(struct raid_set *rs) -+{ -+ struct mapped_device *md = dm_table_get_md(rs->ti->table); -+ struct backing_dev_info *bdi = &dm_disk(md)->queue->backing_dev_info; -+ -+ /* Set congested function and data. */ -+ bdi->congested_fn = rs_congested; -+ bdi->congested_data = rs; -+ dm_put(md); -+} -+ -+/* -+ * Construct a RAID4/5 mapping: -+ * -+ * log_type #log_params \ -+ * raid_type [#parity_dev] #raid_variable_params \ -+ * [locking "none"/"cluster"] -+ * #raid_devs #dev_to_initialize [ ]{3,} -+ * -+ * log_type = "core"/"disk", -+ * #log_params = 1-3 (1-2 for core dirty log type, 3 for disk dirty log only) -+ * log_params = [dirty_log_path] region_size [[no]sync]) -+ * -+ * raid_type = "raid4", "raid5_la", "raid5_ra", "raid5_ls", "raid5_rs" -+ * -+ * #parity_dev = N if raid_type = "raid4" -+ * o N = -1: pick default = last device -+ * o N >= 0 and < #raid_devs: parity device index -+ * -+ * #raid_variable_params = 0-7; raid_params (-1 = default): -+ * [chunk_size [#stripes [io_size [recover_io_size \ -+ * [%recovery_bandwidth [recovery_switch [#recovery_stripes]]]]]]] -+ * o chunk_size (unit to calculate drive addresses; must be 2^^n, > 8 -+ * and <= CHUNK_SIZE_MAX) -+ * o #stripes is number of stripes allocated to stripe cache -+ * (must be > 1 and < STRIPES_MAX) 
-+ * o io_size (io unit size per device in sectors; must be 2^^n and > 8) -+ * o recover_io_size (io unit size per device for recovery in sectors; -+ must be 2^^n, > SECTORS_PER_PAGE and <= region_size) -+ * o %recovery_bandwith is the maximum amount spend for recovery during -+ * application io (1-100%) -+ * o recovery switch = [sync|nosync] -+ * o #recovery_stripes is the number of recovery stripes used for -+ * parallel recovery of the RAID set -+ * If raid_variable_params = 0, defaults will be used. -+ * Any raid_variable_param can be set to -1 to apply a default -+ * -+ * #raid_devs = N (N >= 3) -+ * -+ * #dev_to_initialize = N -+ * -1: initialize parity on all devices -+ * >= 0 and < #raid_devs: initialize raid_path; used to force reconstruction -+ * of a failed devices content after replacement -+ * -+ * = device_path (eg, /dev/sdd1) -+ * = begin at offset on -+ * -+ */ -+#define MIN_PARMS 13 -+static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) -+{ -+ int dev_to_init, dl_parms, i, locking_parms, -+ parity_parm, pi = -1, r, raid_devs; -+ sector_t tmp, sectors_per_dev; -+ struct dm_raid45_locking_type *locking; -+ struct raid_set *rs; -+ struct raid_type *raid_type; -+ struct variable_parms parms; -+ -+ /* Ensure minimum number of parameters. */ -+ if (argc < MIN_PARMS) -+ TI_ERR("Not enough parameters"); -+ -+ /* Fetch # of dirty log parameters. */ -+ if (sscanf(argv[1], "%d", &dl_parms) != 1 || -+ !range_ok(dl_parms, 1, 4711)) /* ;-) */ -+ TI_ERR("Bad dirty log parameters number"); -+ -+ /* Check raid_type. */ -+ raid_type = get_raid_type(argv[dl_parms + 2]); -+ if (!raid_type) -+ TI_ERR("Bad raid type"); -+ -+ /* In case of RAID4, parity drive is selectable. */ -+ parity_parm = !!(raid_type->level == raid4); -+ -+ /* Handle variable number of RAID parameters. */ -+ r = get_raid_variable_parms(ti, argv + dl_parms + parity_parm + 3, -+ &parms); -+ if (r) -+ return r; -+ -+ /* Handle any locking parameters. 
*/ -+ r = get_raid_locking_parms(ti, -+ argv + dl_parms + parity_parm + -+ parms.raid_parms + 4, -+ &locking_parms, &locking); -+ if (r) -+ return r; -+ -+ /* # of raid devices. */ -+ i = dl_parms + parity_parm + parms.raid_parms + locking_parms + 4; -+ if (sscanf(argv[i], "%d", &raid_devs) != 1 || -+ raid_devs < raid_type->minimal_devs) -+ TI_ERR("Invalid number of raid devices"); -+ -+ /* In case of RAID4, check parity drive index is in limits. */ -+ if (raid_type->level == raid4) { -+ /* Fetch index of parity device. */ -+ if (sscanf(argv[dl_parms + 3], "%d", &pi) != 1 || -+ (pi != -1 && !range_ok(pi, 0, raid_devs - 1))) -+ TI_ERR("Invalid RAID4 parity device index"); -+ } -+ -+ /* -+ * Index of device to initialize starts at 0 -+ * -+ * o -1 -> don't initialize a selected device; -+ * initialize parity conforming to algorithm -+ * o 0..raid_devs-1 -> initialize respective device -+ * (used for reconstruction of a replaced device) -+ */ -+ if (sscanf(argv[dl_parms + parity_parm + parms.raid_parms + -+ locking_parms + 5], "%d", &dev_to_init) != 1 || -+ !range_ok(dev_to_init, -1, raid_devs - 1)) -+ TI_ERR("Invalid number for raid device to initialize"); -+ -+ /* Check # of raid device arguments. 
*/ -+ if (argc - dl_parms - parity_parm - parms.raid_parms - 6 != -+ 2 * raid_devs) -+ TI_ERR("Wrong number of raid device/offset arguments"); -+ -+ /* -+ * Check that the table length is devisable -+ * w/o rest by (raid_devs - parity_devs) -+ */ -+ if (!multiple(ti->len, raid_devs - raid_type->parity_devs, -+ §ors_per_dev)) -+ TI_ERR("Target length not divisible by number of data devices"); -+ -+ /* -+ * Check that the device size is -+ * devisable w/o rest by chunk size -+ */ -+ if (!multiple(sectors_per_dev, parms.chunk_size, &tmp)) -+ TI_ERR("Device length not divisible by chunk_size"); -+ -+ /**************************************************************** -+ * Now that we checked the constructor arguments -> -+ * let's allocate the RAID set -+ ****************************************************************/ -+ rs = context_alloc(raid_type, &parms, raid_devs, sectors_per_dev, -+ ti, dl_parms, argv); -+ if (IS_ERR(rs)) -+ return PTR_ERR(rs); -+ -+ -+ rs->set.dev_to_init = rs->set.dev_to_init_parm = dev_to_init; -+ rs->set.pi = rs->set.pi_parm = pi; -+ -+ /* Set RAID4 parity drive index. */ -+ if (raid_type->level == raid4) -+ rs->set.pi = (pi == -1) ? rs->set.data_devs : pi; -+ -+ recover_set_bandwidth(rs, parms.bandwidth); -+ -+ /* Use locking type to lock stripe access. */ -+ rs->locking = locking; -+ -+ /* Get the device/offset tupels. */ -+ argv += dl_parms + 6 + parity_parm + parms.raid_parms; -+ r = dev_parms(rs, argv, &i); -+ if (r) -+ goto err; -+ -+ /* Set backing device information (eg. read ahead). */ -+ rs_set_read_ahead(rs, 2 * rs->set.chunk_size /* sectors per device */, -+ 2 /* # of stripes */); -+ rs_set_congested_fn(rs); /* Set congested function. */ -+ SetRSCheckOverwrite(rs); /* Allow chunk overwrite checks. */ -+ rs->xor.speed = xor_optimize(rs); /* Select best xor algorithm. */ -+ -+ /* Set for recovery of any nosync regions. 
*/ -+ if (parms.recovery) -+ SetRSRecover(rs); -+ else { -+ /* -+ * Need to free recovery stripe(s) here in case -+ * of nosync, because xor_optimize uses one. -+ */ -+ set_start_recovery(rs); -+ set_end_recovery(rs); -+ stripe_recover_free(rs); -+ } -+ -+ /* -+ * Enable parity chunk creation enformcement for -+ * little numbers of array members where it doesn'ti -+ * gain us performance to xor parity out and back in as -+ * with larger array member numbers. -+ */ -+ if (rs->set.raid_devs <= rs->set.raid_type->minimal_devs + 1) -+ SetRSEnforceParityCreation(rs); -+ -+ /* -+ * Make sure that dm core only hands maximum io size -+ * length down and pays attention to io boundaries. -+ */ -+ ti->split_io = rs->set.io_size; -+ ti->private = rs; -+ -+ /* Initialize work queue to handle this RAID set's io. */ -+ r = rs_workqueue_init(rs); -+ if (r) -+ goto err; -+ -+ rs_log(rs, rs->recover.io_size); /* Log information about RAID set. */ -+ return 0; -+ -+err: -+ context_free(rs, i); -+ return r; -+} -+ -+/* -+ * Destruct a raid mapping -+ */ -+static void raid_dtr(struct dm_target *ti) -+{ -+ struct raid_set *rs = ti->private; -+ -+ destroy_workqueue(rs->io.wq); -+ context_free(rs, rs->set.raid_devs); -+} -+ -+/* Raid mapping function. */ -+static int raid_map(struct dm_target *ti, struct bio *bio, -+ union map_info *map_context) -+{ -+ /* I don't want to waste stripe cache capacity. */ -+ if (bio_rw(bio) == READA) -+ return -EIO; -+ else { -+ struct raid_set *rs = ti->private; -+ -+ /* -+ * Get io reference to be waiting for to drop -+ * to zero on device suspension/destruction. -+ */ -+ io_get(rs); -+ bio->bi_sector -= ti->begin; /* Remap sector. */ -+ -+ /* Queue io to RAID set. */ -+ mutex_lock(&rs->io.in_lock); -+ bio_list_add(&rs->io.in, bio); -+ mutex_unlock(&rs->io.in_lock); -+ -+ /* Wake daemon to process input list. */ -+ wake_do_raid(rs); -+ -+ /* REMOVEME: statistics. */ -+ atomic_inc(rs->stats + (bio_data_dir(bio) == READ ? 
-+ S_BIOS_READ : S_BIOS_WRITE)); -+ return DM_MAPIO_SUBMITTED; /* Handle later. */ -+ } -+} -+ -+/* Device suspend. */ -+static void raid_presuspend(struct dm_target *ti) -+{ -+ struct raid_set *rs = ti->private; -+ struct dm_dirty_log *dl = rs->recover.dl; -+ -+ SetRSSuspend(rs); -+ -+ if (RSRecover(rs)) -+ dm_rh_stop_recovery(rs->recover.rh); -+ -+ cancel_delayed_work(&rs->io.dws_do_raid); -+ flush_workqueue(rs->io.wq); -+ wait_ios(rs); /* Wait for completion of all ios being processed. */ -+ -+ if (dl->type->presuspend && dl->type->presuspend(dl)) -+ /* FIXME: need better error handling. */ -+ DMWARN("log presuspend failed"); -+} -+ -+static void raid_postsuspend(struct dm_target *ti) -+{ -+ struct raid_set *rs = ti->private; -+ struct dm_dirty_log *dl = rs->recover.dl; -+ -+ if (dl->type->postsuspend && dl->type->postsuspend(dl)) -+ /* FIXME: need better error handling. */ -+ DMWARN("log postsuspend failed"); -+ -+} -+ -+/* Device resume. */ -+static void raid_resume(struct dm_target *ti) -+{ -+ struct raid_set *rs = ti->private; -+ struct recover *rec = &rs->recover; -+ struct dm_dirty_log *dl = rec->dl; -+ -+DMINFO("%s...", __func__); -+ if (dl->type->resume && dl->type->resume(dl)) -+ /* Resume dirty log. */ -+ /* FIXME: need better error handling. */ -+ DMWARN("log resume failed"); -+ -+ rec->nr_regions_to_recover = -+ rec->nr_regions - dl->type->get_sync_count(dl); -+ -+ /* Restart any unfinished recovery. */ -+ if (RSRecover(rs)) { -+ set_start_recovery(rs); -+ dm_rh_start_recovery(rec->rh); -+ } -+ -+ ClearRSSuspend(rs); -+} -+ -+/* Return stripe cache size. */ -+static unsigned sc_size(struct raid_set *rs) -+{ -+ return to_sector(atomic_read(&rs->sc.stripes) * -+ (sizeof(struct stripe) + -+ (sizeof(struct stripe_chunk) + -+ (sizeof(struct page_list) + -+ to_bytes(rs->set.io_size) * -+ rs->set.raid_devs)) + -+ (rs->recover.end_jiffies ? 
-+ 0 : rs->recover.recovery_stripes * -+ to_bytes(rs->set.raid_devs * rs->recover.io_size)))); -+} -+ -+/* REMOVEME: status output for development. */ -+static void raid_devel_stats(struct dm_target *ti, char *result, -+ unsigned *size, unsigned maxlen) -+{ -+ unsigned sz = *size; -+ unsigned long j; -+ char buf[BDEVNAME_SIZE], *p; -+ struct stats_map *sm; -+ struct raid_set *rs = ti->private; -+ struct recover *rec = &rs->recover; -+ struct timespec ts; -+ -+ DMEMIT("%s %s=%u bw=%u\n", -+ version, rs->xor.f->name, rs->xor.chunks, rs->recover.bandwidth); -+ DMEMIT("act_ios=%d ", io_ref(rs)); -+ DMEMIT("act_ios_max=%d\n", atomic_read(&rs->io.in_process_max)); -+ DMEMIT("act_stripes=%d ", sc_active(&rs->sc)); -+ DMEMIT("act_stripes_max=%d\n", -+ atomic_read(&rs->sc.active_stripes_max)); -+ -+ for (sm = stats_map; sm < ARRAY_END(stats_map); sm++) -+ DMEMIT("%s%d", sm->str, atomic_read(rs->stats + sm->type)); -+ -+ DMEMIT(" checkovr=%s\n", RSCheckOverwrite(rs) ? "on" : "off"); -+ DMEMIT("sc=%u/%u/%u/%u/%u/%u/%u\n", rs->set.chunk_size, -+ atomic_read(&rs->sc.stripes), rs->set.io_size, -+ rec->recovery_stripes, rec->io_size, rs->sc.hash.buckets, -+ sc_size(rs)); -+ -+ j = (rec->end_jiffies ? 
rec->end_jiffies : jiffies) - -+ rec->start_jiffies; -+ jiffies_to_timespec(j, &ts); -+ sprintf(buf, "%ld.%ld", ts.tv_sec, ts.tv_nsec); -+ p = strchr(buf, '.'); -+ p[3] = 0; -+ -+ DMEMIT("rg=%llu/%llu/%llu/%u %s\n", -+ (unsigned long long) rec->nr_regions_recovered, -+ (unsigned long long) rec->nr_regions_to_recover, -+ (unsigned long long) rec->nr_regions, rec->bandwidth, buf); -+ -+ *size = sz; -+} -+ -+static int raid_status(struct dm_target *ti, status_type_t type, -+ char *result, unsigned maxlen) -+{ -+ unsigned p, sz = 0; -+ char buf[BDEVNAME_SIZE]; -+ struct raid_set *rs = ti->private; -+ struct dm_dirty_log *dl = rs->recover.dl; -+ int raid_parms[] = { -+ rs->set.chunk_size_parm, -+ rs->sc.stripes_parm, -+ rs->set.io_size_parm, -+ rs->recover.io_size_parm, -+ rs->recover.bandwidth_parm, -+ -2, -+ rs->recover.recovery_stripes, -+ }; -+ -+ switch (type) { -+ case STATUSTYPE_INFO: -+ /* REMOVEME: statistics. */ -+ if (RSDevelStats(rs)) -+ raid_devel_stats(ti, result, &sz, maxlen); -+ -+ DMEMIT("%u ", rs->set.raid_devs); -+ -+ for (p = 0; p < rs->set.raid_devs; p++) -+ DMEMIT("%s ", -+ format_dev_t(buf, rs->dev[p].dev->bdev->bd_dev)); -+ -+ DMEMIT("2 "); -+ for (p = 0; p < rs->set.raid_devs; p++) { -+ DMEMIT("%c", !DevFailed(rs->dev + p) ? 'A' : 'D'); -+ -+ if (p == rs->set.pi) -+ DMEMIT("p"); -+ -+ if (p == rs->set.dev_to_init) -+ DMEMIT("i"); -+ } -+ -+ DMEMIT(" %llu/%llu ", -+ (unsigned long long) dl->type->get_sync_count(dl), -+ (unsigned long long) rs->recover.nr_regions); -+ -+ sz += dl->type->status(dl, type, result+sz, maxlen-sz); -+ break; -+ case STATUSTYPE_TABLE: -+ sz = rs->recover.dl->type->status(rs->recover.dl, type, -+ result, maxlen); -+ DMEMIT("%s %u ", rs->set.raid_type->name, rs->set.raid_parms); -+ -+ for (p = 0; p < rs->set.raid_parms; p++) { -+ if (raid_parms[p] > -2) -+ DMEMIT("%d ", raid_parms[p]); -+ else -+ DMEMIT("%s ", rs->recover.recovery ? 
-+ "sync" : "nosync"); -+ } -+ -+ DMEMIT("%u %d ", rs->set.raid_devs, rs->set.dev_to_init); -+ -+ for (p = 0; p < rs->set.raid_devs; p++) -+ DMEMIT("%s %llu ", -+ format_dev_t(buf, rs->dev[p].dev->bdev->bd_dev), -+ (unsigned long long) rs->dev[p].start); -+ } -+ -+ return 0; -+} -+ -+/* -+ * Message interface -+ */ -+/* Turn a delta into an absolute value. */ -+static int _absolute(char *action, int act, int r) -+{ -+ size_t len = strlen(action); -+ -+ if (len < 2) -+ len = 2; -+ -+ /* Make delta absolute. */ -+ if (!strncmp("set", action, len)) -+ ; -+ else if (!strncmp("grow", action, len)) -+ r += act; -+ else if (!strncmp("shrink", action, len)) -+ r = act - r; -+ else -+ r = -EINVAL; -+ -+ return r; -+} -+ -+ /* Change recovery io bandwidth. */ -+static int bandwidth_change(struct raid_set *rs, int argc, char **argv, -+ enum raid_set_flags flag) -+{ -+ int act = rs->recover.bandwidth, bandwidth; -+ -+ if (argc != 2) -+ return -EINVAL; -+ -+ if (sscanf(argv[1], "%d", &bandwidth) == 1 && -+ range_ok(bandwidth, BANDWIDTH_MIN, BANDWIDTH_MAX)) { -+ /* Make delta bandwidth absolute. */ -+ bandwidth = _absolute(argv[0], act, bandwidth); -+ -+ /* Check range. */ -+ if (range_ok(bandwidth, BANDWIDTH_MIN, BANDWIDTH_MAX)) { -+ recover_set_bandwidth(rs, bandwidth); -+ return 0; -+ } -+ } -+ -+ return -EINVAL; -+} -+ -+/* Set/reset development feature flags. */ -+static int devel_flags(struct raid_set *rs, int argc, char **argv, -+ enum raid_set_flags flag) -+{ -+ size_t len; -+ -+ if (argc != 1) -+ return -EINVAL; -+ -+ len = strlen(argv[0]); -+ if (len < 2) -+ len = 2; -+ -+ if (!strncmp(argv[0], "on", len)) -+ return test_and_set_bit(flag, &rs->io.flags) ? -EPERM : 0; -+ else if (!strncmp(argv[0], "off", len)) -+ return test_and_clear_bit(flag, &rs->io.flags) ? 
0 : -EPERM; -+ else if (!strncmp(argv[0], "reset", len)) { -+ if (flag == RS_DEVEL_STATS) { -+ if (test_bit(flag, &rs->io.flags)) { -+ stats_reset(rs); -+ return 0; -+ } else -+ return -EPERM; -+ } else { -+ set_bit(flag, &rs->io.flags); -+ return 0; -+ } -+ } -+ -+ return -EINVAL; -+} -+ -+/* Resize the stripe cache. */ -+static int sc_resize(struct raid_set *rs, int argc, char **argv, -+ enum raid_set_flags flag) -+{ -+ int act, stripes; -+ -+ if (argc != 2) -+ return -EINVAL; -+ -+ /* Deny permission in case the daemon is still resizing!. */ -+ if (atomic_read(&rs->sc.stripes_to_set)) -+ return -EPERM; -+ -+ if (sscanf(argv[1], "%d", &stripes) == 1 && -+ stripes > 0) { -+ act = atomic_read(&rs->sc.stripes); -+ -+ /* Make delta stripes absolute. */ -+ stripes = _absolute(argv[0], act, stripes); -+ -+ /* -+ * Check range and that the # of stripes changes. -+ * We leave the resizing to the wroker. -+ */ -+ if (range_ok(stripes, STRIPES_MIN, STRIPES_MAX) && -+ stripes != atomic_read(&rs->sc.stripes)) { -+ atomic_set(&rs->sc.stripes_to_set, stripes); -+ wake_do_raid(rs); -+ return 0; -+ } -+ } -+ -+ return -EINVAL; -+} -+ -+/* Change xor algorithm and number of chunks. 
*/ -+static int xor_set(struct raid_set *rs, int argc, char **argv, -+ enum raid_set_flags flag) -+{ -+ if (argc == 2) { -+ int chunks; -+ char *algorithm = argv[0]; -+ struct xor_func *f = ARRAY_END(xor_funcs); -+ -+ if (sscanf(argv[1], "%d", &chunks) == 1 && -+ range_ok(chunks, 2, XOR_CHUNKS_MAX) && -+ chunks <= rs->set.raid_devs) { -+ while (f-- > xor_funcs) { -+ if (!strcmp(algorithm, f->name)) { -+ unsigned io_size = 0; -+ struct stripe *stripe = stripe_alloc(&rs->sc, rs->sc.mem_cache_client, SC_GROW); -+ -+ DMINFO("xor: %s", f->name); -+ if (f->f == xor_blocks_wrapper && -+ chunks > MAX_XOR_BLOCKS + 1) { -+ DMERR("chunks > MAX_XOR_BLOCKS" -+ " + 1"); -+ break; -+ } -+ -+ mutex_lock(&rs->io.xor_lock); -+ rs->xor.f = f; -+ rs->xor.chunks = chunks; -+ rs->xor.speed = 0; -+ mutex_unlock(&rs->io.xor_lock); -+ -+ if (stripe) { -+ rs->xor.speed = xor_speed(stripe); -+ io_size = stripe->io.size; -+ stripe_free(stripe, rs->sc.mem_cache_client); -+ } -+ -+ rs_log(rs, io_size); -+ return 0; -+ } -+ } -+ } -+ } -+ -+ return -EINVAL; -+} -+ -+/* -+ * Allow writes after they got prohibited because of a device failure. -+ * -+ * This needs to be called after userspace updated metadata state -+ * based on an event being thrown during device failure processing. -+ */ -+static int allow_writes(struct raid_set *rs, int argc, char **argv, -+ enum raid_set_flags flag) -+{ -+ if (TestClearRSProhibitWrites(rs)) { -+DMINFO("%s waking", __func__); -+ wake_do_raid(rs); -+ return 0; -+ } -+ -+ return -EPERM; -+} -+ -+/* Parse the RAID message. */ -+/* -+ * 'all[ow_writes]' -+ * 'ba[ndwidth] {se[t],g[row],sh[rink]} #' # e.g 'ba se 50' -+ * "o[verwrite] {on,of[f],r[eset]}' # e.g. 'o of' -+ * 'sta[tistics] {on,of[f],r[eset]}' # e.g. 'stat of' -+ * 'str[ipecache] {se[t],g[row],sh[rink]} #' # e.g. 'stripe set 1024' -+ * 'xor algorithm #chunks' # e.g. 
'xor xor_8 5' -+ * -+ */ -+static int raid_message(struct dm_target *ti, unsigned argc, char **argv) -+{ -+ if (argc) { -+ size_t len = strlen(argv[0]); -+ struct raid_set *rs = ti->private; -+ struct { -+ const char *name; -+ int (*f) (struct raid_set *rs, int argc, char **argv, -+ enum raid_set_flags flag); -+ enum raid_set_flags flag; -+ } msg_descr[] = { -+ { "allow_writes", allow_writes, 0 }, -+ { "bandwidth", bandwidth_change, 0 }, -+ { "overwrite", devel_flags, RS_CHECK_OVERWRITE }, -+ { "statistics", devel_flags, RS_DEVEL_STATS }, -+ { "stripe_cache", sc_resize, 0 }, -+ { "xor", xor_set, 0 }, -+ }, *m = ARRAY_END(msg_descr); -+ -+ if (len < 3) -+ len = 3; -+ -+ while (m-- > msg_descr) { -+ if (!strncmp(argv[0], m->name, len)) -+ return m->f(rs, argc - 1, argv + 1, m->flag); -+ } -+ -+ } -+ -+ return -EINVAL; -+} -+/* -+ * END message interface -+ */ -+ -+/* Provide io hints. */ -+static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) -+{ -+ struct raid_set *rs = ti->private; -+ -+ blk_limits_io_min(limits, rs->set.chunk_size); -+ blk_limits_io_opt(limits, rs->set.chunk_size * rs->set.data_devs); -+} -+ -+static struct target_type raid_target = { -+ .name = "raid45", -+ .version = {1, 0, 0}, -+ .module = THIS_MODULE, -+ .ctr = raid_ctr, -+ .dtr = raid_dtr, -+ .map = raid_map, -+ .presuspend = raid_presuspend, -+ .postsuspend = raid_postsuspend, -+ .resume = raid_resume, -+ .status = raid_status, -+ .message = raid_message, -+ .io_hints = raid_io_hints, -+}; -+ -+static void init_exit(const char *bad_msg, const char *good_msg, int r) -+{ -+ if (r) -+ DMERR("Failed to %sregister target [%d]", bad_msg, r); -+ else -+ DMINFO("%s %s", good_msg, version); -+} -+ -+static int __init dm_raid_init(void) -+{ -+ int r = dm_register_target(&raid_target); -+ -+ init_exit("", "initialized", r); -+ return r; -+} -+ -+static void __exit dm_raid_exit(void) -+{ -+ dm_unregister_target(&raid_target); -+ init_exit("un", "exit", 0); -+} -+ -+/* Module 
hooks. */ -+module_init(dm_raid_init); -+module_exit(dm_raid_exit); -+ -+MODULE_DESCRIPTION(DM_NAME " raid4/5 target"); -+MODULE_AUTHOR("Heinz Mauelshagen "); -+MODULE_LICENSE("GPL"); -+MODULE_ALIAS("dm-raid4"); -+MODULE_ALIAS("dm-raid5"); ---- /dev/null -+++ b/drivers/md/dm-raid45.h -@@ -0,0 +1,30 @@ -+/* -+ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. -+ * -+ * Module Author: Heinz Mauelshagen (Mauelshagen@RedHat.com) -+ * -+ * Locking definitions for the device-mapper RAID45 target. -+ * -+ * This file is released under the GPL. -+ * -+ */ -+ -+#ifndef _DM_RAID45_H -+#define _DM_RAID45_H -+ -+/* Factor out to dm.h! */ -+#define STR_LEN(ptr, str) (ptr), (str), strlen((ptr)) -+/* Reference to array end. */ -+#define ARRAY_END(a) ((a) + ARRAY_SIZE(a)) -+ -+enum dm_lock_type { DM_RAID45_EX, DM_RAID45_SHARED }; -+ -+struct dm_raid45_locking_type { -+ /* Request a lock on a stripe. */ -+ void* (*lock)(sector_t key, enum dm_lock_type type); -+ -+ /* Release a lock on a stripe. 
*/ -+ void (*unlock)(void *lock_handle); -+}; -+ -+#endif ---- a/drivers/md/dm-region-hash.c -+++ b/drivers/md/dm-region-hash.c -@@ -113,10 +113,11 @@ struct dm_region { - /* - * Conversion fns - */ --static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) -+region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) - { - return sector >> rh->region_shift; - } -+EXPORT_SYMBOL_GPL(dm_rh_sector_to_region); - - sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region) - { -@@ -496,7 +497,7 @@ void dm_rh_update_states(struct dm_regio - } - EXPORT_SYMBOL_GPL(dm_rh_update_states); - --static void rh_inc(struct dm_region_hash *rh, region_t region) -+void dm_rh_inc(struct dm_region_hash *rh, region_t region) - { - struct dm_region *reg; - -@@ -518,6 +519,7 @@ static void rh_inc(struct dm_region_hash - - read_unlock(&rh->hash_lock); - } -+EXPORT_SYMBOL_GPL(dm_rh_inc); - - void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) - { -@@ -526,7 +528,7 @@ void dm_rh_inc_pending(struct dm_region_ - for (bio = bios->head; bio; bio = bio->bi_next) { - if (bio->bi_rw & REQ_FLUSH) - continue; -- rh_inc(rh, dm_rh_bio_to_region(rh, bio)); -+ dm_rh_inc(rh, dm_rh_bio_to_region(rh, bio)); - } - } - EXPORT_SYMBOL_GPL(dm_rh_inc_pending); -@@ -694,6 +696,19 @@ void dm_rh_delay(struct dm_region_hash * - } - EXPORT_SYMBOL_GPL(dm_rh_delay); - -+void dm_rh_delay_by_region(struct dm_region_hash *rh, -+ struct bio *bio, region_t region) -+{ -+ struct dm_region *reg; -+ -+ /* FIXME: locking. 
*/ -+ read_lock(&rh->hash_lock); -+ reg = __rh_find(rh, region); -+ bio_list_add(®->delayed_bios, bio); -+ read_unlock(&rh->hash_lock); -+} -+EXPORT_SYMBOL_GPL(dm_rh_delay_by_region); -+ - void dm_rh_stop_recovery(struct dm_region_hash *rh) - { - int i; ---- a/drivers/md/dm.c -+++ b/drivers/md/dm.c -@@ -2606,6 +2606,7 @@ struct gendisk *dm_disk(struct mapped_de - { - return md->disk; - } -+EXPORT_SYMBOL_GPL(dm_disk); - - struct kobject *dm_kobject(struct mapped_device *md) - { ---- a/include/linux/dm-region-hash.h -+++ b/include/linux/dm-region-hash.h -@@ -49,6 +49,7 @@ struct dm_dirty_log *dm_rh_dirty_log(str - */ - region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio); - sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region); -+region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector); - void *dm_rh_region_context(struct dm_region *reg); - - /* -@@ -72,11 +73,14 @@ void dm_rh_update_states(struct dm_regio - int dm_rh_flush(struct dm_region_hash *rh); - - /* Inc/dec pending count on regions. */ -+void dm_rh_inc(struct dm_region_hash *rh, region_t region); - void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios); - void dm_rh_dec(struct dm_region_hash *rh, region_t region); - - /* Delay bios on regions. */ - void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); -+void dm_rh_delay_by_region(struct dm_region_hash *rh, struct bio *bio, -+ region_t region); - - void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio); - diff --git a/patches.suse/dm-raid45-api-update-no-barriers b/patches.suse/dm-raid45-api-update-no-barriers deleted file mode 100644 index 92874bb..0000000 --- a/patches.suse/dm-raid45-api-update-no-barriers +++ /dev/null @@ -1,34 +0,0 @@ -From: Jeff Mahoney -Subject: dm-raid45: api update after removal of barriers -Patch-mainline: Whenever dmraid45 is - - This patch updates dm-raid45 to deal with the removal of the - barrier interface. 
- -Signed-off-by: Jeff Mahoney ---- - drivers/md/dm-raid45.c | 13 ++++--------- - 1 file changed, 4 insertions(+), 9 deletions(-) - ---- a/drivers/md/dm-raid45.c -+++ b/drivers/md/dm-raid45.c -@@ -196,10 +195,6 @@ enum chunk_flags { - CHUNK_UPTODATE, /* Chunk pages are uptodate. */ - }; - --#if READ != 0 || WRITE != 1 --#error dm-raid45: READ/WRITE != 0/1 used as index!!! --#endif -- - enum bl_type { - WRITE_QUEUED = WRITE + 1, - WRITE_MERGED, -@@ -3276,7 +3271,7 @@ static void do_ios(struct raid_set *rs, - * the input queue unless all work queues are empty - * and the stripe cache is inactive. - */ -- if (unlikely(bio_empty_barrier(bio))) { -+ if (bio->bi_rw & REQ_FLUSH) { - /* REMOVEME: statistics. */ - atomic_inc(rs->stats + S_BARRIER); - if (delay || diff --git a/patches.suse/dm-raid45-api-update-remove-dm_put-after-dm_table_get_md b/patches.suse/dm-raid45-api-update-remove-dm_put-after-dm_table_get_md deleted file mode 100644 index c9d9cac..0000000 --- a/patches.suse/dm-raid45-api-update-remove-dm_put-after-dm_table_get_md +++ /dev/null @@ -1,45 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH] dm-raid45: API update: Remove dm_put after dm_table_get_md -References: bnc#615656 -Patch-mainline: depends on dm-raid45 being upstream - - Commit ecdb2e257abc33ae6798d3ccba87bdafa40ef6b6, for 2.6.34, removed - the dm_get() call from dm_table_get_md(). The dm-raid45 code still has - matching dm_put() calls for the dm_table_get_md() calls. This patch removes - the dm_put() calls as it's causing too many reference drops and BUG_ONs. 
- -Signed-off-by: Jeff Mahoney ---- - drivers/md/dm-raid45.c | 4 ---- - 1 file changed, 4 deletions(-) - - drivers/md/dm-raid45.c | 4 ---- - 1 file changed, 4 deletions(-) - ---- a/drivers/md/dm-raid45.c -+++ b/drivers/md/dm-raid45.c -@@ -1567,7 +1567,6 @@ static int sc_init(struct raid_set *rs, - disk = dm_disk(md); - snprintf(sc->kc.name, sizeof(sc->kc.name), "%s-%d.%d", TARGET, - disk->first_minor, atomic_inc_return(&_stripe_sc_nr)); -- dm_put(md); - sc->kc.cache = kmem_cache_create(sc->kc.name, stripe_size(rs), - 0, 0, NULL); - if (!sc->kc.cache) -@@ -3981,8 +3980,6 @@ static void rs_set_read_ahead(struct rai - q->backing_dev_info.ra_pages = ra_pages; - } - } -- -- dm_put(md); - } - - /* Set congested function. */ -@@ -3994,7 +3991,6 @@ static void rs_set_congested_fn(struct r - /* Set congested function and data. */ - bdi->congested_fn = rs_congested; - bdi->congested_data = rs; -- dm_put(md); - } - - /* diff --git a/patches.suse/dmraid45-dm_dirty_log_create-api-fix b/patches.suse/dmraid45-dm_dirty_log_create-api-fix deleted file mode 100644 index bc7b5d1..0000000 --- a/patches.suse/dmraid45-dm_dirty_log_create-api-fix +++ /dev/null @@ -1,25 +0,0 @@ -From: Jeff Mahoney -Subject: dmraid45: dm_dirty_log_create API fix -Patch-mainline: not yet, depends on patches.suse/dm-raid45_2.6.27_20081027.patch - - 2.6.33 added an optional callback to dm_dirty_log_create for flush - operations. Eventually raid45 should have one but until then, this is - to allow it to build. 
- -Signed-off-by: Jeff Mahoney - ---- - drivers/md/dm-raid45.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/md/dm-raid45.c -+++ b/drivers/md/dm-raid45.c -@@ -3598,7 +3598,7 @@ context_alloc(struct raid_type *raid_typ - */ - ti_len = ti->len; - ti->len = sectors_per_dev; -- dl = dm_dirty_log_create(argv[0], ti, dl_parms, argv + 2); -+ dl = dm_dirty_log_create(argv[0], ti, NULL, dl_parms, argv + 2); - ti->len = ti_len; - if (!dl) - goto bad_dirty_log; diff --git a/patches.suse/dmraid45-dm_get_device-takes-fewer-arguments b/patches.suse/dmraid45-dm_get_device-takes-fewer-arguments deleted file mode 100644 index 06dc87d..0000000 --- a/patches.suse/dmraid45-dm_get_device-takes-fewer-arguments +++ /dev/null @@ -1,26 +0,0 @@ -From: Jeff Mahoney -Subject: dmraid45: dm_get_device takes fewer arguments -Patch-mainline: Whenever dmraid45 is - - With 2.6.34-rc1, dm_get_device takes 4 args instead of 6. - -Signed-off-by: Jeff Mahoney -Acked-by: Jeff Mahoney ---- - drivers/md/dm-raid45.c | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - ---- a/drivers/md/dm-raid45.c -+++ b/drivers/md/dm-raid45.c -@@ -3810,9 +3810,8 @@ DMINFO("rs->set.sectors_per_dev=%llu", ( - TI_ERR("Invalid RAID device offset parameter"); - - dev->start = tmp; -- r = dm_get_device(ti, argv[0], dev->start, -- rs->set.sectors_per_dev, -- dm_table_get_mode(ti->table), &dev->dev); -+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), -+ &dev->dev); - if (r) - TI_ERR_RET("RAID device lookup failure", r); - diff --git a/patches.suse/elousb-2.6.35-api-changes b/patches.suse/elousb-2.6.35-api-changes deleted file mode 100644 index d73a4a9..0000000 --- a/patches.suse/elousb-2.6.35-api-changes +++ /dev/null @@ -1,51 +0,0 @@ -From: Jeff Mahoney -Subject: elousb: API Changes for 2.6.35 -Patch-mainline: Whenever the driver makes it upstream - - This patch contains API fixes for 2.6.35. 
- -Acked-by: Jeff Mahoney ---- - drivers/input/touchscreen/elousb.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - ---- a/drivers/input/touchscreen/elousb.c -+++ b/drivers/input/touchscreen/elousb.c -@@ -168,7 +168,7 @@ static int elousb_probe(struct usb_inter - if (!elo || !input_dev) - goto fail1; - -- elo->data = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &elo->data_dma); -+ elo->data = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &elo->data_dma); - if (!elo->data) - goto fail1; - -@@ -242,7 +242,9 @@ static int elousb_probe(struct usb_inter - elo->irq->transfer_dma = elo->data_dma; - elo->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - -- input_register_device(elo->dev); -+ error = input_register_device(elo->dev); -+ if (error) -+ goto fail4; - - usb_set_intfdata(intf, elo); - return 0; -@@ -252,7 +254,7 @@ fail4: - fail3: - usb_free_urb(elo->irq); - fail2: -- usb_buffer_free(dev, 8, elo->data, elo->data_dma); -+ usb_free_coherent(dev, 8, elo->data, elo->data_dma); - fail1: - input_free_device(input_dev); - kfree(elo); -@@ -268,7 +270,7 @@ static void elousb_disconnect(struct usb - usb_kill_urb(elo->irq); - input_unregister_device(elo->dev); - usb_free_urb(elo->irq); -- usb_buffer_free(interface_to_usbdev(intf), 8, elo->data, elo->data_dma); -+ usb_free_coherent(interface_to_usbdev(intf), 8, elo->data, elo->data_dma); - kfree(elo); - } - } diff --git a/patches.suse/export-release_open_intent b/patches.suse/export-release_open_intent deleted file mode 100644 index 9258456..0000000 --- a/patches.suse/export-release_open_intent +++ /dev/null @@ -1,23 +0,0 @@ -From: Jeff Mahoney -Subject: Export release_open_intent for NFS branches with aufs -Patch-mainline: never - - aufs requires a way to release an open intent when handling an error - condition after using NFSv4's atomic open. It was using put_filp, - but release_open_intent is more appropriate. 
- -Signed-off-by: Jeff Mahoney ---- - fs/namei.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/fs/namei.c -+++ b/fs/namei.c -@@ -562,6 +562,7 @@ void release_open_intent(struct nameidat - fput(file); - } - } -+EXPORT_SYMBOL_GPL(release_open_intent); - - static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) - { diff --git a/patches.suse/export-security_inode_permission b/patches.suse/export-security_inode_permission deleted file mode 100644 index bca9a33..0000000 --- a/patches.suse/export-security_inode_permission +++ /dev/null @@ -1,21 +0,0 @@ -From: Jeff Mahoney -Subject: Export security_inode_permission for aufs -Patch-mainline: never - - This patch exports security_inode_permission for aufs. - -Signed-off-by: Jeff Mahoney ---- - security/security.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/security/security.c -+++ b/security/security.c -@@ -528,6 +528,7 @@ int security_inode_permission(struct ino - return 0; - return security_ops->inode_permission(inode, mask); - } -+EXPORT_SYMBOL_GPL(security_inode_permission); - - int security_inode_exec_permission(struct inode *inode, unsigned int flags) - { diff --git a/patches.suse/ext3-barrier-default b/patches.suse/ext3-barrier-default deleted file mode 100644 index 7041431..0000000 --- a/patches.suse/ext3-barrier-default +++ /dev/null @@ -1,77 +0,0 @@ -From: Chris Mason -Subject: make ext3 mount default to barrier=1 -Patch-mainline: probably never - -Hello everyone, - -This patch turns on barriers by default for ext3. mount -o barrier=0 -will turn them off. It also changes the ext3 fsync call to trigger a -barrier when a commit isn't done. - -It should be safe, but some extra review would be appreciated. 
- -Updated Apr 13 2009 jeffm: -- Added Kconfig option - -Acked-by: Jeff Mahoney - ---- - fs/ext3/Kconfig | 22 ++++++++++++++++++++++ - fs/ext3/fsync.c | 1 + - fs/ext3/super.c | 4 ++++ - 3 files changed, 27 insertions(+) - ---- a/fs/ext3/Kconfig -+++ b/fs/ext3/Kconfig -@@ -50,6 +50,28 @@ config EXT3_DEFAULTS_TO_ORDERED - privacy issues of data=writeback and are willing to make - that trade off, answer 'n'. - -+config EXT3_DEFAULTS_TO_BARRIERS_ENABLED -+ bool "Default to 'barrier=1' in ext3" -+ depends on EXT3_FS -+ help -+ Modern disk drives support write caches that can speed up writeback. -+ Some devices, in order to improve their performance statistics, -+ report that the write has been completed even when it has only -+ been committed to volatile cache memory. This can result in -+ severe corruption in the event of power loss. -+ -+ The -o barrier option enables the file system to direct the block -+ layer to issue a barrier, which ensures that the cache has been -+ flushed before proceeding. This can produce some slowdown in -+ certain environments, but allows higher end storage arrays with -+ battery-backed caches to report completes writes sooner than -+ would be otherwise possible. -+ -+ Without this option, disk write caches should be disabled if -+ you value data integrity over writeback performance. -+ -+ If unsure, say N. 
-+ - config EXT3_FS_XATTR - bool "Ext3 extended attributes" - depends on EXT3_FS ---- a/fs/ext3/fsync.c -+++ b/fs/ext3/fsync.c -@@ -28,6 +28,7 @@ - #include - #include - #include -+#include - #include - #include - ---- a/fs/ext3/super.c -+++ b/fs/ext3/super.c -@@ -1693,6 +1693,10 @@ static int ext3_fill_super (struct super - sbi->s_resuid = le16_to_cpu(es->s_def_resuid); - sbi->s_resgid = le16_to_cpu(es->s_def_resgid); - -+ /* enable barriers by default */ -+#ifdef CONFIG_EXT3_DEFAULTS_TO_BARRIERS_ENABLED -+ set_opt(sbi->s_mount_opt, BARRIER); -+#endif - set_opt(sbi->s_mount_opt, RESERVATION); - - if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum, diff --git a/patches.suse/file-capabilities-disable-by-default.diff b/patches.suse/file-capabilities-disable-by-default.diff deleted file mode 100644 index 4b767cb..0000000 --- a/patches.suse/file-capabilities-disable-by-default.diff +++ /dev/null @@ -1,56 +0,0 @@ -From: Andreas Gruenbacher -Subject: Disable file capabilities by default -Patch-mainline: probably never - -Disable file capabilities by default: we are still lacking documentation -and file capability awareness in system management tools. - -Signed-off-by: Andreas Gruenbacher - ---- - Documentation/kernel-parameters.txt | 8 +++++++- - kernel/capability.c | 9 ++++++++- - 2 files changed, 15 insertions(+), 2 deletions(-) - ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -1659,7 +1659,13 @@ and is between 256 and 4096 characters. - - no_file_caps Tells the kernel not to honor file capabilities. The - only way then for a file to be executed with privilege -- is to be setuid root or executed by root. -+ is to be setuid root or executed by root. They -+ default to disabled. -+ -+ file_caps Tells the kernel to honor file capabilities. The -+ only way then for a file to be executed with privilege -+ is to be setuid root or executed by root. They default -+ to disabled. 
- - nohalt [IA-64] Tells the kernel not to use the power saving - function PAL_HALT_LIGHT when idle. This increases ---- a/kernel/capability.c -+++ b/kernel/capability.c -@@ -28,7 +28,7 @@ EXPORT_SYMBOL(__cap_empty_set); - EXPORT_SYMBOL(__cap_full_set); - EXPORT_SYMBOL(__cap_init_eff_set); - --int file_caps_enabled = 1; -+int file_caps_enabled; - - static int __init file_caps_disable(char *str) - { -@@ -37,6 +37,13 @@ static int __init file_caps_disable(char - } - __setup("no_file_caps", file_caps_disable); - -+static int __init file_caps_enable(char *str) -+{ -+ file_caps_enabled = 1; -+ return 1; -+} -+__setup("file_caps", file_caps_enable); -+ - /* - * More recent versions of libcap are available from: - * diff --git a/patches.suse/files-slab-rcu.patch b/patches.suse/files-slab-rcu.patch deleted file mode 100644 index 1e54d6d..0000000 --- a/patches.suse/files-slab-rcu.patch +++ /dev/null @@ -1,330 +0,0 @@ -From: Nick Piggin -Subject: SLAB_DESTROY_BY_RCU for file slab -Patch-mainline: not yet - -Use SLAB_DESTROY_BY_RCU for file slab cache. Ensure we have the correct -object by using a spinlock to protect the refcount rather than having it -atomic (problem with it being atomic is having to release the last ref -on a file we have incorrectly picked up a reference to). - -This improves single threaded repeated open/close performance by 28% by -avoiding the full RCU cycle. 
- -Signed-off-by: Nick Piggin ---- - drivers/net/ppp_generic.c | 4 +- - drivers/scsi/osst.c | 2 - - drivers/scsi/st.c | 2 - - fs/aio.c | 4 +- - fs/file_table.c | 70 +++++++++++++++++++++++----------------------- - fs/open.c | 2 - - include/linux/fs.h | 25 +++++++++++++--- - kernel/perf_event.c | 4 +- - net/sched/sch_atm.c | 4 +- - net/unix/garbage.c | 2 - - 10 files changed, 69 insertions(+), 50 deletions(-) - ---- a/drivers/net/ppp_generic.c -+++ b/drivers/net/ppp_generic.c -@@ -590,12 +590,12 @@ static long ppp_ioctl(struct file *file, - if (file == ppp->owner) - ppp_shutdown_interface(ppp); - } -- if (atomic_long_read(&file->f_count) <= 2) { -+ if (file->f_count <= 2) { - ppp_release(NULL, file); - err = 0; - } else - printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", -- atomic_long_read(&file->f_count)); -+ file->f_count); - unlock_kernel(); - return err; - } ---- a/drivers/scsi/osst.c -+++ b/drivers/scsi/osst.c -@@ -4824,7 +4824,7 @@ static int os_scsi_tape_flush(struct fil - struct osst_request * SRpnt = NULL; - char * name = tape_name(STp); - -- if (file_count(filp) > 1) -+ if (filp->f_count > 1) - return 0; - - if ((STps->rw == ST_WRITING || STp->dirty) && !STp->pos_unknown) { ---- a/drivers/scsi/st.c -+++ b/drivers/scsi/st.c -@@ -1272,7 +1272,7 @@ static int st_flush(struct file *filp, f - struct st_partstat *STps = &(STp->ps[STp->partition]); - char *name = tape_name(STp); - -- if (file_count(filp) > 1) -+ if (filp->f_count > 1) - return 0; - - if (STps->rw == ST_WRITING && !STp->pos_unknown) { ---- a/fs/aio.c -+++ b/fs/aio.c -@@ -545,7 +545,7 @@ static void aio_fput_routine(struct work - static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) - { - dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n", -- req, atomic_long_read(&req->ki_filp->f_count)); -+ req, req->ki_filp->f_count); - - assert_spin_locked(&ctx->ctx_lock); - -@@ -563,7 +563,7 @@ static int __aio_put_req(struct kioctx * - * we would not be holding the last reference to the file*, so 
- * this function will be executed w/out any aio kthread wakeup. - */ -- if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) { -+ if (unlikely(file_dec_and_test(req->ki_filp))) { - get_ioctx(ctx); - spin_lock(&fput_lock); - list_add(&req->ki_list, &fput_head); ---- a/fs/file_table.c -+++ b/fs/file_table.c -@@ -40,19 +40,12 @@ static struct kmem_cache *filp_cachep __ - - static struct percpu_counter nr_files __cacheline_aligned_in_smp; - --static inline void file_free_rcu(struct rcu_head *head) --{ -- struct file *f = container_of(head, struct file, f_u.fu_rcuhead); -- -- put_cred(f->f_cred); -- kmem_cache_free(filp_cachep, f); --} -- - static inline void file_free(struct file *f) - { - percpu_counter_dec(&nr_files); - file_check_state(f); -- call_rcu(&f->f_u.fu_rcuhead, file_free_rcu); -+ put_cred(f->f_cred); -+ kmem_cache_free(filp_cachep, f); - } - - /* -@@ -127,7 +120,7 @@ struct file *get_empty_filp(void) - goto fail_sec; - - INIT_LIST_HEAD(&f->f_u.fu_list); -- atomic_long_set(&f->f_count, 1); -+ f->f_count = 1; - rwlock_init(&f->f_owner.lock); - f->f_cred = get_cred(cred); - spin_lock_init(&f->f_lock); -@@ -196,7 +189,7 @@ EXPORT_SYMBOL(alloc_file); - - void fput(struct file *file) - { -- if (atomic_long_dec_and_test(&file->f_count)) -+ if (unlikely(file_dec_and_test(file))) - __fput(file); - } - -@@ -267,21 +260,38 @@ void __fput(struct file *file) - mntput(mnt); - } - --struct file *fget(unsigned int fd) -+static inline struct file *get_stable_file(struct files_struct *files, unsigned int fd) - { -+ struct fdtable *fdt; - struct file *file; -- struct files_struct *files = current->files; - - rcu_read_lock(); -- file = fcheck_files(files, fd); -- if (file) { -- if (!atomic_long_inc_not_zero(&file->f_count)) { -- /* File object ref couldn't be taken */ -- rcu_read_unlock(); -- return NULL; -+ fdt = files_fdtable(files); -+ if (likely(fd < fdt->max_fds)) { -+ file = rcu_dereference(fdt->fd[fd]); -+ if (file) { -+ spin_lock(&file->f_lock); -+ if 
(unlikely(file != fdt->fd[fd] || !file->f_count)) { -+ spin_unlock(&file->f_lock); -+ file = NULL; -+ goto out; -+ } -+ file->f_count++; -+ spin_unlock(&file->f_lock); - } -- } -+ } else -+ file = NULL; -+out: - rcu_read_unlock(); -+ return file; -+} -+ -+struct file *fget(unsigned int fd) -+{ -+ struct file *file; -+ struct files_struct *files = current->files; -+ -+ file = get_stable_file(files, fd); - - return file; - } -@@ -300,20 +310,12 @@ struct file *fget_light(unsigned int fd, - struct file *file; - struct files_struct *files = current->files; - -- *fput_needed = 0; - if (likely((atomic_read(&files->count) == 1))) { -+ *fput_needed = 0; - file = fcheck_files(files, fd); - } else { -- rcu_read_lock(); -- file = fcheck_files(files, fd); -- if (file) { -- if (atomic_long_inc_not_zero(&file->f_count)) -- *fput_needed = 1; -- else -- /* Didn't get the reference, someone's freed */ -- file = NULL; -- } -- rcu_read_unlock(); -+ *fput_needed = 1; -+ file = get_stable_file(files, fd); - } - - return file; -@@ -322,7 +324,7 @@ struct file *fget_light(unsigned int fd, - - void put_filp(struct file *file) - { -- if (atomic_long_dec_and_test(&file->f_count)) { -+ if (unlikely(file_dec_and_test(file))) { - security_file_free(file); - file_kill(file); - file_free(file); -@@ -388,7 +390,7 @@ retry: - struct vfsmount *mnt; - if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) - continue; -- if (!file_count(f)) -+ if (!f->f_count) - continue; - if (!(f->f_mode & FMODE_WRITE)) - continue; -@@ -414,7 +416,7 @@ void __init files_init(unsigned long mem - int n; - - filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, -- SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); -+ SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU | SLAB_PANIC, NULL); - - /* - * One file with associated inode and dcache is very roughly 1K. 
---- a/fs/open.c -+++ b/fs/open.c -@@ -1114,7 +1114,7 @@ int filp_close(struct file *filp, fl_own - { - int retval = 0; - -- if (!file_count(filp)) { -+ if (unlikely(!filp->f_count)) { - printk(KERN_ERR "VFS: Close: file count is 0\n"); - return 0; - } ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -920,9 +920,10 @@ struct file { - #define f_dentry f_path.dentry - #define f_vfsmnt f_path.mnt - const struct file_operations *f_op; -- spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */ -- atomic_long_t f_count; -+ /* f_lock protects f_count, f_ep_links, f_flags, no IRQ */ -+ spinlock_t f_lock; - unsigned int f_flags; -+ long f_count; - fmode_t f_mode; - loff_t f_pos; - struct fown_struct f_owner; -@@ -949,8 +950,24 @@ extern spinlock_t files_lock; - #define file_list_lock() spin_lock(&files_lock); - #define file_list_unlock() spin_unlock(&files_lock); - --#define get_file(x) atomic_long_inc(&(x)->f_count) --#define file_count(x) atomic_long_read(&(x)->f_count) -+static inline void get_file(struct file *f) -+{ -+ spin_lock(&f->f_lock); -+ f->f_count++; -+ spin_unlock(&f->f_lock); -+} -+ -+static inline int file_dec_and_test(struct file *f) -+{ -+ int ret; -+ -+ spin_lock(&f->f_lock); -+ f->f_count--; -+ ret = (f->f_count == 0); -+ spin_unlock(&f->f_lock); -+ -+ return ret; -+} - - #ifdef CONFIG_DEBUG_WRITECOUNT - static inline void file_take_write(struct file *f) ---- a/kernel/perf_event.c -+++ b/kernel/perf_event.c -@@ -4617,7 +4617,7 @@ static int perf_event_set_output(struct - if (event->data) - goto out; - -- atomic_long_inc(&output_file->f_count); -+ get_file(output_file); - - set: - mutex_lock(&event->mmap_mutex); -@@ -4878,7 +4878,7 @@ inherit_event(struct perf_event *parent_ - * we are in the parent and we know that the filp still - * exists and has a nonzero count: - */ -- atomic_long_inc(&parent_event->filp->f_count); -+ get_file(parent_event->filp); - - /* - * Link this into the parent event's child list ---- a/net/sched/sch_atm.c -+++ 
b/net/sched/sch_atm.c -@@ -164,7 +164,7 @@ static void atm_tc_put(struct Qdisc *sch - tcf_destroy_chain(&flow->filter_list); - if (flow->sock) { - pr_debug("atm_tc_put: f_count %ld\n", -- file_count(flow->sock->file)); -+ flow->sock->file->f_count); - flow->vcc->pop = flow->old_pop; - sockfd_put(flow->sock); - } -@@ -260,7 +260,7 @@ static int atm_tc_change(struct Qdisc *s - sock = sockfd_lookup(fd, &error); - if (!sock) - return error; /* f_count++ */ -- pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file)); -+ pr_debug("atm_tc_change: f_count %ld\n", sock->file->f_count); - if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) { - error = -EPROTOTYPE; - goto err_out; ---- a/net/unix/garbage.c -+++ b/net/unix/garbage.c -@@ -311,7 +311,7 @@ void unix_gc(void) - long total_refs; - long inflight_refs; - -- total_refs = file_count(u->sk.sk_socket->file); -+ total_refs = u->sk.sk_socket->file->f_count; - inflight_refs = atomic_long_read(&u->inflight); - - BUG_ON(inflight_refs < 1); diff --git a/patches.suse/genksyms-add-override-flag.diff b/patches.suse/genksyms-add-override-flag.diff deleted file mode 100644 index f716b7f..0000000 --- a/patches.suse/genksyms-add-override-flag.diff +++ /dev/null @@ -1,116 +0,0 @@ -From: Andreas Gruenbacher -Subject: genksyms: add --override flag -Patch-mainline: not yet - -Add --override flag to genksyms to allow overriding types with old -definitions using the 'override' keyword. 
This is similar to -p --preserve, -but it doesn't abort the build if a symtype cannot be preserved - -[mmarek: added KBUILD_OVERRIDE env var to set this globally for the entire - build] ---- - scripts/Makefile.build | 1 + - scripts/genksyms/genksyms.c | 21 +++++++++++++++------ - 2 files changed, 16 insertions(+), 6 deletions(-) - ---- a/scripts/Makefile.build -+++ b/scripts/Makefile.build -@@ -161,6 +161,7 @@ cmd_gensymtypes = - $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ - $(GENKSYMS) $(if $(1), -T $(2)) -a $(ARCH) \ - $(if $(KBUILD_PRESERVE),-p) \ -+ $(if $(KBUILD_OVERRIDE),-o) \ - -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) - - quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ ---- a/scripts/genksyms/genksyms.c -+++ b/scripts/genksyms/genksyms.c -@@ -43,7 +43,7 @@ int cur_line = 1; - char *cur_filename; - - static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types, -- flag_preserve, flag_warnings; -+ flag_override, flag_preserve, flag_warnings; - static const char *arch = ""; - static const char *mod_prefix = ""; - -@@ -200,7 +200,7 @@ static struct symbol *__add_symbol(const - sym->is_declared = 1; - return sym; - } else if (!sym->is_declared) { -- if (sym->is_override && flag_preserve) { -+ if (sym->is_override && flag_override) { - print_location(); - fprintf(stderr, "ignoring "); - print_type_name(type, name); -@@ -586,11 +586,13 @@ void export_symbol(const char *name) - struct symbol *n = sym->expansion_trail; - - if (sym->status != STATUS_UNCHANGED) { -+ int fail = sym->is_override && flag_preserve; -+ - if (!has_changed) { - print_location(); - fprintf(stderr, "%s: %s: modversion " - "changed because of changes " -- "in ", flag_preserve ? "error" : -+ "in ", fail ? 
"error" : - "warning", name); - } else - fprintf(stderr, ", "); -@@ -598,7 +600,7 @@ void export_symbol(const char *name) - if (sym->status == STATUS_DEFINED) - fprintf(stderr, " (became defined)"); - has_changed = 1; -- if (flag_preserve) -+ if (fail) - errors++; - } - sym->expansion_trail = 0; -@@ -655,6 +657,7 @@ static void genksyms_usage(void) - " -D, --dump Dump expanded symbol defs (for debugging only)\n" - " -r, --reference file Read reference symbols from a file\n" - " -T, --dump-types file Dump expanded types into file\n" -+ " -o, --override Allow to override reference modversions\n" - " -p, --preserve Preserve reference modversions or fail\n" - " -w, --warnings Enable warnings\n" - " -q, --quiet Disable warnings (default)\n" -@@ -666,6 +669,7 @@ static void genksyms_usage(void) - " -D Dump expanded symbol defs (for debugging only)\n" - " -r file Read reference symbols from a file\n" - " -T file Dump expanded types into file\n" -+ " -o Allow to override reference modversions\n" - " -p Preserve reference modversions or fail\n" - " -w Enable warnings\n" - " -q Disable warnings (default)\n" -@@ -690,15 +694,16 @@ int main(int argc, char **argv) - {"reference", 1, 0, 'r'}, - {"dump-types", 1, 0, 'T'}, - {"preserve", 0, 0, 'p'}, -+ {"override", 0, 0, 'o'}, - {"version", 0, 0, 'V'}, - {"help", 0, 0, 'h'}, - {0, 0, 0, 0} - }; - -- while ((o = getopt_long(argc, argv, "a:dwqVDr:T:ph", -+ while ((o = getopt_long(argc, argv, "a:dwqVDr:T:oph", - &long_opts[0], NULL)) != EOF) - #else /* __GNU_LIBRARY__ */ -- while ((o = getopt(argc, argv, "a:dwqVDr:T:ph")) != EOF) -+ while ((o = getopt(argc, argv, "a:dwqVDr:T:oph")) != EOF) - #endif /* __GNU_LIBRARY__ */ - switch (o) { - case 'a': -@@ -735,7 +740,11 @@ int main(int argc, char **argv) - return 1; - } - break; -+ case 'o': -+ flag_override = 1; -+ break; - case 'p': -+ flag_override = 1; - flag_preserve = 1; - break; - case 'h': diff --git a/patches.suse/hung_task_timeout-configurable-default 
b/patches.suse/hung_task_timeout-configurable-default deleted file mode 100644 index 989a0e0..0000000 --- a/patches.suse/hung_task_timeout-configurable-default +++ /dev/null @@ -1,54 +0,0 @@ -From: Jeff Mahoney -Subject: hung_task_timeout: configurable default -References: bnc#552820 -Patch-mainline: not yet - - This patch allows the default value for sysctl_hung_task_timeout_secs - to be set at build time. The feature carries virtually no overhead, - so it makes sense to keep it enabled. On heavily loaded systems, though, - it can end up triggering stack traces when there is no bug other than - the system being underprovisioned. - - The old default of 120 seconds is preserved. - -Signed-off-by: Jeff Mahoney ---- - kernel/hung_task.c | 3 ++- - lib/Kconfig.debug | 14 ++++++++++++++ - 2 files changed, 16 insertions(+), 1 deletion(-) - ---- a/kernel/hung_task.c -+++ b/kernel/hung_task.c -@@ -33,7 +33,8 @@ unsigned long __read_mostly sysctl_hung_ - /* - * Zero means infinite timeout - no checking done: - */ --unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; -+unsigned long __read_mostly sysctl_hung_task_timeout_secs = -+ CONFIG_DEFAULT_HUNG_TASK_TIMEOUT; - - unsigned long __read_mostly sysctl_hung_task_warnings = 10; - ---- a/lib/Kconfig.debug -+++ b/lib/Kconfig.debug -@@ -214,6 +214,20 @@ config DETECT_HUNG_TASK - enabled then all held locks will also be reported. This - feature has negligible overhead. - -+config DEFAULT_HUNG_TASK_TIMEOUT -+ int "Default timeout for hung task detection (in seconds)" -+ depends on DETECT_HUNG_TASK -+ default 120 -+ help -+ This option controls the default timeout (in seconds) used -+ to determine when a task has become non-responsive and should -+ be considered hung. -+ -+ It can be adjusted at runtime via the kernel.hung_task_timeout -+ sysctl or by writing a value to /proc/sys/kernel/hung_task_timeout. -+ -+ A timeout of 0 disables the check. The default is 120 seconds. 
-+ - config BOOTPARAM_HUNG_TASK_PANIC - bool "Panic (Reboot) On Hung Tasks" - depends on DETECT_HUNG_TASK diff --git a/patches.suse/init-move-populate_rootfs-back-to-start_kernel b/patches.suse/init-move-populate_rootfs-back-to-start_kernel deleted file mode 100644 index 0a1a0ec..0000000 --- a/patches.suse/init-move-populate_rootfs-back-to-start_kernel +++ /dev/null @@ -1,111 +0,0 @@ -From: Jeff Mahoney -Subject: init: move populate_rootfs back to start_kernel -References: bnc#533555 -Patch-mainline: Probably never - - Mainline commit 8d610dd5 introduced the rootfs_initcall and moved - populate_rootfs out of start_kernel. This was because of issues - with userspace helpers being executed way too early. Things like - pipes weren't initialized yet and users were seeing Oopses or - unpredictable behavior in certain circumstances. - - The fix worked by causing the execve to fail because it couldn't lookup - the helper in the file system since the file system wasn't populate yet. - It turns out that's a really late place to fail since the entire - usermodehelper infrastructure depends on a work queue that is already - checked to see if it has been initialized. We can fail earlier without - having to fork threads that will ultimately fail. - - This patch moves populate_rootfs back to start_kernel and avoids the - race against a very early userspace by moving the initialization of - khelper_wq to rootfs_initcall. - - This may seem like a small win, but it is essential for my next patch - which adds the ability to override ACPI tables at boot-time. 
- -Signed-off-by: Jeff Mahoney ---- - include/linux/init.h | 1 + - include/linux/kmod.h | 2 -- - init/initramfs.c | 3 +-- - init/main.c | 10 +++++++++- - kernel/kmod.c | 4 +++- - 5 files changed, 14 insertions(+), 6 deletions(-) - ---- a/include/linux/init.h -+++ b/include/linux/init.h -@@ -146,6 +146,7 @@ extern unsigned int reset_devices; - /* used by init/main.c */ - void setup_arch(char **); - void prepare_namespace(void); -+int populate_rootfs(void); - - extern void (*late_time_init)(void); - ---- a/include/linux/kmod.h -+++ b/include/linux/kmod.h -@@ -109,8 +109,6 @@ call_usermodehelper(char *path, char **a - NULL, NULL, NULL); - } - --extern void usermodehelper_init(void); -- - extern int usermodehelper_disable(void); - extern void usermodehelper_enable(void); - ---- a/init/initramfs.c -+++ b/init/initramfs.c -@@ -714,7 +714,7 @@ static void __init clean_rootfs(void) - } - #endif - --static int __init populate_rootfs(void) -+int __init populate_rootfs(void) - { - char *err = unpack_to_rootfs(__initramfs_start, - __initramfs_end - __initramfs_start); -@@ -754,4 +754,3 @@ static int __init populate_rootfs(void) - } - return 0; - } --rootfs_initcall(populate_rootfs); ---- a/init/main.c -+++ b/init/main.c -@@ -696,6 +696,15 @@ asmlinkage void __init start_kernel(void - - check_bugs(); - -+ /* -+ * Do this before starting ACPI so we can read-in -+ * override tables before the tables are actually -+ * loaded. The usermode helper won't be initialized -+ * until much later so we don't race against things -+ * calling out to userspace. 
-+ */ -+ populate_rootfs(); -+ - acpi_early_init(); /* before LAPIC and SMP init */ - sfi_init_late(); - -@@ -793,7 +802,6 @@ static void __init do_initcalls(void) - static void __init do_basic_setup(void) - { - cpuset_init_smp(); -- usermodehelper_init(); - init_tmpfs(); - driver_init(); - init_irq_proc(); ---- a/kernel/kmod.c -+++ b/kernel/kmod.c -@@ -416,8 +416,10 @@ unlock: - } - EXPORT_SYMBOL(call_usermodehelper_exec); - --void __init usermodehelper_init(void) -+static int __init usermodehelper_init(void) - { - khelper_wq = create_singlethread_workqueue("khelper"); - BUG_ON(!khelper_wq); -+ return 0; - } -+rootfs_initcall(usermodehelper_init); diff --git a/patches.suse/kbd-ignore-gfx.patch b/patches.suse/kbd-ignore-gfx.patch deleted file mode 100644 index f0b44bd..0000000 --- a/patches.suse/kbd-ignore-gfx.patch +++ /dev/null @@ -1,37 +0,0 @@ -From: Dirk Mueller -Subject: setfont breaks first Xserver start -References: 302010 -Patch-Mainline: No - -The patch prevents setfont from accessing vga registers on the card when -the card is in graphics mode KD_GRAPHICS as we assume, that someone else (ie. -the Xserver) is in charge of the HW in which case accessing the vga registers -may (at best) have no effect (not even the desired one) or (at worst) interfer -with settings the graphics driver has made. 
- -Signed-off-by: Hannes Reinecke - ---- - drivers/video/console/vgacon.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/video/console/vgacon.c -+++ b/drivers/video/console/vgacon.c -@@ -1259,7 +1259,7 @@ static int vgacon_font_set(struct vc_dat - unsigned charcount = font->charcount; - int rc; - -- if (vga_video_type < VIDEO_TYPE_EGAM) -+ if (vga_video_type < VIDEO_TYPE_EGAM || vga_is_gfx) - return -EINVAL; - - if (font->width != VGA_FONTWIDTH || -@@ -1277,7 +1277,7 @@ static int vgacon_font_set(struct vc_dat - - static int vgacon_font_get(struct vc_data *c, struct console_font *font) - { -- if (vga_video_type < VIDEO_TYPE_EGAM) -+ if (vga_video_type < VIDEO_TYPE_EGAM || vga_is_gfx) - return -EINVAL; - - font->width = VGA_FONTWIDTH; diff --git a/patches.suse/kconfig-automate-kernel-desktop b/patches.suse/kconfig-automate-kernel-desktop deleted file mode 100644 index 5b1dccf..0000000 --- a/patches.suse/kconfig-automate-kernel-desktop +++ /dev/null @@ -1,54 +0,0 @@ -From: Suresh Jayaraman -Subject: [PATCH] automate config options for kernel-desktop -References: FATE#305694 -Patch-mainline: Never - -Automate the desktop only kernel configuration options with the new -CONFIG_KERNEL_DESKTOP. 
- -Signed-off-by: Suresh Jayaraman ---- - init/Kconfig | 5 ++++- - kernel/Kconfig.hz | 1 + - kernel/Kconfig.preempt | 1 + - 3 files changed, 6 insertions(+), 1 deletion(-) - ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -537,6 +537,7 @@ config HAVE_UNSTABLE_SCHED_CLOCK - menuconfig CGROUPS - boolean "Control Group support" - depends on EVENTFD -+ default !KERNEL_DESKTOP - help - This option adds support for grouping sets of processes together, for - use with process control subsystems such as Cpusets, CFS, memory -@@ -651,7 +653,7 @@ config CGROUP_MEM_RES_CTLR_SWAP - menuconfig CGROUP_SCHED - bool "Group CPU scheduler" - depends on EXPERIMENTAL -- default n -+ default !KERNEL_DESKTOP - help - This feature lets CPU scheduler recognize task groups and control CPU - bandwidth allocation to such task groups. It uses cgroups to group ---- a/kernel/Kconfig.hz -+++ b/kernel/Kconfig.hz -@@ -4,6 +4,7 @@ - - choice - prompt "Timer frequency" -+ default HZ_1000 if KERNEL_DESKTOP - default HZ_250 - help - Allows the configuration of the timer frequency. It is customary ---- a/kernel/Kconfig.preempt -+++ b/kernel/Kconfig.preempt -@@ -1,6 +1,7 @@ - - choice - prompt "Preemption Model" -+ default PREEMPT if KERNEL_DESKTOP - default PREEMPT_NONE - - config PREEMPT_NONE diff --git a/patches.suse/kdump-dump_after_notifier.patch b/patches.suse/kdump-dump_after_notifier.patch deleted file mode 100644 index 7c58853..0000000 --- a/patches.suse/kdump-dump_after_notifier.patch +++ /dev/null @@ -1,136 +0,0 @@ -From: Takenori Nagano -Subject: [PATCH] Add dump_after_notifier sysctl -Patch-mainline: never -References: 265764 - -This patch adds dump_after_notifier sysctl to execute kdump after the notifier -call chain. This basically makes it possible to execute KDB before kdump. 
- -Signed-off-by: Takenori Nagano -Acked-by: Bernhard Walle - ---- - include/linux/kexec.h | 2 ++ - include/linux/sysctl.h | 1 + - kernel/kexec.c | 29 +++++++++++++++++++++++++++++ - kernel/panic.c | 5 ++++- - kernel/sysctl_check.c | 1 + - 5 files changed, 37 insertions(+), 1 deletion(-) - ---- a/include/linux/kexec.h -+++ b/include/linux/kexec.h -@@ -158,6 +158,7 @@ unsigned long paddr_vmcoreinfo_note(void - - extern struct kimage *kexec_image; - extern struct kimage *kexec_crash_image; -+extern int dump_after_notifier; - - #ifndef kexec_flush_icache_page - #define kexec_flush_icache_page(page) -@@ -212,5 +213,6 @@ struct pt_regs; - struct task_struct; - static inline void crash_kexec(struct pt_regs *regs) { } - static inline int kexec_should_crash(struct task_struct *p) { return 0; } -+#define dump_after_notifier 0 - #endif /* CONFIG_KEXEC */ - #endif /* LINUX_KEXEC_H */ ---- a/include/linux/sysctl.h -+++ b/include/linux/sysctl.h -@@ -162,6 +162,7 @@ enum - KERN_MAX_LOCK_DEPTH=74, - KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */ - KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ -+ KERN_DUMP_AFTER_NOTIFIER=78, /* int: kdump after panic_notifier (SUSE only) */ - KERN_PANIC_ON_IO_NMI=79, /* int: whether we will panic on an io NMI */ - }; - ---- a/kernel/kexec.c -+++ b/kernel/kexec.c -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -46,6 +47,7 @@ - - /* Per cpu memory for storing cpu states in case of system crash. 
*/ - note_buf_t* crash_notes; -+int dump_after_notifier; - - /* vmcoreinfo stuff */ - static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; -@@ -1152,6 +1154,30 @@ void crash_save_cpu(struct pt_regs *regs - final_note(buf); - } - -+#ifdef CONFIG_SYSCTL -+static ctl_table dump_after_notifier_table[] = { -+ { -+ .ctl_name = KERN_DUMP_AFTER_NOTIFIER, -+ .procname = "dump_after_notifier", -+ .data = &dump_after_notifier, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .proc_handler = &proc_dointvec, -+ }, -+ { .ctl_name = 0 } -+}; -+ -+static ctl_table kexec_sys_table[] = { -+ { -+ .ctl_name = CTL_KERN, -+ .procname = "kernel", -+ .mode = 0555, -+ .child = dump_after_notifier_table, -+ }, -+ { .ctl_name = 0 } -+}; -+#endif -+ - static int __init crash_notes_memory_init(void) - { - /* Allocate memory for saving cpu registers. */ -@@ -1161,6 +1187,9 @@ static int __init crash_notes_memory_ini - " states failed\n"); - return -ENOMEM; - } -+#ifdef CONFIG_SYSCTL -+ register_sysctl_table(kexec_sys_table); -+#endif - return 0; - } - module_init(crash_notes_memory_init) ---- a/kernel/panic.c -+++ b/kernel/panic.c -@@ -87,7 +87,8 @@ NORET_TYPE void panic(const char * fmt, - * everything else. - * Do we want to call this before we try to display a message? 
- */ -- crash_kexec(NULL); -+ if (!dump_after_notifier) -+ crash_kexec(NULL); - - /* - * Note smp_send_stop is the usual smp shutdown function, which -@@ -98,6 +99,8 @@ NORET_TYPE void panic(const char * fmt, - - atomic_notifier_call_chain(&panic_notifier_list, 0, buf); - -+ crash_kexec(NULL); -+ - bust_spinlocks(0); - - if (!panic_blink) ---- a/kernel/sysctl_check.c -+++ b/kernel/sysctl_check.c -@@ -106,6 +106,7 @@ static const struct trans_ctl_table tran - { KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" }, - { KERN_PANIC_ON_IO_NMI, "panic_on_io_nmi" }, - { KERN_SETUID_DUMPABLE, "suid_dumpable" }, -+ { KERN_DUMP_AFTER_NOTIFIER, "dump_after_notifier" }, - {} - }; - diff --git a/patches.suse/led_classdev.sysfs-name.patch b/patches.suse/led_classdev.sysfs-name.patch deleted file mode 100644 index 8291e30..0000000 --- a/patches.suse/led_classdev.sysfs-name.patch +++ /dev/null @@ -1,22 +0,0 @@ -Subject: use correct name for /sys/devices/virtual/leds/ entries -From: olh@suse.de -References: 468350 -Patch-mainline: not yet - -the low hanging fruits - ---- - drivers/leds/ledtrig-default-on.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/leds/ledtrig-default-on.c -+++ b/drivers/leds/ledtrig-default-on.c -@@ -23,7 +23,7 @@ static void defon_trig_activate(struct l - } - - static struct led_trigger defon_led_trigger = { -- .name = "default-on", -+ .name = "default::on", - .activate = defon_trig_activate, - }; - diff --git a/patches.suse/linux-2.6.29-dont-wait-for-mouse.patch b/patches.suse/linux-2.6.29-dont-wait-for-mouse.patch deleted file mode 100644 index c5fd2b4..0000000 --- a/patches.suse/linux-2.6.29-dont-wait-for-mouse.patch +++ /dev/null @@ -1,46 +0,0 @@ -From dce8113d033975f56630cf6d2a6a908cfb66059d Mon Sep 17 00:00:00 2001 -From: Arjan van de Ven -Date: Sun, 20 Jul 2008 13:12:16 -0700 -Subject: [PATCH] fastboot: remove "wait for all devices before mounting root" delay -Patch-mainline: not yet - -In the non-initrd case, we wait for all 
devices to finish their -probing before we try to mount the rootfs. -In practice, this means that we end up waiting 2 extra seconds for -the PS/2 mouse probing even though the root holding device has been -ready since a long time. - -The previous two patches in this series made the RAID autodetect code -do it's own "wait for probing to be done" code, and added -"wait and retry" functionality in case the root device isn't actually -available. - -These two changes should make it safe to remove the delay itself, -and this patch does this. On my test laptop, this reduces the boot time -by 2 seconds (kernel time goes from 3.9 to 1.9 seconds). - -Signed-off-by: Arjan van de Ven -Signed-off-by: Greg Kroah-Hartman ---- - init/do_mounts.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/init/do_mounts.c -+++ b/init/do_mounts.c -@@ -373,6 +373,7 @@ void __init prepare_namespace(void) - ssleep(root_delay); - } - -+#if 0 - /* - * wait for the known devices to complete their probing - * -@@ -381,6 +382,8 @@ void __init prepare_namespace(void) - * for the touchpad of a laptop to initialize. 
- */ - wait_for_device_probe(); -+#endif -+ async_synchronize_full(); - - md_run_setup(); - diff --git a/patches.suse/linux-2.6.29-even-faster-kms.patch b/patches.suse/linux-2.6.29-even-faster-kms.patch deleted file mode 100644 index 5b5bff2..0000000 --- a/patches.suse/linux-2.6.29-even-faster-kms.patch +++ /dev/null @@ -1,30 +0,0 @@ -From: someone at intel -Subject: speed up kms even more - - -Signed-off-by: Greg Kroah-Hartman - ---- - drivers/gpu/drm/i915/intel_lvds.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/gpu/drm/i915/intel_lvds.c -+++ b/drivers/gpu/drm/i915/intel_lvds.c -@@ -551,7 +551,7 @@ static void intel_lvds_prepare(struct dr - dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & - BACKLIGHT_DUTY_CYCLE_MASK); - -- intel_lvds_set_power(dev, false); -+// intel_lvds_set_power(dev, false); - } - - static void intel_lvds_commit( struct drm_encoder *encoder) -@@ -563,7 +563,7 @@ static void intel_lvds_commit( struct dr - dev_priv->backlight_duty_cycle = - intel_lvds_get_max_backlight(dev); - -- intel_lvds_set_power(dev, true); -+// intel_lvds_set_power(dev, true); - } - - static void intel_lvds_mode_set(struct drm_encoder *encoder, diff --git a/patches.suse/linux-2.6.29-jbd-longer-commit-interval.patch b/patches.suse/linux-2.6.29-jbd-longer-commit-interval.patch deleted file mode 100644 index c773be5..0000000 --- a/patches.suse/linux-2.6.29-jbd-longer-commit-interval.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 0143f8eb8afcaccba5a78196fb3db4361e0097a7 Mon Sep 17 00:00:00 2001 -From: Arjan van de Ven -Date: Mon, 9 Feb 2009 21:25:32 -0800 -Subject: [PATCH] jbd: longer commit interval -Patch-mainline: not yet - -... 5 seconds is rather harsh on ssd's.. 
- -Signed-off-by: Arjan van de Ven -Signed-off-by: Greg Kroah-Hartman - ---- - include/linux/jbd.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/include/linux/jbd.h -+++ b/include/linux/jbd.h -@@ -47,7 +47,7 @@ - /* - * The default maximum commit age, in seconds. - */ --#define JBD_DEFAULT_MAX_COMMIT_AGE 5 -+#define JBD_DEFAULT_MAX_COMMIT_AGE 15 - - #ifdef CONFIG_JBD_DEBUG - /* diff --git a/patches.suse/linux-2.6.29-kms-after-sata.patch b/patches.suse/linux-2.6.29-kms-after-sata.patch deleted file mode 100644 index 31db509..0000000 --- a/patches.suse/linux-2.6.29-kms-after-sata.patch +++ /dev/null @@ -1,46 +0,0 @@ -From: someone at intel -Subject: make kms happen after sata -Patch-mainline: not yet - -I guess this speeds something up, would be nice if we had some descriptions here... - - -Signed-off-by: Greg Kroah-Hartman - - ---- - drivers/Makefile | 14 +++++++------- - 1 file changed, 7 insertions(+), 7 deletions(-) - ---- a/drivers/Makefile -+++ b/drivers/Makefile -@@ -28,15 +28,8 @@ obj-$(CONFIG_REGULATOR) += regulator/ - obj-y += tty/ - obj-y += char/ - --# gpu/ comes after char for AGP vs DRM startup --obj-y += gpu/ -- - obj-$(CONFIG_CONNECTOR) += connector/ - --# i810fb and intelfb depend on char/agp/ --obj-$(CONFIG_FB_I810) += video/i810/ --obj-$(CONFIG_FB_INTEL) += video/intelfb/ -- - obj-$(CONFIG_PARPORT) += parport/ - obj-y += base/ block/ misc/ mfd/ nfc/ - obj-$(CONFIG_NUBUS) += nubus/ -@@ -48,6 +41,13 @@ obj-$(CONFIG_ATA) += ata/ - obj-$(CONFIG_MTD) += mtd/ - obj-$(CONFIG_SPI) += spi/ - obj-y += net/ -+ -+# gpu/ comes after char for AGP vs DRM startup -+obj-y += gpu/ -+# i810fb and intelfb depend on char/agp/ -+obj-$(CONFIG_FB_I810) += video/i810/ -+obj-$(CONFIG_FB_INTEL) += video/intelfb/ -+ - obj-$(CONFIG_ATM) += atm/ - obj-$(CONFIG_FUSION) += message/ - obj-y += firewire/ diff --git a/patches.suse/linux-2.6.29-touchkit.patch b/patches.suse/linux-2.6.29-touchkit.patch deleted file mode 100644 index 52c5d87..0000000 --- 
a/patches.suse/linux-2.6.29-touchkit.patch +++ /dev/null @@ -1,135 +0,0 @@ -From: someone at intel -Subject: some new touch screen device ids -Patch-mainline: not yet - - -Signed-off-by: Greg Kroah-Hartman - ---- - drivers/input/mouse/psmouse-base.c | 9 +++++++ - drivers/input/mouse/psmouse.h | 1 - drivers/input/mouse/touchkit_ps2.c | 45 +++++++++++++++++++++++++++++++++++-- - drivers/input/mouse/touchkit_ps2.h | 6 ++++ - 4 files changed, 59 insertions(+), 2 deletions(-) - ---- a/drivers/input/mouse/psmouse-base.c -+++ b/drivers/input/mouse/psmouse-base.c -@@ -708,6 +708,9 @@ static int psmouse_extensions(struct psm - - if (touchkit_ps2_detect(psmouse, set_properties) == 0) - return PSMOUSE_TOUCHKIT_PS2; -+ -+ if (elftouch_ps2_detect(psmouse, set_properties) == 0) -+ return PSMOUSE_ELFTOUCH_PS2; - } - - /* -@@ -836,6 +839,12 @@ static const struct psmouse_protocol psm - .alias = "trackpoint", - .detect = trackpoint_detect, - }, -+ { -+ .type = PSMOUSE_ELFTOUCH_PS2, -+ .name = "elftouchPS2", -+ .alias = "elftouch", -+ .detect = elftouch_ps2_detect, -+ }, - #endif - #ifdef CONFIG_MOUSE_PS2_TOUCHKIT - { ---- a/drivers/input/mouse/psmouse.h -+++ b/drivers/input/mouse/psmouse.h -@@ -90,6 +90,7 @@ enum psmouse_type { - PSMOUSE_TRACKPOINT, - PSMOUSE_TOUCHKIT_PS2, - PSMOUSE_CORTRON, -+ PSMOUSE_ELFTOUCH_PS2, - PSMOUSE_HGPK, - PSMOUSE_ELANTECH, - PSMOUSE_FSP, ---- a/drivers/input/mouse/touchkit_ps2.c -+++ b/drivers/input/mouse/touchkit_ps2.c -@@ -50,6 +50,11 @@ - #define TOUCHKIT_GET_X(packet) (((packet)[1] << 7) | (packet)[2]) - #define TOUCHKIT_GET_Y(packet) (((packet)[3] << 7) | (packet)[4]) - -+#define ELFTOUCH_MAX_XC 0x0fff -+#define ELFTOUCH_MAX_YC 0x0fff -+#define ELFTOUCH_GET_X(packet) (((packet)[3] << 7) | (packet)[4]) -+#define ELFTOUCH_GET_Y(packet) (((packet)[1] << 7) | (packet)[2]) -+ - static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse) - { - unsigned char *packet = psmouse->packet; -@@ -58,9 +63,15 @@ static psmouse_ret_t 
touchkit_ps2_proces - if (psmouse->pktcnt != 5) - return PSMOUSE_GOOD_DATA; - -- input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet)); -- input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet)); -+ if(psmouse->type==PSMOUSE_ELFTOUCH_PS2) { -+ input_report_abs(dev, ABS_X, ELFTOUCH_GET_X(packet)); -+ input_report_abs(dev, ABS_Y, ELFTOUCH_GET_Y(packet)); -+ } else { -+ input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet)); -+ input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet)); -+ } - input_report_key(dev, BTN_TOUCH, TOUCHKIT_GET_TOUCHED(packet)); -+ - input_sync(dev); - - return PSMOUSE_FULL_PACKET; -@@ -98,3 +109,33 @@ int touchkit_ps2_detect(struct psmouse * - - return 0; - } -+ -+int elftouch_ps2_detect(struct psmouse *psmouse, bool set_properties) -+{ -+ struct input_dev *dev = psmouse->dev; -+ unsigned char param[16]; -+ int command, res; -+ -+ param[0]=0x0f4; -+ command = TOUCHKIT_SEND_PARMS(1, 0, TOUCHKIT_CMD); -+ res=ps2_command(&psmouse->ps2dev, param, command); -+ if(res) { return -ENODEV; } -+ -+ param[0]=0x0b0; -+ command = TOUCHKIT_SEND_PARMS(1, 1, TOUCHKIT_CMD); -+ res=ps2_command(&psmouse->ps2dev, param, command); -+ if(res) { return -ENODEV; } -+ -+ if (set_properties) { -+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); -+ set_bit(BTN_TOUCH, dev->keybit); -+ input_set_abs_params(dev, ABS_X, 0, ELFTOUCH_MAX_XC, 0, 0); -+ input_set_abs_params(dev, ABS_Y, 0, ELFTOUCH_MAX_YC, 0, 0); -+ -+ psmouse->vendor = "ElfTouch"; -+ psmouse->name = "Touchscreen"; -+ psmouse->protocol_handler = touchkit_ps2_process_byte; -+ psmouse->pktsize = 5; -+ } -+ return 0; -+} ---- a/drivers/input/mouse/touchkit_ps2.h -+++ b/drivers/input/mouse/touchkit_ps2.h -@@ -14,11 +14,17 @@ - - #ifdef CONFIG_MOUSE_PS2_TOUCHKIT - int touchkit_ps2_detect(struct psmouse *psmouse, bool set_properties); -+int elftouch_ps2_detect(struct psmouse *psmouse, bool set_properties); - #else - static inline int touchkit_ps2_detect(struct psmouse *psmouse, - bool set_properties) - { - return -ENOSYS; 
-+} -+static inline int elftouch_ps2_detect(struct psmouse *psmouse, -+ bool set_properties) -+{ -+ return -ENOSYS; - } - #endif /* CONFIG_MOUSE_PS2_TOUCHKIT */ - diff --git a/patches.suse/mm-devzero-optimisation.patch b/patches.suse/mm-devzero-optimisation.patch deleted file mode 100644 index 53ed0fa..0000000 --- a/patches.suse/mm-devzero-optimisation.patch +++ /dev/null @@ -1,260 +0,0 @@ -From: Nick Piggin -Subject: mm: /dev/zero optimisation -References: bnc#430738 -Patch-mainline: no (could be submit) - -Patch for removal of ZERO_PAGE from main VM paths also removed the -/dev/zero optimisation to map directly from ZERO_PAGE when doing -mmap() and also the interesting read(2) "hack" where the MMU was -used to make zero-filling the target buffer zero-copy. - -Some benchmarks have run into issues with this. Customers sometimes -use these benchmarks to qualify and test systems, so even if the -benchmarks themselves are "stupid", it saves some trouble to retain -this optimisation for them. Also, while I don't think it was established -that there is a "real" workload where this helps, but it can't be proven -that one does not exist. - -Signed-off-by: Nick Piggin ---- - drivers/char/mem.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++++++++- - include/linux/mm.h | 2 + - mm/memory.c | 87 +++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 193 insertions(+), 1 deletion(-) - ---- a/drivers/char/mem.c -+++ b/drivers/char/mem.c -@@ -639,6 +639,100 @@ static ssize_t splice_write_null(struct - return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null); - } - -+#if 1 //ndef CONFIG_XEN -+/* -+ * For fun, we are using the MMU for this. -+ */ -+static inline size_t read_zero_pagealigned(char __user * buf, size_t size) -+{ -+ struct mm_struct *mm; -+ struct vm_area_struct * vma; -+ unsigned long addr=(unsigned long)buf; -+ -+ mm = current->mm; -+ /* Oops, this was forgotten before. 
-ben */ -+ down_read(&mm->mmap_sem); -+ -+ /* For private mappings, just map in zero pages. */ -+ for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) { -+ unsigned long count; -+ -+ if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0) -+ goto out_up; -+ if (vma->vm_flags & (VM_SHARED | VM_HUGETLB)) -+ break; -+ count = vma->vm_end - addr; -+ if (count > size) -+ count = size; -+ -+ zap_page_range(vma, addr, count, NULL); -+ if (zeromap_page_range(vma, addr, count, PAGE_COPY)) -+ break; -+ -+ size -= count; -+ buf += count; -+ addr += count; -+ if (size == 0) -+ goto out_up; -+ } -+ -+ up_read(&mm->mmap_sem); -+ -+ /* The shared case is hard. Let's do the conventional zeroing. */ -+ do { -+ unsigned long unwritten = clear_user(buf, PAGE_SIZE); -+ if (unwritten) -+ return size + unwritten - PAGE_SIZE; -+ cond_resched(); -+ buf += PAGE_SIZE; -+ size -= PAGE_SIZE; -+ } while (size); -+ -+ return size; -+out_up: -+ up_read(&mm->mmap_sem); -+ return size; -+} -+ -+static ssize_t read_zero(struct file *file, char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ unsigned long left, unwritten, written = 0; -+ -+ if (!count) -+ return 0; -+ -+ if (!access_ok(VERIFY_WRITE, buf, count)) -+ return -EFAULT; -+ -+ left = count; -+ -+ /* do we want to be clever? Arbitrary cut-off */ -+ if (count >= PAGE_SIZE*4) { -+ unsigned long partial; -+ -+ /* How much left of the page? */ -+ partial = (PAGE_SIZE-1) & -(unsigned long) buf; -+ unwritten = clear_user(buf, partial); -+ written = partial - unwritten; -+ if (unwritten) -+ goto out; -+ left -= partial; -+ buf += partial; -+ unwritten = read_zero_pagealigned(buf, left & PAGE_MASK); -+ written += (left & PAGE_MASK) - unwritten; -+ if (unwritten) -+ goto out; -+ buf += left & PAGE_MASK; -+ left &= ~PAGE_MASK; -+ } -+ unwritten = clear_user(buf, left); -+ written += left - unwritten; -+out: -+ return written ? 
written : -EFAULT; -+} -+ -+#else /* CONFIG_XEN */ - static ssize_t read_zero(struct file *file, char __user *buf, - size_t count, loff_t *ppos) - { -@@ -669,15 +763,24 @@ static ssize_t read_zero(struct file *fi - } - return written ? written : -EFAULT; - } -+#endif /* CONFIG_XEN */ - - static int mmap_zero(struct file *file, struct vm_area_struct *vma) - { -+ int err = 0; -+ - #ifndef CONFIG_MMU - return -ENOSYS; - #endif -+ - if (vma->vm_flags & VM_SHARED) - return shmem_zero_setup(vma); -- return 0; -+#if 1 //ndef CONFIG_XEN -+ err = zeromap_page_range(vma, vma->vm_start, -+ vma->vm_end - vma->vm_start, vma->vm_page_prot); -+ BUG_ON(err == -EEXIST); -+#endif -+ return err; - } - - static ssize_t write_full(struct file *file, const char __user *buf, ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -799,6 +799,8 @@ void free_pgd_range(struct mmu_gather *t - unsigned long end, unsigned long floor, unsigned long ceiling); - int copy_page_range(struct mm_struct *dst, struct mm_struct *src, - struct vm_area_struct *vma); -+int zeromap_page_range(struct vm_area_struct *vma, unsigned long from, -+ unsigned long size, pgprot_t prot); - void unmap_mapping_range(struct address_space *mapping, - loff_t const holebegin, loff_t const holelen, int even_cows); - int follow_pfn(struct vm_area_struct *vma, unsigned long address, ---- a/mm/memory.c -+++ b/mm/memory.c -@@ -1590,6 +1590,93 @@ struct page *get_dump_page(unsigned long - } - #endif /* CONFIG_ELF_CORE */ - -+static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd, -+ unsigned long addr, unsigned long end, pgprot_t prot) -+{ -+ pte_t *pte; -+ spinlock_t *ptl; -+ int err = 0; -+ -+ pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); -+ if (!pte) -+ return -EAGAIN; -+ arch_enter_lazy_mmu_mode(); -+ do { -+ pte_t zero_pte; -+ -+ if (unlikely(!pte_none(*pte))) { -+ err = -EEXIST; -+ pte++; -+ break; -+ } -+ zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(addr), prot)); -+ zero_pte = pte_wrprotect(zero_pte); -+ 
set_pte_at(mm, addr, pte, zero_pte); -+ } while (pte++, addr += PAGE_SIZE, addr != end); -+ arch_leave_lazy_mmu_mode(); -+ pte_unmap_unlock(pte - 1, ptl); -+ return err; -+} -+ -+static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud, -+ unsigned long addr, unsigned long end, pgprot_t prot) -+{ -+ pmd_t *pmd; -+ unsigned long next; -+ int err; -+ -+ pmd = pmd_alloc(mm, pud, addr); -+ if (!pmd) -+ return -EAGAIN; -+ do { -+ next = pmd_addr_end(addr, end); -+ err = zeromap_pte_range(mm, pmd, addr, next, prot); -+ if (err) -+ break; -+ } while (pmd++, addr = next, addr != end); -+ return err; -+} -+ -+static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd, -+ unsigned long addr, unsigned long end, pgprot_t prot) -+{ -+ pud_t *pud; -+ unsigned long next; -+ int err; -+ -+ pud = pud_alloc(mm, pgd, addr); -+ if (!pud) -+ return -EAGAIN; -+ do { -+ next = pud_addr_end(addr, end); -+ err = zeromap_pmd_range(mm, pud, addr, next, prot); -+ if (err) -+ break; -+ } while (pud++, addr = next, addr != end); -+ return err; -+} -+ -+int zeromap_page_range(struct vm_area_struct *vma, -+ unsigned long addr, unsigned long size, pgprot_t prot) -+{ -+ pgd_t *pgd; -+ unsigned long next; -+ unsigned long end = addr + size; -+ struct mm_struct *mm = vma->vm_mm; -+ int err; -+ -+ BUG_ON(addr >= end); -+ pgd = pgd_offset(mm, addr); -+ flush_cache_range(vma, addr, end); -+ do { -+ next = pgd_addr_end(addr, end); -+ err = zeromap_pud_range(mm, pgd, addr, next, prot); -+ if (err) -+ break; -+ } while (pgd++, addr = next, addr != end); -+ return err; -+} -+ - pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, - spinlock_t **ptl) - { diff --git a/patches.suse/mm-increase-dirty-limits.patch b/patches.suse/mm-increase-dirty-limits.patch deleted file mode 100644 index 093ed6f..0000000 --- a/patches.suse/mm-increase-dirty-limits.patch +++ /dev/null @@ -1,26 +0,0 @@ -From: Jan Kara -Subject: Increase limits for starting writeback of dirty data -References: 
bnc#449662 -Patch-mainline: ? - -Return limits for dirty pages writeback to the numbers from SLES10. This dramatically -improves performance of workloads dirtying a lot of memory (e.g. simple databases not -using direct IO) and we're not aware it would harm anything. - -Signed-off-by: Jan Kara - ---- - mm/page-writeback.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/mm/page-writeback.c -+++ b/mm/page-writeback.c -@@ -83,7 +83,7 @@ int vm_highmem_is_dirtyable; - /* - * The generator of dirty data starts writeback at this percentage - */ --int vm_dirty_ratio = 10; -+int vm_dirty_ratio = 40; - - /* - * vm_dirty_bytes starts at 0 (disabled) so that it is a function of diff --git a/patches.suse/mm-tune-dirty-limits.patch b/patches.suse/mm-tune-dirty-limits.patch deleted file mode 100644 index 7e5fbc9..0000000 --- a/patches.suse/mm-tune-dirty-limits.patch +++ /dev/null @@ -1,77 +0,0 @@ -From: Suresh Jayaraman -Subject: [PATCH] mm: Make default VM dirty ratio configurable to suit different workloads -References: bnc#552883 -Patch-mainline: Never - -Based on the observation that higher VM dirty ratio improves performance of -most server workloads that dirties a lot of memory (e.g. simple databases not -using direct IO, workloads doing heavy writes) and the latency-sensitive -workloads like desktop and typical workstations perform better with a -decreased VM dirty ratio, make default VM dirty ratio configurable. This also -ensures that we have the similar dirty pages writeback limit in SLES11 SP1 as -compared to SLES11 GM. - -The default VM dirty ratio is 20 for kernel-desktop flavor and 40 for all the -other flavors. - -Also introduce a new CONFIG_KERNEL_DESKTOP option which might allow to tune -the kernel to suit desktop workloads. 
- -Signed-off-by: Suresh Jayaraman -Acked-by: Jiri Kosina -Acked-by: Jeff Mahoney ---- - init/Kconfig | 24 ++++++++++++++++++++++++ - mm/page-writeback.c | 2 +- - 2 files changed, 25 insertions(+), 1 deletion(-) - ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -32,6 +32,12 @@ config SPLIT_PACKAGE - If you aren't packaging a kernel for distribution, it's safe to - say n. - -+config KERNEL_DESKTOP -+ bool "Kernel to suit desktop workloads" -+ help -+ This is an option used to tune kernel parameters to better suit -+ desktop workloads. -+ - config ARCH - string - option env="ARCH" -@@ -1169,6 +1176,23 @@ config MMAP_ALLOW_UNINITIALIZED - - See Documentation/nommu-mmap.txt for more information. - -+config DEFAULT_VM_DIRTY_RATIO -+ int "Default VM dirty ratio (in %)" -+ default 20 if KERNEL_DESKTOP -+ default 40 -+ help -+ Allows to tune VM dirty ratio to suit different workloads. Increased -+ VM dirty ratio improves performance of most server workloads that -+ dirties a lot of memory (e.g. simple databases not using direct IO, -+ workloads doing heavy writes). The latency-sensitive workloads like -+ desktop and typical workstations perform better with a decreased -+ VM dirty ratio. -+ -+ Recommended value for desktop workload is 20. -+ Recommended value for server workload is 40. -+ -+ Only use this if you really know what you are doing. 
-+ - config PROFILING - bool "Profiling support" - help ---- a/mm/page-writeback.c -+++ b/mm/page-writeback.c -@@ -78,7 +78,7 @@ int vm_highmem_is_dirtyable; - /* - * The generator of dirty data starts writeback at this percentage - */ --int vm_dirty_ratio = 20; -+int vm_dirty_ratio = CONFIG_DEFAULT_VM_DIRTY_RATIO; - - /* - * vm_dirty_bytes starts at 0 (disabled) so that it is a function of diff --git a/patches.suse/mpath-fix b/patches.suse/mpath-fix deleted file mode 100644 index d3f55c2..0000000 --- a/patches.suse/mpath-fix +++ /dev/null @@ -1,50 +0,0 @@ -From: Jeff Mahoney -Subject: dm-mpath: pgpath->path.pdev -> pgpath->path.dev->name -Patch-mainline: Dependent on local patches - - 2.6.38-rc1 moved the device name from path.pdev to pgpath->path.dev->name. - - This patch addresses that. - -Signed-off-by: Jeff Mahoney ---- - drivers/md/dm-mpath.c | 7 +++---- - 1 file changed, 3 insertions(+), 4 deletions(-) - ---- a/drivers/md/dm-mpath.c -+++ b/drivers/md/dm-mpath.c -@@ -165,7 +165,6 @@ static struct priority_group *alloc_prio - static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) - { - struct pgpath *pgpath, *tmp; -- struct multipath *m = ti->private; - - list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { - list_del(&pgpath->list); -@@ -1201,7 +1200,7 @@ static void pg_init_done(void *data, int - break; - } - DMERR("Count not failover device %s: Handler scsi_dh_%s " -- "was not loaded.", pgpath->path.pdev, -+ "was not loaded.", pgpath->path.dev->name, - m->hw_handler_name); - /* - * Fail path for now, so we do not ping pong -@@ -1216,7 +1215,7 @@ static void pg_init_done(void *data, int - bypass_pg(m, pg, 1); - break; - case SCSI_DH_DEV_OFFLINED: -- DMWARN("Device %s offlined.", pgpath->path.pdev); -+ DMWARN("Device %s offlined.", pgpath->path.dev->name); - errors = 0; - break; - case SCSI_DH_RETRY: -@@ -1241,7 +1240,7 @@ static void pg_init_done(void *data, int - if (errors) { - if (pgpath == m->current_pgpath) { - DMERR("Could not 
failover device %s, error %d.", -- pgpath->path.pdev, errors); -+ pgpath->path.dev->name, errors); - m->current_pgpath = NULL; - m->current_pg = NULL; - } diff --git a/patches.suse/nameif-track-rename.patch b/patches.suse/nameif-track-rename.patch deleted file mode 100644 index 90dcf64..0000000 --- a/patches.suse/nameif-track-rename.patch +++ /dev/null @@ -1,53 +0,0 @@ -Subject: [PATCH] keep track of network interface renaming -From: Olaf Hering -Patch-mainline: not yet - -Keep track about which network interface names were renamed after the -network device driver printed its banner. Example insanity: - -honeydew:~ # dmesg| grep -Ew '(eth[0-9]|rename|renamed)' -e1000: eth0: e1000_probe: Intel(R) PRO/1000 Network Connection -e1000: eth1: e1000_probe: Intel(R) PRO/1000 Network Connection -e1000: eth2: e1000_probe: Intel(R) PRO/1000 Network Connection -e1000: eth3: e1000_probe: Intel(R) PRO/1000 Network Connection -dev_change_name: about to rename 'eth3' to 'eth0' -dev_change_name: about to rename 'eth3' to 'ethxx3' -eth3 renamed to ethxx3 -dev_change_name: about to rename 'ethxx3' to 'eth0' -dev_change_name: about to rename 'eth0' to 'eth3' -eth0 renamed to eth3 -dev_change_name: about to rename 'eth1' to 'eth2' -dev_change_name: about to rename 'eth1' to 'ethxx1' -eth1 renamed to ethxx1 -dev_change_name: about to rename 'ethxx1' to 'eth2' -dev_change_name: about to rename 'eth2' to 'eth1' -eth2 renamed to eth1 -dev_change_name: about to rename 'ethxx3' to 'eth0' -ethxx3 renamed to eth0 -dev_change_name: about to rename 'ethxx1' to 'eth2' -ethxx1 renamed to eth2 -e1000: eth0: e1000_watchdog_task: NIC Link is Up 100 Mbps Full Duplex - - - -Signed-off-by: Olaf Hering - - net/core/dev.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -962,8 +962,12 @@ static int dev_get_valid_name(struct net - return dev_alloc_name(dev, name); - else if (__dev_get_by_name(net, name)) - return -EEXIST; -- else if (dev->name != 
name) -+ else if (dev->name != name) { -+ if (strncmp(name, dev->name, IFNAMSIZ)) -+ printk(KERN_INFO "%s renamed to %s by %s [%u]\n", -+ dev->name, name, current->comm, current->pid); - strlcpy(dev->name, name, IFNAMSIZ); -+ } - - return 0; - } diff --git a/patches.suse/netfilter-ip_conntrack_slp.patch b/patches.suse/netfilter-ip_conntrack_slp.patch deleted file mode 100644 index e7cf5f5..0000000 --- a/patches.suse/netfilter-ip_conntrack_slp.patch +++ /dev/null @@ -1,181 +0,0 @@ -From: Jiri Bohac -Subject: connection tracking helper for SLP -References: fate#301134 -Patch-mainline: Not yet - -A simple connection tracking helper for SLP. Marks replies to a -SLP broadcast query as ESTABLISHED to allow them to pass through the -firewall. - -Signed-off-by: Jiri Bohac - ---- - net/netfilter/Kconfig | 15 ++++ - net/netfilter/Makefile | 1 - net/netfilter/nf_conntrack_slp.c | 127 +++++++++++++++++++++++++++++++++++++++ - 3 files changed, 143 insertions(+) - ---- a/net/netfilter/Kconfig -+++ b/net/netfilter/Kconfig -@@ -260,6 +260,21 @@ config NF_CONNTRACK_TFTP - - To compile it as a module, choose M here. If unsure, say N. - -+config NF_CONNTRACK_SLP -+ tristate "SLP protocol support" -+ depends on NF_CONNTRACK -+ depends on NETFILTER_ADVANCED -+ help -+ SLP queries are sometimes sent as broadcast messages from an -+ unprivileged port and responded to with unicast messages to the -+ same port. This make them hard to firewall properly because connection -+ tracking doesn't deal with broadcasts. This helper tracks locally -+ originating broadcast SLP queries and the corresponding -+ responses. It relies on correct IP address configuration, specifically -+ netmask and broadcast address. -+ -+ To compile it as a module, choose M here. If unsure, say N. 
-+ - config NF_CT_NETLINK - tristate 'Connection tracking netlink interface' - select NETFILTER_NETLINK ---- a/net/netfilter/Makefile -+++ b/net/netfilter/Makefile -@@ -33,6 +33,7 @@ obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_co - obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o - obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o - obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o -+obj-$(CONFIG_NF_CONNTRACK_SLP) += nf_conntrack_slp.o - - # transparent proxy support - obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o ---- /dev/null -+++ b/net/netfilter/nf_conntrack_slp.c -@@ -0,0 +1,127 @@ -+/* -+ * NetBIOS name service broadcast connection tracking helper -+ * -+ * (c) 2007 Jiri Bohac -+ * (c) 2005 Patrick McHardy -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version -+ * 2 of the License, or (at your option) any later version. -+ */ -+/* -+ * This helper tracks locally originating NetBIOS name service -+ * requests by issuing permanent expectations (valid until -+ * timing out) matching all reply connections from the -+ * destination network. The only NetBIOS specific thing is -+ * actually the port number. 
-+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#define SLP_PORT 427 -+ -+MODULE_AUTHOR("Jiri Bohac "); -+MODULE_DESCRIPTION("SLP broadcast connection tracking helper"); -+MODULE_LICENSE("GPL"); -+MODULE_ALIAS("ip_conntrack_slp"); -+ -+static unsigned int timeout __read_mostly = 3; -+module_param(timeout, uint, 0400); -+MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds"); -+ -+static int help(struct sk_buff *skb, unsigned int protoff, -+ struct nf_conn *ct, enum ip_conntrack_info ctinfo) -+{ -+ struct nf_conntrack_expect *exp; -+ struct iphdr *iph = ip_hdr(skb); -+ struct rtable *rt = skb_rtable(skb); -+ struct in_device *in_dev; -+ __be32 mask = 0; -+ -+ /* we're only interested in locally generated packets */ -+ if (skb->sk == NULL) -+ goto out; -+ if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) -+ goto out; -+ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) -+ goto out; -+ -+ rcu_read_lock(); -+ in_dev = __in_dev_get_rcu(rt->dst.dev); -+ if (in_dev != NULL) { -+ for_primary_ifa(in_dev) { -+ if (ifa->ifa_broadcast == iph->daddr) { -+ mask = ifa->ifa_mask; -+ break; -+ } -+ } endfor_ifa(in_dev); -+ } -+ rcu_read_unlock(); -+ -+ if (mask == 0) -+ goto out; -+ -+ exp = nf_ct_expect_alloc(ct); -+ if (exp == NULL) -+ goto out; -+ -+ exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; -+ exp->tuple.src.u.udp.port = htons(SLP_PORT); -+ -+ exp->mask.src.u3.ip = mask; -+ exp->mask.src.u.udp.port = htons(0xFFFF); -+ -+ exp->expectfn = NULL; -+ exp->flags = NF_CT_EXPECT_PERMANENT; -+ exp->class = NF_CT_EXPECT_CLASS_DEFAULT; -+ exp->helper = NULL; -+ -+ nf_ct_expect_related(exp); -+ nf_ct_expect_put(exp); -+ -+ nf_ct_refresh(ct, skb, timeout * HZ); -+out: -+ return NF_ACCEPT; -+} -+ -+static struct nf_conntrack_expect_policy exp_policy = { -+ .max_expected = 1, -+}; -+ -+static struct nf_conntrack_helper helper __read_mostly = { 
-+ .name = "slp", -+ .tuple.src.l3num = AF_INET, -+ .tuple.src.u.udp.port = __constant_htons(SLP_PORT), -+ .tuple.dst.protonum = IPPROTO_UDP, -+ .me = THIS_MODULE, -+ .help = help, -+ .expect_policy = &exp_policy, -+}; -+ -+static int __init nf_conntrack_slp_init(void) -+{ -+ exp_policy.timeout = timeout; -+ return nf_conntrack_helper_register(&helper); -+} -+ -+static void __exit nf_conntrack_slp_fini(void) -+{ -+ nf_conntrack_helper_unregister(&helper); -+} -+ -+module_init(nf_conntrack_slp_init); -+module_exit(nf_conntrack_slp_fini); diff --git a/patches.suse/nfsacl-client-cache-CHECK.diff b/patches.suse/nfsacl-client-cache-CHECK.diff deleted file mode 100644 index add3ead..0000000 --- a/patches.suse/nfsacl-client-cache-CHECK.diff +++ /dev/null @@ -1,76 +0,0 @@ -From: Andreas Gruenbacher -Subject: nfsacl: improve cache consistency - -(This one is currently disabled.) - -Index: linux-2.6.11-rc2/fs/nfs/inode.c -=================================================================== ---- linux-2.6.11-rc2.orig/fs/nfs/inode.c -+++ linux-2.6.11-rc2/fs/nfs/inode.c -@@ -65,13 +65,8 @@ static int nfs_statfs(struct super_bloc - static int nfs_show_options(struct seq_file *, struct vfsmount *); - - #ifdef CONFIG_NFS_ACL --static void nfs_forget_cached_acls(struct inode *); - static void __nfs_forget_cached_acls(struct nfs_inode *nfsi); - #else --static inline void nfs_forget_cached_acls(struct inode *inode) --{ --} -- - static inline void __nfs_forget_cached_acls(struct nfs_inode *nfsi) - { - } -@@ -1188,7 +1183,7 @@ static void __nfs_forget_cached_acls(str - #endif /* CONFIG_NFS_ACL */ - - #ifdef CONFIG_NFS_ACL --static void nfs_forget_cached_acls(struct inode *inode) -+void nfs_forget_cached_acls(struct inode *inode) - { - dprintk("NFS: nfs_forget_cached_acls(%s/%ld)\n", inode->i_sb->s_id, - inode->i_ino); -@@ -1293,6 +1288,8 @@ int nfs_refresh_inode(struct inode *inod - if ((fattr->valid & NFS_ATTR_WCC) != 0) { - if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) - 
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); -+ else -+ nfs_forget_cached_acls(inode); - if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) - memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); - } -Index: linux-2.6.11-rc2/fs/nfs/nfs3proc.c -=================================================================== ---- linux-2.6.11-rc2.orig/fs/nfs/nfs3proc.c -+++ linux-2.6.11-rc2/fs/nfs/nfs3proc.c -@@ -876,7 +876,11 @@ nfs3_proc_setacls(struct inode *inode, s - acl = NULL; - } - } -- nfs_cache_acls(inode, acl, dfacl); -+ if ((fattr.valid & NFS_ATTR_WCC) && -+ timespec_equal(&inode->i_ctime, &fattr.pre_ctime)) -+ nfs_cache_acls(inode, acl, dfacl); -+ else -+ nfs_forget_cached_acls(inode); - status = nfs_refresh_inode(inode, &fattr); - } - -Index: linux-2.6.11-rc2/include/linux/nfs_fs.h -=================================================================== ---- linux-2.6.11-rc2.orig/include/linux/nfs_fs.h -+++ linux-2.6.11-rc2/include/linux/nfs_fs.h -@@ -293,6 +293,13 @@ extern struct inode *nfs_fhget(struct su - struct nfs_fattr *); - extern struct posix_acl *nfs_get_cached_acl(struct inode *, int); - extern void nfs_cache_acls(struct inode *, struct posix_acl *, struct posix_acl *); -+#ifdef CONFIG_NFS_ACL -+void nfs_forget_cached_acls(struct inode *); -+#else -+static inline void nfs_forget_cached_acls(struct inode *inode) -+{ -+} -+#endif - extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); - extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); - extern int nfs_permission(struct inode *, int, struct nameidata *); diff --git a/patches.suse/no-frame-pointer-select b/patches.suse/no-frame-pointer-select deleted file mode 100644 index 5c6091b..0000000 --- a/patches.suse/no-frame-pointer-select +++ /dev/null @@ -1,41 +0,0 @@ -From: Andi Kleen -Subject: Fix stack unwinder Kconfig -Patch-mainline: no -References: bnc#402518 - -Incremental patch for dwarf2 unwinder - -Fix the Kconfigs that do SELECT 
FRAME_POINTER to do select UNWIND_INFO -instead. - -Signed-off-by: Andi Kleen -Acked-by: Jan Beulich - ---- - lib/Kconfig.debug | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - ---- a/lib/Kconfig.debug -+++ b/lib/Kconfig.debug -@@ -1020,17 +1020,19 @@ config FAULT_INJECTION_STACKTRACE_FILTER - depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT - depends on !X86_64 - select STACKTRACE -- select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE -+ select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !X86 -+ select UNWIND_INFO if X86 && !FRAME_POINTER - help - Provide stacktrace filter for fault-injection capabilities - - config LATENCYTOP - bool "Latency measuring infrastructure" - depends on HAVE_LATENCYTOP_SUPPORT - depends on DEBUG_KERNEL - depends on STACKTRACE_SUPPORT - depends on PROC_FS -- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE -+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !X86 -+ select UNWIND_INFO if X86 && !FRAME_POINTER - select KALLSYMS - select KALLSYMS_ALL - select STACKTRACE diff --git a/patches.suse/no-partition-scan b/patches.suse/no-partition-scan deleted file mode 100644 index 6f15f5f..0000000 --- a/patches.suse/no-partition-scan +++ /dev/null @@ -1,108 +0,0 @@ -From: Hannes Reinecke -Subject: Implement 'no_partition_scan' commandline option -Refences: FATE#303697 -Patch-mainline: Not yet - -Under certain setups the partition table on the disk is not -useable directly (eg for dmraid or multipathing). So we should -be able to switch it off completely so as not to be flooded with -pointless messages. 
- -Signed-off-by: Hannes Reinecke - ---- - block/genhd.c | 39 +++++++++++++++++++++++++++++++++++++-- - fs/partitions/check.c | 2 ++ - include/linux/genhd.h | 1 + - 3 files changed, 40 insertions(+), 2 deletions(-) - ---- a/block/genhd.c -+++ b/block/genhd.c -@@ -504,6 +504,18 @@ static int exact_lock(dev_t devt, void * - disk_part_iter_exit(&piter); - } - -+static int __read_mostly no_partition_scan; -+ -+static int __init no_partition_scan_setup(char *str) -+{ -+ no_partition_scan = 1; -+ printk(KERN_INFO "genhd: omit partition scan.\n"); -+ -+ return 1; -+} -+ -+__setup("no_partition_scan", no_partition_scan_setup); -+ - /** - * add_disk - add partitioning information to kernel list - * @disk: per-device partitioning information -@@ -528,6 +540,9 @@ void add_disk(struct gendisk *disk) - - disk->flags |= GENHD_FL_UP; - -+ if (no_partition_scan) -+ disk->flags |= GENHD_FL_NO_PARTITION_SCAN; -+ - retval = blk_alloc_devt(&disk->part0, &devt); - if (retval) { - WARN_ON(1); -@@ -817,7 +832,27 @@ static ssize_t disk_range_show(struct de - { - struct gendisk *disk = dev_to_disk(dev); - -- return sprintf(buf, "%d\n", disk->minors); -+ return sprintf(buf, "%d\n", -+ (disk->flags & GENHD_FL_NO_PARTITION_SCAN ? 
0 : disk->minors)); -+} -+ -+static ssize_t disk_range_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct gendisk *disk = dev_to_disk(dev); -+ int i; -+ -+ if (count > 0 && sscanf(buf, "%d", &i) > 0) { -+ if (i == 0) -+ disk->flags |= GENHD_FL_NO_PARTITION_SCAN; -+ else if (i <= disk->minors) -+ disk->flags &= ~GENHD_FL_NO_PARTITION_SCAN; -+ else -+ count = -EINVAL; -+ } -+ -+ return count; - } - - static ssize_t disk_ext_range_show(struct device *dev, -@@ -871,7 +906,7 @@ static ssize_t disk_discard_alignment_sh - return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue)); - } - --static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); -+static DEVICE_ATTR(range, S_IRUGO|S_IWUSR, disk_range_show, disk_range_store); - static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); - static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); - static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); ---- a/fs/partitions/check.c -+++ b/fs/partitions/check.c -@@ -597,6 +597,8 @@ rescan: - disk->fops->revalidate_disk(disk); - check_disk_size_change(disk, bdev); - bdev->bd_invalidated = 0; -+ if (disk->flags & GENHD_FL_NO_PARTITION_SCAN) -+ return 0; - if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) - return 0; - if (IS_ERR(state)) { ---- a/include/linux/genhd.h -+++ b/include/linux/genhd.h -@@ -116,6 +116,7 @@ struct hd_struct { - #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 - #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ - #define GENHD_FL_NATIVE_CAPACITY 128 -+#define GENHD_FL_NO_PARTITION_SCAN 256 - - enum { - DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ diff --git a/patches.suse/novfs-2.6.35-api-changes b/patches.suse/novfs-2.6.35-api-changes deleted file mode 100644 index 0119764..0000000 --- a/patches.suse/novfs-2.6.35-api-changes +++ /dev/null @@ -1,54 +0,0 @@ -From: Jeff Mahoney -Subject: [PATCH] novfs: API changes for 2.6.35 -Patch-mainline: WHenever 
novfs makes it upstream - - This patch contains API changes for 2.6.35 - -Acked-by: Jeff Mahoney ---- - fs/novfs/inode.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -84,7 +84,7 @@ void addtodentry(struct dentry *Parent, - int novfs_filldir(void *data, const char *name, int namelen, loff_t off, - ino_t ino, unsigned ftype); - int novfs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir); --int novfs_dir_fsync(struct file *file, struct dentry *dentry, int datasync); -+int novfs_dir_fsync(struct file *file, int datasync); - - /* - * Declared address space operations -@@ -115,7 +115,7 @@ int novfs_f_mmap(struct file *file, stru - int novfs_f_open(struct inode *, struct file *); - int novfs_f_flush(struct file *, fl_owner_t); - int novfs_f_release(struct inode *, struct file *); --int novfs_f_fsync(struct file *, struct dentry *, int datasync); -+int novfs_f_fsync(struct file *, int datasync); - int novfs_f_lock(struct file *, int, struct file_lock *); - - /* -@@ -1278,11 +1278,11 @@ int novfs_dir_readdir(struct file *file, - return 1; - } - --int novfs_dir_fsync(struct file *file, struct dentry *dentry, int datasync) -+int novfs_dir_fsync(struct file *file, int datasync) - { - DbgPrint("Name %.*s", file->f_dentry->d_name.len, - file->f_dentry->d_name.name); -- return (simple_sync_file(file, dentry, datasync)); -+ return generic_file_fsync(file, datasync); - } - - ssize_t novfs_f_read(struct file * file, char *buf, size_t len, loff_t * off) -@@ -1709,7 +1709,7 @@ int novfs_f_release(struct inode *inode, - return (retCode); - } - --int novfs_f_fsync(struct file *file, struct dentry *dentry, int datasync) -+int novfs_f_fsync(struct file *file, int datasync) - { - return 0; - } diff --git a/patches.suse/novfs-2.6.37-api-changes b/patches.suse/novfs-2.6.37-api-changes deleted file mode 100644 index 33d19c1..0000000 --- a/patches.suse/novfs-2.6.37-api-changes +++ /dev/null @@ -1,298 +0,0 
@@ -From: Jeff Mahoney -Subject: novfs: 2.6.37 api changes -Patch-mainline: If novfs gets merged - - 2.6.37-rc1 removed the mutex interface to semaphores. This patch - replaces uses of semaphores as mutex with the mutex interface. - -Signed-off-by: Jeff Mahoney ---- - fs/novfs/daemon.c | 22 +++++++++++----------- - fs/novfs/inode.c | 33 +++++++++++++++++++-------------- - fs/novfs/profile.c | 10 +++++----- - fs/novfs/scope.c | 4 ++-- - 4 files changed, 37 insertions(+), 32 deletions(-) - ---- a/fs/novfs/daemon.c -+++ b/fs/novfs/daemon.c -@@ -109,7 +109,7 @@ static atomic_t Daemon_Open_Count = ATOM - - static unsigned long Daemon_Command_Timeout = TIMEOUT_VALUE; - --static DECLARE_MUTEX(DriveMapLock); -+static DEFINE_MUTEX(DriveMapLock); - static LIST_HEAD(DriveMapList); - - int novfs_max_iosize = PAGE_SIZE; -@@ -118,7 +118,7 @@ void novfs_daemon_queue_init() - { - INIT_LIST_HEAD(&Daemon_Queue.list); - spin_lock_init(&Daemon_Queue.lock); -- init_MUTEX_LOCKED(&Daemon_Queue.semaphore); -+ sema_init(&Daemon_Queue.semaphore, 0); - } - - void novfs_daemon_queue_exit(void) -@@ -159,7 +159,7 @@ int Queue_Daemon_Command(void *request, - que->status = QUEUE_SENDING; - que->flags = 0; - -- init_MUTEX_LOCKED(&que->semaphore); -+ sema_init(&que->semaphore, 0); - - que->sequence = atomic_inc_return(&Sequence); - -@@ -881,7 +881,7 @@ int novfs_daemon_destroy_sessionId(struc - * When destroying the session check to see if there are any - * mapped drives. If there are then remove them. 
- */ -- down(&DriveMapLock); -+ mutex_lock(&DriveMapLock); - list_for_each(list, &DriveMapList) { - dm = list_entry(list, struct drive_map, list); - if (SC_EQUAL(SessionId, dm->session)) { -@@ -892,7 +892,7 @@ int novfs_daemon_destroy_sessionId(struc - } - - } -- up(&DriveMapLock); -+ mutex_unlock(&DriveMapLock); - - } else { - retCode = -EIO; -@@ -1740,7 +1740,7 @@ static int set_map_drive(struct novfs_xp - - dm = (struct drive_map *)&DriveMapList.next; - -- down(&DriveMapLock); -+ mutex_lock(&DriveMapLock); - - list_for_each(list, &DriveMapList) { - dm = list_entry(list, struct drive_map, list); -@@ -1766,7 +1766,7 @@ static int set_map_drive(struct novfs_xp - } - } else - kfree(drivemap); -- up(&DriveMapLock); -+ mutex_unlock(&DriveMapLock); - return (retVal); - } - -@@ -1799,7 +1799,7 @@ static int unmap_drive(struct novfs_xpla - - dm = NULL; - -- down(&DriveMapLock); -+ mutex_lock(&DriveMapLock); - - list_for_each(list, &DriveMapList) { - dm = list_entry(list, struct drive_map, list); -@@ -1823,7 +1823,7 @@ static int unmap_drive(struct novfs_xpla - kfree(dm); - } - -- up(&DriveMapLock); -+ mutex_unlock(&DriveMapLock); - return (retVal); - } - -@@ -1832,7 +1832,7 @@ static void RemoveDriveMaps(void) - struct drive_map *dm; - struct list_head *list; - -- down(&DriveMapLock); -+ mutex_lock(&DriveMapLock); - list_for_each(list, &DriveMapList) { - dm = list_entry(list, struct drive_map, list); - -@@ -1844,7 +1844,7 @@ static void RemoveDriveMaps(void) - list_del(&dm->list); - kfree(dm); - } -- up(&DriveMapLock); -+ mutex_unlock(&DriveMapLock); - } - - /* As picked from do_unlinkat() */ ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -43,7 +43,7 @@ struct inode_data { - struct inode *Inode; - unsigned long cntDC; - struct list_head DirCache; -- struct semaphore DirCacheLock; -+ struct mutex DirCacheLock; - void *FileHandle; - int CacheFlag; - char Name[1]; /* Needs to be last entry */ -@@ -268,11 +268,11 @@ static atomic_t novfs_Inode_Number = ATO - struct 
dentry *novfs_root = NULL; - char *novfs_current_mnt = NULL; - --DECLARE_MUTEX(InodeList_lock); -+DEFINE_MUTEX(InodeList_lock); - - LIST_HEAD(InodeList); - --DECLARE_MUTEX(TimeDir_Lock); -+DEFINE_MUTEX(TimeDir_Lock); - uint64_t lastTime; - char lastDir[PATH_MAX]; - -@@ -1050,7 +1050,7 @@ int novfs_dir_readdir(struct file *file, - // Use this hack by default - #ifndef SKIP_CROSSOVER_HACK - // Hack for crossover - begin -- down(&TimeDir_Lock); -+ mutex_lock(&TimeDir_Lock); - if ((file->f_dentry->d_name.len == 7) && - ((0 == strncmp(file->f_dentry->d_name.name, " !xover", 7)) || - (0 == strncmp(file->f_dentry->d_name.name, "z!xover", 7)))) { -@@ -1076,7 +1076,7 @@ int novfs_dir_readdir(struct file *file, - } - } - -- up(&TimeDir_Lock); -+ mutex_unlock(&TimeDir_Lock); - // Hack for crossover - end - #endif - -@@ -3157,9 +3157,9 @@ void novfs_evict_inode(struct inode *ino - - novfs_free_inode_cache(inode); - -- down(&InodeList_lock); -+ mutex_lock(&InodeList_lock); - list_del(&id->IList); -- up(&InodeList_lock); -+ mutex_unlock(&InodeList_lock); - - kfree(inode->i_private); - inode->i_private = NULL; -@@ -3292,15 +3292,15 @@ struct inode *novfs_get_inode(struct sup - id->cntDC = 1; - - INIT_LIST_HEAD(&id->DirCache); -- init_MUTEX(&id->DirCacheLock); -+ mutex_init(&id->DirCacheLock); - - id->FileHandle = 0; - id->CacheFlag = 0; - -- down(&InodeList_lock); -+ mutex_lock(&InodeList_lock); - - list_add_tail(&id->IList, &InodeList); -- up(&InodeList_lock); -+ mutex_unlock(&InodeList_lock); - - id->Name[0] = '\0'; - -@@ -3443,6 +3443,11 @@ static void novfs_kill_sb(struct super_b - kill_litter_super(super); - } - -+/* This should be removed */ -+#ifndef kernel_locked -+#define kernel_locked() (current->lock_depth >= 0) -+#endif -+ - ssize_t novfs_Control_read(struct file *file, char *buf, size_t nbytes, loff_t * ppos) - { - ssize_t retval = 0; -@@ -3532,7 +3537,7 @@ int novfs_lock_inode_cache(struct inode - - DbgPrint("0x%p", i); - if (i && (id = i->i_private) && 
id->DirCache.next) { -- down(&id->DirCacheLock); -+ mutex_lock(&id->DirCacheLock); - retVal = 1; - } - DbgPrint("return %d", retVal); -@@ -3544,7 +3549,7 @@ void novfs_unlock_inode_cache(struct ino - struct inode_data *id; - - if (i && (id = i->i_private) && id->DirCache.next) { -- up(&id->DirCacheLock); -+ mutex_unlock(&id->DirCacheLock); - } - } - -@@ -4042,7 +4047,7 @@ void novfs_dump_inode(void *pf) - char ctime_buf[32]; - unsigned long icnt = 0, dccnt = 0; - -- down(&InodeList_lock); -+ mutex_lock(&InodeList_lock); - list_for_each(il, &InodeList) { - id = list_entry(il, struct inode_data, IList); - inode = id->Inode; -@@ -4087,7 +4092,7 @@ void novfs_dump_inode(void *pf) - } - } - } -- up(&InodeList_lock); -+ mutex_unlock(&InodeList_lock); - - pfunc("Inodes: %d(%d) DirCache: %d(%d)\n", InodeCount, icnt, DCCount, dccnt); - ---- a/fs/novfs/profile.c -+++ b/fs/novfs/profile.c -@@ -60,7 +60,7 @@ static struct proc_dir_entry *dbg_file = - static struct proc_dir_entry *dentry_file = NULL; - static struct proc_dir_entry *inode_file = NULL; - --static DECLARE_MUTEX(LocalPrint_lock); -+static DEFINE_MUTEX(LocalPrint_lock); - - static ssize_t User_proc_write_DbgBuffer(struct file *file, const char __user * buf, size_t nbytes, loff_t * ppos) - { -@@ -513,7 +513,7 @@ static ssize_t novfs_profile_read_inode( - static char save_DbgPrintOn; - - if (offset == 0) { -- down(&LocalPrint_lock); -+ mutex_lock(&LocalPrint_lock); - save_DbgPrintOn = DbgPrintOn; - DbgPrintOn = 0; - -@@ -527,7 +527,7 @@ static ssize_t novfs_profile_read_inode( - DbgPrintOn = save_DbgPrintOn; - DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; - -- up(&LocalPrint_lock); -+ mutex_unlock(&LocalPrint_lock); - } - - return retval; -@@ -541,7 +541,7 @@ static ssize_t novfs_profile_dentry_read - static char save_DbgPrintOn; - - if (offset == 0) { -- down(&LocalPrint_lock); -+ mutex_lock(&LocalPrint_lock); - save_DbgPrintOn = DbgPrintOn; - DbgPrintOn = 0; - DbgPrintBufferOffset = DbgPrintBufferReadOffset = 
0; -@@ -554,7 +554,7 @@ static ssize_t novfs_profile_dentry_read - DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; - DbgPrintOn = save_DbgPrintOn; - -- up(&LocalPrint_lock); -+ mutex_unlock(&LocalPrint_lock); - } - - return retval; ---- a/fs/novfs/scope.c -+++ b/fs/novfs/scope.c -@@ -601,8 +601,8 @@ char *novfs_scope_dget_path(struct dentr - void novfs_scope_init(void) - { - INIT_LIST_HEAD(&Scope_List); -- init_MUTEX(&Scope_Lock); -- init_MUTEX_LOCKED(&Scope_Thread_Delay); -+ sema_init(&Scope_Lock, 1); -+ sema_init(&Scope_Thread_Delay, 0); - kthread_run(Scope_Cleanup_Thread, NULL, "novfs_ST"); - } - diff --git a/patches.suse/novfs-build-fix b/patches.suse/novfs-build-fix deleted file mode 100644 index 2f8daaa..0000000 --- a/patches.suse/novfs-build-fix +++ /dev/null @@ -1,142 +0,0 @@ -From: Jeff Mahoney -Subject: novfs: Adopt 2.6.38-rc1 API changes -Patch-mainline: depends on local patches - - 2.6.38 changed a few FS-related APIs including - dentry_operations->d_{hash,compare}. - - This patch addresses those. 
- -Signed-off-by: Jeff Mahoney ---- - fs/novfs/inode.c | 46 +++++++++++++++++++++++----------------------- - 1 file changed, 23 insertions(+), 23 deletions(-) - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -64,8 +64,10 @@ static void novfs_kill_sb(struct super_b - * Declared dentry_operations - */ - int novfs_d_revalidate(struct dentry *, struct nameidata *); --int novfs_d_hash(struct dentry *, struct qstr *); --int novfs_d_compare(struct dentry *, struct qstr *, struct qstr *); -+int novfs_d_hash(const struct dentry *, const struct inode *, struct qstr *); -+int novfs_d_compare(const struct dentry *, const struct inode *, -+ const struct dentry *, const struct inode *, -+ unsigned int, const char *, const struct qstr *); - int novfs_d_delete(struct dentry *dentry); - void novfs_d_release(struct dentry *dentry); - void novfs_d_iput(struct dentry *dentry, struct inode *inode); -@@ -306,7 +308,6 @@ static void PRINT_DENTRY(const char *s, - __DbgPrint(" d_op: 0x%p\n", d->d_op); - __DbgPrint(" d_sb: 0x%p\n", d->d_sb); - __DbgPrint(" d_flags: 0x%x\n", d->d_flags); -- __DbgPrint(" d_mounted: 0x%x\n", d->d_mounted); - __DbgPrint(" d_fsdata: 0x%p\n", d->d_fsdata); - /* DbgPrint(" d_cookie: 0x%x\n", d->d_cookie); */ - __DbgPrint(" d_parent: 0x%p\n", d->d_parent); -@@ -327,7 +328,7 @@ int novfs_remove_from_root(char *RemoveN - DbgPrint("%s", RemoveName); - name.len = strlen(RemoveName); - name.name = RemoveName; -- novfs_d_hash(novfs_root, &name); -+ novfs_d_hash(novfs_root, novfs_root->d_inode, &name); - - dentry = d_lookup(novfs_root, &name); - if (dentry) { -@@ -358,7 +359,7 @@ int novfs_add_to_root(char *AddName) - DbgPrint("%s", AddName); - name.len = strlen(AddName); - name.name = AddName; -- novfs_d_hash(novfs_root, &name); -+ novfs_d_hash(novfs_root, novfs_root->d_inode, &name); - - dir = novfs_root->d_inode; - -@@ -392,7 +393,7 @@ int novfs_Add_to_Root2(char *AddName) - name.len = strlen(AddName); - name.name = AddName; - -- novfs_d_hash(novfs_root, &name); 
-+ novfs_d_hash(novfs_root, novfs_root->d_inode, &name); - - entry = d_lookup(novfs_root, &name); - DbgPrint("novfs_d_lookup 0x%p", entry); -@@ -735,7 +736,8 @@ static unsigned long novfs_internal_hash - return (hash); - } - --int novfs_d_hash(struct dentry *dentry, struct qstr *name) -+int novfs_d_hash(const struct dentry *dentry, const struct inode *inode, -+ struct qstr *name) - { - DbgPrint("%.*s", name->len, name->name); - -@@ -744,18 +746,15 @@ int novfs_d_hash(struct dentry *dentry, - return (0); - } - --int novfs_d_strcmp(struct qstr *s1, struct qstr *s2) -+static int novfs_d_strcmp(const char *str1, unsigned int len, -+ const struct qstr *s2) - { - int retCode = 1; -- unsigned char *str1, *str2; -- unsigned int len; -+ const unsigned char *str2 = s2->name; - -- DbgPrint("s1=%.*s s2=%.*s", s1->len, s1->name, s2->len, s2->name); -+ DbgPrint("s1=%.*s s2=%.*s", len, str1, s2->len, s2->name); - -- if (s1->len && (s1->len == s2->len) && (s1->hash == s2->hash)) { -- len = s1->len; -- str1 = (unsigned char *)s1->name; -- str2 = (unsigned char *)s2->name; -+ if (len && (len == s2->len)) { - for (retCode = 0; len--; str1++, str2++) { - if (*str1 != *str2) { - if (tolower(*str1) != tolower(*str2)) { -@@ -770,11 +769,14 @@ int novfs_d_strcmp(struct qstr *s1, stru - return (retCode); - } - --int novfs_d_compare(struct dentry *parent, struct qstr *s1, struct qstr *s2) -+int novfs_d_compare(const struct dentry *parent, -+ const struct inode *parent_inode, -+ const struct dentry *dentry, const struct inode *inode, -+ unsigned int len, const char *s1, const struct qstr *s2) - { - int retCode; - -- retCode = novfs_d_strcmp(s1, s2); -+ retCode = novfs_d_strcmp(s1, len, s2); - - DbgPrint("retCode=0x%x", retCode); - return (retCode); -@@ -2647,7 +2649,7 @@ int novfs_i_rename(struct inode *odir, s - int retCode = -ENOTEMPTY; - char *newpath, *newbuf, *newcon; - char *oldpath, *oldbuf, *oldcon; -- struct qstr newname, oldname; -+ struct qstr oldname; - struct novfs_entry_info 
*info = NULL; - int oldlen, newlen; - struct novfs_schandle session; -@@ -2693,14 +2695,12 @@ int novfs_i_rename(struct inode *odir, s - DbgPrint("2; newcon=0x%p newpath=0x%p", newcon, newpath); - DbgPrint("2; oldcon=0x%p oldpath=0x%p", oldcon, oldpath); - if (newcon && oldcon && ((int)(newcon - newpath) == (int)(oldcon - oldpath))) { -- newname.name = newpath; -- newname.len = (int)(newcon - newpath); -- newname.hash = 0; -- - oldname.name = oldpath; - oldname.len = (int)(oldcon - oldpath); - oldname.hash = 0; -- if (!novfs_d_strcmp(&newname, &oldname)) { -+ if (!novfs_d_strcmp(newpath, -+ newcon - newpath, -+ &oldname)) { - - if (od->d_inode && od->d_inode->i_private) { - diff --git a/patches.suse/novfs-client-module b/patches.suse/novfs-client-module deleted file mode 100644 index a7d25e6..0000000 --- a/patches.suse/novfs-client-module +++ /dev/null @@ -1,15977 +0,0 @@ -From 9297af3ffd8a1c98f35fb7a273386576e061ff16 Mon Sep 17 00:00:00 2001 -From: Greg Kroah-Hartman -Date: Thu, 27 Mar 2008 10:40:48 -0700 -Subject: novfs: Add the Novell filesystem client kernel module -Patch-mainline: not yet, being worked on. - -This adds the Novell filesystem client kernel module. - -Things to do before it can be submitted: - - coding style cleanups - - remove typedefs - - function name lowercase - - 80 chars wide - - sparse cleanups - - __user markings - - endian markings - - remove functions that are never called and structures never used - - yeah, there are a lot of them... - - remove wrapper functions - - private kmalloc/free? - - resolve FIXME markings that have been added to the code - - wrong types passed to functions!!! - - userspace interface revisit - - uses /proc/novfs, not nice. - - might need userspace tools rework - - use of semaphore as mutex - - abuse of semaphore in lieu of completions. 
- -Update May 13 2009 jeffm: -- Merged patches back into master novfs patch - -Cc: Lonnie Iverson -Signed-off-by: Greg Kroah-Hartman - ---- - fs/Kconfig | 1 - fs/Makefile | 1 - fs/novfs/Kconfig | 8 - fs/novfs/Makefile | 19 - fs/novfs/commands.h | 955 ++++++++++ - fs/novfs/daemon.c | 2085 +++++++++++++++++++++++ - fs/novfs/file.c | 1921 +++++++++++++++++++++ - fs/novfs/inode.c | 4638 ++++++++++++++++++++++++++++++++++++++++++++++++++++ - fs/novfs/nwcapi.c | 2202 ++++++++++++++++++++++++ - fs/novfs/nwcapi.h | 1416 +++++++++++++++ - fs/novfs/nwerror.h | 658 +++++++ - fs/novfs/proc.c | 149 + - fs/novfs/profile.c | 704 +++++++ - fs/novfs/scope.c | 659 +++++++ - fs/novfs/vfs.h | 454 +++++ - 15 files changed, 15870 insertions(+) - ---- a/fs/Kconfig -+++ b/fs/Kconfig -@@ -241,6 +241,7 @@ source "fs/ncpfs/Kconfig" - source "fs/coda/Kconfig" - source "fs/afs/Kconfig" - source "fs/9p/Kconfig" -+source "fs/novfs/Kconfig" - - endif # NETWORK_FILESYSTEMS - ---- a/fs/Makefile -+++ b/fs/Makefile -@@ -125,4 +125,5 @@ obj-$(CONFIG_OCFS2_FS) += ocfs2/ - obj-$(CONFIG_BTRFS_FS) += btrfs/ - obj-$(CONFIG_GFS2_FS) += gfs2/ - obj-$(CONFIG_EXOFS_FS) += exofs/ -+obj-$(CONFIG_NOVFS) += novfs/ - obj-$(CONFIG_CEPH_FS) += ceph/ ---- /dev/null -+++ b/fs/novfs/Kconfig -@@ -0,0 +1,8 @@ -+config NOVFS -+ tristate "Novell Netware Filesystem support (novfs) (EXPERIMENTAL)" -+ depends on INET && EXPERIMENTAL -+ help -+ If you say Y here, you will get an experimental Novell Netware -+ filesystem driver. -+ -+ If unsure, say N. ---- /dev/null -+++ b/fs/novfs/Makefile -@@ -0,0 +1,19 @@ -+# -+# Makefile for the Novell NetWare Client for Linux filesystem. 
-+# -+ -+NOVFS_VFS_MAJOR = 2 -+NOVFS_VFS_MINOR = 0 -+NOVFS_VFS_SUB = 0 -+NOVFS_VFS_RELEASE = 440 -+ -+EXTRA_CFLAGS += -DNOVFS_VFS_MAJOR=$(NOVFS_VFS_MAJOR) -+EXTRA_CFLAGS += -DNOVFS_VFS_MINOR=$(NOVFS_VFS_MINOR) -+EXTRA_CFLAGS += -DNOVFS_VFS_SUB=$(NOVFS_VFS_SUB) -+EXTRA_CFLAGS += -DNOVFS_VFS_PATCH=$(NOVFS_VFS_PATCH) -+EXTRA_CFLAGS += -DNOVFS_VFS_RELEASE=$(NOVFS_VFS_RELEASE) -+ -+obj-$(CONFIG_NOVFS) += novfs.o -+ -+novfs-objs := inode.o proc.o profile.o daemon.o file.o scope.o nwcapi.o -+ ---- /dev/null -+++ b/fs/novfs/commands.h -@@ -0,0 +1,955 @@ -+/* -+ * NetWare Redirector for Linux -+ * Author: James Turner/Richard Williams -+ * -+ * This file contains all defined commands. -+ * -+ * Copyright (C) 2005 Novell, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ */ -+ -+#ifndef __NOVFS_COMMANDS_H -+#define __NOVFS_COMMANDS_H -+ -+#define VFS_COMMAND_GET_CONNECTED_SERVER_LIST 0 -+#define VFS_COMMAND_GET_SERVER_VOLUME_LIST 1 -+#define VFS_COMMAND_VERIFY_FILE 2 -+#define VFS_COMMAND_OPEN_CONNECTION_BY_ADDR 3 -+#define VFS_COMMAND_LOGIN_IDENTITY 4 -+#define VFS_COMMAND_ENUMERATE_DIRECTORY 5 -+#define VFS_COMMAND_OPEN_FILE 6 -+#define VFS_COMMAND_CREATE_FILE 7 -+#define VFS_COMMAND_CLOSE_FILE 8 -+#define VFS_COMMAND_READ_FILE 9 -+#define VFS_COMMAND_WRITE_FILE 10 -+#define VFS_COMMAND_DELETE_FILE 11 -+#define VFS_COMMAND_CREATE_DIRECOTRY 12 -+#define VFS_COMMAND_START_ENUMERATE 13 -+#define VFS_COMMAND_END_ENUMERATE 14 -+#define VFS_COMMAND_LOGIN_USER 15 -+#define VFS_COMMAND_LOGOUT_USER 16 -+#define VFS_COMMAND_CREATE_CONTEXT 17 -+#define VFS_COMMAND_DESTROY_CONTEXT 18 -+#define VFS_COMMAND_SET_FILE_INFO 19 -+#define VFS_COMMAND_TRUNCATE_FILE 20 -+#define VFS_COMMAND_OPEN_CONNECTION_BY_NAME 21 -+#define VFS_COMMAND_XPLAT_CALL 22 -+#define 
VFS_COMMAND_RENAME_FILE 23 -+#define VFS_COMMAND_ENUMERATE_DIRECTORY_EX 24 -+#define VFS_COMMAND_GETPWUD 25 -+#define VFS_COMMAND_ENUM_XCONN 26 -+#define VFS_COMMAND_READ_STREAM 27 -+#define VFS_COMMAND_WRITE_STREAM 28 -+#define VFS_COMMAND_CLOSE_STREAM 29 -+#define VFS_COMMAND_GET_VERSION 30 -+#define VFS_COMMAND_SET_MOUNT_PATH 31 -+#define VFS_COMMAND_GET_USER_SPACE 32 -+#define VFS_COMMAND_DBG 33 -+#define VFS_COMMAND_GET_CACHE_FLAG 34 -+#define VFS_COMMAND_GET_EXTENDED_ATTRIBUTE 35 -+#define VFS_COMMAND_LIST_EXTENDED_ATTRIBUTES 36 -+#define VFS_COMMAND_SET_EXTENDED_ATTRIBUTE 37 -+#define VFS_COMMAND_SET_FILE_LOCK 38 -+ -+#define NWD_ACCESS_QUERY 0x00000001 -+#define NWD_ACCESS_READ 0x00000002 -+#define NWD_ACCESS_WRITE 0x00000004 -+#define NWD_ACCESS_EXECUTE 0x00000008 -+#define NWD_ACCESS_VALID 0x0000000F -+ -+/* -+ Share Mode -+ -+ A value of zero in a shared mode field specifies the caller -+ desires exclusive access to the object. -+*/ -+ -+#define NWD_SHARE_READ 0x00000001 -+#define NWD_SHARE_WRITE 0x00000002 -+#define NWD_SHARE_DELETE 0x00000004 -+#define NWD_SHARE_VALID 0x00000007 -+ -+/* -+ Creates a new file. The create API will fail if the specified -+ file already exists. -+*/ -+#define NWD_DISP_CREATE_NEW 0x00000001 -+ -+/* -+ Creates a new file. If the specified file already exists, -+ the create API will overwrite the old file and clear the -+ existing attributes. -+*/ -+#define NWD_DISP_CREATE_ALWAYS 0x00000002 -+ -+/* -+ Opens the file. The API will fail if the file does not exist. -+*/ -+#define NWD_DISP_OPEN_EXISTING 0x00000003 -+ -+/* -+ Opens the file. If the file does not exist, the API will -+ create the file. -+*/ -+#define NWD_DISP_OPEN_ALWAYS 0x00000004 -+ -+/* -+ Opens the file. When the file is opened the API will truncate -+ the stream to zero bytes. The API will fail if the file -+ does not exist. 
-+*/ -+#define NWD_DISP_TRUNCATE_EXISTING 0x00000005 -+#define NWD_DISP_MAXIMUM 0x00000005 -+ -+/* -+ Open/Create returned information values -+ -+ The bottom two bytes of NWD_ACTION are returned -+ as a value. All values are mutually exclusive. -+*/ -+ -+#define NWD_ACTION_OPENED 0x00000001 -+#define NWD_ACTION_CREATED 0x00000002 -+ -+#define MAX_IO_SIZE (1024 * 32) -+ -+#define MAX_XATTR_NAME_LEN 255 -+#define MAX_PATH_LENGTH 255 -+#define ENOATTR ENODATA -+/*===[ Type definitions ]=================================================*/ -+ -+/*===[ Function prototypes ]==============================================*/ -+ -+#pragma pack(push, 1) -+ -+/*struct _ncl_string -+{ -+ unsigned int type; -+ unsigned char *buffer; -+ unsigned int len; -+ -+} NclString, *PNclString; -+*/ -+struct ncl_string { -+ unsigned int type; -+ unsigned char *buffer; -+ u32 len; -+}; -+ -+struct nwd_string { -+ unsigned int type; -+ unsigned int len; -+ unsigned int boffset; -+}; -+ -+struct novfs_command_request_header { -+ unsigned int CommandType; -+ unsigned long SequenceNumber; -+ struct novfs_schandle SessionId; -+ -+}; -+ -+struct novfs_command_reply_header { -+ unsigned long Sequence_Number; -+ unsigned int ErrorCode; -+ -+}; -+ -+ -+struct novfs_delete_file_request { -+ struct novfs_command_request_header Command; -+ unsigned int isDirectory; -+ unsigned int pathlength; -+ unsigned char path[1]; -+}; -+ -+struct novfs_delete_file_reply { -+ struct novfs_command_reply_header Reply; -+}; -+ -+struct novfs_get_connected_server_list { -+ struct novfs_command_request_header Command; -+}; -+ -+struct novfs_get_connected_server_list_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned char List[1]; -+}; -+ -+struct novfs_get_connected_server_list_request_ex { -+ struct novfs_command_request_header Command; -+}; -+ -+struct novfs_get_connected_server_list_reply_ex { -+ -+ struct novfs_command_reply_header Reply; -+ unsigned int bufferLen; -+ unsigned char List[1]; -+ -+}; -+ 
-+struct novfs_get_server_volume_list { -+ struct novfs_command_request_header Command; -+ unsigned int Length; -+ unsigned char Name[1]; -+}; -+ -+struct novfs_get_server_volume_list_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned char List[1]; -+}; -+ -+struct novfs_verify_file_request { -+ struct novfs_command_request_header Command; -+ unsigned int pathLen; -+ unsigned char path[1]; -+ -+}; -+ -+struct novfs_verify_file_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned int lastAccessTime; -+ unsigned int modifyTime; -+ unsigned int createTime; -+ unsigned long long fileSize; -+ unsigned int fileMode; -+ -+}; -+ -+struct novfs_begin_enumerate_directory_request { -+ struct novfs_command_request_header Command; -+ unsigned int pathLen; -+ unsigned char path[1]; -+ -+}; -+ -+struct novfs_begin_enumerate_directory_reply { -+ struct novfs_command_reply_header Reply; -+ void *enumerateHandle; -+ -+}; -+ -+struct novfs_end_enumerate_directory_request { -+ struct novfs_command_request_header Command; -+ void *enumerateHandle; -+ -+}; -+ -+struct novfs_end_enumerate_directory_reply { -+ struct novfs_command_reply_header Reply; -+ -+}; -+ -+struct novfs_enumerate_directory_request { -+ struct novfs_command_request_header Command; -+ void *enumerateHandle; -+ unsigned int pathLen; -+ unsigned char path[1]; -+ -+}; -+ -+struct novfs_enumerate_directory_reply { -+ struct novfs_command_reply_header Reply; -+ void *enumerateHandle; -+ unsigned int lastAccessTime; -+ unsigned int modifyTime; -+ unsigned int createTime; -+ unsigned long long size; -+ unsigned int mode; -+ unsigned int nameLen; -+ unsigned char name[1]; -+ -+}; -+ -+struct novfs_enumerate_directory_ex_request { -+ struct novfs_command_request_header Command; -+ void *enumerateHandle; -+ unsigned int pathLen; -+ unsigned char path[1]; -+ -+}; -+ -+struct novfs_enumerate_directory_ex_data { -+ unsigned int length; -+ unsigned int lastAccessTime; -+ unsigned int modifyTime; -+ unsigned 
int createTime; -+ unsigned long long size; -+ unsigned int mode; -+ unsigned int nameLen; -+ unsigned char name[1]; -+ -+}; -+ -+struct novfs_enumerate_directory_ex_reply { -+ struct novfs_command_reply_header Reply; -+ void *enumerateHandle; -+ unsigned int enumCount; -+ -+}; -+ -+struct novfs_open_file_request { -+ struct novfs_command_request_header Command; -+ unsigned int access; /* File Access */ -+ unsigned int mode; /* Sharing Mode */ -+ unsigned int disp; /* Create Disposition */ -+ unsigned int pathLen; -+ unsigned char path[1]; -+ -+}; -+ -+struct novfs_open_file_reply { -+ struct novfs_command_reply_header Reply; -+ void *handle; -+ unsigned int lastAccessTime; -+ unsigned int modifyTime; -+ unsigned int createTime; -+ unsigned int attributes; -+ loff_t size; -+ -+}; -+ -+struct novfs_create_file_request { -+ -+ struct novfs_command_request_header Command; -+ unsigned int pathlength; -+ unsigned char path[1]; -+ -+}; -+ -+struct novfs_create_file_reply { -+ struct novfs_command_reply_header Reply; -+ -+}; -+ -+struct novfs_close_file_request { -+ struct novfs_command_request_header Command; -+ void *handle; -+ -+}; -+ -+struct novfs_close_file_reply { -+ struct novfs_command_reply_header Reply; -+ -+}; -+ -+struct novfs_read_file_request { -+ struct novfs_command_request_header Command; -+ void *handle; -+ loff_t offset; -+ size_t len; -+ -+}; -+ -+struct novfs_read_file_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned long long bytesRead; -+ unsigned char data[1]; -+ -+}; -+ -+struct novfs_write_file_request { -+ struct novfs_command_request_header Command; -+ void *handle; -+ loff_t offset; -+ size_t len; -+ unsigned char data[1]; -+ -+}; -+ -+struct novfs_write_file_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned long long bytesWritten; -+}; -+ -+struct novfs_read_stream_request { -+ struct novfs_command_request_header Command; -+ void *connection; -+ unsigned char handle[6]; -+ loff_t offset; -+ size_t len; -+}; -+ 
-+struct novfs_read_stream_reply { -+ struct novfs_command_reply_header Reply; -+ size_t bytesRead; -+ unsigned char data[1]; -+}; -+ -+struct novfs_write_stream_request { -+ struct novfs_command_request_header Command; -+ void *connection; -+ unsigned char handle[6]; -+ loff_t offset; -+ size_t len; -+ unsigned char data[1]; -+}; -+ -+struct novfs_write_stream_reply { -+ struct novfs_command_reply_header Reply; -+ size_t bytesWritten; -+}; -+ -+struct novfs_close_stream_request { -+ struct novfs_command_request_header Command; -+ void *connection; -+ unsigned char handle[6]; -+}; -+ -+struct novfs_close_stream_reply { -+ struct novfs_command_reply_header Reply; -+ -+}; -+ -+struct novfs_login_user_request { -+ struct novfs_command_request_header Command; -+ unsigned int srvNameType; -+ unsigned int serverLength; -+ unsigned int serverOffset; -+ unsigned int usrNameType; -+ unsigned int userNameLength; -+ unsigned int userNameOffset; -+ unsigned int pwdNameType; -+ unsigned int passwordLength; -+ unsigned int passwordOffset; -+ -+}; -+ -+struct novfs_login_user_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned int connectionHandle; -+ void *loginIdentity; -+ -+}; -+ -+struct novfs_logout_request { -+ struct novfs_command_request_header Command; -+ unsigned int length; -+ unsigned char Name[1]; -+ -+}; -+ -+struct novfs_logout_reply { -+ struct novfs_command_reply_header Reply; -+ -+}; -+ -+struct novfs_create_context_request { -+ struct novfs_command_request_header Command; -+ -+}; -+ -+struct novfs_create_context_reply { -+ struct novfs_command_reply_header Reply; -+ struct novfs_schandle SessionId; -+ -+}; -+ -+struct novfs_destroy_context_request { -+ struct novfs_command_request_header Command; -+ -+}; -+ -+struct novfs_destroy_context_reply { -+ struct novfs_command_reply_header Reply; -+ -+}; -+ -+/* -+ * Attribute flags. These should be or-ed together to figure out what -+ * has been changed! 
-+ */ -+#ifndef ATTR_MODE -+#define ATTR_MODE 1 -+#define ATTR_UID 2 -+#define ATTR_GID 4 -+#define ATTR_SIZE 8 -+#define ATTR_ATIME 16 -+#define ATTR_MTIME 32 -+#define ATTR_CTIME 64 -+#define ATTR_ATIME_SET 128 -+#define ATTR_MTIME_SET 256 -+#define ATTR_FORCE 512 /* Not a change, but a change it */ -+#define ATTR_ATTR_FLAG 1024 -+#endif -+ -+struct novfs_lnx_file_info { -+ unsigned int ia_valid; -+ unsigned int ia_mode; -+ uid_t ia_uid; -+ gid_t ia_gid; -+ loff_t ia_size; -+ time_t ia_atime; -+ time_t ia_mtime; -+ time_t ia_ctime; -+ unsigned int ia_attr_flags; -+}; -+ -+struct novfs_set_file_info_request { -+ struct novfs_command_request_header Command; -+ struct novfs_lnx_file_info fileInfo; -+ unsigned int pathlength; -+ char path[1]; -+}; -+ -+struct novfs_set_file_info_reply { -+ struct novfs_command_reply_header Reply; -+ -+}; -+ -+struct novfs_truncate_file_request { -+ struct novfs_command_request_header Command; -+ unsigned int pathLen; -+ char path[1]; -+ -+}; -+ -+struct novfs_truncate_file_reply { -+ struct novfs_command_reply_header Reply; -+ -+}; -+ -+struct novfs_getpwuid_request { -+ struct novfs_command_request_header Command; -+ unsigned int uid; -+}; -+ -+struct novfs_getpwuid_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned char UserName[1]; -+}; -+ -+struct novfs_get_version_request { -+ struct novfs_command_request_header Command; -+}; -+ -+struct novfs_get_version_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned char Version[1]; -+}; -+ -+struct novfs_set_mount_path { -+ struct novfs_command_request_header Command; -+ unsigned int PathLength; -+ unsigned char Path[1]; -+}; -+ -+struct novfs_set_mount_path_reply { -+ struct novfs_command_reply_header Reply; -+}; -+ -+struct novfs_get_user_space { -+ struct novfs_command_request_header Command; -+}; -+ -+struct novfs_get_user_space_reply { -+ struct novfs_command_reply_header Reply; -+ uint64_t TotalSpace; -+ uint64_t FreeSpace; -+ uint64_t TotalEnties; -+ 
uint64_t FreeEnties; -+}; -+ -+struct novfs_xplat_call_request { -+ struct novfs_command_request_header Command; -+ unsigned int NwcCommand; -+ unsigned long dataLen; -+ unsigned char data[1]; -+ -+}; -+ -+struct novfs_xplat_call_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned long dataLen; -+ unsigned char data[1]; -+ -+}; -+ -+/* XPlat NWC structures used by the daemon */ -+ -+struct nwd_open_conn_by_name { -+ void *ConnHandle; -+ unsigned int nameLen; -+ unsigned int oName; /* Ofset to the Name */ -+ unsigned int serviceLen; -+ unsigned int oServiceType; /* Offset to service Type; */ -+ unsigned int uConnFlags; -+ unsigned int uTranType; -+ void *newConnHandle; -+ -+}; -+ -+struct nwd_tran_addr { -+ unsigned int uTransportType; -+ unsigned int uAddressLength; -+ unsigned int oAddress; -+ -+}; -+ -+struct nwd_open_conn_by_addr { -+ void *ConnHandle; -+ unsigned int oServiceType; -+ unsigned int uConnFlags; -+ struct nwd_tran_addr TranAddr; -+ -+}; -+ -+struct nwd_close_conn { -+ void *ConnHandle; -+ -+}; -+ -+struct nwd_ncp_req { -+ void *ConnHandle; -+ unsigned int replyLen; -+ unsigned int requestLen; -+ unsigned int function; -+/* unsigned int subFunction; */ -+/* unsigned int verb; */ -+ unsigned int flags; -+ unsigned char data[1]; -+ -+}; -+ -+struct nwd_ncp_rep { -+ unsigned int replyLen; -+ unsigned char data[1]; -+ -+}; -+ -+struct nwc_auth_wid { -+ void *ConnHandle; -+ u32 AuthenticationId; -+ -+}; -+ -+struct nwc_unauthenticate { -+ void *ConnHandle; -+ unsigned int AuthenticationId; -+ -+}; -+ -+struct nwc_lisc_id { -+ void *ConnHandle; -+ -+}; -+ -+struct nwc_unlic_conn { -+ void *ConnHandle; -+ -+}; -+ -+struct nwd_get_id_info { -+ u32 AuthenticationId; -+ unsigned int AuthType; -+ unsigned int NameType; -+ unsigned short int ObjectType; -+ unsigned int IdentityFlags; -+ unsigned int domainLen; -+ unsigned int pDomainNameOffset; -+ unsigned int objectLen; -+ unsigned int pObjectNameOffset; -+ -+}; -+ -+struct nwc_lo_id { -+ u32 
AuthenticationId; -+ -+}; -+ -+struct novfs_rename_file_request { -+ struct novfs_command_request_header Command; -+ int directoryFlag; -+ unsigned int newnameLen; -+ unsigned char newname[256]; -+ unsigned int oldnameLen; -+ unsigned char oldname[256]; -+}; -+ -+struct novfs_rename_file_reply { -+ struct novfs_command_reply_header Reply; -+ -+}; -+ -+struct nwd_server_version { -+ unsigned int uMajorVersion; -+ unsigned short int uMinorVersion; -+ unsigned short int uRevision; -+}; -+ -+ -+#define MAX_ADDRESS_LENGTH 32 -+ -+struct tagNwdTranAddrEx { -+ unsigned int uTransportType; -+ unsigned int uAddressLength; -+ unsigned char Buffer[MAX_ADDRESS_LENGTH]; -+ -+}; -+ -+struct __NWD_CONN_INFO { -+ unsigned int uInfoVersion; -+ unsigned int uAuthenticationState; -+ unsigned int uBroadcastState; -+ u32 uConnectionReference; -+ unsigned int pTreeNameOffset; -+/* unsigned int pWorkGroupIdOffset; Not used */ -+ unsigned int uSecurityState; -+ unsigned int uConnectionNumber; -+ unsigned int uUserId; -+ unsigned int pServerNameOffset; -+ unsigned int uNdsState; -+ unsigned int uMaxPacketSize; -+ unsigned int uLicenseState; -+ unsigned int uPublicState; -+ unsigned int bcastState; -+ unsigned int pServiceTypeOffset; -+ unsigned int uDistance; -+ u32 uAuthId; -+ unsigned int uDisconnected; -+ struct nwd_server_version ServerVersion; -+ struct nwd_tran_addr TranAddress; -+}; -+ -+struct nwd_conn_info { -+ void *ConnHandle; -+ unsigned int uInfoLevel; -+ unsigned int uInfoLength; -+}; -+ -+struct nwd_open_conn_by_ref { -+ void *uConnReference; -+ unsigned int uConnFlags; -+ void *ConnHandle; -+ -+}; -+ -+struct nwd_get_reqversion { -+ unsigned int uMajorVersion; -+ unsigned int uMinorVersion; -+ unsigned int uRevision; -+ -+}; -+ -+struct nwd_scan_conn_info { -+ unsigned int uScanIndex; -+ unsigned int uScanInfoLevel; -+ unsigned int uScanInfoLen; -+ unsigned int uScanConnInfoOffset; -+ unsigned int uScanFlags; -+ unsigned int uReturnInfoLevel; -+ unsigned int 
uReturnInfoLength; -+ unsigned int uConnectionReference; -+ unsigned int uReturnConnInfoOffset; -+ -+}; -+ -+struct nwd_get_pref_ds_tree { -+ unsigned int uTreeLength; -+ unsigned int DsTreeNameOffset; -+ -+}; -+ -+struct nwd_set_pref_ds_tree { -+ unsigned int uTreeLength; -+ unsigned int DsTreeNameOffset; -+ -+}; -+ -+struct nwd_set_def_name_ctx { -+ unsigned int uTreeLength; -+ unsigned int TreeOffset; -+ unsigned int uNameLength; -+ unsigned int NameContextOffset; -+ -+}; -+ -+struct nwd_get_def_name_ctx { -+ unsigned int uTreeLength; -+ unsigned int TreeOffset; -+ unsigned int uNameLength; -+ unsigned int NameContextOffset; -+ -+}; -+ -+struct nwd_get_tree_monitored_conn_ref { -+ struct nwd_string TreeName; -+ void *uConnReference; -+ -+}; -+ -+struct nwd_enum_ids { -+ unsigned int Iterator; -+ unsigned int domainNameLen; -+ unsigned int domainNameOffset; -+ unsigned int AuthType; -+ unsigned int objectNameLen; -+ unsigned int objectNameOffset; -+ unsigned int NameType; -+ unsigned short int ObjectType; -+ unsigned int IdentityFlags; -+ u32 AuthenticationId; -+ -+}; -+ -+struct nwd_change_key { -+ unsigned int domainNameOffset; -+ unsigned int domainNameLen; -+ unsigned int AuthType; -+ unsigned int objectNameOffset; -+ unsigned int objectNameLen; -+ unsigned int NameType; -+ unsigned short int ObjectType; -+ unsigned int verifyPasswordOffset; -+ unsigned int verifyPasswordLen; -+ unsigned int newPasswordOffset; -+ unsigned int newPasswordLen; -+ -+}; -+ -+struct nwd_set_primary_conn { -+ void *ConnHandle; -+ -+}; -+ -+struct nwd_get_bcast_notification { -+ unsigned int uMessageFlags; -+ void *uConnReference; -+ unsigned int messageLen; -+ char message[1]; -+ -+}; -+ -+struct nwd_set_conn_info { -+ void *ConnHandle; -+ unsigned int uInfoLevel; -+ unsigned int uInfoLength; -+ unsigned int offsetConnInfo; -+ -+}; -+ -+struct novfs_debug_request { -+ struct novfs_command_request_header Command; -+ int cmdlen; -+ char dbgcmd[1]; -+ -+}; -+ -+struct 
novfs_debug_reply { -+ struct novfs_command_reply_header Reply; -+ -+}; -+ -+struct nwd_set_key { -+ void *ConnHandle; -+ unsigned int AuthenticationId; -+ unsigned int objectNameLen; -+ unsigned int objectNameOffset; -+ unsigned short int ObjectType; -+ unsigned int newPasswordLen; -+ unsigned int newPasswordOffset; -+ -+}; -+ -+struct nwd_verify_key { -+ unsigned int AuthType; -+ unsigned int NameType; -+ unsigned short int ObjectType; -+ unsigned int domainNameLen; -+ unsigned int domainNameOffset; -+ unsigned int objectNameLen; -+ unsigned int objectNameOffset; -+ unsigned int verifyPasswordLen; -+ unsigned int verifyPasswordOffset; -+ -+}; -+ -+struct novfs_get_cache_flag { -+ struct novfs_command_request_header Command; -+ int pathLen; -+ unsigned char path[0]; -+ -+}; -+ -+struct novfs_get_cache_flag_reply { -+ struct novfs_command_reply_header Reply; -+ int CacheFlag; -+ -+}; -+ -+struct novfs_xa_list_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned char *pData; -+ -+}; -+ -+struct novfs_xa_get_request { -+ struct novfs_command_request_header Command; -+ unsigned int pathLen; -+ unsigned int nameLen; -+ unsigned char data[1]; //hold path, attribute name -+ -+}; -+ -+struct novfs_xa_get_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned char *pData; -+ -+}; -+ -+struct novfs_xa_set_request { -+ struct novfs_command_request_header Command; -+ unsigned int TtlWriteDataSize; -+ unsigned int WritePosition; -+ int flags; -+ unsigned int pathLen; -+ unsigned int nameLen; -+ unsigned int valueLen; -+ unsigned char data[1]; //hold path, attribute name, value data -+ -+}; -+ -+struct novfs_xa_set_reply { -+ struct novfs_command_reply_header Reply; -+ unsigned char *pData; -+ -+}; -+ -+struct novfs_set_file_lock_request { -+ struct novfs_command_request_header Command; -+ void *handle; -+ unsigned char fl_type; -+ loff_t fl_start; -+ loff_t fl_len; -+ -+}; -+ -+struct novfs_set_file_lock_reply { -+ struct novfs_command_reply_header Reply; 
-+ -+}; -+ -+ -+struct novfs_scope_list{ -+ struct list_head ScopeList; -+ struct novfs_schandle ScopeId; -+ struct novfs_schandle SessionId; -+ pid_t ScopePid; -+ struct task_struct *ScopeTask; -+ unsigned int ScopeHash; -+ uid_t ScopeUid; -+ uint64_t ScopeUSize; -+ uint64_t ScopeUFree; -+ uint64_t ScopeUTEnties; -+ uint64_t ScopeUAEnties; -+ int ScopeUserNameLength; -+ unsigned char ScopeUserName[32]; -+}; -+ -+#pragma pack(pop) -+ -+#endif /* __NOVFS_COMMANDS_H */ ---- /dev/null -+++ b/fs/novfs/daemon.c -@@ -0,0 +1,2085 @@ -+/* -+ * Novell NCP Redirector for Linux -+ * Author: James Turner -+ * -+ * This file contains all the functions necessary for sending commands to our -+ * daemon module. -+ * -+ * Copyright (C) 2005 Novell, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "vfs.h" -+#include "nwcapi.h" -+#include "commands.h" -+#include "nwerror.h" -+ -+#define QUEUE_SENDING 0 -+#define QUEUE_WAITING 1 -+#define QUEUE_TIMEOUT 2 -+#define QUEUE_ACKED 3 -+#define QUEUE_DONE 4 -+ -+#define TIMEOUT_VALUE 10 -+ -+#define DH_TYPE_UNDEFINED 0 -+#define DH_TYPE_STREAM 1 -+#define DH_TYPE_CONNECTION 2 -+ -+struct daemon_queue { -+ struct list_head list; /* Must be first entry */ -+ spinlock_t lock; /* Used to control access to list */ -+ struct semaphore semaphore; /* Used to signal when data is available */ -+}; -+ -+struct daemon_cmd { -+ struct list_head list; /* Must be first entry */ -+ atomic_t reference; -+ unsigned int status; -+ unsigned int flags; -+ struct semaphore semaphore; -+ unsigned long sequence; -+ struct timer_list timer; -+ void *request; -+ unsigned long reqlen; -+ void *data; -+ int datalen; -+ void *reply; -+ unsigned long replen; -+}; -+ -+struct daemon_handle { -+ struct list_head list; -+ rwlock_t lock; -+ struct novfs_schandle session; -+}; -+ -+struct daemon_resource { -+ struct list_head list; -+ int type; -+ void *connection; -+ unsigned char handle[6]; -+ mode_t mode; -+ loff_t size; -+}; -+ -+struct drive_map { -+ struct list_head list; /* Must be first item */ -+ struct novfs_schandle session; -+ unsigned long hash; -+ int namelen; -+ char name[1]; -+}; -+ -+static void Queue_get(struct daemon_cmd * Que); -+static void Queue_put(struct daemon_cmd * Que); -+static void RemoveDriveMaps(void); -+static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle); -+static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle); -+static int set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session); -+static int unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session); 
-+static int NwdGetMountPath(struct novfs_xplat *pdata); -+static long local_unlink(const char *pathname); -+ -+ -+/*===[ Global variables ]=================================================*/ -+static struct daemon_queue Daemon_Queue; -+ -+static DECLARE_WAIT_QUEUE_HEAD(Read_waitqueue); -+ -+static atomic_t Sequence = ATOMIC_INIT(-1); -+static atomic_t Daemon_Open_Count = ATOMIC_INIT(0); -+ -+static unsigned long Daemon_Command_Timeout = TIMEOUT_VALUE; -+ -+static DECLARE_MUTEX(DriveMapLock); -+static LIST_HEAD(DriveMapList); -+ -+int novfs_max_iosize = PAGE_SIZE; -+ -+void novfs_daemon_queue_init() -+{ -+ INIT_LIST_HEAD(&Daemon_Queue.list); -+ spin_lock_init(&Daemon_Queue.lock); -+ init_MUTEX_LOCKED(&Daemon_Queue.semaphore); -+} -+ -+void novfs_daemon_queue_exit(void) -+{ -+ /* Does nothing for now but we maybe should clear the queue. */ -+} -+ -+/*++======================================================================*/ -+static void novfs_daemon_timer(unsigned long data) -+{ -+ struct daemon_cmd *que = (struct daemon_cmd *) data; -+ -+ if (QUEUE_ACKED != que->status) { -+ que->status = QUEUE_TIMEOUT; -+ } -+ up(&que->semaphore); -+} -+ -+/*++======================================================================*/ -+int Queue_Daemon_Command(void *request, -+ unsigned long reqlen, -+ void *data, -+ int dlen, -+ void **reply, unsigned long * replen, int interruptible) -+{ -+ struct daemon_cmd *que; -+ int retCode = 0; -+ uint64_t ts1, ts2; -+ -+ ts1 = get_nanosecond_time(); -+ -+ DbgPrint("0x%p %d", request, reqlen); -+ -+ if (atomic_read(&Daemon_Open_Count)) { -+ -+ que = kmalloc(sizeof(*que), GFP_KERNEL); -+ -+ DbgPrint("que=0x%p", que); -+ if (que) { -+ atomic_set(&que->reference, 0); -+ que->status = QUEUE_SENDING; -+ que->flags = 0; -+ -+ init_MUTEX_LOCKED(&que->semaphore); -+ -+ que->sequence = atomic_inc_return(&Sequence); -+ -+ ((struct novfs_command_request_header *) request)->SequenceNumber = -+ que->sequence; -+ -+ /* -+ * Setup and start que timer -+ 
*/ -+ init_timer(&que->timer); -+ que->timer.expires = jiffies + (HZ * Daemon_Command_Timeout); -+ que->timer.data = (unsigned long) que; -+ que->timer.function = novfs_daemon_timer; -+ add_timer(&que->timer); -+ -+ /* -+ * Setup request -+ */ -+ que->request = request; -+ que->reqlen = reqlen; -+ que->data = data; -+ que->datalen = dlen; -+ que->reply = NULL; -+ que->replen = 0; -+ -+ /* -+ * Added entry to queue. -+ */ -+ /* -+ * Check to see if interruptible and set flags. -+ */ -+ if (interruptible) { -+ que->flags |= INTERRUPTIBLE; -+ } -+ -+ Queue_get(que); -+ -+ spin_lock(&Daemon_Queue.lock); -+ list_add_tail(&que->list, &Daemon_Queue.list); -+ spin_unlock(&Daemon_Queue.lock); -+ -+ /* -+ * Signal that there is data to be read -+ */ -+ up(&Daemon_Queue.semaphore); -+ -+ /* -+ * Give a change to the other processes. -+ */ -+ yield(); -+ -+ /* -+ * Block waiting for reply or timeout -+ */ -+ down(&que->semaphore); -+ -+ if (QUEUE_ACKED == que->status) { -+ que->status = QUEUE_WAITING; -+ mod_timer(&que->timer, -+ jiffies + -+ (HZ * 2 * Daemon_Command_Timeout)); -+ if (interruptible) { -+ retCode = -+ down_interruptible(&que->semaphore); -+ } else { -+ down(&que->semaphore); -+ } -+ } -+ -+ /* -+ * Delete timer -+ */ -+ del_timer(&que->timer); -+ -+ /* -+ * Check for timeout -+ */ -+ if ((QUEUE_TIMEOUT == que->status) -+ && (NULL == que->reply)) { -+ DbgPrint("Timeout"); -+ retCode = -ETIME; -+ } -+ *reply = que->reply; -+ *replen = que->replen; -+ -+ /* -+ * Remove item from queue -+ */ -+ Queue_put(que); -+ -+ } else { /* Error case with no memory */ -+ -+ retCode = -ENOMEM; -+ *reply = NULL; -+ *replen = 0; -+ } -+ } else { -+ retCode = -EIO; -+ *reply = NULL; -+ *replen = 0; -+ -+ } -+ ts2 = get_nanosecond_time(); -+ ts2 = ts2 - ts1; -+ -+ DbgPrint("%llu retCode=%d", ts2, retCode); -+ return (retCode); -+} -+ -+static void Queue_get(struct daemon_cmd * Que) -+{ -+ DbgPrint("que=0x%p %d", Que, atomic_read(&Que->reference)); -+ atomic_inc(&Que->reference); 
-+} -+ -+static void Queue_put(struct daemon_cmd * Que) -+{ -+ -+ DbgPrint("que=0x%p %d", Que, atomic_read(&Que->reference)); -+ spin_lock(&Daemon_Queue.lock); -+ -+ if (atomic_dec_and_test(&Que->reference)) { -+ /* -+ * Remove item from queue -+ */ -+ list_del(&Que->list); -+ spin_unlock(&Daemon_Queue.lock); -+ -+ /* -+ * Free item memory -+ */ -+ kfree(Que); -+ } else { -+ spin_unlock(&Daemon_Queue.lock); -+ } -+} -+ -+struct daemon_cmd *get_next_queue(int Set_Queue_Waiting) -+{ -+ struct daemon_cmd *que; -+ -+ DbgPrint("que=0x%p", Daemon_Queue.list.next); -+ -+ spin_lock(&Daemon_Queue.lock); -+ que = (struct daemon_cmd *) Daemon_Queue.list.next; -+ -+ while (que && (que != (struct daemon_cmd *) & Daemon_Queue.list.next) -+ && (que->status != QUEUE_SENDING)) { -+ que = (struct daemon_cmd *) que->list.next; -+ } -+ -+ if ((NULL == que) || (que == (struct daemon_cmd *) & Daemon_Queue.list) -+ || (que->status != QUEUE_SENDING)) { -+ que = NULL; -+ } else if (Set_Queue_Waiting) { -+ que->status = QUEUE_WAITING; -+ } -+ -+ if (que) { -+ atomic_inc(&que->reference); -+ } -+ -+ spin_unlock(&Daemon_Queue.lock); -+ -+ DbgPrint("return=0x%p", que); -+ return (que); -+} -+ -+static struct daemon_cmd *find_queue(unsigned long sequence) -+{ -+ struct daemon_cmd *que; -+ -+ DbgPrint("0x%x", sequence); -+ -+ spin_lock(&Daemon_Queue.lock); -+ que = (struct daemon_cmd *) Daemon_Queue.list.next; -+ -+ while (que && (que != (struct daemon_cmd *) & Daemon_Queue.list.next) -+ && (que->sequence != sequence)) { -+ que = (struct daemon_cmd *) que->list.next; -+ } -+ -+ if ((NULL == que) -+ || (que == (struct daemon_cmd *) & Daemon_Queue.list.next) -+ || (que->sequence != sequence)) { -+ que = NULL; -+ } -+ -+ if (que) { -+ atomic_inc(&que->reference); -+ } -+ -+ spin_unlock(&Daemon_Queue.lock); -+ -+ DbgPrint("return 0x%p", que); -+ return (que); -+} -+ -+int novfs_daemon_open_control(struct inode *Inode, struct file *File) -+{ -+ DbgPrint("pid=%d Count=%d", current->pid, -+ 
atomic_read(&Daemon_Open_Count)); -+ atomic_inc(&Daemon_Open_Count); -+ -+ return (0); -+} -+ -+int novfs_daemon_close_control(struct inode *Inode, struct file *File) -+{ -+ struct daemon_cmd *que; -+ -+ DbgPrint("pid=%d Count=%d", current->pid, -+ atomic_read(&Daemon_Open_Count)); -+ -+ if (atomic_dec_and_test(&Daemon_Open_Count)) { -+ /* -+ * Signal any pending que itmes. -+ */ -+ -+ spin_lock(&Daemon_Queue.lock); -+ que = (struct daemon_cmd *) Daemon_Queue.list.next; -+ -+ while (que -+ && (que != (struct daemon_cmd *) & Daemon_Queue.list.next) -+ && (que->status != QUEUE_DONE)) { -+ que->status = QUEUE_TIMEOUT; -+ up(&que->semaphore); -+ -+ que = (struct daemon_cmd *) que->list.next; -+ } -+ spin_unlock(&Daemon_Queue.lock); -+ -+ RemoveDriveMaps(); -+ -+ novfs_scope_cleanup(); -+ } -+ -+ return (0); -+} -+ -+ssize_t novfs_daemon_cmd_send(struct file * file, char *buf, size_t len, loff_t * off) -+{ -+ struct daemon_cmd *que; -+ size_t retValue = 0; -+ int Finished = 0; -+ struct novfs_data_list *dlist; -+ int i, dcnt, bcnt, ccnt, error; -+ char *vadr; -+ unsigned long cpylen; -+ -+ DbgPrint("%u %lld", len, *off); -+ if (len > novfs_max_iosize) { -+ novfs_max_iosize = len; -+ } -+ -+ while (!Finished) { -+ que = get_next_queue(1); -+ DbgPrint("0x%p", que); -+ if (que) { -+ retValue = que->reqlen; -+ if (retValue > len) { -+ retValue = len; -+ } -+ if (retValue > 0x80) -+ novfs_dump(0x80, que->request); -+ else -+ novfs_dump(retValue, que->request); -+ -+ cpylen = copy_to_user(buf, que->request, retValue); -+ if (que->datalen && (retValue < len)) { -+ buf += retValue; -+ dlist = que->data; -+ dcnt = que->datalen; -+ for (i = 0; i < dcnt; i++, dlist++) { -+ if (DLREAD == dlist->rwflag) { -+ bcnt = dlist->len; -+ DbgPrint("page=0x%p " -+ "offset=0x%p len=%d", -+ i, dlist->page, -+ dlist->offset, dlist->len); -+ if ((bcnt + retValue) <= len) { -+ void *km_adr = NULL; -+ -+ if (dlist->page) { -+ km_adr = -+ kmap(dlist-> -+ page); -+ vadr = km_adr; -+ vadr += -+ 
(unsigned long) -+ dlist-> -+ offset; -+ } else { -+ vadr = -+ dlist-> -+ offset; -+ } -+ -+ ccnt = -+ copy_to_user(buf, -+ vadr, -+ bcnt); -+ -+ DbgPrint("Copy %d from 0x%p to 0x%p.", -+ bcnt, vadr, buf); -+ if (bcnt > 0x80) -+ novfs_dump(0x80, -+ vadr); -+ else -+ novfs_dump(bcnt, -+ vadr); -+ -+ if (km_adr) { -+ kunmap(dlist-> -+ page); -+ } -+ -+ retValue += bcnt; -+ buf += bcnt; -+ } else { -+ break; -+ } -+ } -+ } -+ } -+ Queue_put(que); -+ break; -+ } -+ -+ if (O_NONBLOCK & file->f_flags) { -+ retValue = -EAGAIN; -+ break; -+ } else { -+ if ((error = -+ down_interruptible(&Daemon_Queue.semaphore))) { -+ DbgPrint("after down_interruptible error...%d", -+ error); -+ retValue = -EINTR; -+ break; -+ } -+ DbgPrint("after down_interruptible"); -+ } -+ } -+ -+ *off = *off; -+ -+ DbgPrint("return 0x%x", retValue); -+ -+ return (retValue); -+} -+ -+ssize_t novfs_daemon_recv_reply(struct file *file, const char *buf, size_t nbytes, loff_t * ppos) -+{ -+ struct daemon_cmd *que; -+ size_t retValue = 0; -+ void *reply; -+ unsigned long sequence, cpylen; -+ -+ struct novfs_data_list *dlist; -+ char *vadr; -+ int i; -+ -+ DbgPrint("buf=0x%p nbytes=%d ppos=%llx", buf, -+ nbytes, *ppos); -+ -+ /* -+ * Get sequence number from reply buffer -+ */ -+ -+ cpylen = copy_from_user(&sequence, buf, sizeof(sequence)); -+ -+ /* -+ * Find item based on sequence number -+ */ -+ que = find_queue(sequence); -+ -+ DbgPrint("0x%x 0x%p %d", sequence, que, nbytes); -+ if (que) { -+ do { -+ retValue = nbytes; -+ /* -+ * Ack packet from novfsd. 
Remove timer and -+ * return -+ */ -+ if (nbytes == sizeof(sequence)) { -+ que->status = QUEUE_ACKED; -+ break; -+ } -+ -+ if (NULL != (dlist = que->data)) { -+ int thiscopy, left = nbytes; -+ retValue = 0; -+ -+ DbgPrint("dlist=0x%p count=%d", -+ dlist, que->datalen); -+ for (i = 0; -+ (i < que->datalen) && (retValue < nbytes); -+ i++, dlist++) { -+ __DbgPrint("\n" -+ " dlist[%d].page: 0x%p\n" -+ " dlist[%d].offset: 0x%p\n" -+ " dlist[%d].len: 0x%x\n" -+ " dlist[%d].rwflag: 0x%x\n", -+ i, dlist->page, i, -+ dlist->offset, i, dlist->len, -+ i, dlist->rwflag); -+ -+ if (DLWRITE == dlist->rwflag) { -+ void *km_adr = NULL; -+ -+ if (dlist->page) { -+ km_adr = -+ kmap(dlist->page); -+ vadr = km_adr; -+ vadr += -+ (unsigned long) dlist-> -+ offset; -+ } else { -+ vadr = dlist->offset; -+ } -+ -+ thiscopy = dlist->len; -+ if (thiscopy > left) { -+ thiscopy = left; -+ dlist->len = left; -+ } -+ cpylen = -+ copy_from_user(vadr, buf, -+ thiscopy); -+ -+ if (thiscopy > 0x80) -+ novfs_dump(0x80, vadr); -+ else -+ novfs_dump(thiscopy, vadr); -+ -+ if (km_adr) { -+ kunmap(dlist->page); -+ } -+ -+ left -= thiscopy; -+ retValue += thiscopy; -+ buf += thiscopy; -+ } -+ } -+ que->replen = retValue; -+ } else { -+ reply = kmalloc(nbytes, GFP_KERNEL); -+ DbgPrint("reply=0x%p", reply); -+ if (reply) { -+ retValue = nbytes; -+ que->reply = reply; -+ que->replen = nbytes; -+ -+ retValue -= -+ copy_from_user(reply, buf, -+ retValue); -+ if (retValue > 0x80) -+ novfs_dump(0x80, reply); -+ else -+ novfs_dump(retValue, reply); -+ -+ } else { -+ retValue = -ENOMEM; -+ } -+ } -+ -+ /* -+ * Set status that packet is done. 
-+ */ -+ que->status = QUEUE_DONE; -+ -+ } while (0); -+ up(&que->semaphore); -+ Queue_put(que); -+ } -+ -+ DbgPrint("return 0x%x", retValue); -+ -+ return (retValue); -+} -+ -+int novfs_do_login(struct ncl_string *Server, struct ncl_string *Username, -+struct ncl_string *Password, void **lgnId, struct novfs_schandle *Session) -+{ -+ struct novfs_login_user_request *cmd; -+ struct novfs_login_user_reply *reply; -+ unsigned long replylen = 0; -+ int retCode, cmdlen, datalen; -+ unsigned char *data; -+ -+ datalen = Server->len + Username->len + Password->len; -+ cmdlen = sizeof(*cmd) + datalen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ data = (unsigned char *) cmd + sizeof(*cmd); -+ cmd->Command.CommandType = VFS_COMMAND_LOGIN_USER; -+ cmd->Command.SequenceNumber = 0; -+ memcpy(&cmd->Command.SessionId, Session, sizeof(*Session)); -+ -+ cmd->srvNameType = Server->type; -+ cmd->serverLength = Server->len; -+ cmd->serverOffset = (unsigned long) (data - (unsigned char *) cmd); -+ memcpy(data, Server->buffer, Server->len); -+ data += Server->len; -+ -+ cmd->usrNameType = Username->type; -+ cmd->userNameLength = Username->len; -+ cmd->userNameOffset = (unsigned long) (data - (unsigned char *) cmd); -+ memcpy(data, Username->buffer, Username->len); -+ data += Username->len; -+ -+ cmd->pwdNameType = Password->type; -+ cmd->passwordLength = Password->len; -+ cmd->passwordOffset = (unsigned long) (data - (unsigned char *) cmd); -+ memcpy(data, Password->buffer, Password->len); -+ data += Password->len; -+ -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ if (reply->Reply.ErrorCode) { -+ retCode = reply->Reply.ErrorCode; -+ } else { -+ retCode = 0; -+ if (lgnId) { -+ *lgnId = reply->loginIdentity; -+ } -+ } -+ kfree(reply); -+ } -+ memset(cmd, 0, cmdlen); -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_daemon_logout(struct qstr *Server, struct novfs_schandle 
*Session) -+{ -+ struct novfs_logout_request *cmd; -+ struct novfs_logout_reply *reply; -+ unsigned long replylen = 0; -+ int retCode, cmdlen; -+ -+ cmdlen = offsetof(struct novfs_logout_request, Name) + Server->len; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_LOGOUT_USER; -+ cmd->Command.SequenceNumber = 0; -+ memcpy(&cmd->Command.SessionId, Session, sizeof(*Session)); -+ cmd->length = Server->len; -+ memcpy(cmd->Name, Server->name, Server->len); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); -+ if (reply) { -+ if (reply->Reply.ErrorCode) { -+ retCode = -EIO; -+ } -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_daemon_getpwuid(uid_t uid, int unamelen, char *uname) -+{ -+ struct novfs_getpwuid_request cmd; -+ struct novfs_getpwuid_reply *reply; -+ unsigned long replylen = 0; -+ int retCode; -+ -+ cmd.Command.CommandType = VFS_COMMAND_GETPWUD; -+ cmd.Command.SequenceNumber = 0; -+ SC_INITIALIZE(cmd.Command.SessionId); -+ cmd.uid = uid; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ if (reply->Reply.ErrorCode) { -+ retCode = -EIO; -+ } else { -+ retCode = 0; -+ memset(uname, 0, unamelen); -+ replylen = -+ replylen - offsetof(struct -+ novfs_getpwuid_reply, UserName); -+ if (replylen) { -+ if (replylen > unamelen) { -+ retCode = -EINVAL; -+ replylen = unamelen - 1; -+ } -+ memcpy(uname, reply->UserName, replylen); -+ } -+ } -+ kfree(reply); -+ } -+ return (retCode); -+ -+} -+ -+int novfs_daemon_getversion(char *Buf, int length) -+{ -+ struct novfs_get_version_request cmd; -+ struct novfs_get_version_reply *reply; -+ unsigned long replylen = 0; -+ int retVal = 0; -+ -+ cmd.Command.CommandType = VFS_COMMAND_GET_VERSION; -+ cmd.Command.SequenceNumber = 0; -+ SC_INITIALIZE(cmd.Command.SessionId); -+ -+ Queue_Daemon_Command(&cmd, 
sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ if (reply->Reply.ErrorCode) { -+ retVal = -EIO; -+ } else { -+ retVal = -+ replylen - offsetof(struct -+ novfs_get_version_reply, Version); -+ if (retVal < length) { -+ memcpy(Buf, reply->Version, retVal); -+ Buf[retVal] = '\0'; -+ } -+ } -+ kfree(reply); -+ } -+ return (retVal); -+ -+} -+ -+static int daemon_login(struct novfs_login *Login, struct novfs_schandle *Session) -+{ -+ int retCode = -ENOMEM; -+ struct novfs_login lLogin; -+ struct ncl_string server; -+ struct ncl_string username; -+ struct ncl_string password; -+ -+ if (!copy_from_user(&lLogin, Login, sizeof(lLogin))) { -+ server.buffer = kmalloc(lLogin.Server.length, GFP_KERNEL); -+ if (server.buffer) { -+ server.len = lLogin.Server.length; -+ server.type = NWC_STRING_TYPE_ASCII; -+ if (!copy_from_user((void *)server.buffer, lLogin.Server.data, server.len)) { -+ username.buffer = kmalloc(lLogin.UserName.length, GFP_KERNEL); -+ if (username.buffer) { -+ username.len = lLogin.UserName.length; -+ username.type = NWC_STRING_TYPE_ASCII; -+ if (!copy_from_user((void *)username.buffer, lLogin.UserName.data, username.len)) { -+ password.buffer = kmalloc(lLogin.Password.length, GFP_KERNEL); -+ if (password.buffer) -+ { -+ password.len = lLogin.Password.length; -+ password.type = NWC_STRING_TYPE_ASCII; -+ if (!copy_from_user((void *)password.buffer, lLogin.Password.data, password.len)) { -+ retCode = novfs_do_login (&server, &username, &password, NULL, Session); -+ if (!retCode) { -+ char *username; -+ username = novfs_scope_get_username(); -+ if (username) { -+ novfs_add_to_root(username); -+ } -+ } -+ } -+ kfree(password.buffer); -+ } -+ } -+ kfree(username.buffer); -+ } -+ } -+ kfree(server.buffer); -+ } -+ } -+ -+ return (retCode); -+} -+ -+static int daemon_logout(struct novfs_logout *Logout, struct novfs_schandle *Session) -+{ -+ struct novfs_logout lLogout; -+ struct qstr server; -+ int retCode = 0; -+ -+ if 
(copy_from_user(&lLogout, Logout, sizeof(lLogout))) -+ return -EFAULT; -+ server.name = kmalloc(lLogout.Server.length, GFP_KERNEL); -+ if (!server.name) -+ return -ENOMEM; -+ server.len = lLogout.Server.length; -+ if (copy_from_user((void *)server.name, lLogout.Server.data, server.len)) -+ goto exit; -+ retCode = novfs_daemon_logout(&server, Session); -+exit: -+ kfree(server.name); -+ return (retCode); -+} -+ -+int novfs_daemon_create_sessionId(struct novfs_schandle * SessionId) -+{ -+ struct novfs_create_context_request cmd; -+ struct novfs_create_context_reply *reply; -+ unsigned long replylen = 0; -+ int retCode = 0; -+ -+ DbgPrint("%d", current->pid); -+ -+ cmd.Command.CommandType = VFS_COMMAND_CREATE_CONTEXT; -+ cmd.Command.SequenceNumber = 0; -+ SC_INITIALIZE(cmd.Command.SessionId); -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ if (!reply->Reply.ErrorCode -+ && replylen > sizeof(struct novfs_command_reply_header)) { -+ *SessionId = reply->SessionId; -+ retCode = 0; -+ } else { -+ SessionId->hTypeId = 0; -+ SessionId->hId = 0; -+ retCode = -EIO; -+ } -+ kfree(reply); -+ } -+ DbgPrint("SessionId=0x%llx", *SessionId); -+ return (retCode); -+} -+ -+int novfs_daemon_destroy_sessionId(struct novfs_schandle SessionId) -+{ -+ struct novfs_destroy_context_request cmd; -+ struct novfs_destroy_context_reply *reply; -+ unsigned long replylen = 0; -+ int retCode = 0; -+ -+ DbgPrint("0x%p:%p", SessionId.hTypeId, -+ SessionId.hId); -+ -+ cmd.Command.CommandType = VFS_COMMAND_DESTROY_CONTEXT; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ if (!reply->Reply.ErrorCode) { -+ struct drive_map *dm; -+ struct list_head *list; -+ -+ retCode = 0; -+ -+ /* -+ * When destroying the session check to see if there are any -+ * mapped drives. 
If there are then remove them. -+ */ -+ down(&DriveMapLock); -+ list_for_each(list, &DriveMapList) { -+ dm = list_entry(list, struct drive_map, list); -+ if (SC_EQUAL(SessionId, dm->session)) { -+ local_unlink(dm->name); -+ list = list->prev; -+ list_del(&dm->list); -+ kfree(dm); -+ } -+ -+ } -+ up(&DriveMapLock); -+ -+ } else { -+ retCode = -EIO; -+ } -+ kfree(reply); -+ } -+ return (retCode); -+} -+ -+int novfs_daemon_get_userspace(struct novfs_schandle SessionId, uint64_t * TotalSize, -+ uint64_t * Free, uint64_t * TotalEnties, -+ uint64_t * FreeEnties) -+{ -+ struct novfs_get_user_space cmd; -+ struct novfs_get_user_space_reply *reply; -+ unsigned long replylen = 0; -+ int retCode = 0; -+ -+ DbgPrint("0x%p:%p", SessionId.hTypeId, -+ SessionId.hId); -+ -+ cmd.Command.CommandType = VFS_COMMAND_GET_USER_SPACE; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ if (!reply->Reply.ErrorCode) { -+ -+ __DbgPrint("TotalSpace: %llu\n", reply->TotalSpace); -+ __DbgPrint("FreeSpace: %llu\n", reply->FreeSpace); -+ __DbgPrint("TotalEnties: %llu\n", reply->TotalEnties); -+ __DbgPrint("FreeEnties: %llu\n", reply->FreeEnties); -+ -+ if (TotalSize) -+ *TotalSize = reply->TotalSpace; -+ if (Free) -+ *Free = reply->FreeSpace; -+ if (TotalEnties) -+ *TotalEnties = reply->TotalEnties; -+ if (FreeEnties) -+ *FreeEnties = reply->FreeEnties; -+ retCode = 0; -+ } else { -+ retCode = -EIO; -+ } -+ kfree(reply); -+ } -+ return (retCode); -+} -+ -+int novfs_daemon_set_mnt_point(char *Path) -+{ -+ struct novfs_set_mount_path *cmd; -+ struct novfs_set_mount_path_reply *reply; -+ unsigned long replylen, cmdlen; -+ int retCode = -ENOMEM; -+ -+ DbgPrint("%s", Path); -+ -+ replylen = strlen(Path); -+ -+ cmdlen = sizeof(struct novfs_set_mount_path) + replylen; -+ -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ 
cmd->Command.CommandType = VFS_COMMAND_SET_MOUNT_PATH; -+ cmd->Command.SequenceNumber = 0; -+ SC_INITIALIZE(cmd->Command.SessionId); -+ cmd->PathLength = replylen; -+ -+ strcpy(cmd->Path, Path); -+ -+ replylen = 0; -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ if (!reply->Reply.ErrorCode) { -+ retCode = 0; -+ } else { -+ retCode = -EIO; -+ } -+ kfree(reply); -+ } -+ kfree(cmd); -+ return retCode; -+} -+ -+int novfs_daemon_debug_cmd_send(char *Command) -+{ -+ struct novfs_debug_request cmd; -+ struct novfs_debug_reply *reply; -+ struct novfs_debug_reply lreply; -+ unsigned long replylen, cmdlen; -+ struct novfs_data_list dlist[2]; -+ -+ int retCode = -ENOMEM; -+ -+ DbgPrint("%s", Command); -+ -+ dlist[0].page = NULL; -+ dlist[0].offset = (char *)Command; -+ dlist[0].len = strlen(Command); -+ dlist[0].rwflag = DLREAD; -+ -+ dlist[1].page = NULL; -+ dlist[1].offset = (char *)&lreply; -+ dlist[1].len = sizeof(lreply); -+ dlist[1].rwflag = DLWRITE; -+ -+ cmdlen = offsetof(struct novfs_debug_request, dbgcmd); -+ -+ cmd.Command.CommandType = VFS_COMMAND_DBG; -+ cmd.Command.SequenceNumber = 0; -+ SC_INITIALIZE(cmd.Command.SessionId); -+ cmd.cmdlen = strlen(Command); -+ -+ replylen = 0; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ kfree(reply); -+ } -+ if (0 == retCode) { -+ retCode = lreply.Reply.ErrorCode; -+ } -+ -+ return (retCode); -+} -+ -+int novfs_daemon_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ int retCode = -ENOSYS; -+ unsigned long cpylen; -+ struct novfs_schandle session_id; -+ session_id = novfs_scope_get_sessionId(NULL); -+ -+ switch (cmd) { -+ case IOC_LOGIN: -+ retCode = daemon_login((struct novfs_login *) arg, &session_id); -+ break; -+ -+ case IOC_LOGOUT: -+ retCode = daemon_logout((struct novfs_logout *)arg, &session_id); -+ break; -+ case 
IOC_DEBUGPRINT: -+ { -+ struct Ioctl_Debug { -+ int length; -+ char *data; -+ } io; -+ char *buf; -+ io.length = 0; -+ cpylen = copy_from_user(&io, (char *)arg, sizeof(io)); -+ if (io.length) { -+ buf = kmalloc(io.length + 1, GFP_KERNEL); -+ if (buf) { -+ buf[0] = 0; -+ cpylen = -+ copy_from_user(buf, io.data, -+ io.length); -+ buf[io.length] = '\0'; -+ DbgPrint("%s", buf); -+ kfree(buf); -+ retCode = 0; -+ } -+ } -+ break; -+ } -+ -+ case IOC_XPLAT: -+ { -+ struct novfs_xplat data; -+ -+ cpylen = -+ copy_from_user(&data, (void *)arg, sizeof(data)); -+ retCode = ((data.xfunction & 0x0000FFFF) | 0xCC000000); -+ -+ switch (data.xfunction) { -+ case NWC_GET_MOUNT_PATH: -+ DbgPrint("Call NwdGetMountPath"); -+ retCode = NwdGetMountPath(&data); -+ break; -+ } -+ -+ DbgPrint("[NOVFS XPLAT] status Code = %X\n", retCode); -+ break; -+ } -+ -+ } -+ return (retCode); -+} -+ -+static int daemon_added_resource(struct daemon_handle * DHandle, int Type, void *CHandle, -+ unsigned char * FHandle, unsigned long Mode, u_long Size) -+{ -+ struct daemon_resource *resource; -+ -+ if (FHandle) -+ DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p FHandle=0x%x " -+ "Mode=0x%x Size=%d", DHandle, Type, CHandle, -+ *(u32 *) & FHandle[2], Mode, Size); -+ else -+ DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p\n", -+ DHandle, Type, CHandle); -+ -+ resource = kmalloc(sizeof(struct daemon_resource), GFP_KERNEL); -+ if (!resource) -+ return -ENOMEM; -+ -+ resource->type = Type; -+ resource->connection = CHandle; -+ if (FHandle) -+ memcpy(resource->handle, FHandle, -+ sizeof(resource->handle)); -+ else -+ memset(resource->handle, 0, sizeof(resource->handle)); -+ resource->mode = Mode; -+ resource->size = Size; -+ write_lock(&DHandle->lock); -+ list_add(&resource->list, &DHandle->list); -+ write_unlock(&DHandle->lock); -+ DbgPrint("Adding resource=0x%p", resource); -+ return 0; -+} -+ -+static int daemon_remove_resource(struct daemon_handle * DHandle, int Type, void *CHandle, -+ unsigned long FHandle) -+{ -+ 
struct daemon_resource *resource; -+ struct list_head *l; -+ int retVal = -ENOMEM; -+ -+ DbgPrint("DHandle=0x%p Type=%d CHandle=0x%p FHandle=0x%x", -+ DHandle, Type, CHandle, FHandle); -+ -+ write_lock(&DHandle->lock); -+ -+ list_for_each(l, &DHandle->list) { -+ resource = list_entry(l, struct daemon_resource, list); -+ -+ if ((Type == resource->type) && -+ (resource->connection == CHandle)) { -+ DbgPrint("Found resource=0x%p", resource); -+ l = l->prev; -+ list_del(&resource->list); -+ kfree(resource); -+ break; -+ } -+ } -+ -+ write_unlock(&DHandle->lock); -+ -+ return (retVal); -+} -+ -+int novfs_daemon_lib_open(struct inode *inode, struct file *file) -+{ -+ struct daemon_handle *dh; -+ -+ DbgPrint("inode=0x%p file=0x%p", inode, file); -+ dh = kmalloc(sizeof(struct daemon_handle), GFP_KERNEL); -+ if (!dh) -+ return -ENOMEM; -+ file->private_data = dh; -+ INIT_LIST_HEAD(&dh->list); -+ rwlock_init(&dh->lock); -+ dh->session = novfs_scope_get_sessionId(NULL); -+ return 0; -+} -+ -+int novfs_daemon_lib_close(struct inode *inode, struct file *file) -+{ -+ struct daemon_handle *dh; -+ struct daemon_resource *resource; -+ struct list_head *l; -+ -+ char commanddata[sizeof(struct novfs_xplat_call_request) + sizeof(struct nwd_close_conn)]; -+ struct novfs_xplat_call_request *cmd; -+ struct xplat_call_reply *reply; -+ struct nwd_close_conn *nwdClose; -+ unsigned long cmdlen, replylen; -+ -+ DbgPrint("inode=0x%p file=0x%p", inode, file); -+ if (file->private_data) { -+ dh = (struct daemon_handle *) file->private_data; -+ -+ list_for_each(l, &dh->list) { -+ resource = list_entry(l, struct daemon_resource, list); -+ -+ if (DH_TYPE_STREAM == resource->type) { -+ novfs_close_stream(resource->connection, -+ resource->handle, -+ dh->session); -+ } else if (DH_TYPE_CONNECTION == resource->type) { -+ cmd = (struct novfs_xplat_call_request *) commanddata; -+ cmdlen = -+ offsetof(struct novfs_xplat_call_request, -+ data) + sizeof(struct nwd_close_conn); -+ cmd->Command.CommandType = 
-+ VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = dh->session; -+ cmd->NwcCommand = NWC_CLOSE_CONN; -+ -+ cmd->dataLen = sizeof(struct nwd_close_conn); -+ nwdClose = (struct nwd_close_conn *) cmd->data; -+ nwdClose->ConnHandle = -+ (void *) resource->connection; -+ -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, -+ 0, (void **)&reply, -+ &replylen, 0); -+ if (reply) -+ kfree(reply); -+ } -+ l = l->prev; -+ list_del(&resource->list); -+ kfree(resource); -+ } -+ kfree(dh); -+ file->private_data = NULL; -+ } -+ -+ return (0); -+} -+ -+ssize_t novfs_daemon_lib_read(struct file * file, char *buf, size_t len, -+ loff_t * off) -+{ -+ struct daemon_handle *dh; -+ struct daemon_resource *resource; -+ -+ size_t thisread, totalread = 0; -+ loff_t offset = *off; -+ -+ DbgPrint("file=0x%p len=%d off=%lld", file, len, *off); -+ -+ if (file->private_data) { -+ dh = file->private_data; -+ read_lock(&dh->lock); -+ if (&dh->list != dh->list.next) { -+ resource = -+ list_entry(dh->list.next, struct daemon_resource, list); -+ -+ if (DH_TYPE_STREAM == resource->type) { -+ while (len > 0 && (offset < resource->size)) { -+ thisread = len; -+ if (novfs_read_stream -+ (resource->connection, -+ resource->handle, buf, &thisread, -+ &offset, 1, dh->session) -+ || !thisread) { -+ break; -+ } -+ len -= thisread; -+ buf += thisread; -+ offset += thisread; -+ totalread += thisread; -+ } -+ } -+ } -+ read_unlock(&dh->lock); -+ } -+ *off = offset; -+ DbgPrint("return = 0x%x", totalread); -+ return (totalread); -+} -+ -+ssize_t novfs_daemon_lib_write(struct file * file, const char *buf, size_t len, -+ loff_t * off) -+{ -+ struct daemon_handle *dh; -+ struct daemon_resource *resource; -+ -+ size_t thiswrite, totalwrite = -EINVAL; -+ loff_t offset = *off; -+ int status; -+ -+ DbgPrint("file=0x%p len=%d off=%lld", file, len, *off); -+ -+ if (file->private_data) { -+ dh = file->private_data; -+ write_lock(&dh->lock); -+ if (&dh->list != dh->list.next) { -+ 
resource = -+ list_entry(dh->list.next, struct daemon_resource, list); -+ -+ if ((DH_TYPE_STREAM == resource->type) && (len >= 0)) { -+ totalwrite = 0; -+ do { -+ thiswrite = len; -+ status = -+ novfs_write_stream(resource-> -+ connection, -+ resource->handle, -+ (void *)buf, -+ &thiswrite, -+ &offset, -+ dh->session); -+ if (status || !thiswrite) { -+ /* -+ * If len is zero then the file will have just been -+ * truncated to offset. Update size. -+ */ -+ if (!status && !len) { -+ resource->size = offset; -+ } -+ totalwrite = status; -+ break; -+ } -+ len -= thiswrite; -+ buf += thiswrite; -+ offset += thiswrite; -+ totalwrite += thiswrite; -+ if (offset > resource->size) { -+ resource->size = offset; -+ } -+ } while (len > 0); -+ } -+ } -+ write_unlock(&dh->lock); -+ } -+ *off = offset; -+ DbgPrint("return = 0x%x", totalwrite); -+ -+ return (totalwrite); -+} -+ -+loff_t novfs_daemon_lib_llseek(struct file * file, loff_t offset, int origin) -+{ -+ struct daemon_handle *dh; -+ struct daemon_resource *resource; -+ -+ loff_t retVal = -EINVAL; -+ -+ DbgPrint("file=0x%p offset=%lld origin=%d", file, offset, origin); -+ -+ if (file->private_data) { -+ dh = file->private_data; -+ read_lock(&dh->lock); -+ if (&dh->list != dh->list.next) { -+ resource = -+ list_entry(dh->list.next, struct daemon_resource, list); -+ -+ if (DH_TYPE_STREAM == resource->type) { -+ switch (origin) { -+ case 2: -+ offset += resource->size; -+ break; -+ case 1: -+ offset += file->f_pos; -+ } -+ if (offset >= 0) { -+ if (offset != file->f_pos) { -+ file->f_pos = offset; -+ file->f_version = 0; -+ } -+ retVal = offset; -+ } -+ } -+ } -+ read_unlock(&dh->lock); -+ } -+ -+ DbgPrint("ret %lld", retVal); -+ -+ return retVal; -+} -+ -+#define DbgIocCall(str) __DbgPrint("[VFS XPLAT] Call " str "\n") -+ -+int novfs_daemon_lib_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ int retCode = -ENOSYS; -+ struct daemon_handle *dh; -+ void *handle = NULL; -+ unsigned long 
cpylen; -+ -+ dh = file->private_data; -+ -+ DbgPrint("file=0x%p 0x%x 0x%p dh=0x%p", file, cmd, arg, dh); -+ -+ if (dh) { -+ -+ switch (cmd) { -+ case IOC_LOGIN: -+ retCode = daemon_login((struct novfs_login *)arg, &dh->session); -+ break; -+ -+ case IOC_LOGOUT: -+ retCode = daemon_logout((struct novfs_logout *)arg, &dh->session); -+ break; -+ -+ case IOC_DEBUGPRINT: -+ { -+ struct Ioctl_Debug { -+ int length; -+ char *data; -+ } io; -+ char *buf; -+ io.length = 0; -+ cpylen = -+ copy_from_user(&io, (void *)arg, -+ sizeof(io)); -+ if (io.length) { -+ buf = -+ kmalloc(io.length + 1, -+ GFP_KERNEL); -+ if (buf) { -+ buf[0] = 0; -+ cpylen = -+ copy_from_user(buf, io.data, -+ io.length); -+ buf[io.length] = '\0'; -+ __DbgPrint("%s", buf); -+ kfree(buf); -+ retCode = 0; -+ } -+ } -+ break; -+ } -+ -+ case IOC_XPLAT: -+ { -+ struct novfs_xplat data; -+ -+ cpylen = -+ copy_from_user(&data, (void *)arg, -+ sizeof(data)); -+ retCode = -+ ((data. -+ xfunction & 0x0000FFFF) | 0xCC000000); -+ -+ switch (data.xfunction) { -+ case NWC_OPEN_CONN_BY_NAME: -+ DbgIocCall("NwOpenConnByName"); -+ retCode = -+ novfs_open_conn_by_name(&data, -+ &handle, dh->session); -+ if (!retCode) -+ daemon_added_resource(dh, -+ DH_TYPE_CONNECTION,handle, 0, 0, 0); -+ break; -+ -+ case NWC_OPEN_CONN_BY_ADDRESS: -+ DbgIocCall("NwOpenConnByAddress"); -+ retCode = -+ novfs_open_conn_by_addr(&data, &handle, -+ dh->session); -+ if (!retCode) -+ daemon_added_resource(dh, -+ DH_TYPE_CONNECTION, -+ handle, 0, -+ 0, 0); -+ break; -+ -+ case NWC_OPEN_CONN_BY_REFERENCE: -+ -+ DbgIocCall("NwOpenConnByReference"); -+ retCode = -+ novfs_open_conn_by_ref(&data, &handle, -+ dh->session); -+ if (!retCode) -+ daemon_added_resource(dh, -+ DH_TYPE_CONNECTION, -+ handle, 0, -+ 0, 0); -+ break; -+ -+ case NWC_SYS_CLOSE_CONN: -+ DbgIocCall("NwSysCloseConn"); -+ retCode = -+ novfs_sys_conn_close(&data, (unsigned long *)&handle, dh->session); -+ daemon_remove_resource(dh, DH_TYPE_CONNECTION, handle, 0); -+ break; -+ -+ case 
NWC_CLOSE_CONN: -+ DbgIocCall("NwCloseConn"); -+ retCode = -+ novfs_conn_close(&data, &handle, -+ dh->session); -+ daemon_remove_resource(dh, -+ DH_TYPE_CONNECTION, -+ handle, 0); -+ break; -+ -+ case NWC_LOGIN_IDENTITY: -+ DbgIocCall("" -+ "NwLoginIdentity"); -+ retCode = -+ novfs_login_id(&data, dh->session); -+ break; -+ -+ case NWC_RAW_NCP_REQUEST: -+ DbgIocCall("[VFS XPLAT] Send Raw " -+ "NCP Request"); -+ retCode = novfs_raw_send(&data, dh->session); -+ break; -+ -+ case NWC_AUTHENTICATE_CONN_WITH_ID: -+ DbgIocCall("[VFS XPLAT] Authenticate " -+ "Conn With ID"); -+ retCode = -+ novfs_auth_conn(&data, -+ dh->session); -+ break; -+ -+ case NWC_UNAUTHENTICATE_CONN: -+ DbgIocCall("[VFS XPLAT] UnAuthenticate " -+ "Conn With ID"); -+ retCode = -+ novfs_unauthenticate(&data, -+ dh->session); -+ break; -+ -+ case NWC_LICENSE_CONN: -+ DbgIocCall("Call NwLicenseConn"); -+ retCode = -+ novfs_license_conn(&data, dh->session); -+ break; -+ -+ case NWC_LOGOUT_IDENTITY: -+ DbgIocCall("NwLogoutIdentity"); -+ retCode = -+ novfs_logout_id(&data, -+ dh->session); -+ break; -+ -+ case NWC_UNLICENSE_CONN: -+ DbgIocCall("NwUnlicense"); -+ retCode = -+ novfs_unlicense_conn(&data, dh->session); -+ break; -+ -+ case NWC_GET_CONN_INFO: -+ DbgIocCall("NwGetConnInfo"); -+ retCode = -+ novfs_get_conn_info(&data, dh->session); -+ break; -+ -+ case NWC_SET_CONN_INFO: -+ DbgIocCall("NwGetConnInfo"); -+ retCode = -+ novfs_set_conn_info(&data, dh->session); -+ break; -+ -+ case NWC_SCAN_CONN_INFO: -+ DbgIocCall("NwScanConnInfo"); -+ retCode = -+ novfs_scan_conn_info(&data, dh->session); -+ break; -+ -+ case NWC_GET_IDENTITY_INFO: -+ DbgIocCall("NwGetIdentityInfo"); -+ retCode = -+ novfs_get_id_info(&data, -+ dh->session); -+ break; -+ -+ case NWC_GET_REQUESTER_VERSION: -+ DbgIocCall("NwGetDaemonVersion"); -+ retCode = -+ novfs_get_daemon_ver(&data, -+ dh->session); -+ break; -+ -+ case NWC_GET_PREFERRED_DS_TREE: -+ DbgIocCall("NwcGetPreferredDsTree"); -+ retCode = -+ 
novfs_get_preferred_DS_tree(&data, -+ dh->session); -+ break; -+ -+ case NWC_SET_PREFERRED_DS_TREE: -+ DbgIocCall("NwcSetPreferredDsTree"); -+ retCode = -+ novfs_set_preferred_DS_tree(&data, -+ dh->session); -+ break; -+ -+ case NWC_GET_DEFAULT_NAME_CONTEXT: -+ DbgIocCall("NwcGetDefaultNameContext"); -+ retCode = -+ novfs_get_default_ctx(&data, -+ dh->session); -+ break; -+ -+ case NWC_SET_DEFAULT_NAME_CONTEXT: -+ DbgIocCall("NwcSetDefaultNameContext"); -+ retCode = -+ novfs_set_default_ctx(&data, -+ dh->session); -+ break; -+ -+ case NWC_QUERY_FEATURE: -+ DbgIocCall("NwQueryFeature"); -+ retCode = -+ novfs_query_feature(&data, dh->session); -+ break; -+ -+ case NWC_GET_TREE_MONITORED_CONN_REF: -+ DbgIocCall("NwcGetTreeMonitoredConn"); -+ retCode = -+ novfs_get_tree_monitored_conn(&data, -+ dh-> -+ session); -+ break; -+ -+ case NWC_ENUMERATE_IDENTITIES: -+ DbgIocCall("NwcEnumerateIdentities"); -+ retCode = -+ novfs_enum_ids(&data, -+ dh->session); -+ break; -+ -+ case NWC_CHANGE_KEY: -+ DbgIocCall("NwcChangeAuthKey"); -+ retCode = -+ novfs_change_auth_key(&data, -+ dh->session); -+ break; -+ -+ case NWC_CONVERT_LOCAL_HANDLE: -+ DbgIocCall("NwdConvertLocalHandle"); -+ retCode = -+ NwdConvertLocalHandle(&data, dh); -+ break; -+ -+ case NWC_CONVERT_NETWARE_HANDLE: -+ DbgIocCall("NwdConvertNetwareHandle"); -+ retCode = -+ NwdConvertNetwareHandle(&data, dh); -+ break; -+ -+ case NWC_SET_PRIMARY_CONN: -+ DbgIocCall("NwcSetPrimaryConn"); -+ retCode = -+ novfs_set_pri_conn(&data, -+ dh->session); -+ break; -+ -+ case NWC_GET_PRIMARY_CONN: -+ DbgIocCall("NwcGetPrimaryConn"); -+ retCode = -+ novfs_get_pri_conn(&data, -+ dh->session); -+ break; -+ -+ case NWC_MAP_DRIVE: -+ DbgIocCall("NwcMapDrive"); -+ retCode = -+ set_map_drive(&data, dh->session); -+ break; -+ -+ case NWC_UNMAP_DRIVE: -+ DbgIocCall("NwcUnMapDrive"); -+ retCode = -+ unmap_drive(&data, dh->session); -+ break; -+ -+ case NWC_ENUMERATE_DRIVES: -+ DbgIocCall("NwcEnumerateDrives"); -+ retCode = -+ 
novfs_enum_drives(&data, -+ dh->session); -+ break; -+ -+ case NWC_GET_MOUNT_PATH: -+ DbgIocCall("NwdGetMountPath"); -+ retCode = NwdGetMountPath(&data); -+ break; -+ -+ case NWC_GET_BROADCAST_MESSAGE: -+ DbgIocCall("NwdGetBroadcastMessage"); -+ retCode = -+ novfs_get_bcast_msg(&data, -+ dh->session); -+ break; -+ -+ case NWC_SET_KEY: -+ DbgIocCall("NwdSetKey"); -+ retCode = -+ novfs_set_key_value(&data, dh->session); -+ break; -+ -+ case NWC_VERIFY_KEY: -+ DbgIocCall("NwdVerifyKey"); -+ retCode = -+ novfs_verify_key_value(&data, -+ dh->session); -+ break; -+ -+ case NWC_RAW_NCP_REQUEST_ALL: -+ case NWC_NDS_RESOLVE_NAME_TO_ID: -+ case NWC_FRAGMENT_REQUEST: -+ case NWC_GET_CONFIGURED_NSPS: -+ default: -+ break; -+ -+ } -+ -+ DbgPrint("[NOVFS XPLAT] status Code = %X\n", -+ retCode); -+ break; -+ } -+ } -+ } -+ -+ return (retCode); -+} -+ -+unsigned int novfs_daemon_poll(struct file *file, -+ struct poll_table_struct *poll_table) -+{ -+ struct daemon_cmd *que; -+ unsigned int mask = POLLOUT | POLLWRNORM; -+ -+ que = get_next_queue(0); -+ if (que) -+ mask |= (POLLIN | POLLRDNORM); -+ return mask; -+} -+ -+static int NwdConvertNetwareHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle) -+{ -+ int retVal; -+ struct nwc_convert_netware_handle nh; -+ unsigned long cpylen; -+ -+ DbgPrint("DHandle=0x%p", DHandle); -+ -+ cpylen = -+ copy_from_user(&nh, pdata->reqData, -+ sizeof(struct nwc_convert_netware_handle)); -+ -+ retVal = -+ daemon_added_resource(DHandle, DH_TYPE_STREAM, -+ Uint32toHandle(nh.ConnHandle), -+ nh.NetWareHandle, nh.uAccessMode, -+ nh.uFileSize); -+ -+ return (retVal); -+} -+ -+static int NwdConvertLocalHandle(struct novfs_xplat *pdata, struct daemon_handle * DHandle) -+{ -+ int retVal = NWE_REQUESTER_FAILURE; -+ struct daemon_resource *resource; -+ struct nwc_convert_local_handle lh; -+ struct list_head *l; -+ unsigned long cpylen; -+ -+ DbgPrint("DHandle=0x%p", DHandle); -+ -+ read_lock(&DHandle->lock); -+ -+ list_for_each(l, &DHandle->list) 
{ -+ resource = list_entry(l, struct daemon_resource, list); -+ -+ if (DH_TYPE_STREAM == resource->type) { -+ lh.uConnReference = -+ HandletoUint32(resource->connection); -+ -+//sgled memcpy(lh.NwWareHandle, resource->handle, sizeof(resource->handle)); -+ memcpy(lh.NetWareHandle, resource->handle, sizeof(resource->handle)); //sgled -+ if (pdata->repLen >= sizeof(struct nwc_convert_local_handle)) { -+ cpylen = -+ copy_to_user(pdata->repData, &lh, -+ sizeof(struct nwc_convert_local_handle)); -+ retVal = 0; -+ } else { -+ retVal = NWE_BUFFER_OVERFLOW; -+ } -+ break; -+ } -+ } -+ -+ read_unlock(&DHandle->lock); -+ -+ return (retVal); -+} -+ -+static int NwdGetMountPath(struct novfs_xplat *pdata) -+{ -+ int retVal = NWE_REQUESTER_FAILURE; -+ int len; -+ unsigned long cpylen; -+ struct nwc_get_mount_path mp; -+ -+ cpylen = copy_from_user(&mp, pdata->reqData, pdata->reqLen); -+ -+ if (novfs_current_mnt) { -+ -+ len = strlen(novfs_current_mnt) + 1; -+ if ((len > mp.MountPathLen) && mp.pMountPath) { -+ retVal = NWE_BUFFER_OVERFLOW; -+ } else { -+ if (mp.pMountPath) { -+ cpylen = -+ copy_to_user(mp.pMountPath, -+ novfs_current_mnt, len); -+ } -+ retVal = 0; -+ } -+ -+ mp.MountPathLen = len; -+ -+ if (pdata->repData && (pdata->repLen >= sizeof(mp))) { -+ cpylen = copy_to_user(pdata->repData, &mp, sizeof(mp)); -+ } -+ } -+ -+ return (retVal); -+} -+ -+static int set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ int retVal; -+ unsigned long cpylen; -+ struct nwc_map_drive_ex symInfo; -+ char *path; -+ struct drive_map *drivemap, *dm; -+ struct list_head *list; -+ -+ retVal = novfs_set_map_drive(pdata, Session); -+ if (retVal) -+ return retVal; -+ if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo))) -+ return -EFAULT; -+ drivemap = -+ kmalloc(sizeof(struct drive_map) + symInfo.linkOffsetLength, -+ GFP_KERNEL); -+ if (!drivemap) -+ return -ENOMEM; -+ -+ path = (char *)pdata->reqData; -+ path += symInfo.linkOffset; -+ cpylen = -+ 
copy_from_user(drivemap->name, path, -+ symInfo.linkOffsetLength); -+ -+ drivemap->session = Session; -+ drivemap->hash = -+ full_name_hash(drivemap->name, -+ symInfo.linkOffsetLength - 1); -+ drivemap->namelen = symInfo.linkOffsetLength - 1; -+ DbgPrint("hash=0x%lx path=%s", drivemap->hash, drivemap->name); -+ -+ dm = (struct drive_map *) & DriveMapList.next; -+ -+ down(&DriveMapLock); -+ -+ list_for_each(list, &DriveMapList) { -+ dm = list_entry(list, struct drive_map, list); -+ __DbgPrint("%s: dm=0x%p\n" -+ " hash: 0x%lx\n" -+ " namelen: %d\n" -+ " name: %s\n", __func__, -+ dm, dm->hash, dm->namelen, dm->name); -+ -+ if (drivemap->hash == dm->hash) { -+ if (0 == -+ strcmp(dm->name, drivemap->name)) { -+ dm = NULL; -+ break; -+ } -+ } else if (drivemap->hash < dm->hash) { -+ break; -+ } -+ } -+ -+ if (dm) { -+ if ((dm == (struct drive_map *) & DriveMapList) || -+ (dm->hash < drivemap->hash)) { -+ list_add(&drivemap->list, &dm->list); -+ } else { -+ list_add_tail(&drivemap->list, -+ &dm->list); -+ } -+ } -+ else -+ kfree(drivemap); -+ up(&DriveMapLock); -+ return (retVal); -+} -+ -+static int unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ int retVal = NWE_REQUESTER_FAILURE; -+ struct nwc_unmap_drive_ex symInfo; -+ char *path; -+ struct drive_map *dm; -+ struct list_head *list; -+ unsigned long hash; -+ -+ -+ retVal = novfs_unmap_drive(pdata, Session); -+ if (retVal) -+ return retVal; -+ if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo))) -+ return -EFAULT; -+ -+ path = kmalloc(symInfo.linkLen, GFP_KERNEL); -+ if (!path) -+ return -ENOMEM; -+ if (copy_from_user(path,((struct nwc_unmap_drive_ex *) pdata->reqData)->linkData, symInfo.linkLen)) { -+ kfree(path); -+ return -EFAULT; -+ } -+ -+ hash = full_name_hash(path, symInfo.linkLen - 1); -+ DbgPrint("hash=0x%x path=%s", hash, path); -+ -+ dm = NULL; -+ -+ down(&DriveMapLock); -+ -+ list_for_each(list, &DriveMapList) { -+ dm = list_entry(list, struct drive_map, list); -+ 
__DbgPrint("%s: dm=0x%p %s\n" -+ " hash: 0x%x\n" -+ " namelen: %d\n", __func__, -+ dm, dm->name, dm->hash, dm->namelen); -+ -+ if (hash == dm->hash) { -+ if (0 == strcmp(dm->name, path)) { -+ break; -+ } -+ } else if (hash < dm->hash) { -+ dm = NULL; -+ break; -+ } -+ } -+ -+ if (dm) { -+ __DbgPrint("%s: Remove dm=0x%p %s\n" -+ " hash: 0x%x\n" -+ " namelen: %d\n", __func__, -+ dm, dm->name, dm->hash, dm->namelen); -+ list_del(&dm->list); -+ kfree(dm); -+ } -+ -+ up(&DriveMapLock); -+ return (retVal); -+} -+ -+static void RemoveDriveMaps(void) -+{ -+ struct drive_map *dm; -+ struct list_head *list; -+ -+ down(&DriveMapLock); -+ list_for_each(list, &DriveMapList) { -+ dm = list_entry(list, struct drive_map, list); -+ -+ __DbgPrint("%s: dm=0x%p\n" -+ " hash: 0x%x\n" -+ " namelen: %d\n" -+ " name: %s\n", __func__, -+ dm, dm->hash, dm->namelen, dm->name); -+ local_unlink(dm->name); -+ list = list->prev; -+ list_del(&dm->list); -+ kfree(dm); -+ } -+ up(&DriveMapLock); -+} -+ -+/* As picked from do_unlinkat() */ -+ -+static long local_unlink(const char *pathname) -+{ -+ int error; -+ struct dentry *dentry; -+ char *name, *c; -+ struct nameidata nd; -+ struct inode *inode = NULL; -+ -+ error = path_lookup(pathname, LOOKUP_PARENT, &nd); -+ DbgPrint("path_lookup %s error: %d\n", pathname, error); -+ if (error) -+ return error; -+ -+ error = -EISDIR; -+ if (nd.last_type != LAST_NORM) -+ goto exit1; -+ mutex_lock(&nd.path.dentry->d_inode->i_mutex); -+ /* Get the filename of pathname */ -+ name=c=(char *)pathname; -+ while (*c!='\0') { -+ if (*c=='/') -+ name=++c; -+ c++; -+ } -+ dentry = lookup_one_len(name, nd.path.dentry, strlen(name)); -+ error = PTR_ERR(dentry); -+ DbgPrint("dentry %p", dentry); -+ if (!(dentry->d_inode->i_mode & S_IFLNK)) { -+ DbgPrint("%s not a link", name); -+ error=-ENOENT; -+ goto exit1; -+ } -+ -+ if (!IS_ERR(dentry)) { -+ /* Why not before? 
Because we want correct error value */ -+ if (nd.last.name[nd.last.len]) -+ goto slashes; -+ inode = dentry->d_inode; -+ if (inode) -+ atomic_inc(&inode->i_count); -+ error = mnt_want_write(nd.path.mnt); -+ DbgPrint("inode %p mnt_want_write error %d", inode, error); -+ if (error) -+ goto exit2; -+ error = vfs_unlink(nd.path.dentry->d_inode, dentry); -+ mnt_drop_write(nd.path.mnt); -+ exit2: -+ dput(dentry); -+ } -+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex); -+ if (inode) -+ iput(inode); /* truncate the inode here */ -+exit1: -+ path_put(&nd.path); -+ DbgPrint("returning error %d", error); -+ return error; -+ -+slashes: -+ error = !dentry->d_inode ? -ENOENT : -+ S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR; -+ goto exit2; -+} -+ ---- /dev/null -+++ b/fs/novfs/file.c -@@ -0,0 +1,1921 @@ -+/* -+ * Novell NCP Redirector for Linux -+ * Author: James Turner -+ * -+ * This file contains functions for accessing files through the daemon. -+ * -+ * Copyright (C) 2005 Novell, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "vfs.h" -+#include "commands.h" -+#include "nwerror.h" -+ -+static ssize_t novfs_tree_read(struct file * file, char *buf, size_t len, loff_t * off); -+extern struct dentry_operations novfs_dentry_operations; -+ -+static struct file_operations novfs_tree_operations = { -+ read:novfs_tree_read, -+}; -+ -+/* -+ * StripTrailingDots was added because some apps will -+ * try and create a file name with a trailing dot. NetWare -+ * doesn't like this and will return an error. 
-+ */ -+static int StripTrailingDots = 1; -+ -+int novfs_get_alltrees(struct dentry *parent) -+{ -+ unsigned char *p; -+ struct novfs_command_reply_header * reply = NULL; -+ unsigned long replylen = 0; -+ struct novfs_command_request_header cmd; -+ int retCode; -+ struct dentry *entry; -+ struct qstr name; -+ struct inode *inode; -+ -+ cmd.CommandType = 0; -+ cmd.SequenceNumber = 0; -+//sg ??? cmd.SessionId = 0x1234; -+ SC_INITIALIZE(cmd.SessionId); -+ -+ DbgPrint(""); -+ -+ retCode = Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, &replylen, INTERRUPTIBLE); -+ DbgPrint("reply=0x%p replylen=%d", reply, replylen); -+ if (reply) { -+ novfs_dump(replylen, reply); -+ if (!reply->ErrorCode -+ && (replylen > sizeof(struct novfs_command_reply_header))) { -+ p = (char *)reply + 8; -+ while (*p) { -+ DbgPrint("%s", p); -+ name.len = strlen(p); -+ name.name = p; -+ name.hash = full_name_hash(name.name, name.len); -+ entry = d_lookup(parent, &name); -+ if (NULL == entry) { -+ DbgPrint("adding %s", p); -+ entry = d_alloc(parent, &name); -+ if (entry) { -+ entry->d_op = &novfs_dentry_operations; -+ inode = novfs_get_inode(parent->d_sb, S_IFREG | 0400, 0, 0, 0, &name); -+ if (inode) { -+ inode->i_fop = &novfs_tree_operations; -+ d_add(entry, inode); -+ } -+ } -+ } -+ p += (name.len + 1); -+ } -+ } -+ kfree(reply); -+ } -+ return (retCode); -+} -+ -+static ssize_t novfs_tree_read(struct file * file, char *buf, size_t len, loff_t * off) -+{ -+ if (file->f_pos != 0) { -+ return (0); -+ } -+ if (copy_to_user(buf, "Tree\n", 5)) { -+ return (0); -+ } -+ return (5); -+} -+ -+int novfs_get_servers(unsigned char ** ServerList, struct novfs_schandle SessionId) -+{ -+ struct novfs_get_connected_server_list req; -+ struct novfs_get_connected_server_list_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode = 0; -+ -+ *ServerList = NULL; -+ -+ req.Command.CommandType = VFS_COMMAND_GET_CONNECTED_SERVER_LIST; -+ req.Command.SessionId = SessionId; -+ -+ retCode = 
-+ Queue_Daemon_Command(&req, sizeof(req), NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ DbgPrint("reply"); -+ replylen -= sizeof(struct novfs_command_reply_header); -+ if (!reply->Reply.ErrorCode && replylen) { -+ memcpy(reply, reply->List, replylen); -+ *ServerList = (unsigned char *) reply; -+ retCode = 0; -+ } else { -+ kfree(reply); -+ retCode = -ENOENT; -+ } -+ } -+ return (retCode); -+} -+ -+int novfs_get_vols(struct qstr *Server, unsigned char ** VolumeList, -+ struct novfs_schandle SessionId) -+{ -+ struct novfs_get_server_volume_list *req; -+ struct novfs_get_server_volume_list_reply *reply = NULL; -+ unsigned long replylen = 0, reqlen; -+ int retCode; -+ -+ *VolumeList = NULL; -+ reqlen = sizeof(struct novfs_get_server_volume_list) + Server->len; -+ req = kmalloc(reqlen, GFP_KERNEL); -+ if (!req) -+ return -ENOMEM; -+ req->Command.CommandType = VFS_COMMAND_GET_SERVER_VOLUME_LIST; -+ req->Length = Server->len; -+ memcpy(req->Name, Server->name, Server->len); -+ req->Command.SessionId = SessionId; -+ -+ retCode = -+ Queue_Daemon_Command(req, reqlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ DbgPrint("reply"); -+ novfs_dump(replylen, reply); -+ replylen -= sizeof(struct novfs_command_reply_header); -+ -+ if (!reply->Reply.ErrorCode && replylen) { -+ memcpy(reply, reply->List, replylen); -+ *VolumeList = (unsigned char *) reply; -+ retCode = 0; -+ } else { -+ kfree(reply); -+ retCode = -ENOENT; -+ } -+ } -+ kfree(req); -+ return (retCode); -+} -+ -+int novfs_get_file_info(unsigned char * Path, struct novfs_entry_info * Info, struct novfs_schandle SessionId) -+{ -+ struct novfs_verify_file_reply *reply = NULL; -+ unsigned long replylen = 0; -+ struct novfs_verify_file_request * cmd; -+ int cmdlen; -+ int retCode = -ENOENT; -+ int pathlen; -+ -+ DbgPrint("Path = %s", Path); -+ -+ Info->mode = S_IFDIR | 0700; -+ Info->uid = current_uid(); -+ Info->gid = current_gid(); -+ Info->size = 0; -+ Info->atime 
= Info->mtime = Info->ctime = CURRENT_TIME; -+ -+ if (Path && *Path) { -+ pathlen = strlen(Path); -+ if (StripTrailingDots) { -+ if ('.' == Path[pathlen - 1]) -+ pathlen--; -+ } -+ cmdlen = offsetof(struct novfs_verify_file_request,path) + pathlen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (cmd) { -+ cmd->Command.CommandType = VFS_COMMAND_VERIFY_FILE; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ cmd->pathLen = pathlen; -+ memcpy(cmd->path, Path, cmd->pathLen); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, -+ (void *)&reply, &replylen, -+ INTERRUPTIBLE); -+ -+ if (reply) { -+ -+ if (reply->Reply.ErrorCode) { -+ retCode = -ENOENT; -+ } else { -+ Info->type = 3; -+ Info->mode = S_IRWXU; -+ -+ if (reply-> -+ fileMode & NW_ATTRIBUTE_DIRECTORY) { -+ Info->mode |= S_IFDIR; -+ } else { -+ Info->mode |= S_IFREG; -+ } -+ -+ if (reply-> -+ fileMode & NW_ATTRIBUTE_READ_ONLY) { -+ Info->mode &= ~(S_IWUSR); -+ } -+ -+ Info->uid = current_euid(); -+ Info->gid = current_egid(); -+ Info->size = reply->fileSize; -+ Info->atime.tv_sec = -+ reply->lastAccessTime; -+ Info->atime.tv_nsec = 0; -+ Info->mtime.tv_sec = reply->modifyTime; -+ Info->mtime.tv_nsec = 0; -+ Info->ctime.tv_sec = reply->createTime; -+ Info->ctime.tv_nsec = 0; -+ DbgPrint("replylen=%d sizeof(VERIFY_FILE_REPLY)=%d", -+ replylen, -+ sizeof(struct novfs_verify_file_reply)); -+ if (replylen > -+ sizeof(struct novfs_verify_file_reply)) { -+ unsigned int *lp = -+ &reply->fileMode; -+ lp++; -+ DbgPrint("extra data 0x%x", -+ *lp); -+ Info->mtime.tv_nsec = *lp; -+ } -+ retCode = 0; -+ } -+ -+ kfree(reply); -+ } -+ kfree(cmd); -+ } -+ } -+ -+ DbgPrint("return 0x%x", retCode); -+ return (retCode); -+} -+ -+int novfs_getx_file_info(char *Path, const char *Name, char *buffer, -+ ssize_t buffer_size, ssize_t * dataLen, -+ struct novfs_schandle SessionId) -+{ -+ struct novfs_xa_get_reply *reply = NULL; -+ unsigned long replylen = 0; -+ struct novfs_xa_get_request *cmd; -+ int 
cmdlen; -+ int retCode = -ENOENT; -+ -+ int namelen = strlen(Name); -+ int pathlen = strlen(Path); -+ -+ DbgPrint("xattr: Path = %s, pathlen = %i, Name = %s, namelen = %i", -+ Path, pathlen, Name, namelen); -+ -+ if (namelen > MAX_XATTR_NAME_LEN) { -+ return ENOATTR; -+ } -+ -+ cmdlen = offsetof(struct novfs_xa_get_request, data) + pathlen + 1 + namelen + 1; // two '\0' -+ cmd = (struct novfs_xa_get_request *) kmalloc(cmdlen, GFP_KERNEL); -+ if (cmd) { -+ cmd->Command.CommandType = VFS_COMMAND_GET_EXTENDED_ATTRIBUTE; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ -+ cmd->pathLen = pathlen; -+ memcpy(cmd->data, Path, cmd->pathLen + 1); //+ '\0' -+ -+ cmd->nameLen = namelen; -+ memcpy(cmd->data + cmd->pathLen + 1, Name, cmd->nameLen + 1); -+ -+ DbgPrint("xattr: PXA_GET_REQUEST BEGIN"); -+ DbgPrint("xattr: Queue_Daemon_Command %d", -+ cmd->Command.CommandType); -+ DbgPrint("xattr: Command.SessionId = %d", -+ cmd->Command.SessionId); -+ DbgPrint("xattr: pathLen = %d", cmd->pathLen); -+ DbgPrint("xattr: Path = %s", cmd->data); -+ DbgPrint("xattr: nameLen = %d", cmd->nameLen); -+ DbgPrint("xattr: name = %s", (cmd->data + cmd->pathLen + 1)); -+ DbgPrint("xattr: PXA_GET_REQUEST END"); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ -+ if (reply) { -+ -+ if (reply->Reply.ErrorCode) { -+ DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", -+ reply->Reply.ErrorCode, -+ reply->Reply.ErrorCode); -+ DbgPrint("xattr: replylen=%d", replylen); -+ -+ //0xC9 = EA not found (C9), 0xD1 = EA access denied -+ if ((reply->Reply.ErrorCode == 0xC9) -+ || (reply->Reply.ErrorCode == 0xD1)) { -+ retCode = -ENOATTR; -+ } else { -+ retCode = -ENOENT; -+ } -+ } else { -+ -+ *dataLen = -+ replylen - sizeof(struct novfs_command_reply_header); -+ DbgPrint("xattr: replylen=%u, dataLen=%u", -+ replylen, *dataLen); -+ -+ if (buffer_size >= *dataLen) { -+ DbgPrint("xattr: copying to buffer from &reply->pData"); -+ 
memcpy(buffer, &reply->pData, *dataLen); -+ -+ retCode = 0; -+ } else { -+ DbgPrint("xattr: (!!!) buffer is smaller then reply"); -+ retCode = -ERANGE; -+ } -+ DbgPrint("xattr: /dumping buffer"); -+ novfs_dump(*dataLen, buffer); -+ DbgPrint("xattr: \\after dumping buffer"); -+ } -+ -+ kfree(reply); -+ } else { -+ DbgPrint("xattr: reply = NULL"); -+ } -+ kfree(cmd); -+ -+ } -+ -+ return retCode; -+} -+ -+int novfs_setx_file_info(char *Path, const char *Name, const void *Value, -+ unsigned long valueLen, unsigned long *bytesWritten, -+ int flags, struct novfs_schandle SessionId) -+{ -+ struct novfs_xa_set_reply *reply = NULL; -+ unsigned long replylen = 0; -+ struct novfs_xa_set_request *cmd; -+ int cmdlen; -+ int retCode = -ENOENT; -+ -+ int namelen = strlen(Name); -+ int pathlen = strlen(Path); -+ -+ DbgPrint("xattr: Path = %s, pathlen = %i, Name = %s, namelen = %i, " -+ "value len = %u", Path, pathlen, Name, namelen, valueLen); -+ -+ if (namelen > MAX_XATTR_NAME_LEN) { -+ return ENOATTR; -+ } -+ -+ cmdlen = offsetof(struct novfs_xa_set_request, data) + pathlen + 1 + namelen + 1 + valueLen; -+ cmd = (struct novfs_xa_set_request *) kmalloc(cmdlen, GFP_KERNEL); -+ if (cmd) { -+ cmd->Command.CommandType = VFS_COMMAND_SET_EXTENDED_ATTRIBUTE; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ -+ cmd->flags = flags; -+ cmd->pathLen = pathlen; -+ memcpy(cmd->data, Path, cmd->pathLen); -+ -+ cmd->nameLen = namelen; -+ memcpy(cmd->data + cmd->pathLen + 1, Name, cmd->nameLen + 1); -+ -+ cmd->valueLen = valueLen; -+ memcpy(cmd->data + cmd->pathLen + 1 + cmd->nameLen + 1, Value, -+ valueLen); -+ -+ DbgPrint("xattr: PXA_SET_REQUEST BEGIN"); -+ DbgPrint("attr: Queue_Daemon_Command %d", -+ cmd->Command.CommandType); -+ DbgPrint("xattr: Command.SessionId = %d", -+ cmd->Command.SessionId); -+ DbgPrint("xattr: pathLen = %d", cmd->pathLen); -+ DbgPrint("xattr: Path = %s", cmd->data); -+ DbgPrint("xattr: nameLen = %d", cmd->nameLen); -+ DbgPrint("xattr: name 
= %s", (cmd->data + cmd->pathLen + 1)); -+ novfs_dump(valueLen < 16 ? valueLen : 16, (char *)Value); -+ -+ DbgPrint("xattr: PXA_SET_REQUEST END"); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ -+ if (reply) { -+ -+ if (reply->Reply.ErrorCode) { -+ DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", -+ reply->Reply.ErrorCode, -+ reply->Reply.ErrorCode); -+ DbgPrint("xattr: replylen=%d", replylen); -+ -+ retCode = -reply->Reply.ErrorCode; //-ENOENT; -+ } else { -+ -+ DbgPrint("xattr: replylen=%u, real len = %u", -+ replylen, -+ replylen - sizeof(struct novfs_command_reply_header)); -+ memcpy(bytesWritten, &reply->pData, -+ replylen - sizeof(struct novfs_command_reply_header)); -+ -+ retCode = 0; -+ } -+ -+ kfree(reply); -+ } else { -+ DbgPrint("xattr: reply = NULL"); -+ } -+ kfree(cmd); -+ -+ } -+ -+ return retCode; -+} -+ -+int novfs_listx_file_info(char *Path, char *buffer, ssize_t buffer_size, -+ ssize_t * dataLen, struct novfs_schandle SessionId) -+{ -+ struct novfs_xa_list_reply *reply = NULL; -+ unsigned long replylen = 0; -+ struct novfs_verify_file_request *cmd; -+ int cmdlen; -+ int retCode = -ENOENT; -+ -+ int pathlen = strlen(Path); -+ DbgPrint("xattr: Path = %s, pathlen = %i", Path, pathlen); -+ -+ *dataLen = 0; -+ cmdlen = offsetof(struct novfs_verify_file_request, path) + pathlen; -+ cmd = (struct novfs_verify_file_request *) kmalloc(cmdlen, GFP_KERNEL); -+ if (cmd) { -+ cmd->Command.CommandType = VFS_COMMAND_LIST_EXTENDED_ATTRIBUTES; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ cmd->pathLen = pathlen; -+ memcpy(cmd->path, Path, cmd->pathLen + 1); //+ '\0' -+ DbgPrint("xattr: PVERIFY_FILE_REQUEST BEGIN"); -+ DbgPrint("xattr: Queue_Daemon_Command %d", -+ cmd->Command.CommandType); -+ DbgPrint("xattr: Command.SessionId = %d", -+ cmd->Command.SessionId); -+ DbgPrint("xattr: pathLen = %d", cmd->pathLen); -+ DbgPrint("xattr: Path = %s", cmd->path); -+ 
DbgPrint("xattr: PVERIFY_FILE_REQUEST END"); -+ -+ retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, -+ (void *)&reply, &replylen, -+ INTERRUPTIBLE); -+ -+ if (reply) { -+ -+ if (reply->Reply.ErrorCode) { -+ DbgPrint("xattr: reply->Reply.ErrorCode=%d, %X", -+ reply->Reply.ErrorCode, -+ reply->Reply.ErrorCode); -+ DbgPrint("xattr: replylen=%d", replylen); -+ -+ retCode = -ENOENT; -+ } else { -+ *dataLen = -+ replylen - sizeof(struct novfs_command_reply_header); -+ DbgPrint("xattr: replylen=%u, dataLen=%u", -+ replylen, *dataLen); -+ -+ if (buffer_size >= *dataLen) { -+ DbgPrint("xattr: copying to buffer " -+ "from &reply->pData"); -+ memcpy(buffer, &reply->pData, *dataLen); -+ } else { -+ DbgPrint("xattr: (!!!) buffer is " -+ "smaller then reply\n"); -+ retCode = -ERANGE; -+ } -+ DbgPrint("xattr: /dumping buffer"); -+ novfs_dump(*dataLen, buffer); -+ DbgPrint("xattr: \\after dumping buffer"); -+ -+ retCode = 0; -+ } -+ -+ kfree(reply); -+ } else { -+ DbgPrint("xattr: reply = NULL"); -+ } -+ kfree(cmd); -+ -+ } -+ -+ return retCode; -+} -+ -+static int begin_directory_enumerate(unsigned char * Path, int PathLen, void ** EnumHandle, -+ struct novfs_schandle SessionId) -+{ -+ struct novfs_begin_enumerate_directory_request *cmd; -+ struct novfs_begin_enumerate_directory_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode, cmdlen; -+ -+ *EnumHandle = 0; -+ -+ cmdlen = offsetof(struct -+ novfs_begin_enumerate_directory_request, path) + PathLen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (cmd) { -+ cmd->Command.CommandType = VFS_COMMAND_START_ENUMERATE; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ -+ cmd->pathLen = PathLen; -+ memcpy(cmd->path, Path, PathLen); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+/* -+ * retCode = Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, &replylen, 0); -+ */ -+ if (reply) { -+ if (reply->Reply.ErrorCode) { -+ 
retCode = -EIO; -+ } else { -+ *EnumHandle = reply->enumerateHandle; -+ retCode = 0; -+ } -+ kfree(reply); -+ } -+ kfree(cmd); -+ } else { -+ retCode = -ENOMEM; -+ } -+ return (retCode); -+} -+ -+int novfs_end_directory_enumerate(void *EnumHandle, struct novfs_schandle SessionId) -+{ -+ struct novfs_end_enumerate_directory_request cmd; -+ struct novfs_end_enumerate_directory_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode; -+ -+ cmd.Command.CommandType = VFS_COMMAND_END_ENUMERATE; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ cmd.enumerateHandle = EnumHandle; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, 0); -+ if (reply) { -+ retCode = 0; -+ if (reply->Reply.ErrorCode) { -+ retCode = -EIO; -+ } -+ kfree(reply); -+ } -+ -+ return (retCode); -+} -+ -+static int directory_enumerate_ex(void ** EnumHandle, struct novfs_schandle SessionId, int *Count, -+ struct novfs_entry_info **PInfo, int Interrupt) -+{ -+ struct novfs_enumerate_directory_ex_request cmd; -+ struct novfs_enumerate_directory_ex_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode = 0; -+ struct novfs_entry_info * info; -+ struct novfs_enumerate_directory_ex_data *data; -+ int isize; -+ -+ if (PInfo) -+ *PInfo = NULL; -+ *Count = 0; -+ -+ cmd.Command.CommandType = VFS_COMMAND_ENUMERATE_DIRECTORY_EX; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ cmd.enumerateHandle = *EnumHandle; -+ cmd.pathLen = 0; -+ cmd.path[0] = '\0'; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, Interrupt); -+ -+ if (reply) { -+ retCode = 0; -+ /* -+ * The VFS_COMMAND_ENUMERATE_DIRECTORY call can return an -+ * error but there could still be valid data. 
-+ */ -+ -+ if (!reply->Reply.ErrorCode || -+ ((replylen > sizeof(struct novfs_command_reply_header)) && -+ (reply->enumCount > 0))) { -+ DbgPrint("isize=%d", replylen); -+ data = -+ (struct novfs_enumerate_directory_ex_data *) ((char *)reply + -+ sizeof -+ (struct novfs_enumerate_directory_ex_reply)); -+ isize = -+ replylen - sizeof(struct novfs_enumerate_directory_ex_reply *) - -+ reply->enumCount * -+ offsetof(struct -+ novfs_enumerate_directory_ex_data, name); -+ isize += -+ (reply->enumCount * -+ offsetof(struct novfs_entry_info, name)); -+ -+ if (PInfo) { -+ *PInfo = info = kmalloc(isize, GFP_KERNEL); -+ if (*PInfo) { -+ DbgPrint("data=0x%p info=0x%p", -+ data, info); -+ *Count = reply->enumCount; -+ do { -+ DbgPrint("data=0x%p length=%d", -+ data); -+ -+ info->type = 3; -+ info->mode = S_IRWXU; -+ -+ if (data-> -+ mode & -+ NW_ATTRIBUTE_DIRECTORY) { -+ info->mode |= S_IFDIR; -+ info->mode |= S_IXUSR; -+ } else { -+ info->mode |= S_IFREG; -+ } -+ -+ if (data-> -+ mode & -+ NW_ATTRIBUTE_READ_ONLY) { -+ info->mode &= -+ ~(S_IWUSR); -+ } -+ -+ if (data-> -+ mode & NW_ATTRIBUTE_EXECUTE) -+ { -+ info->mode |= S_IXUSR; -+ } -+ -+ info->uid = current_euid(); -+ info->gid = current_egid(); -+ info->size = data->size; -+ info->atime.tv_sec = -+ data->lastAccessTime; -+ info->atime.tv_nsec = 0; -+ info->mtime.tv_sec = -+ data->modifyTime; -+ info->mtime.tv_nsec = 0; -+ info->ctime.tv_sec = -+ data->createTime; -+ info->ctime.tv_nsec = 0; -+ info->namelength = -+ data->nameLen; -+ memcpy(info->name, data->name, -+ data->nameLen); -+ data = -+ (struct novfs_enumerate_directory_ex_data *) -+ & data->name[data->nameLen]; -+ replylen = -+ (int)((char *)&info-> -+ name[info-> -+ namelength] - -+ (char *)info); -+ DbgPrint("info=0x%p", info); -+ novfs_dump(replylen, info); -+ -+ info = -+ (struct novfs_entry_info *) & info-> -+ name[info->namelength]; -+ -+ } while (--reply->enumCount); -+ } -+ } -+ -+ if (reply->Reply.ErrorCode) { -+ retCode = -1; /* Eof of data */ -+ } -+ 
*EnumHandle = reply->enumerateHandle; -+ } else { -+ retCode = -ENODATA; -+ } -+ kfree(reply); -+ } -+ -+ return (retCode); -+} -+ -+int novfs_get_dir_listex(unsigned char * Path, void ** EnumHandle, int *Count, -+ struct novfs_entry_info **Info, -+ struct novfs_schandle SessionId) -+{ -+ int retCode = -ENOENT; -+ -+ if (Count) -+ *Count = 0; -+ if (Info) -+ *Info = NULL; -+ -+ if ((void *) - 1 == *EnumHandle) { -+ return (-ENODATA); -+ } -+ -+ if (0 == *EnumHandle) { -+ retCode = -+ begin_directory_enumerate(Path, strlen(Path), EnumHandle, -+ SessionId); -+ } -+ -+ if (*EnumHandle) { -+ retCode = -+ directory_enumerate_ex(EnumHandle, SessionId, Count, Info, -+ INTERRUPTIBLE); -+ if (retCode) { -+ novfs_end_directory_enumerate(*EnumHandle, SessionId); -+ retCode = 0; -+ *EnumHandle = Uint32toHandle(-1); -+ } -+ } -+ return (retCode); -+} -+ -+int novfs_open_file(unsigned char * Path, int Flags, struct novfs_entry_info * Info, -+ void ** Handle, -+ struct novfs_schandle SessionId) -+{ -+ struct novfs_open_file_request *cmd; -+ struct novfs_open_file_reply *reply; -+ unsigned long replylen = 0; -+ int retCode, cmdlen, pathlen; -+ -+ pathlen = strlen(Path); -+ -+ if (StripTrailingDots) { -+ if ('.' 
== Path[pathlen - 1]) -+ pathlen--; -+ } -+ -+ *Handle = 0; -+ -+ cmdlen = offsetof(struct novfs_open_file_request, path) + pathlen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (cmd) { -+ cmd->Command.CommandType = VFS_COMMAND_OPEN_FILE; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ -+ cmd->access = 0; -+ -+ if (!(Flags & O_WRONLY) || (Flags & O_RDWR)) { -+ cmd->access |= NWD_ACCESS_READ; -+ } -+ -+ if ((Flags & O_WRONLY) || (Flags & O_RDWR)) { -+ cmd->access |= NWD_ACCESS_WRITE; -+ } -+ -+ switch (Flags & (O_CREAT | O_EXCL | O_TRUNC)) { -+ case O_CREAT: -+ cmd->disp = NWD_DISP_OPEN_ALWAYS; -+ break; -+ -+ case O_CREAT | O_EXCL: -+ cmd->disp = NWD_DISP_CREATE_NEW; -+ break; -+ -+ case O_TRUNC: -+ cmd->disp = NWD_DISP_CREATE_ALWAYS; -+ break; -+ -+ case O_CREAT | O_TRUNC: -+ cmd->disp = NWD_DISP_CREATE_ALWAYS; -+ break; -+ -+ case O_CREAT | O_EXCL | O_TRUNC: -+ cmd->disp = NWD_DISP_CREATE_NEW; -+ break; -+ -+ default: -+ cmd->disp = NWD_DISP_OPEN_EXISTING; -+ break; -+ } -+ -+ cmd->mode = NWD_SHARE_READ | NWD_SHARE_WRITE | NWD_SHARE_DELETE; -+ -+ cmd->pathLen = pathlen; -+ memcpy(cmd->path, Path, pathlen); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ -+ if (reply) { -+ if (reply->Reply.ErrorCode) { -+ if (NWE_OBJECT_EXISTS == reply->Reply.ErrorCode) { -+ retCode = -EEXIST; -+ } else if (NWE_ACCESS_DENIED == -+ reply->Reply.ErrorCode) { -+ retCode = -EACCES; -+ } else if (NWE_FILE_IN_USE == -+ reply->Reply.ErrorCode) { -+ retCode = -EBUSY; -+ } else { -+ retCode = -ENOENT; -+ } -+ } else { -+ *Handle = reply->handle; -+ retCode = 0; -+ } -+ kfree(reply); -+ } -+ kfree(cmd); -+ } else { -+ retCode = -ENOMEM; -+ } -+ return (retCode); -+} -+ -+int novfs_create(unsigned char * Path, int DirectoryFlag, struct novfs_schandle SessionId) -+{ -+ struct novfs_create_file_request *cmd; -+ struct novfs_create_file_reply *reply; -+ unsigned long replylen = 0; -+ int retCode, cmdlen, 
pathlen; -+ -+ pathlen = strlen(Path); -+ -+ if (StripTrailingDots) { -+ if ('.' == Path[pathlen - 1]) -+ pathlen--; -+ } -+ -+ cmdlen = offsetof(struct novfs_create_file_request, path) + pathlen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ cmd->Command.CommandType = VFS_COMMAND_CREATE_FILE; -+ if (DirectoryFlag) { -+ cmd->Command.CommandType = VFS_COMMAND_CREATE_DIRECOTRY; -+ } -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ -+ cmd->pathlength = pathlen; -+ memcpy(cmd->path, Path, pathlen); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ -+ if (reply) { -+ retCode = 0; -+ if (reply->Reply.ErrorCode) { -+ retCode = -EIO; -+ if (reply->Reply.ErrorCode == NWE_ACCESS_DENIED) -+ retCode = -EACCES; -+ -+ } -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+} -+ -+int novfs_close_file(void *Handle, struct novfs_schandle SessionId) -+{ -+ struct novfs_close_file_request cmd; -+ struct novfs_close_file_reply *reply; -+ unsigned long replylen = 0; -+ int retCode; -+ -+ cmd.Command.CommandType = VFS_COMMAND_CLOSE_FILE; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ cmd.handle = Handle; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, 0); -+ if (reply) { -+ retCode = 0; -+ if (reply->Reply.ErrorCode) { -+ retCode = -EIO; -+ } -+ kfree(reply); -+ } -+ return (retCode); -+} -+ -+int novfs_read_file(void *Handle, unsigned char * Buffer, size_t * Bytes, -+ loff_t * Offset, struct novfs_schandle SessionId) -+{ -+ struct novfs_read_file_request cmd; -+ struct novfs_read_file_reply * reply = NULL; -+ unsigned long replylen = 0; -+ int retCode = 0; -+ size_t len; -+ -+ len = *Bytes; -+ *Bytes = 0; -+ -+ if (offsetof(struct novfs_read_file_reply, data) + len -+ > novfs_max_iosize) { -+ len = novfs_max_iosize - offsetof(struct -+ novfs_read_file_reply, data); -+ len = (len / 
PAGE_SIZE) * PAGE_SIZE; -+ } -+ -+ cmd.Command.CommandType = VFS_COMMAND_READ_FILE; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ cmd.handle = Handle; -+ cmd.len = len; -+ cmd.offset = *Offset; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ -+ DbgPrint("Queue_Daemon_Command 0x%x replylen=%d", retCode, replylen); -+ -+ if (!retCode) { -+ if (reply->Reply.ErrorCode) { -+ if (NWE_FILE_IO_LOCKED == reply->Reply.ErrorCode) { -+ retCode = -EBUSY; -+ } else { -+ retCode = -EIO; -+ } -+ } else { -+ replylen -= offsetof(struct -+ novfs_read_file_reply, data); -+ -+ if (replylen > 0) { -+ replylen -= -+ copy_to_user(Buffer, reply->data, replylen); -+ *Bytes = replylen; -+ } -+ } -+ } -+ -+ if (reply) { -+ kfree(reply); -+ } -+ -+ DbgPrint("*Bytes=0x%x retCode=0x%x", *Bytes, retCode); -+ -+ return (retCode); -+} -+ -+int novfs_read_pages(void *Handle, struct novfs_data_list *DList, -+ int DList_Cnt, size_t * Bytes, loff_t * Offset, -+ struct novfs_schandle SessionId) -+{ -+ struct novfs_read_file_request cmd; -+ struct novfs_read_file_reply * reply = NULL; -+ struct novfs_read_file_reply lreply; -+ unsigned long replylen = 0; -+ int retCode = 0; -+ size_t len; -+ -+ len = *Bytes; -+ *Bytes = 0; -+ -+ DbgPrint("Handle=0x%p Dlst=0x%p Dlcnt=%d Bytes=%d Offset=%lld " -+ "SessionId=0x%p:%p", Handle, DList, DList_Cnt, len, *Offset, -+ SessionId.hTypeId, SessionId.hId); -+ -+ cmd.Command.CommandType = VFS_COMMAND_READ_FILE; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ cmd.handle = Handle; -+ cmd.len = len; -+ cmd.offset = *Offset; -+ -+ /* -+ * Dlst first entry is reserved for reply header. 
-+ */ -+ DList[0].page = NULL; -+ DList[0].offset = &lreply; -+ DList[0].len = offsetof(struct novfs_read_file_reply, data); -+ DList[0].rwflag = DLWRITE; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), DList, DList_Cnt, -+ (void *)&reply, &replylen, INTERRUPTIBLE); -+ -+ DbgPrint("Queue_Daemon_Command 0x%x", retCode); -+ -+ if (!retCode) { -+ if (reply) { -+ memcpy(&lreply, reply, sizeof(lreply)); -+ } -+ -+ if (lreply.Reply.ErrorCode) { -+ if (NWE_FILE_IO_LOCKED == lreply.Reply.ErrorCode) { -+ retCode = -EBUSY; -+ } else { -+ retCode = -EIO; -+ } -+ } -+ *Bytes = replylen - offsetof(struct -+ novfs_read_file_reply, data); -+ } -+ -+ if (reply) { -+ kfree(reply); -+ } -+ -+ DbgPrint("retCode=0x%x", retCode); -+ -+ return (retCode); -+} -+ -+int novfs_write_file(void *Handle, unsigned char * Buffer, size_t * Bytes, -+ loff_t * Offset, struct novfs_schandle SessionId) -+{ -+ struct novfs_write_file_request cmd; -+ struct novfs_write_file_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode = 0, cmdlen; -+ size_t len; -+ -+ unsigned long boff; -+ struct page **pages; -+ struct novfs_data_list *dlist; -+ int res = 0, npage, i; -+ struct novfs_write_file_reply lreply; -+ -+ len = *Bytes; -+ cmdlen = offsetof(struct novfs_write_file_request, data); -+ -+ *Bytes = 0; -+ -+ memset(&lreply, 0, sizeof(lreply)); -+ -+ DbgPrint("cmdlen=%ld len=%ld", cmdlen, len); -+ -+ if ((cmdlen + len) > novfs_max_iosize) { -+ len = novfs_max_iosize - cmdlen; -+ len = (len / PAGE_SIZE) * PAGE_SIZE; -+ } -+ cmd.Command.CommandType = VFS_COMMAND_WRITE_FILE; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ cmd.handle = Handle; -+ cmd.len = len; -+ cmd.offset = *Offset; -+ -+ DbgPrint("cmdlen=%ld len=%ld", cmdlen, len); -+ -+ npage = -+ (((unsigned long)Buffer & ~PAGE_MASK) + len + -+ (PAGE_SIZE - 1)) >> PAGE_SHIFT; -+ -+ dlist = kmalloc(sizeof(struct novfs_data_list) * (npage + 1), GFP_KERNEL); -+ if (NULL == dlist) { -+ return (-ENOMEM); -+ 
} -+ -+ pages = kmalloc(sizeof(struct page *) * npage, GFP_KERNEL); -+ -+ if (NULL == pages) { -+ kfree(dlist); -+ return (-ENOMEM); -+ } -+ -+ down_read(¤t->mm->mmap_sem); -+ -+ res = get_user_pages(current, current->mm, (unsigned long)Buffer, npage, 0, /* read type */ -+ 0, /* don't force */ -+ pages, NULL); -+ -+ up_read(¤t->mm->mmap_sem); -+ -+ DbgPrint("res=%d", res); -+ -+ if (res > 0) { -+ boff = (unsigned long)Buffer & ~PAGE_MASK; -+ -+ flush_dcache_page(pages[0]); -+ dlist[0].page = pages[0]; -+ dlist[0].offset = (char *)boff; -+ dlist[0].len = PAGE_SIZE - boff; -+ dlist[0].rwflag = DLREAD; -+ -+ if (dlist[0].len > len) { -+ dlist[0].len = len; -+ } -+ -+ DbgPrint("page=0x%p offset=0x%p len=%d", -+ dlist[0].page, dlist[0].offset, dlist[0].len); -+ -+ boff = dlist[0].len; -+ -+ DbgPrint("len=%d boff=%d", len, boff); -+ -+ for (i = 1; (i < res) && (boff < len); i++) { -+ flush_dcache_page(pages[i]); -+ -+ dlist[i].page = pages[i]; -+ dlist[i].offset = NULL; -+ dlist[i].len = len - boff; -+ if (dlist[i].len > PAGE_SIZE) { -+ dlist[i].len = PAGE_SIZE; -+ } -+ dlist[i].rwflag = DLREAD; -+ -+ boff += dlist[i].len; -+ DbgPrint("%d: page=0x%p offset=0x%p len=%d", i, -+ dlist[i].page, dlist[i].offset, dlist[i].len); -+ } -+ -+ dlist[i].page = NULL; -+ dlist[i].offset = &lreply; -+ dlist[i].len = sizeof(lreply); -+ dlist[i].rwflag = DLWRITE; -+ res++; -+ -+ DbgPrint("Buffer=0x%p boff=0x%x len=%d", Buffer, boff, len); -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, cmdlen, dlist, res, -+ (void *)&reply, &replylen, -+ INTERRUPTIBLE); -+ -+ } else { -+ char *kdata; -+ -+ res = 0; -+ -+ kdata = kmalloc(len, GFP_KERNEL); -+ if (kdata) { -+ len -= copy_from_user(kdata, Buffer, len); -+ dlist[0].page = NULL; -+ dlist[0].offset = kdata; -+ dlist[0].len = len; -+ dlist[0].rwflag = DLREAD; -+ -+ dlist[1].page = NULL; -+ dlist[1].offset = &lreply; -+ dlist[1].len = sizeof(lreply); -+ dlist[1].rwflag = DLWRITE; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, cmdlen, dlist, 2, -+ 
(void *)&reply, &replylen, -+ INTERRUPTIBLE); -+ -+ kfree(kdata); -+ } -+ } -+ -+ DbgPrint("retCode=0x%x reply=0x%p", retCode, reply); -+ -+ if (!retCode) { -+ switch (lreply.Reply.ErrorCode) { -+ case 0: -+ *Bytes = (size_t) lreply.bytesWritten; -+ retCode = 0; -+ break; -+ -+ case NWE_INSUFFICIENT_SPACE: -+ retCode = -ENOSPC; -+ break; -+ -+ case NWE_ACCESS_DENIED: -+ retCode = -EACCES; -+ break; -+ -+ default: -+ retCode = -EIO; -+ break; -+ } -+ } -+ -+ if (res) { -+ for (i = 0; i < res; i++) { -+ if (dlist[i].page) { -+ page_cache_release(dlist[i].page); -+ } -+ } -+ } -+ -+ kfree(pages); -+ kfree(dlist); -+ -+ DbgPrint("*Bytes=0x%x retCode=0x%x", *Bytes, -+ retCode); -+ -+ return (retCode); -+} -+ -+/* -+ * Arguments: HANDLE Handle - novfsd file handle -+ * struct page *Page - Page to be written out -+ * struct novfs_schandle SessionId - novfsd session handle -+ * -+ * Returns: 0 - Success -+ * -ENOSPC - Out of space on server -+ * -EACCES - Access denied -+ * -EIO - Any other error -+ * -+ * Abstract: Write page to file. 
-+ */ -+int novfs_write_page(void *Handle, struct page *Page, struct novfs_schandle SessionId) -+{ -+ struct novfs_write_file_request cmd; -+ struct novfs_write_file_reply lreply; -+ struct novfs_write_file_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode = 0, cmdlen; -+ struct novfs_data_list dlst[2]; -+ -+ DbgPrint("Handle=0x%p Page=0x%p Index=%lu SessionId=0x%llx", -+ Handle, Page, Page->index, SessionId); -+ -+ dlst[0].page = NULL; -+ dlst[0].offset = &lreply; -+ dlst[0].len = sizeof(lreply); -+ dlst[0].rwflag = DLWRITE; -+ -+ dlst[1].page = Page; -+ dlst[1].offset = 0; -+ dlst[1].len = PAGE_CACHE_SIZE; -+ dlst[1].rwflag = DLREAD; -+ -+ cmdlen = offsetof(struct novfs_write_file_request, data); -+ -+ cmd.Command.CommandType = VFS_COMMAND_WRITE_FILE; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ cmd.handle = Handle; -+ cmd.len = PAGE_CACHE_SIZE; -+ cmd.offset = (loff_t) Page->index << PAGE_CACHE_SHIFT;; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, cmdlen, &dlst, 2, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (!retCode) { -+ if (reply) { -+ memcpy(&lreply, reply, sizeof(lreply)); -+ } -+ switch (lreply.Reply.ErrorCode) { -+ case 0: -+ retCode = 0; -+ break; -+ -+ case NWE_INSUFFICIENT_SPACE: -+ retCode = -ENOSPC; -+ break; -+ -+ case NWE_ACCESS_DENIED: -+ retCode = -EACCES; -+ break; -+ -+ default: -+ retCode = -EIO; -+ break; -+ } -+ } -+ -+ if (reply) { -+ kfree(reply); -+ } -+ -+ DbgPrint("retCode=0x%x", retCode); -+ -+ return (retCode); -+} -+ -+int novfs_write_pages(void *Handle, struct novfs_data_list *DList, int DList_Cnt, -+ size_t Bytes, loff_t Offset, struct novfs_schandle SessionId) -+{ -+ struct novfs_write_file_request cmd; -+ struct novfs_write_file_reply lreply; -+ struct novfs_write_file_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode = 0, cmdlen; -+ size_t len; -+ -+ DbgPrint("Handle=0x%p Dlst=0x%p Dlcnt=%d Bytes=%d Offset=%lld " -+ "SessionId=0x%llx\n", Handle, DList, 
DList_Cnt, Bytes, -+ Offset, SessionId); -+ -+ DList[0].page = NULL; -+ DList[0].offset = &lreply; -+ DList[0].len = sizeof(lreply); -+ DList[0].rwflag = DLWRITE; -+ -+ len = Bytes; -+ cmdlen = offsetof(struct novfs_write_file_request, data); -+ -+ if (len) { -+ cmd.Command.CommandType = VFS_COMMAND_WRITE_FILE; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ cmd.handle = Handle; -+ cmd.len = len; -+ cmd.offset = Offset; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, cmdlen, DList, DList_Cnt, -+ (void *)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (!retCode) { -+ if (reply) { -+ memcpy(&lreply, reply, sizeof(lreply)); -+ } -+ switch (lreply.Reply.ErrorCode) { -+ case 0: -+ retCode = 0; -+ break; -+ -+ case NWE_INSUFFICIENT_SPACE: -+ retCode = -ENOSPC; -+ break; -+ -+ case NWE_ACCESS_DENIED: -+ retCode = -EACCES; -+ break; -+ -+ default: -+ retCode = -EIO; -+ break; -+ } -+ } -+ if (reply) { -+ kfree(reply); -+ } -+ } -+ DbgPrint("retCode=0x%x", retCode); -+ -+ return (retCode); -+} -+ -+int novfs_read_stream(void *ConnHandle, unsigned char * Handle, u_char * Buffer, -+ size_t * Bytes, loff_t * Offset, int User, -+ struct novfs_schandle SessionId) -+{ -+ struct novfs_read_stream_request cmd; -+ struct novfs_read_stream_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode = 0; -+ size_t len; -+ -+ len = *Bytes; -+ *Bytes = 0; -+ -+ if (offsetof(struct novfs_read_file_reply, data) + len -+ > novfs_max_iosize) { -+ len = novfs_max_iosize - offsetof(struct -+ novfs_read_file_reply, data); -+ len = (len / PAGE_SIZE) * PAGE_SIZE; -+ } -+ -+ cmd.Command.CommandType = VFS_COMMAND_READ_STREAM; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ cmd.connection = ConnHandle; -+ memcpy(cmd.handle, Handle, sizeof(cmd.handle)); -+ cmd.len = len; -+ cmd.offset = *Offset; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ -+ 
DbgPrint("Queue_Daemon_Command 0x%x replylen=%d", retCode, replylen); -+ -+ if (reply) { -+ retCode = 0; -+ if (reply->Reply.ErrorCode) { -+ retCode = -EIO; -+ } else { -+ replylen -= offsetof(struct -+ novfs_read_stream_reply, data); -+ if (replylen > 0) { -+ if (User) { -+ replylen -= -+ copy_to_user(Buffer, reply->data, -+ replylen); -+ } else { -+ memcpy(Buffer, reply->data, replylen); -+ } -+ -+ *Bytes = replylen; -+ } -+ } -+ kfree(reply); -+ } -+ -+ DbgPrint("*Bytes=0x%x retCode=0x%x", *Bytes, retCode); -+ -+ return (retCode); -+} -+ -+int novfs_write_stream(void *ConnHandle, unsigned char * Handle, u_char * Buffer, -+ size_t * Bytes, loff_t * Offset, struct novfs_schandle SessionId) -+{ -+ struct novfs_write_stream_request * cmd; -+ struct novfs_write_stream_reply * reply = NULL; -+ unsigned long replylen = 0; -+ int retCode = 0, cmdlen; -+ size_t len; -+ -+ len = *Bytes; -+ cmdlen = len + offsetof(struct novfs_write_stream_request, data); -+ *Bytes = 0; -+ -+ if (cmdlen > novfs_max_iosize) { -+ cmdlen = novfs_max_iosize; -+ len = cmdlen - offsetof(struct -+ novfs_write_stream_request, data); -+ } -+ -+ DbgPrint("cmdlen=%d len=%d", cmdlen, len); -+ -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (cmd) { -+ if (Buffer && len) { -+ len -= copy_from_user(cmd->data, Buffer, len); -+ } -+ -+ DbgPrint("len=%d", len); -+ -+ cmd->Command.CommandType = VFS_COMMAND_WRITE_STREAM; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ -+ cmd->connection = ConnHandle; -+ memcpy(cmd->handle, Handle, sizeof(cmd->handle)); -+ cmd->len = len; -+ cmd->offset = *Offset; -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ switch (reply->Reply.ErrorCode) { -+ case 0: -+ retCode = 0; -+ break; -+ -+ case NWE_INSUFFICIENT_SPACE: -+ retCode = -ENOSPC; -+ break; -+ -+ case NWE_ACCESS_DENIED: -+ retCode = -EACCES; -+ break; -+ -+ default: -+ retCode = -EIO; -+ break; -+ } -+ 
DbgPrint("reply->bytesWritten=0x%lx", -+ reply->bytesWritten); -+ *Bytes = reply->bytesWritten; -+ kfree(reply); -+ } -+ kfree(cmd); -+ } -+ DbgPrint("*Bytes=0x%x retCode=0x%x", *Bytes, retCode); -+ -+ return (retCode); -+} -+ -+int novfs_close_stream(void *ConnHandle, unsigned char * Handle, struct novfs_schandle SessionId) -+{ -+ struct novfs_close_stream_request cmd; -+ struct novfs_close_stream_reply *reply; -+ unsigned long replylen = 0; -+ int retCode; -+ -+ cmd.Command.CommandType = VFS_COMMAND_CLOSE_STREAM; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ cmd.connection = ConnHandle; -+ memcpy(cmd.handle, Handle, sizeof(cmd.handle)); -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, 0); -+ if (reply) { -+ retCode = 0; -+ if (reply->Reply.ErrorCode) { -+ retCode = -EIO; -+ } -+ kfree(reply); -+ } -+ return (retCode); -+} -+ -+int novfs_delete(unsigned char * Path, int DirectoryFlag, struct novfs_schandle SessionId) -+{ -+ struct novfs_delete_file_request *cmd; -+ struct novfs_delete_file_reply *reply; -+ unsigned long replylen = 0; -+ int retCode, cmdlen, pathlen; -+ -+ pathlen = strlen(Path); -+ -+ if (StripTrailingDots) { -+ if ('.' 
== Path[pathlen - 1]) -+ pathlen--; -+ } -+ -+ cmdlen = offsetof(struct novfs_delete_file_request, path) + pathlen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (cmd) { -+ cmd->Command.CommandType = VFS_COMMAND_DELETE_FILE; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ -+ cmd->isDirectory = DirectoryFlag; -+ cmd->pathlength = pathlen; -+ memcpy(cmd->path, Path, pathlen); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ retCode = 0; -+ if (reply->Reply.ErrorCode) { -+ if ((reply->Reply.ErrorCode & 0xFFFF) == 0x0006) { /* Access Denied Error */ -+ retCode = -EACCES; -+ } else { -+ retCode = -EIO; -+ } -+ } -+ kfree(reply); -+ } -+ kfree(cmd); -+ } else { -+ retCode = -ENOMEM; -+ } -+ return (retCode); -+} -+ -+int novfs_trunc(unsigned char * Path, int PathLen, -+ struct novfs_schandle SessionId) -+{ -+ struct novfs_truncate_file_request *cmd; -+ struct novfs_truncate_file_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode, cmdlen; -+ -+ if (StripTrailingDots) { -+ if ('.' 
== Path[PathLen - 1]) -+ PathLen--; -+ } -+ cmdlen = offsetof(struct novfs_truncate_file_request, path) -+ + PathLen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (cmd) { -+ cmd->Command.CommandType = VFS_COMMAND_TRUNCATE_FILE; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ -+ cmd->pathLen = PathLen; -+ memcpy(cmd->path, Path, PathLen); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ if (reply->Reply.ErrorCode) { -+ retCode = -EIO; -+ } -+ kfree(reply); -+ } -+ kfree(cmd); -+ } else { -+ retCode = -ENOMEM; -+ } -+ return (retCode); -+} -+ -+int novfs_trunc_ex(void *Handle, loff_t Offset, -+ struct novfs_schandle SessionId) -+{ -+ struct novfs_write_file_request cmd; -+ struct novfs_write_file_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode = 0, cmdlen; -+ -+ DbgPrint("Handle=0x%p Offset=%lld", Handle, Offset); -+ -+ cmdlen = offsetof(struct novfs_write_file_request, data); -+ -+ cmd.Command.CommandType = VFS_COMMAND_WRITE_FILE; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ cmd.handle = Handle; -+ cmd.len = 0; -+ cmd.offset = Offset; -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ -+ DbgPrint("retCode=0x%x reply=0x%p", retCode, reply); -+ -+ if (!retCode) { -+ switch (reply->Reply.ErrorCode) { -+ case 0: -+ retCode = 0; -+ break; -+ -+ case NWE_INSUFFICIENT_SPACE: -+ retCode = -ENOSPC; -+ break; -+ -+ case NWE_ACCESS_DENIED: -+ retCode = -EACCES; -+ break; -+ -+ case NWE_FILE_IO_LOCKED: -+ retCode = -EBUSY; -+ break; -+ -+ default: -+ retCode = -EIO; -+ break; -+ } -+ } -+ -+ if (reply) { -+ kfree(reply); -+ } -+ -+ DbgPrint("retCode=%d", retCode); -+ -+ return (retCode); -+} -+ -+int novfs_rename_file(int DirectoryFlag, unsigned char * OldName, int OldLen, -+ unsigned char * NewName, int NewLen, -+ struct novfs_schandle SessionId) -+{ -+ struct 
novfs_rename_file_request cmd; -+ struct novfs_rename_file_reply *reply; -+ unsigned long replylen = 0; -+ int retCode; -+ -+ __DbgPrint("%s:\n" -+ " DirectoryFlag: %d\n" -+ " OldName: %.*s\n" -+ " NewName: %.*s\n" -+ " SessionId: 0x%llx\n", __func__, -+ DirectoryFlag, OldLen, OldName, NewLen, NewName, SessionId); -+ -+ cmd.Command.CommandType = VFS_COMMAND_RENAME_FILE; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = SessionId; -+ -+ cmd.directoryFlag = DirectoryFlag; -+ -+ if (StripTrailingDots) { -+ if ('.' == OldName[OldLen - 1]) -+ OldLen--; -+ if ('.' == NewName[NewLen - 1]) -+ NewLen--; -+ } -+ -+ cmd.newnameLen = NewLen; -+ memcpy(cmd.newname, NewName, NewLen); -+ -+ cmd.oldnameLen = OldLen; -+ memcpy(cmd.oldname, OldName, OldLen); -+ -+ retCode = -+ Queue_Daemon_Command(&cmd, sizeof(cmd), NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ retCode = 0; -+ if (reply->Reply.ErrorCode) { -+ retCode = -ENOENT; -+ } -+ kfree(reply); -+ } -+ return (retCode); -+} -+ -+int novfs_set_attr(unsigned char * Path, struct iattr *Attr, -+ struct novfs_schandle SessionId) -+{ -+ struct novfs_set_file_info_request *cmd; -+ struct novfs_set_file_info_reply *reply; -+ unsigned long replylen = 0; -+ int retCode, cmdlen, pathlen; -+ -+ pathlen = strlen(Path); -+ -+ if (StripTrailingDots) { -+ if ('.' 
== Path[pathlen - 1]) -+ pathlen--; -+ } -+ -+ cmdlen = offsetof(struct novfs_set_file_info_request,path) + pathlen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (cmd) { -+ cmd->Command.CommandType = VFS_COMMAND_SET_FILE_INFO; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ cmd->fileInfo.ia_valid = Attr->ia_valid; -+ cmd->fileInfo.ia_mode = Attr->ia_mode; -+ cmd->fileInfo.ia_uid = Attr->ia_uid; -+ cmd->fileInfo.ia_gid = Attr->ia_uid; -+ cmd->fileInfo.ia_size = Attr->ia_size; -+ cmd->fileInfo.ia_atime = Attr->ia_atime.tv_sec; -+ cmd->fileInfo.ia_mtime = Attr->ia_mtime.tv_sec;; -+ cmd->fileInfo.ia_ctime = Attr->ia_ctime.tv_sec;; -+/* -+ cmd->fileInfo.ia_attr_flags = Attr->ia_attr_flags; -+*/ -+ cmd->fileInfo.ia_attr_flags = 0; -+ -+ cmd->pathlength = pathlen; -+ memcpy(cmd->path, Path, pathlen); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply, -+ &replylen, INTERRUPTIBLE); -+ if (reply) { -+ switch (reply->Reply.ErrorCode) { -+ case 0: -+ retCode = 0; -+ break; -+ -+ case NWE_PARAM_INVALID: -+ retCode = -EINVAL; -+ break; -+ -+ case NWE_FILE_IO_LOCKED: -+ retCode = -EBUSY; -+ break; -+ -+ default: -+ retCode = -EIO; -+ break; -+ } -+ kfree(reply); -+ } -+ kfree(cmd); -+ } else { -+ retCode = -ENOMEM; -+ } -+ return (retCode); -+} -+ -+int novfs_get_file_cache_flag(unsigned char * Path, -+ struct novfs_schandle SessionId) -+{ -+ struct novfs_get_cache_flag *cmd; -+ struct novfs_get_cache_flag_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int cmdlen; -+ int retCode = 0; -+ int pathlen; -+ -+ DbgPrint("Path = %s", Path); -+ -+ if (Path && *Path) { -+ pathlen = strlen(Path); -+ if (StripTrailingDots) { -+ if ('.' 
== Path[pathlen - 1]) -+ pathlen--; -+ } -+ cmdlen = offsetof(struct novfs_get_cache_flag, path) + -+ pathlen; -+ cmd = (struct novfs_get_cache_flag *) -+ kmalloc(cmdlen, GFP_KERNEL); -+ if (cmd) { -+ cmd->Command.CommandType = VFS_COMMAND_GET_CACHE_FLAG; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ cmd->pathLen = pathlen; -+ memcpy(cmd->path, Path, cmd->pathLen); -+ -+ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, -+ (void *)&reply, &replylen, -+ INTERRUPTIBLE); -+ -+ if (reply) { -+ -+ if (!reply->Reply.ErrorCode) { -+ retCode = reply->CacheFlag; -+ } -+ -+ kfree(reply); -+ } -+ kfree(cmd); -+ } -+ } -+ -+ DbgPrint("return %d", retCode); -+ return (retCode); -+} -+ -+/* -+ * Arguments: -+ * SessionId, file handle, type of lock (read/write or unlock), -+ * start of lock area, length of lock area -+ * -+ * Notes: lock type - fcntl -+ */ -+int novfs_set_file_lock(struct novfs_schandle SessionId, void *Handle, -+ unsigned char fl_type, loff_t fl_start, loff_t fl_len) -+{ -+ struct novfs_set_file_lock_request *cmd; -+ struct novfs_set_file_lock_reply *reply = NULL; -+ unsigned long replylen = 0; -+ int retCode; -+ -+ retCode = -1; -+ -+ DbgPrint("SessionId: 0x%llx\n", SessionId); -+ -+ cmd = -+ (struct novfs_set_file_lock_request *) kmalloc(sizeof(struct novfs_set_file_lock_request), GFP_KERNEL); -+ -+ if (cmd) { -+ DbgPrint("2"); -+ -+ cmd->Command.CommandType = VFS_COMMAND_SET_FILE_LOCK; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = SessionId; -+ -+ cmd->handle = Handle; -+ if (F_RDLCK == fl_type) { -+ fl_type = 1; // LockRegionExclusive -+ } else if (F_WRLCK == fl_type) { -+ fl_type = 0; // LockRegionShared -+ } -+ -+ cmd->fl_type = fl_type; -+ cmd->fl_start = fl_start; -+ cmd->fl_len = fl_len; -+ -+ DbgPrint("3"); -+ -+ DbgPrint("BEGIN dump arguments"); -+ DbgPrint("Queue_Daemon_Command %d", -+ cmd->Command.CommandType); -+ DbgPrint("cmd->handle = 0x%p", cmd->handle); -+ DbgPrint("cmd->fl_type = %u", cmd->fl_type); 
-+ DbgPrint("cmd->fl_start = 0x%X", cmd->fl_start); -+ DbgPrint("cmd->fl_len = 0x%X", cmd->fl_len); -+ DbgPrint("sizeof(SET_FILE_LOCK_REQUEST) = %u", -+ sizeof(struct novfs_set_file_lock_request)); -+ DbgPrint("END dump arguments"); -+ -+ retCode = -+ Queue_Daemon_Command(cmd, sizeof(struct novfs_set_file_lock_request), -+ NULL, 0, (void *)&reply, &replylen, -+ INTERRUPTIBLE); -+ DbgPrint("4"); -+ -+ if (reply) { -+ DbgPrint("5, ErrorCode = %X", reply->Reply.ErrorCode); -+ -+ if (reply->Reply.ErrorCode) { -+ retCode = reply->Reply.ErrorCode; -+ } -+ kfree(reply); -+ } -+ kfree(cmd); -+ } -+ -+ DbgPrint("6"); -+ -+ return (retCode); -+} ---- /dev/null -+++ b/fs/novfs/inode.c -@@ -0,0 +1,4638 @@ -+/* -+ * Novell NCP Redirector for Linux -+ * Author: James Turner -+ * -+ * This file contains functions used to control access to the Linux file -+ * system. -+ * -+ * Copyright (C) 2005 Novell, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/*===[ Include files specific to this module ]============================*/ -+#include "vfs.h" -+ -+ -+struct inode_data { -+ void *Scope; -+ unsigned long Flags; -+ struct list_head IList; -+ struct inode *Inode; -+ unsigned long cntDC; -+ struct list_head DirCache; -+ struct semaphore DirCacheLock; -+ void * FileHandle; -+ int CacheFlag; -+ char Name[1]; /* Needs to be last entry */ -+}; -+ -+#define FILE_UPDATE_TIMEOUT 2 -+ -+/*===[ Function prototypes ]=============================================*/ -+ -+static unsigned long novfs_internal_hash(struct qstr *name); -+static int novfs_d_add(struct dentry *p, struct dentry *d, struct inode *i, int add); -+ -+static int novfs_get_sb(struct file_system_type *Fstype, int Flags, -+ const char *Dev_name, void *Data, struct vfsmount *Mnt); -+ -+static void novfs_kill_sb(struct super_block *SB); -+ -+ -+/* -+ * Declared dentry_operations -+ */ -+int novfs_d_revalidate(struct dentry *, struct nameidata *); -+int novfs_d_hash(struct dentry *, struct qstr *); -+int novfs_d_compare(struct dentry *, struct qstr *, struct qstr *); -+int novfs_d_delete(struct dentry *dentry); -+void novfs_d_release(struct dentry *dentry); -+void novfs_d_iput(struct dentry *dentry, struct inode *inode); -+ -+/* -+ * Declared directory operations -+ */ -+int novfs_dir_open(struct inode *inode, struct file *file); -+int novfs_dir_release(struct inode *inode, struct file *file); -+loff_t novfs_dir_lseek(struct file *file, loff_t offset, int origin); -+ssize_t novfs_dir_read(struct file *file, char *buf, size_t len, loff_t * off); -+void addtodentry(struct dentry *Parent, unsigned char *List, int Level); -+int novfs_filldir(void *data, const char *name, int namelen, loff_t off, -+ ino_t ino, unsigned ftype); -+int 
novfs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir); -+int novfs_dir_fsync(struct file *file, struct dentry *dentry, int datasync); -+ -+/* -+ * Declared address space operations -+ */ -+int novfs_a_writepage(struct page *page, struct writeback_control *wbc); -+int novfs_a_writepages(struct address_space *mapping, -+ struct writeback_control *wbc); -+int novfs_a_write_begin(struct file *file, struct address_space *mapping, -+ loff_t pos, unsigned len, unsigned flags, -+ struct page **pagep, void **fsdata); -+int novfs_a_write_end(struct file *file, struct address_space *mapping, -+ loff_t pos, unsigned len, unsigned copied, -+ struct page *pagep, void *fsdata); -+int novfs_a_readpage(struct file *file, struct page *page); -+int novfs_a_readpages(struct file *file, struct address_space *mapping, -+ struct list_head *page_lst, unsigned nr_pages); -+ssize_t novfs_a_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov, -+ loff_t offset, unsigned long nr_segs); -+ -+/* -+ * Declared file_operations -+ */ -+ssize_t novfs_f_read(struct file *, char *, size_t, loff_t *); -+ssize_t novfs_f_write(struct file *, const char *, size_t, loff_t *); -+int novfs_f_readdir(struct file *, void *, filldir_t); -+int novfs_f_ioctl(struct inode *, struct file *, unsigned int, unsigned long); -+int novfs_f_mmap(struct file *file, struct vm_area_struct *vma); -+int novfs_f_open(struct inode *, struct file *); -+int novfs_f_flush(struct file *, fl_owner_t); -+int novfs_f_release(struct inode *, struct file *); -+int novfs_f_fsync(struct file *, struct dentry *, int datasync); -+int novfs_f_lock(struct file *, int, struct file_lock *); -+ -+/* -+ * Declared inode_operations -+ */ -+int novfs_i_create(struct inode *, struct dentry *, int, struct nameidata *); -+struct dentry *novfs_i_lookup(struct inode *, struct dentry *, -+ struct nameidata *); -+int novfs_i_mkdir(struct inode *, struct dentry *, int); -+int novfs_i_unlink(struct inode *dir, struct dentry 
*dentry); -+int novfs_i_rmdir(struct inode *, struct dentry *); -+int novfs_i_mknod(struct inode *, struct dentry *, int, dev_t); -+int novfs_i_rename(struct inode *, struct dentry *, struct inode *, -+ struct dentry *); -+int novfs_i_setattr(struct dentry *, struct iattr *); -+int novfs_i_getattr(struct vfsmount *mnt, struct dentry *, struct kstat *); -+int novfs_i_revalidate(struct dentry *dentry); -+ -+/* -+ * Extended attributes operations -+ */ -+ -+ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer, -+ size_t size); -+int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, -+ size_t value_size, int flags); -+ssize_t novfs_i_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); -+ -+void update_inode(struct inode *Inode, struct novfs_entry_info *Info); -+ -+/* -+ * Declared super_operations -+ */ -+void novfs_read_inode(struct inode *inode); -+void novfs_write_inode(struct inode *inode); -+int novfs_notify_change(struct dentry *dentry, struct iattr *attr); -+void novfs_clear_inode(struct inode *inode); -+int novfs_show_options(struct seq_file *s, struct vfsmount *m); -+ -+int novfs_statfs(struct dentry *de, struct kstatfs *buf); -+ -+/* -+ * Declared control interface functions -+ */ -+ssize_t -+novfs_control_Read(struct file *file, char *buf, size_t nbytes, loff_t * ppos); -+ -+ssize_t -+novfs_control_write(struct file *file, const char *buf, size_t nbytes, -+ loff_t * ppos); -+ -+int novfs_control_ioctl(struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg); -+ -+int __init init_novfs(void); -+void __exit exit_novfs(void); -+ -+int novfs_lock_inode_cache(struct inode *i); -+void novfs_unlock_inode_cache(struct inode *i); -+int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, -+ ino_t * ino, struct novfs_entry_info *info); -+int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, -+ struct novfs_entry_info *info); -+int 
novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, -+ struct novfs_entry_info *info); -+int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, -+ struct novfs_entry_info *info, u64 * EntryTime); -+int novfs_get_remove_entry(struct inode *i, ino_t * ino, struct novfs_entry_info *info); -+void novfs_invalidate_inode_cache(struct inode *i); -+struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, -+ ino_t ino); -+int novfs_lookup_validate(struct inode *i, struct qstr *name, ino_t ino); -+int novfs_add_inode_entry(struct inode *i, struct qstr *name, ino_t ino, -+ struct novfs_entry_info *info); -+int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, -+ struct novfs_entry_info *info); -+void novfs_remove_inode_entry(struct inode *i, struct qstr *name, ino_t ino); -+void novfs_free_invalid_entries(struct inode *i); -+void novfs_free_inode_cache(struct inode *i); -+ -+/*===[ Global variables ]=================================================*/ -+struct dentry_operations novfs_dentry_operations = { -+ .d_revalidate = novfs_d_revalidate, -+ .d_hash = novfs_d_hash, -+ .d_compare = novfs_d_compare, -+ //.d_delete = novfs_d_delete, -+ .d_release = novfs_d_release, -+ .d_iput = novfs_d_iput, -+}; -+ -+struct file_operations novfs_dir_operations = { -+ .owner = THIS_MODULE, -+ .open = novfs_dir_open, -+ .release = novfs_dir_release, -+ .llseek = novfs_dir_lseek, -+ .read = novfs_dir_read, -+ .readdir = novfs_dir_readdir, -+ .fsync = novfs_dir_fsync, -+}; -+ -+static struct file_operations novfs_file_operations = { -+ .owner = THIS_MODULE, -+ .read = novfs_f_read, -+ .write = novfs_f_write, -+ .readdir = novfs_f_readdir, -+ .ioctl = novfs_f_ioctl, -+ .mmap = novfs_f_mmap, -+ .open = novfs_f_open, -+ .flush = novfs_f_flush, -+ .release = novfs_f_release, -+ .fsync = novfs_f_fsync, -+ .llseek = generic_file_llseek, -+ .lock = novfs_f_lock, -+}; -+ -+static struct address_space_operations 
novfs_nocache_aops = { -+ .readpage = novfs_a_readpage, -+}; -+ -+struct backing_dev_info novfs_backing_dev_info = { -+ .ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE, -+ .state = 0, -+ .capabilities = BDI_CAP_NO_WRITEBACK | BDI_CAP_MAP_COPY, -+ .unplug_io_fn = default_unplug_io_fn, -+}; -+ -+static struct address_space_operations novfs_aops = { -+ .readpage = novfs_a_readpage, -+ .readpages = novfs_a_readpages, -+ .writepage = novfs_a_writepage, -+ .writepages = novfs_a_writepages, -+ .write_begin = novfs_a_write_begin, -+ .write_end = novfs_a_write_end, -+ .set_page_dirty = __set_page_dirty_nobuffers, -+ .direct_IO = novfs_a_direct_IO, -+}; -+ -+static struct inode_operations novfs_inode_operations = { -+ .create = novfs_i_create, -+ .lookup = novfs_i_lookup, -+ .unlink = novfs_i_unlink, -+ .mkdir = novfs_i_mkdir, -+ .rmdir = novfs_i_rmdir, -+ .mknod = novfs_i_mknod, -+ .rename = novfs_i_rename, -+ .setattr = novfs_i_setattr, -+ .getattr = novfs_i_getattr, -+ .getxattr = novfs_i_getxattr, -+ .setxattr = novfs_i_setxattr, -+ .listxattr = novfs_i_listxattr, -+}; -+ -+static struct inode_operations novfs_file_inode_operations = { -+ .setattr = novfs_i_setattr, -+ .getattr = novfs_i_getattr, -+ .getxattr = novfs_i_getxattr, -+ .setxattr = novfs_i_setxattr, -+ .listxattr = novfs_i_listxattr, -+}; -+ -+static struct super_operations novfs_ops = { -+ .statfs = novfs_statfs, -+ .clear_inode = novfs_clear_inode, -+ .drop_inode = generic_delete_inode, -+ .show_options = novfs_show_options, -+ -+}; -+ -+/* Not currently used -+static struct file_operations novfs_Control_operations = { -+ .read = novfs_Control_read, -+ .write = novfs_Control_write, -+ .ioctl = novfs_Control_ioctl, -+}; -+*/ -+ -+static atomic_t novfs_Inode_Number = ATOMIC_INIT(0); -+ -+ -+struct dentry *novfs_root = NULL; -+char *novfs_current_mnt = NULL; -+ -+DECLARE_MUTEX(InodeList_lock); -+ -+LIST_HEAD(InodeList); -+ -+DECLARE_MUTEX(TimeDir_Lock); -+uint64_t lastTime; -+char lastDir[PATH_MAX]; -+ 
-+uint64_t inHAXTime; -+int inHAX; -+ -+unsigned long InodeCount = 0, DCCount = 0; -+unsigned long novfs_update_timeout = FILE_UPDATE_TIMEOUT; -+int novfs_page_cache = 0; -+ -+struct file_private { -+ int listedall; -+ void *enumHandle; -+}; -+ -+static void PRINT_DENTRY(const char *s, struct dentry *d) -+{ -+ __DbgPrint("%s: 0x%p\n", s, d); -+ __DbgPrint(" d_count: 0x%x\n", d->d_count); -+ __DbgPrint(" d_lock: 0x%x\n", d->d_lock); -+ __DbgPrint(" d_inode: 0x%x\n", d->d_inode); -+ __DbgPrint(" d_lru: 0x%p\n" -+ " next: 0x%p\n" -+ " prev: 0x%p\n", &d->d_lru, d->d_lru.next, -+ d->d_lru.prev); -+ __DbgPrint(" d_child: 0x%p\n" " next: 0x%p\n" -+ " prev: 0x%p\n", &d->d_u.d_child, -+ d->d_u.d_child.next, d->d_u.d_child.prev); -+ __DbgPrint(" d_subdirs: 0x%p\n" " next: 0x%p\n" -+ " prev: 0x%p\n", &d->d_subdirs, d->d_subdirs.next, -+ d->d_subdirs.prev); -+ __DbgPrint(" d_alias: 0x%p\n" " next: 0x%p\n" -+ " prev: 0x%p\n", &d->d_alias, d->d_alias.next, -+ d->d_alias.prev); -+ __DbgPrint(" d_time: 0x%x\n", d->d_time); -+ __DbgPrint(" d_op: 0x%p\n", d->d_op); -+ __DbgPrint(" d_sb: 0x%p\n", d->d_sb); -+ __DbgPrint(" d_flags: 0x%x\n", d->d_flags); -+ __DbgPrint(" d_mounted: 0x%x\n", d->d_mounted); -+ __DbgPrint(" d_fsdata: 0x%p\n", d->d_fsdata); -+/* DbgPrint(" d_cookie: 0x%x\n", d->d_cookie); */ -+ __DbgPrint(" d_parent: 0x%p\n", d->d_parent); -+ __DbgPrint(" d_name: 0x%p %.*s\n", &d->d_name, d->d_name.len, -+ d->d_name.name); -+ __DbgPrint(" name: 0x%p\n" " len: %d\n" -+ " hash: 0x%x\n", d->d_name.name, d->d_name.len, -+ d->d_name.hash); -+ __DbgPrint(" d_hash: 0x%x\n" " next: 0x%x\n" -+ " pprev: 0x%x\n", d->d_hash, d->d_hash.next, -+ d->d_hash.pprev); -+} -+ -+/*++======================================================================*/ -+int novfs_remove_from_root(char *RemoveName) -+{ -+ struct qstr name; -+ struct dentry *dentry; -+ struct inode *dir; -+ -+ DbgPrint("%s", RemoveName); -+ name.len = strlen(RemoveName); -+ name.name = RemoveName; -+ novfs_d_hash(novfs_root, 
&name); -+ -+ dentry = d_lookup(novfs_root, &name); -+ if (dentry) { -+ if (dentry->d_inode && dentry->d_inode->i_private) { -+ struct inode_data *n_inode = -+ dentry->d_inode->i_private; -+ n_inode->Scope = NULL; -+ } -+ dput(dentry); -+ } -+ -+ dir = novfs_root->d_inode; -+ -+ novfs_lock_inode_cache(dir); -+ novfs_remove_inode_entry(dir, &name, 0); -+ novfs_unlock_inode_cache(dir); -+ -+ return (0); -+} -+ -+/*++======================================================================*/ -+int novfs_add_to_root(char *AddName) -+{ -+ struct qstr name; -+ struct inode *dir; -+ struct novfs_entry_info info; -+ ino_t ino; -+ -+ DbgPrint("%s", AddName); -+ name.len = strlen(AddName); -+ name.name = AddName; -+ novfs_d_hash(novfs_root, &name); -+ -+ dir = novfs_root->d_inode; -+ -+ novfs_lock_inode_cache(dir); -+ -+ ino = 0; -+ -+ if (!novfs_lookup_inode_cache(dir, &name, 0)) { -+ info.mode = S_IFDIR | 0700; -+ info.size = 0; -+ info.atime = info.ctime = info.mtime = CURRENT_TIME; -+ -+ ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); -+ novfs_add_inode_entry(dir, &name, ino, &info); -+ } -+ -+ novfs_unlock_inode_cache(dir); -+ -+ return (0); -+} -+ -+/*++======================================================================*/ -+int novfs_Add_to_Root2(char *AddName) -+{ -+ struct dentry *entry; -+ struct qstr name; -+ struct inode *inode; -+ void *scope; -+ -+ DbgPrint("%s", AddName); -+ name.len = strlen(AddName); -+ name.name = AddName; -+ -+ novfs_d_hash(novfs_root, &name); -+ -+ entry = d_lookup(novfs_root, &name); -+ DbgPrint("novfs_d_lookup 0x%p", entry); -+ if (NULL == entry) { -+ scope = novfs_scope_lookup(); -+ -+ entry = d_alloc(novfs_root, &name); -+ DbgPrint("d_alloc 0x%p", entry); -+ if (entry) { -+ entry->d_op = &novfs_dentry_operations; -+ entry->d_time = jiffies + (novfs_update_timeout * HZ); -+ /* -+ * done in novfs_d_add now... 
entry->d_fsdata = (void *)novfs_internal_hash( &name ); -+ */ -+ inode = -+ novfs_get_inode(novfs_root->d_sb, S_IFDIR | 0700, 0, novfs_scope_get_uid(scope), 0, &name); -+ DbgPrint("Inode=0x%p", inode); -+ if (inode) { -+ inode->i_atime = -+ inode->i_ctime = -+ inode->i_mtime = CURRENT_TIME; -+ if (!novfs_d_add(novfs_root, entry, inode, 1)) { -+ if (inode->i_private) { -+ struct inode_data *n_inode = inode->i_private; -+ n_inode->Flags = USER_INODE; -+ } -+ PRINT_DENTRY("After novfs_d_add", -+ entry); -+ } else { -+ dput(entry); -+ iput(inode); -+ } -+ } -+ } -+ } else { -+ dput(entry); -+ PRINT_DENTRY("novfs_Add_to_Root: After dput Dentry", entry); -+ } -+ return (0); -+} -+ -+char *novfs_dget_path(struct dentry *Dentry, char *Buf, unsigned int Buflen) -+{ -+ char *retval = &Buf[Buflen]; -+ struct dentry *p = Dentry; -+ -+ *(--retval) = '\0'; -+ Buflen--; -+ -+ if (!IS_ROOT(p) && !IS_ROOT(p->d_parent)) { -+ while (Buflen && !IS_ROOT(p) && !IS_ROOT(p->d_parent)) { -+ if (Buflen > p->d_name.len) { -+ retval -= p->d_name.len; -+ Buflen -= p->d_name.len; -+ memcpy(retval, p->d_name.name, p->d_name.len); -+ *(--retval) = '\\'; -+ Buflen--; -+ p = p->d_parent; -+ } else { -+ retval = NULL; -+ break; -+ } -+ } -+ } else { -+ *(--retval) = '\\'; -+ } -+ -+ if (retval) -+ DbgPrint("%s", retval); -+ return (retval); -+} -+ -+int verify_dentry(struct dentry *dentry, int Flags) -+{ -+ int retVal = -ENOENT; -+ struct inode *dir; -+ struct novfs_entry_info *info = NULL; -+ struct inode_data *id; -+ struct novfs_schandle session; -+ char *path, *list = NULL, *cp; -+ ino_t ino = 0; -+ struct qstr name; -+ int iLock = 0; -+ struct dentry *parent = NULL; -+ u64 ctime; -+ struct inode *inode; -+ -+ if (IS_ROOT(dentry)) { -+ DbgPrint("Root entry"); -+ return (0); -+ } -+ -+ if (dentry && dentry->d_parent && -+ (dir = dentry->d_parent->d_inode) && (id = dir->i_private)) { -+ parent = dget_parent(dentry); -+ -+ info = kmalloc(sizeof(struct novfs_entry_info) + PATH_LENGTH_BUFFER, 
GFP_KERNEL); -+ -+ if (info) { -+ if (novfs_lock_inode_cache(dir)) { -+ name.len = dentry->d_name.len; -+ name.name = dentry->d_name.name; -+ name.hash = novfs_internal_hash(&name); -+ if (!novfs_get_entry_time(dir, &name, &ino, info, &ctime)) { -+ inode = dentry->d_inode; -+ if (inode && inode->i_private && -+ ((inode->i_size != info->size) || -+ (inode->i_mtime.tv_sec != -+ info->mtime.tv_sec) -+ || (inode->i_mtime.tv_nsec != -+ info->mtime.tv_nsec))) { -+ /* -+ * Values don't match so update. -+ */ -+ struct inode_data *n_inode = inode->i_private; -+ n_inode->Flags |= UPDATE_INODE; -+ } -+ -+ ctime = get_jiffies_64() - ctime; -+ if (Flags || ctime < (u64) (novfs_update_timeout * HZ)) { -+ retVal = 0; -+ novfs_unlock_inode_cache(dir); -+ dput(parent); -+ kfree(info); -+ return (0); -+ } -+ } -+ novfs_unlock_inode_cache(dir); -+ } -+ -+ if (IS_ROOT(dentry->d_parent)) { -+ session = novfs_scope_get_sessionId( -+ novfs_get_scope_from_name(&dentry->d_name)); -+ } else -+ session = novfs_scope_get_sessionId(id->Scope); -+ -+ if (!SC_PRESENT(session)) { -+ id->Scope = novfs_get_scope(dentry); -+ session = novfs_scope_get_sessionId(id->Scope); -+ } -+ -+ ino = 0; -+ retVal = 0; -+ -+ if (IS_ROOT(dentry->d_parent)) { -+ DbgPrint("parent is Root directory"); -+ list = novfs_get_scopeusers(); -+ -+ iLock = novfs_lock_inode_cache(dir); -+ novfs_invalidate_inode_cache(dir); -+ -+ if (list) { -+ cp = list; -+ while (*cp) { -+ name.name = cp; -+ name.len = strlen(cp); -+ name.hash = novfs_internal_hash(&name); -+ cp += (name.len + 1); -+ ino = 0; -+ if (novfs_get_entry(dir, &name, &ino, info)) { -+ info->mode = S_IFDIR | 0700; -+ info->size = 0; -+ info->atime = info->ctime = info->mtime = CURRENT_TIME; -+ ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); -+ novfs_add_inode_entry(dir, &name, ino, info); -+ } -+ } -+ } -+ novfs_free_invalid_entries(dir); -+ } else { -+ -+ path = -+ novfs_dget_path(dentry, info->name, -+ PATH_LENGTH_BUFFER); -+ if (path) { -+ if 
(dentry->d_name.len <= -+ NW_MAX_PATH_LENGTH) { -+ name.hash = -+ novfs_internal_hash -+ (&dentry->d_name); -+ name.len = dentry->d_name.len; -+ name.name = dentry->d_name.name; -+ -+ retVal = -+ novfs_get_file_info(path, -+ info, -+ session); -+ if (0 == retVal) { -+ dentry->d_time = -+ jiffies + -+ (novfs_update_timeout -+ * HZ); -+ iLock = -+ novfs_lock_inode_cache -+ (dir); -+ if (novfs_update_entry -+ (dir, &name, 0, -+ info)) { -+ if (dentry-> -+ d_inode) { -+ ino = dentry->d_inode->i_ino; -+ } else { -+ ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); -+ } -+ novfs_add_inode_entry -+ (dir, &name, -+ ino, info); -+ } -+ if (dentry->d_inode) { -+ update_inode -+ (dentry-> -+ d_inode, -+ info); -+ id->Flags &= -+ ~UPDATE_INODE; -+ -+ dentry-> -+ d_inode-> -+ i_flags &= -+ ~S_DEAD; -+ if (dentry-> -+ d_inode-> -+ i_private) { -+ ((struct inode_data *) dentry->d_inode->i_private)->Scope = id->Scope; -+ } -+ } -+ } else if (-EINTR != retVal) { -+ retVal = 0; -+ iLock = novfs_lock_inode_cache(dir); -+ novfs_remove_inode_entry(dir, &name, 0); -+ if (dentry->d_inode -+ && !(dentry->d_inode->i_flags & S_DEAD)) { -+ dentry->d_inode->i_flags |= S_DEAD; -+ dentry->d_inode-> i_size = 0; -+ dentry->d_inode->i_atime.tv_sec = -+ dentry->d_inode->i_atime.tv_nsec = -+ dentry->d_inode->i_ctime.tv_sec = -+ dentry->d_inode->i_ctime.tv_nsec = -+ dentry->d_inode->i_mtime.tv_sec = -+ dentry->d_inode->i_mtime.tv_nsec = 0; -+ dentry->d_inode->i_blocks = 0; -+ d_delete(dentry); /* Remove from cache */ -+ } -+ } -+ } else { -+ retVal = -ENAMETOOLONG; -+ } -+ } -+ } -+ } else { -+ retVal = -ENOMEM; -+ } -+ if (iLock) { -+ novfs_unlock_inode_cache(dir); -+ } -+ dput(parent); -+ } -+ -+ if (list) -+ kfree(list); -+ if (info) -+ kfree(info); -+ -+ DbgPrint("return=0x%x", retVal); -+ -+ return (retVal); -+} -+ -+ -+static int novfs_d_add(struct dentry *Parent, struct dentry *d, struct inode *i, int a) -+{ -+ void *scope; -+ struct inode_data *id = NULL; -+ -+ char *path, *buf; -+ -+ buf 
= kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = novfs_dget_path(d, buf, PATH_LENGTH_BUFFER); -+ if (path) { -+ DbgPrint("inode=0x%p ino=%d path %s", i, -+ i->i_ino, path); -+ } -+ kfree(buf); -+ } -+ -+ if (Parent && Parent->d_inode && Parent->d_inode->i_private) { -+ id = (struct inode_data *) Parent->d_inode->i_private; -+ } -+ -+ if (id && id->Scope) { -+ scope = id->Scope; -+ } else { -+ scope = novfs_get_scope(d); -+ } -+ -+ ((struct inode_data *) i->i_private)->Scope = scope; -+ -+ d->d_time = jiffies + (novfs_update_timeout * HZ); -+ if (a) { -+ d_add(d, i); -+ } else { -+ d_instantiate(d, i); -+ } -+ -+ return (0); -+} -+ -+int novfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) -+{ -+ int retCode = 0; -+ struct inode *dir; -+ struct inode_data *id; -+ struct qstr name; -+ -+ __DbgPrint("%s: 0x%p %.*s\n" -+ " d_count: %d\n" -+ " d_inode: 0x%p\n", __func__, -+ dentry, dentry->d_name.len, dentry->d_name.name, -+ dentry->d_count, dentry->d_inode); -+ -+ if (IS_ROOT(dentry)) { -+ retCode = 1; -+ } else { -+ if (dentry->d_inode && -+ dentry->d_parent && -+ (dir = dentry->d_parent->d_inode) && -+ (id = dir->i_private)) { -+ /* -+ * Check timer to see if in valid time limit -+ */ -+ if (jiffies > dentry->d_time) { -+ /* -+ * Revalidate entry -+ */ -+ name.len = dentry->d_name.len; -+ name.name = dentry->d_name.name; -+ name.hash = -+ novfs_internal_hash(&dentry->d_name); -+ dentry->d_time = 0; -+ -+ if (0 == verify_dentry(dentry, 0)) { -+ if (novfs_lock_inode_cache(dir)) { -+ if (novfs_lookup_inode_cache -+ (dir, &name, 0)) { -+ dentry->d_time = -+ jiffies + -+ (novfs_update_timeout -+ * HZ); -+ retCode = 1; -+ } -+ novfs_unlock_inode_cache(dir); -+ } -+ } -+ } else { -+ retCode = 1; -+ } -+ } -+ } -+ -+ if ((0 == retCode) && dentry->d_inode) { -+ /* -+ * Entry has become invalid -+ */ -+/* dput(dentry); -+*/ -+ } -+ -+ DbgPrint("return 0x%x %.*s", retCode, -+ dentry->d_name.len, dentry->d_name.name); -+ -+ return (retCode); -+} -+ 
-+static unsigned long novfs_internal_hash(struct qstr *name) -+{ -+ unsigned long hash = 0; -+ unsigned int len = name->len; -+ unsigned char *c = (unsigned char *)name->name; -+ -+ while (len--) { -+ /* -+ * Lower case values for the hash. -+ */ -+ hash = partial_name_hash(tolower(*c++), hash); -+ } -+ -+ return (hash); -+} -+ -+int novfs_d_hash(struct dentry *dentry, struct qstr *name) -+{ -+ DbgPrint("%.*s", name->len, name->name); -+ -+ name->hash = novfs_internal_hash(name); -+ -+ return (0); -+} -+ -+int novfs_d_strcmp(struct qstr *s1, struct qstr *s2) -+{ -+ int retCode = 1; -+ unsigned char *str1, *str2; -+ unsigned int len; -+ -+ DbgPrint("s1=%.*s s2=%.*s", s1->len, s1->name, -+ s2->len, s2->name); -+ -+ if (s1->len && (s1->len == s2->len) && (s1->hash == s2->hash)) { -+ len = s1->len; -+ str1 = (unsigned char *)s1->name; -+ str2 = (unsigned char *)s2->name; -+ for (retCode = 0; len--; str1++, str2++) { -+ if (*str1 != *str2) { -+ if (tolower(*str1) != tolower(*str2)) { -+ retCode = 1; -+ break; -+ } -+ } -+ } -+ } -+ -+ DbgPrint("retCode=0x%x", retCode); -+ return (retCode); -+} -+ -+int novfs_d_compare(struct dentry *parent, struct qstr *s1, struct qstr *s2) -+{ -+ int retCode; -+ -+ retCode = novfs_d_strcmp(s1, s2); -+ -+ DbgPrint("retCode=0x%x", retCode); -+ return (retCode); -+} -+ -+int novfs_d_delete(struct dentry *dentry) -+{ -+ int retVal = 0; -+ -+ DbgPrint("0x%p %.*s; d_count: %d; d_inode: 0x%p", -+ dentry, dentry->d_name.len, dentry->d_name.name, -+ dentry->d_count, dentry->d_inode); -+ -+ if (dentry->d_inode && (dentry->d_inode->i_flags & S_DEAD)) { -+ retVal = 1; -+ } -+ -+ dentry->d_time = 0; -+ -+ return (retVal); -+} -+ -+void novfs_d_release(struct dentry *dentry) -+{ -+ DbgPrint("0x%p %.*s", dentry, dentry->d_name.len, -+ dentry->d_name.name); -+} -+ -+void novfs_d_iput(struct dentry *dentry, struct inode *inode) -+{ -+ DbgPrint("Inode=0x%p Ino=%d Dentry=0x%p i_state=%d Name=%.*s", -+ inode, inode->i_ino, dentry, inode->i_state, 
dentry->d_name.len, -+ dentry->d_name.name); -+ -+ iput(inode); -+ -+} -+ -+int novfs_dir_open(struct inode *dir, struct file *file) -+{ -+ char *path, *buf; -+ struct file_private *file_private = NULL; -+ -+ DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = novfs_dget_path(file->f_dentry, buf, PATH_LENGTH_BUFFER); -+ if (path) { -+ DbgPrint("path %s", path); -+ } -+ kfree(buf); -+ } -+ -+ file_private = kmalloc(sizeof(struct file_private), GFP_KERNEL); -+ file_private->listedall = 0; -+ file_private->enumHandle = NULL; -+ -+ file->private_data = file_private; -+ -+ return (0); -+} -+ -+int novfs_dir_release(struct inode *dir, struct file *file) -+{ -+ struct file_private *file_private = file->private_data; -+ struct inode *inode = file->f_dentry->d_inode; -+ struct novfs_schandle sessionId; -+ -+ DbgPrint("Inode 0x%p %d Name %.*s", dir, dir->i_ino, -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ -+ if (file_private) { -+ if (file_private->enumHandle && (file_private->enumHandle != ((void *)-1))) { -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); -+ if (SC_PRESENT(sessionId) == 0) { -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry); -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); -+ } -+ novfs_end_directory_enumerate(file_private->enumHandle, sessionId); -+ } -+ kfree(file_private); -+ file->private_data = NULL; -+ } -+ -+ return (0); -+} -+ -+loff_t novfs_dir_lseek(struct file * file, loff_t offset, int origin) -+{ -+ struct file_private *file_private = NULL; -+ -+ DbgPrint("offset %lld %d Name %.*s", offset, origin, -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ //printk("<1> seekdir file = %.*s offset = %i\n", file->f_dentry->d_name.len, file->f_dentry->d_name.name, 
(int)offset); -+ -+ if (0 != offset) { -+ return -ESPIPE; -+ } -+ -+ file->f_pos = 0; -+ -+ file_private = (struct file_private *) file->private_data; -+ file_private->listedall = 0; -+ if (file_private->enumHandle && (file_private->enumHandle != ((void *)-1))) { -+ struct novfs_schandle sessionId; -+ struct inode *inode = file->f_dentry->d_inode; -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); -+ if (SC_PRESENT(sessionId) == 0) { -+ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry); -+ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope); -+ } -+ novfs_end_directory_enumerate(file_private->enumHandle, sessionId); -+ } -+ file_private->enumHandle = NULL; -+ -+ return 0; -+ //return(default_llseek(file, offset, origin)); -+} -+ -+ssize_t novfs_dir_read(struct file * file, char *buf, size_t len, loff_t * off) -+{ -+/* -+ int rlen = 0; -+ -+ DbgPrint("dentry path %.*s buf=0x%p len=%d off=%lld", file->f_dentry->d_name.len, file->f_dentry->d_name.name, buf, len, *off); -+ -+ if (0 == *off) -+ { -+ rlen = 8; -+ rlen -= copy_to_user(buf, "Testing\n", 8); -+ *off += rlen; -+ } -+ return(rlen); -+*/ -+ DbgPrint("%lld %d Name %.*s", *off, len, -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ return (generic_read_dir(file, buf, len, off)); -+} -+ -+static void novfs_Dump_Info(struct novfs_entry_info *info) -+{ -+ char atime_buf[32], mtime_buf[32], ctime_buf[32]; -+ char namebuf[512]; -+ int len = 0; -+ -+ if (info == NULL) { -+ DbgPrint("Dump_Info info == NULL"); -+ return; -+ } -+ -+ if (info->namelength >= 512) { -+ len = 511; -+ } else { -+ len = info->namelength; -+ } -+ -+ memcpy(namebuf, info->name, len); -+ namebuf[len] = '\0'; -+ -+ ctime_r(&info->atime.tv_sec, atime_buf); -+ ctime_r(&info->mtime.tv_sec, mtime_buf); -+ ctime_r(&info->ctime.tv_sec, ctime_buf); -+ DbgPrint("type = %i", info->type); -+ DbgPrint("mode = %x", info->mode); -+ 
DbgPrint("uid = %d", info->uid); -+ DbgPrint("gid = %d", info->gid); -+ DbgPrint("size = %i", info->size); -+ DbgPrint("atime = %s", atime_buf); -+ DbgPrint("mtime = %s", mtime_buf); -+ DbgPrint("ctime = %s", ctime_buf); -+ DbgPrint("namelength = %i", info->namelength); -+ DbgPrint("name = %s", namebuf); -+} -+ -+void processList(struct file *file, void *dirent, filldir_t filldir, char *list, -+ int type, struct novfs_schandle SessionId) -+{ -+ unsigned char *path, *buf = NULL, *cp; -+ struct qstr name; -+ struct novfs_entry_info *pinfo = NULL; -+ -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ path = buf; -+ if (buf) { -+ path = novfs_dget_path(file->f_dentry, buf, PATH_LENGTH_BUFFER); -+ if (path) { -+ strcpy(buf, path); -+ } -+ path = buf + strlen(buf); -+ *path++ = '\\'; -+ } -+ -+ if (list) { -+ cp = list; -+ while (*cp) { -+ name.name = cp; -+ DbgPrint("name.name = %s", name.name); -+ name.len = strlen(cp); -+ name.hash = novfs_internal_hash(&name); -+ cp += (name.len + 1); -+ -+ pinfo = -+ kmalloc(sizeof(struct novfs_entry_info) + -+ PATH_LENGTH_BUFFER, GFP_KERNEL); -+ pinfo->mode = S_IFDIR | 0700; -+ pinfo->size = 0; -+ pinfo->atime = pinfo->ctime = pinfo->mtime = -+ CURRENT_TIME; -+ strcpy(pinfo->name, name.name); -+ pinfo->namelength = name.len; -+ -+ novfs_Dump_Info(pinfo); -+ -+ filldir(dirent, pinfo->name, pinfo->namelength, -+ file->f_pos, file->f_pos, pinfo->mode >> 12); -+ file->f_pos += 1; -+ -+ kfree(pinfo); -+ } -+ } -+ -+ if (buf) { -+ kfree(buf); -+ } -+} -+ -+int processEntries(struct file *file, void *dirent, filldir_t filldir, -+ void ** enumHandle, struct novfs_schandle sessionId) -+{ -+ unsigned char *path = NULL, *buf = NULL; -+ int count = 0, status = 0; -+ struct novfs_entry_info *pinfo = NULL; -+ struct novfs_entry_info *pInfoMem = NULL; -+ -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (!buf) { -+ return -ENOMEM; -+ } -+ -+ path = novfs_dget_path(file->f_dentry, buf, PATH_LENGTH_BUFFER); -+ if (!path) { -+ kfree(buf); -+ 
return -ENOMEM; -+ } -+ //NWSearchfiles -+ count = 0; -+ status = -+ novfs_get_dir_listex(path, enumHandle, &count, &pinfo, -+ sessionId); -+ pInfoMem = pinfo; -+ -+ if ((count == -1) || (count == 0) || (status != 0)) { -+ kfree(pInfoMem); -+ kfree(buf); -+ return -1; -+ } -+ // parse resultset -+ while (pinfo && count--) { -+ filldir(dirent, pinfo->name, pinfo->namelength, file->f_pos, -+ file->f_pos, pinfo->mode >> 12); -+ file->f_pos += 1; -+ -+ pinfo = (struct novfs_entry_info *) (pinfo->name + pinfo->namelength); -+ } -+ -+ kfree(pInfoMem); -+ kfree(buf); -+ return 0; -+} -+ -+int novfs_dir_readdir(struct file *file, void *dirent, filldir_t filldir) -+{ -+ unsigned char *list = NULL; -+ int status = 0; //-ENOMEM; -+ struct inode *inode = file->f_dentry->d_inode; -+ struct novfs_schandle sessionId; -+ uid_t uid; -+ int type = 0; -+ struct file_private *file_private = NULL; -+ int lComm; -+ -+ file_private = (struct file_private *) file->private_data; -+ DbgPrint("Name %.*s", file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name); -+ -+ //printk("<1> file = %.*s\n", file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ -+// Use this hack by default -+#ifndef SKIP_CROSSOVER_HACK -+ // Hack for crossover - begin -+ down(&TimeDir_Lock); -+ if ((file->f_dentry->d_name.len == 7) && -+ ((0 == strncmp(file->f_dentry->d_name.name, " !xover", 7)) || -+ (0 == strncmp(file->f_dentry->d_name.name, "z!xover", 7)))) { -+ //printk("<1> xoverhack: we are in xoverHack\n"); -+ -+ inHAX = 1; -+ inHAXTime = get_nanosecond_time(); -+ //up( &TimeDir_Lock ); -+ //return 0; -+ file_private->listedall = 1; -+ } else { -+ if (inHAX) { -+ if (get_nanosecond_time() - inHAXTime > -+ 100 * 1000 * 1000) { -+ //printk("<1> xoverhack: it was long, long, long ago...\n"); -+ inHAX = 0; -+ } else { -+ //printk("<1> xoverhack: word gotcha in xoverHack...\n"); -+ inHAXTime = get_nanosecond_time(); -+ //up( &TimeDir_Lock ); -+ //return 0; -+ file_private->listedall = 1; -+ } -+ } -+ } -+ 
-+ up(&TimeDir_Lock); -+ // Hack for crossover - end -+#endif -+ -+ if (file->f_pos == 0) { -+ if (filldir(dirent, ".", 1, file->f_pos, inode->i_ino, DT_DIR) < -+ 0) -+ return 1; -+ file->f_pos++; -+ return 1; -+ } -+ -+ if (file->f_pos == 1) { -+ if (filldir -+ (dirent, "..", 2, file->f_pos, -+ file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) -+ return 1; -+ file->f_pos++; -+ return 1; -+ } -+ -+ if (file_private->listedall != 0) { -+ return 0; -+ } -+ -+ inode = file->f_dentry->d_inode; -+ if (inode && inode->i_private) { -+ sessionId = -+ novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> -+ Scope); -+ if (0 == SC_PRESENT(sessionId)) { -+ ((struct inode_data *) inode->i_private)->Scope = -+ novfs_get_scope(file->f_dentry); -+ sessionId = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ } -+ uid = novfs_scope_get_uid(((struct inode_data *) inode->i_private)->Scope); -+ } else { -+ SC_INITIALIZE(sessionId); -+ uid = current_euid(); -+ } -+ -+ if (IS_ROOT(file->f_dentry) || // Root -+ IS_ROOT(file->f_dentry->d_parent) || // User -+ IS_ROOT(file->f_dentry->d_parent->d_parent)) // Server -+ { -+ if (IS_ROOT(file->f_dentry)) { -+ DbgPrint("Root directory"); -+ list = novfs_get_scopeusers(); -+ type = USER_LIST; -+ } else if (IS_ROOT(file->f_dentry->d_parent)) { -+ DbgPrint("Parent is Root directory"); -+ novfs_get_servers(&list, sessionId); -+ type = SERVER_LIST; -+ } else { -+ DbgPrint("Parent-Parent is Root directory"); -+ novfs_get_vols(&file->f_dentry->d_name, -+ &list, sessionId); -+ type = VOLUME_LIST; -+ } -+ -+ processList(file, dirent, filldir, list, type, sessionId); -+ file_private->listedall = 1; -+ } else { -+ status = -+ processEntries(file, dirent, filldir, -+ &file_private->enumHandle, sessionId); -+ -+ if (status != 0) { -+ file_private->listedall = 1; -+#ifndef SKIP_CROSSOVER_HACK -+ // Hack for crossover part 2 - begin -+ lComm = strlen(current->comm); -+ if ((lComm > 4) -+ && (0 == -+ 
strcmp(current->comm + lComm - 4, ".EXE"))) { -+ if (filldir -+ (dirent, " !xover", 7, file->f_pos, -+ inode->i_ino, DT_DIR) < 0) -+ return 1; -+ if (filldir -+ (dirent, "z!xover", 7, file->f_pos, -+ inode->i_ino, DT_DIR) < 0) -+ return 1; -+ file->f_pos += 2; -+ } -+ // Hack for crossover part2 - end -+#endif -+ } -+ } -+ -+ file->private_data = file_private; -+ return 1; -+} -+ -+int novfs_dir_fsync(struct file *file, struct dentry *dentry, int datasync) -+{ -+ DbgPrint("Name %.*s", file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name); -+ return (simple_sync_file(file, dentry, datasync)); -+} -+ -+ssize_t novfs_f_read(struct file * file, char *buf, size_t len, loff_t * off) -+{ -+ size_t thisread, totalread = 0; -+ loff_t offset = *off; -+ struct inode *inode; -+ struct novfs_schandle session; -+ struct inode_data *id; -+ -+ if (file->f_dentry && -+ (inode = file->f_dentry->d_inode) && -+ (id = (struct inode_data *) inode->i_private)) { -+ -+ DbgPrint("(0x%p 0x%p %d %lld %.*s)", -+ file->private_data, -+ buf, len, offset, -+ file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name); -+ -+ if (novfs_page_cache && !(file->f_flags & O_DIRECT) && id->CacheFlag) { -+ totalread = do_sync_read(file, buf, len, off); -+ } else { -+ session = novfs_scope_get_sessionId(id->Scope); -+ if (0 == SC_PRESENT(session)) { -+ id->Scope = -+ novfs_get_scope(file->f_dentry); -+ session = novfs_scope_get_sessionId(id->Scope); -+ } -+ -+ while (len > 0 && (offset < i_size_read(inode))) { -+ int retval; -+ thisread = len; -+ retval = -+ novfs_read_file(file->private_data, buf, -+ &thisread, &offset, -+ session); -+ if (retval || !thisread) { -+ if (retval) { -+ totalread = retval; -+ } -+ break; -+ } -+ DbgPrint("thisread = 0x%x", thisread); -+ len -= thisread; -+ buf += thisread; -+ offset += thisread; -+ totalread += thisread; -+ } -+ *off = offset; -+ } -+ } -+ DbgPrint("return = %d", totalread); -+ -+ return (totalread); -+} -+ -+ssize_t novfs_f_write(struct file * file, 
const char *buf, size_t len, -+ loff_t * off) -+{ -+ ssize_t thiswrite, totalwrite = 0; -+ loff_t offset = *off; -+ struct novfs_schandle session; -+ struct inode *inode; -+ int status; -+ struct inode_data *id; -+ -+ if (file->f_dentry && -+ (inode = file->f_dentry->d_inode) && -+ (id = file->f_dentry->d_inode->i_private)) { -+ DbgPrint("(0x%p 0x%p 0x%p %d %lld %.*s)", -+ file->private_data, inode, id->FileHandle, len, offset, -+ file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name); -+ -+ if (novfs_page_cache && -+ !(file->f_flags & O_DIRECT) && -+ id->CacheFlag && !(file->f_flags & O_WRONLY)) { -+ totalwrite = do_sync_write(file, buf, len, off); -+ } else { -+ if (file->f_flags & O_APPEND) { -+ offset = i_size_read(inode); -+ DbgPrint("appending to end %lld %.*s", -+ offset, file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name); -+ } -+ -+ session = novfs_scope_get_sessionId(id->Scope); -+ if (0 == SC_PRESENT(session)) { -+ id->Scope = -+ novfs_get_scope(file->f_dentry); -+ session = novfs_scope_get_sessionId(id->Scope); -+ } -+ -+ while (len > 0) { -+ thiswrite = len; -+ if ((status = -+ novfs_write_file(file->private_data, -+ (unsigned char *)buf, -+ &thiswrite, &offset, -+ session)) || !thiswrite) { -+ totalwrite = status; -+ break; -+ } -+ DbgPrint("thiswrite = 0x%x", -+ thiswrite); -+ len -= thiswrite; -+ buf += thiswrite; -+ offset += thiswrite; -+ totalwrite += thiswrite; -+ if (offset > i_size_read(inode)) { -+ i_size_write(inode, offset); -+ inode->i_blocks = -+ (offset + inode->i_sb->s_blocksize - -+ 1) >> inode->i_blkbits; -+ } -+ inode->i_mtime = inode->i_atime = CURRENT_TIME; -+ id->Flags |= UPDATE_INODE; -+ -+ } -+ *off = offset; -+ } -+ } -+ DbgPrint("return = 0x%x", totalwrite); -+ -+ return (totalwrite); -+} -+ -+int novfs_f_readdir(struct file *file, void *data, filldir_t fill) -+{ -+ return -EISDIR; -+} -+ -+int novfs_f_ioctl(struct inode *inode, struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ DbgPrint("file=0x%p 
cmd=0x%x arg=0x%p", file, cmd, arg); -+ -+ return -ENOSYS; -+} -+ -+int novfs_f_mmap(struct file *file, struct vm_area_struct *vma) -+{ -+ int retCode = -EINVAL; -+ -+ DbgPrint("file=0x%p %.*s", file, file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name); -+ -+ retCode = generic_file_mmap(file, vma); -+ -+ DbgPrint("retCode=0x%x", retCode); -+ return (retCode); -+} -+ -+int novfs_f_open(struct inode *inode, struct file *file) -+{ -+ struct novfs_entry_info *info = NULL; -+ int retCode = -ENOENT; -+ struct novfs_schandle session; -+ char *path; -+ struct dentry *parent; -+ ino_t ino; -+ struct inode_data *id; -+ int errInfo; -+ -+ DbgPrint("inode=0x%p file=0x%p dentry=0x%p dentry->d_inode=0x%p %.*s", -+ inode, file, file->f_dentry, file->f_dentry->d_inode, -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name); -+ if (file->f_dentry) { -+ DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o", -+ file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name, -+ file->f_flags, file->f_mode, inode->i_mode); -+ } -+ -+ if (inode && inode->i_private) { -+ id = (struct inode_data *) file->f_dentry->d_inode->i_private; -+ session = novfs_scope_get_sessionId(id->Scope); -+ if (0 == SC_PRESENT(session)) { -+ id->Scope = novfs_get_scope(file->f_dentry); -+ session = novfs_scope_get_sessionId(id->Scope); -+ } -+ -+ info = kmalloc(sizeof(struct novfs_entry_info) + -+ PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (info) { -+ path = -+ novfs_dget_path(file->f_dentry, info->name, -+ PATH_LENGTH_BUFFER); -+ if (path) { -+ if (file->f_flags & O_TRUNC) { -+ errInfo = -+ novfs_get_file_info(path, info, -+ session); -+ -+ if (errInfo || info->size == 0) { -+ // clear O_TRUNC flag, bug #275366 -+ file->f_flags = -+ file->f_flags & (~O_TRUNC); -+ } -+ } -+ -+ DbgPrint("%s", path); -+ retCode = novfs_open_file(path, -+ file-> -+ f_flags & ~O_EXCL, -+ info, -+ &file->private_data, -+ session); -+ -+ DbgPrint("0x%x 0x%p", retCode, -+ file->private_data); -+ if (!retCode) { -+ /* -+ 
*update_inode(inode, &info); -+ */ -+ //id->FileHandle = file->private_data; -+ id->CacheFlag = -+ novfs_get_file_cache_flag(path, -+ session); -+ -+ if (!novfs_get_file_info -+ (path, info, session)) { -+ update_inode(inode, info); -+ } -+ -+ parent = dget_parent(file->f_dentry); -+ -+ if (parent && parent->d_inode) { -+ struct inode *dir = -+ parent->d_inode; -+ novfs_lock_inode_cache(dir); -+ ino = 0; -+ if (novfs_get_entry -+ (dir, -+ &file->f_dentry->d_name, -+ &ino, info)) { -+ ((struct inode_data *) inode-> -+ i_private)->Flags |= -+ UPDATE_INODE; -+ } -+ -+ novfs_unlock_inode_cache(dir); -+ } -+ dput(parent); -+ } -+ } -+ kfree(info); -+ } -+ } -+ DbgPrint("retCode=0x%x", retCode); -+ return (retCode); -+} -+ -+int novfs_flush_mapping(void *Handle, struct address_space *mapping, -+ struct novfs_schandle Session) -+{ -+ struct pagevec pagevec; -+ unsigned nrpages; -+ pgoff_t index = 0; -+ int done, rc = 0; -+ -+ pagevec_init(&pagevec, 0); -+ -+ do { -+ done = 1; -+ nrpages = pagevec_lookup_tag(&pagevec, -+ mapping, -+ &index, -+ PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE); -+ -+ if (nrpages) { -+ struct page *page; -+ int i; -+ -+ DbgPrint("%u", nrpages); -+ -+ done = 0; -+ for (i = 0; !rc && (i < nrpages); i++) { -+ page = pagevec.pages[i]; -+ -+ DbgPrint("page 0x%p %lu", page, page->index); -+ -+ lock_page(page); -+ page_cache_get(page); -+ if (page->mapping == mapping) { -+ if (clear_page_dirty_for_io(page)) { -+ rc = novfs_write_page(Handle, -+ page, -+ Session); -+ if (!rc) { -+ //ClearPageDirty(page); -+ radix_tree_tag_clear -+ (&mapping-> -+ page_tree, -+ page_index(page), -+ PAGECACHE_TAG_DIRTY); -+ } -+ } -+ } -+ -+ page_cache_release(page); -+ unlock_page(page); -+ } -+ pagevec_release(&pagevec); -+ } -+ } while (!rc && !done); -+ -+ DbgPrint("return %d", rc); -+ -+ return (rc); -+} -+ -+int novfs_f_flush(struct file *file, fl_owner_t ownid) -+{ -+ -+ int rc = 0; -+#ifdef FLUSH -+ struct inode *inode; -+ struct novfs_schandle session; -+ struct inode_data 
*id; -+ -+ DbgPrint("Called from 0x%p", __builtin_return_address(0)); -+ if (file->f_dentry && (inode = file->f_dentry->d_inode) -+ && (id = file->f_dentry->d_inode->i_private)) { -+ -+ if ((file->f_flags & O_ACCMODE) != O_RDONLY) { -+ inode = file->f_dentry->d_inode; -+ DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o", -+ file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name, file->f_flags, -+ file->f_mode, inode->i_mode); -+ -+ session = novfs_scope_get_sessionId(id->Scope); -+ if (0 == SC_PRESENT(session)) { -+ id->Scope = -+ novfs_get_scope(file->f_dentry); -+ session = novfs_scope_get_sessionId(id->Scope); -+ } -+ -+ if (inode && -+ inode->i_mapping && inode->i_mapping->nrpages) { -+ -+ DbgPrint("%.*s pages=%lu", -+ file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name, -+ inode->i_mapping->nrpages); -+ -+ if (file->f_dentry && -+ file->f_dentry->d_inode && -+ file->f_dentry->d_inode->i_mapping && -+ file->f_dentry->d_inode->i_mapping->a_ops && -+ file->f_dentry->d_inode->i_mapping->a_ops-> -+ writepage) { -+ rc = filemap_fdatawrite(file->f_dentry-> -+ d_inode-> -+ i_mapping); -+ } else { -+ rc = novfs_flush_mapping(file-> -+ private_data, -+ file-> -+ f_dentry-> -+ d_inode-> -+ i_mapping, -+ session); -+ } -+ } -+ } -+ } -+#endif -+ return (rc); -+} -+ -+int novfs_f_release(struct inode *inode, struct file *file) -+{ -+ int retCode = -EACCES; -+ struct novfs_schandle session; -+ struct inode_data *id; -+ -+ DbgPrint("path=%.*s handle=%p", -+ file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name, file->private_data); -+ -+ if (inode && (id = inode->i_private)) { -+ session = novfs_scope_get_sessionId(id->Scope); -+ if (0 == SC_PRESENT(session)) { -+ id->Scope = novfs_get_scope(file->f_dentry); -+ session = novfs_scope_get_sessionId(id->Scope); -+ } -+ -+ if ((file->f_flags & O_ACCMODE) != O_RDONLY) { -+ DbgPrint("%.*s f_flags=0%o f_mode=0%o i_mode=0%o", -+ file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name, file->f_flags, -+ 
file->f_mode, inode->i_mode); -+ -+ if (inode->i_mapping && inode->i_mapping->nrpages) { -+ -+ DbgPrint("%.*s pages=%lu", -+ file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name, -+ inode->i_mapping->nrpages); -+ -+ if (inode->i_mapping->a_ops && -+ inode->i_mapping->a_ops->writepage) { -+ filemap_fdatawrite(file->f_dentry-> -+ d_inode->i_mapping); -+ } else { -+ novfs_flush_mapping(file->private_data, -+ file->f_dentry-> -+ d_inode->i_mapping, -+ session); -+ } -+ } -+ } -+ -+ if (file->f_dentry && file->f_dentry->d_inode) { -+ invalidate_remote_inode(file->f_dentry->d_inode); -+ } -+ -+ retCode = novfs_close_file(file->private_data, session); -+ //id->FileHandle = 0; -+ } -+ return (retCode); -+} -+ -+int novfs_f_fsync(struct file *file, struct dentry *dentry, int datasync) -+{ -+ return 0; -+} -+ -+int novfs_f_llseek(struct file *file, loff_t offset, int origin) -+{ -+ DbgPrint("File=0x%p Name=%.*s offset=%lld origin=%d", -+ file, file->f_dentry->d_name.len, file->f_dentry->d_name.name, -+ offset, origin); -+ return (generic_file_llseek(file, offset, origin)); -+} -+ -+/*++======================================================================*/ -+int novfs_f_lock(struct file *file, int cmd, struct file_lock *lock) -+/* -+ * Arguments: -+ * "file" - pointer to file structure - contains file handle in "file->private_data" -+ * -+ * "cmd" could be F_SETLK, F_SETLKW, F_GETLK -+ * F_SETLK/F_SETLKW are for setting/unsetting file lock -+ * F_GETLK is for getting infomation about region - is it locked, or not -+ * -+ * "lock" structure - contains "start" and "end" of locking region -+ * -+ * Returns: -+ * 0 on success -+ * -ENOSYS on F_GETLK cmd. It's not implemented. -+ * -EINVAL if (lock->fl_start > lock->fl_end) -+ * -EAGAIN on all other errors -+ * Abstract: -+ * -+ * Notes: -+ * "lock->fl_start" and "lock->fl_end" are of type "long long", -+ * but xtier functions in novfsd "NCFsdLockFile" and "NCFsdUnlockFile" -+ * receive arguments in u64 type. 
-+ * -+ * -+ *========================================================================*/ -+{ -+ int err_code; -+ -+ struct inode *inode; -+ struct novfs_schandle session; -+ struct inode_data *id; -+ loff_t len; -+ -+ DbgPrint("(0x%p): begin in novfs_f_lock 0x%p", -+ __builtin_return_address(0), file->private_data); -+ DbgPrint("cmd = %d, F_GETLK = %d, F_SETLK = %d, F_SETLKW = %d", -+ cmd, F_GETLK, F_SETLK, F_SETLKW); -+ DbgPrint("lock->fl_start = 0x%llX, lock->fl_end = 0x%llX", -+ lock->fl_start, lock->fl_end); -+ -+ err_code = -1; -+ if (lock->fl_start <= lock->fl_end) { -+ /* Get len from "start" and "end" */ -+ len = lock->fl_end - lock->fl_start + 1; -+ if ((0 == lock->fl_start) && (OFFSET_MAX == lock->fl_end)) { -+ len = 0; -+ } -+ -+ if (file->f_dentry && -+ (inode = file->f_dentry->d_inode) && -+ (id = (struct inode_data *) inode->i_private)) { -+ DbgPrint("(0x%p 0x%p %.*s)", -+ file->private_data, inode, -+ file->f_dentry->d_name.len, -+ file->f_dentry->d_name.name); -+ -+ session = novfs_scope_get_sessionId(id->Scope); -+ if (0 == SC_PRESENT(session)) { -+ id->Scope = -+ novfs_get_scope(file->f_dentry); -+ session = novfs_scope_get_sessionId(id->Scope); -+ } -+ -+ /* fl_type = F_RDLCK, F_WRLCK, F_UNLCK */ -+ switch (cmd) { -+ case F_SETLK: -+#ifdef F_GETLK64 -+ case F_SETLK64: -+#endif -+ -+ err_code = -+ novfs_set_file_lock(session, -+ file->private_data, -+ lock->fl_type, -+ lock->fl_start, len); -+ break; -+ -+ case F_SETLKW: -+#ifdef F_GETLK64 -+ case F_SETLKW64: -+#endif -+ err_code = -+ novfs_set_file_lock(session, -+ file->private_data, -+ lock->fl_type, -+ lock->fl_start, len); -+ break; -+ -+ case F_GETLK: -+#ifdef F_GETLK64 -+ case F_GETLK64: -+#endif -+ err_code = -ENOSYS; -+ /* -+ * Not implemented. We doesn't have appropriate xtier function. 
-+ * */ -+ break; -+ -+ default: -+ printk -+ ("<1> novfs in novfs_f_lock, not implemented cmd = %d\n", -+ cmd); -+ DbgPrint("novfs in novfs_f_lock, not implemented cmd = %d", -+ cmd); -+ break; -+ } -+ } -+ -+ DbgPrint("lock->fl_type = %u, err_code 0x%X", -+ lock->fl_type, err_code); -+ -+ if ((err_code != 0) && (err_code != -1) -+ && (err_code != -ENOSYS)) { -+ err_code = -EAGAIN; -+ } -+ } else { -+ err_code = -EINVAL; -+ } -+ -+ return (err_code); -+} -+ -+/*++======================================================================*/ -+static void novfs_copy_cache_pages(struct address_space *mapping, -+ struct list_head *pages, int bytes_read, -+ char *data, struct pagevec *plru_pvec) -+{ -+ struct page *page; -+ char *target; -+ -+ while (bytes_read > 0) { -+ if (list_empty(pages)) -+ break; -+ -+ page = list_entry(pages->prev, struct page, lru); -+ list_del(&page->lru); -+ -+ if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) { -+ page_cache_release(page); -+ data += PAGE_CACHE_SIZE; -+ bytes_read -= PAGE_CACHE_SIZE; -+ continue; -+ } -+ -+ target = kmap_atomic(page, KM_USER0); -+ -+ if (PAGE_CACHE_SIZE > bytes_read) { -+ memcpy(target, data, bytes_read); -+ /* zero the tail end of this partial page */ -+ memset(target + bytes_read, 0, -+ PAGE_CACHE_SIZE - bytes_read); -+ bytes_read = 0; -+ } else { -+ memcpy(target, data, PAGE_CACHE_SIZE); -+ bytes_read -= PAGE_CACHE_SIZE; -+ } -+ kunmap_atomic(target, KM_USER0); -+ -+ flush_dcache_page(page); -+ SetPageUptodate(page); -+ unlock_page(page); -+ if (!pagevec_add(plru_pvec, page)) -+ __pagevec_lru_add_file(plru_pvec); -+ data += PAGE_CACHE_SIZE; -+ } -+ return; -+} -+ -+int novfs_a_writepage(struct page *page, struct writeback_control *wbc) -+{ -+ int retCode = -EFAULT; -+ struct inode *inode = page->mapping->host; -+ struct inode_data *id = inode->i_private; -+ loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT); -+ struct novfs_schandle session; -+ struct novfs_data_list dlst[2]; -+ size_t len = 
PAGE_CACHE_SIZE; -+ -+ session = novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)->Scope); -+ -+ page_cache_get(page); -+ -+ pos = ((loff_t) page->index << PAGE_CACHE_SHIFT); -+ -+ /* -+ * Leave first dlst entry for reply header. -+ */ -+ dlst[1].page = page; -+ dlst[1].offset = NULL; -+ dlst[1].len = len; -+ dlst[1].rwflag = DLREAD; -+ -+ /* -+ * Check size so we don't write pass end of file. -+ */ -+ if ((pos + (loff_t) len) > i_size_read(inode)) { -+ len = (size_t) (i_size_read(inode) - pos); -+ } -+ -+ retCode = novfs_write_pages(id->FileHandle, dlst, 2, len, pos, session); -+ if (!retCode) { -+ SetPageUptodate(page); -+ } -+ -+ unlock_page(page); -+ page_cache_release(page); -+ -+ return (retCode); -+} -+ -+int novfs_a_writepages(struct address_space *mapping, -+ struct writeback_control *wbc) -+{ -+ int retCode = 0; -+ struct inode *inode = mapping->host; -+ struct novfs_schandle session; -+ void *fh = NULL; -+ struct inode_data *id = NULL; -+ -+ int max_page_lookup = novfs_max_iosize / PAGE_CACHE_SIZE; -+ -+ struct novfs_data_list *dlist, *dlptr; -+ struct page **pages; -+ -+ int dlist_idx, i = 0; -+ pgoff_t index, next_index = 0; -+ loff_t pos = 0; -+ size_t tsize; -+ -+ SC_INITIALIZE(session); -+ DbgPrint("inode=0x%p mapping=0x%p wbc=0x%p nr_to_write=%d", -+ inode, mapping, wbc, wbc->nr_to_write); -+ -+ if (inode) { -+ DbgPrint("Inode=0x%p Ino=%d Id=0x%p", inode, inode->i_ino, -+ inode->i_private); -+ -+ if (NULL != (id = inode->i_private)) { -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ fh = ((struct inode_data *) inode->i_private)->FileHandle; -+ } -+ } -+ -+ dlist = kmalloc(sizeof(struct novfs_data_list) * max_page_lookup, GFP_KERNEL); -+ pages = -+ kmalloc(sizeof(struct page *) * max_page_lookup, GFP_KERNEL); -+ -+ if (id) -+ DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p %s", -+ inode, fh, dlist, pages, id->Name); -+ else -+ DbgPrint("inode=0x%p fh=0x%p dlist=0x%p pages=0x%p", 
-+ inode, fh, dlist, pages); -+ -+ if (dlist && pages) { -+ struct backing_dev_info *bdi = mapping->backing_dev_info; -+ int done = 0; -+ int nr_pages = 0; -+ int scanned = 0; -+ -+ if (wbc->nonblocking && bdi_write_congested(bdi)) { -+ wbc->encountered_congestion = 1; -+ return 0; -+ } -+ -+ if (wbc->sync_mode == WB_SYNC_NONE) { -+ index = mapping->writeback_index; /* Start from prev offset */ -+ } else { -+ index = 0; /* whole-file sweep */ -+ scanned = 1; -+ } -+ -+ next_index = index; -+ -+ while (!done && (wbc->nr_to_write > 0)) { -+ dlist_idx = 0; -+ dlptr = &dlist[1]; -+ -+ DbgPrint("nr_pages=%d", nr_pages); -+ if (!nr_pages) { -+ memset(pages, 0, -+ sizeof(struct page *) * max_page_lookup); -+ -+ spin_lock_irq(&mapping->tree_lock); -+ -+ /* -+ * Need to ask for one less then max_page_lookup or we -+ * will overflow the request buffer. This also frees -+ * the first entry for the reply buffer. -+ */ -+ nr_pages = -+ radix_tree_gang_lookup_tag(&mapping-> -+ page_tree, -+ (void **)pages, -+ index, -+ max_page_lookup - -+ 1, -+ PAGECACHE_TAG_DIRTY); -+ -+ DbgPrint("2; nr_pages=%d\n", nr_pages); -+ /* -+ * Check to see if there are dirty pages and there is a valid -+ * file handle. 
-+ */ -+ if (nr_pages && !fh) { -+ set_bit(AS_EIO, &mapping->flags); -+ done = 1; -+ DbgPrint("set_bit AS_EIO"); -+ break; -+ } -+ -+ for (i = 0; i < nr_pages; i++) { -+ page_cache_get(pages[i]); -+ } -+ -+ spin_unlock_irq(&mapping->tree_lock); -+ -+ if (nr_pages) { -+ index = pages[nr_pages - 1]->index + 1; -+ pos = -+ (loff_t) pages[0]-> -+ index << PAGE_CACHE_SHIFT; -+ } -+ -+ if (!nr_pages) { -+ if (scanned) { -+ index = 0; -+ scanned = 0; -+ continue; -+ } -+ done = 1; -+ } else { -+ next_index = pages[0]->index; -+ i = 0; -+ } -+ } else { -+ if (pages[i]) { -+ pos = -+ (loff_t) pages[i]-> -+ index << PAGE_CACHE_SHIFT; -+ } -+ } -+ -+ for (; i < nr_pages; i++) { -+ struct page *page = pages[i]; -+ -+ /* -+ * At this point we hold neither mapping->tree_lock nor -+ * lock on the page itself: the page may be truncated or -+ * invalidated (changing page->mapping to NULL), or even -+ * swizzled back from swapper_space to tmpfs file -+ * mapping -+ */ -+ -+ DbgPrint -+ ("novfs_a_writepages: pos=0x%llx index=%d page->index=%d next_index=%d\n", -+ pos, index, page->index, next_index); -+ -+ if (page->index != next_index) { -+ next_index = page->index; -+ break; -+ } -+ next_index = page->index + 1; -+ -+ lock_page(page); -+ -+ if (wbc->sync_mode != WB_SYNC_NONE) -+ wait_on_page_writeback(page); -+ -+ if (page->mapping != mapping -+ || PageWriteback(page) -+ || !clear_page_dirty_for_io(page)) { -+ unlock_page(page); -+ continue; -+ } -+ -+ dlptr[dlist_idx].page = page; -+ dlptr[dlist_idx].offset = NULL; -+ dlptr[dlist_idx].len = PAGE_CACHE_SIZE; -+ dlptr[dlist_idx].rwflag = DLREAD; -+ dlist_idx++; -+ DbgPrint("Add page=0x%p index=0x%lx", -+ page, page->index); -+ } -+ -+ DbgPrint("dlist_idx=%d", dlist_idx); -+ if (dlist_idx) { -+ tsize = dlist_idx * PAGE_CACHE_SIZE; -+ /* -+ * Check size so we don't write pass end of file. 
-+ */ -+ if ((pos + tsize) > i_size_read(inode)) { -+ tsize = -+ (size_t) (i_size_read(inode) - pos); -+ } -+ -+ retCode = -+ novfs_write_pages(fh, dlist, dlist_idx + 1, -+ tsize, pos, session); -+ switch (retCode) { -+ case 0: -+ wbc->nr_to_write -= dlist_idx; -+ break; -+ -+ case -ENOSPC: -+ set_bit(AS_ENOSPC, &mapping->flags); -+ done = 1; -+ break; -+ -+ default: -+ set_bit(AS_EIO, &mapping->flags); -+ done = 1; -+ break; -+ } -+ -+ do { -+ unlock_page((struct page *) -+ dlptr[dlist_idx - 1].page); -+ page_cache_release((struct page *) -+ dlptr[dlist_idx - -+ 1].page); -+ DbgPrint("release page=0x%p index=0x%lx", -+ dlptr[dlist_idx - 1].page, -+ ((struct page *) -+ dlptr[dlist_idx - -+ 1].page)->index); -+ if (!retCode) { -+ wbc->nr_to_write--; -+ } -+ } while (--dlist_idx); -+ } -+ -+ if (i >= nr_pages) { -+ nr_pages = 0; -+ } -+ } -+ -+ mapping->writeback_index = index; -+ -+ } else { -+ DbgPrint("set_bit AS_EIO"); -+ set_bit(AS_EIO, &mapping->flags); -+ } -+ if (dlist) -+ kfree(dlist); -+ if (pages) -+ kfree(pages); -+ -+ DbgPrint("retCode=%d", retCode); -+ return (0); -+ -+} -+ -+int novfs_a_readpage(struct file *file, struct page *page) -+{ -+ int retCode = 0; -+ void *pbuf; -+ struct inode *inode = NULL; -+ struct dentry *dentry = NULL; -+ loff_t offset; -+ size_t len; -+ struct novfs_schandle session; -+ -+ SC_INITIALIZE(session); -+ DbgPrint("File=0x%p Name=%.*s Page=0x%p", file, -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name, page); -+ -+ dentry = file->f_dentry; -+ -+ if (dentry) { -+ DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, -+ dentry->d_name.name); -+ if (dentry->d_inode) { -+ inode = dentry->d_inode; -+ } -+ } -+ -+ if (inode) { -+ DbgPrint("Inode=0x%p Ino=%d", inode, inode->i_ino); -+ -+ if (inode->i_private) { -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ if (0 == SC_PRESENT(session)) { -+ ((struct inode_data *) inode->i_private)->Scope = -+ 
novfs_get_scope(file->f_dentry); -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ } -+ } -+ } -+ -+ if (!PageUptodate(page)) { -+ struct novfs_data_list dlst[2]; -+ -+ offset = page->index << PAGE_CACHE_SHIFT; -+ len = PAGE_CACHE_SIZE; -+ -+ /* -+ * Save the first entry for the reply header. -+ */ -+ dlst[1].page = page; -+ dlst[1].offset = NULL; -+ dlst[1].len = PAGE_CACHE_SIZE; -+ dlst[1].rwflag = DLWRITE; -+ -+ DbgPrint("calling= novfs_Read_Pages %lld", -+ offset); -+ retCode = -+ novfs_read_pages(file->private_data, dlst, 2, &len, &offset, -+ session); -+ if (len && (len < PAGE_CACHE_SIZE)) { -+ pbuf = kmap_atomic(page, KM_USER0); -+ memset(&((char *)pbuf)[len], 0, PAGE_CACHE_SIZE - len); -+ kunmap_atomic(pbuf, KM_USER0); -+ } -+ -+ flush_dcache_page(page); -+ SetPageUptodate(page); -+ } -+ unlock_page(page); -+ -+ DbgPrint("retCode=%d", retCode); -+ return (retCode); -+ -+} -+ -+int novfs_a_readpages(struct file *file, struct address_space *mapping, -+ struct list_head *page_lst, unsigned nr_pages) -+{ -+ int retCode = 0; -+ struct inode *inode = NULL; -+ struct dentry *dentry = NULL; -+ struct novfs_schandle session; -+ loff_t offset; -+ size_t len; -+ -+ unsigned page_idx; -+ struct pagevec lru_pvec; -+ pgoff_t next_index; -+ -+ char *rbuf, done = 0; -+ SC_INITIALIZE(session); -+ -+ DbgPrint("File=0x%p Name=%.*s Pages=%d", file, -+ file->f_dentry->d_name.len, file->f_dentry->d_name.name, -+ nr_pages); -+ -+ dentry = file->f_dentry; -+ -+ if (dentry) { -+ DbgPrint("Dentry=0x%p Name=%.*s", dentry, dentry->d_name.len, -+ dentry->d_name.name); -+ if (dentry->d_inode) { -+ inode = dentry->d_inode; -+ } -+ } -+ -+ if (inode) { -+ DbgPrint("Inode=0x%p Ino=%d", inode, inode->i_ino); -+ -+ if (inode->i_private) { -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ if (0 == SC_PRESENT(session)) { -+ ((struct inode_data *) inode->i_private)->Scope = -+ 
novfs_get_scope(file->f_dentry); -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ } -+ } -+ } -+ -+ rbuf = kmalloc(novfs_max_iosize, GFP_KERNEL); -+ if (rbuf) { -+ pagevec_init(&lru_pvec, 0); -+ for (page_idx = 0; page_idx < nr_pages && !done;) { -+ struct page *page, *tpage; -+ -+ if (list_empty(page_lst)) -+ break; -+ -+ page = list_entry(page_lst->prev, struct page, lru); -+ -+ next_index = page->index; -+ offset = (loff_t) page->index << PAGE_CACHE_SHIFT; -+ len = 0; -+ -+ /* -+ * Count number of contiguous pages. -+ */ -+ list_for_each_entry_reverse(tpage, page_lst, lru) { -+ if ((next_index != tpage->index) || -+ (len >= novfs_max_iosize - PAGE_SIZE)) { -+ break; -+ } -+ len += PAGE_SIZE; -+ next_index++; -+ } -+ -+ if (len && !done) { -+ struct novfs_data_list dllst[2]; -+ -+ dllst[1].page = NULL; -+ dllst[1].offset = rbuf; -+ dllst[1].len = len; -+ dllst[1].rwflag = DLWRITE; -+ -+ DbgPrint("calling novfs_Read_Pages %lld", -+ offset); -+ if (!novfs_read_pages -+ (file->private_data, dllst, 2, &len, -+ &offset, session)) { -+ novfs_copy_cache_pages(mapping, -+ page_lst, len, -+ rbuf, &lru_pvec); -+ page_idx += len >> PAGE_CACHE_SHIFT; -+ if ((int)(len & PAGE_CACHE_MASK) != len) { -+ page_idx++; -+ } -+ if (len == 0) { -+ done = 1; -+ } -+ } else { -+ done = 1; -+ } -+ } -+ } -+ -+ /* -+ * Free any remaining pages. 
-+ */ -+ while (!list_empty(page_lst)) { -+ struct page *page = -+ list_entry(page_lst->prev, struct page, lru); -+ -+ list_del(&page->lru); -+ page_cache_release(page); -+ } -+ -+ pagevec_lru_add_file(&lru_pvec); -+ kfree(rbuf); -+ } else { -+ retCode = -ENOMEM; -+ } -+ -+ DbgPrint("retCode=%d", retCode); -+ return (retCode); -+ -+} -+ -+int novfs_a_write_begin(struct file *file, struct address_space *mapping, -+ loff_t pos, unsigned len, unsigned flags, -+ struct page **pagep, void **fsdata) -+{ -+ int retVal = 0; -+ loff_t offset = pos; -+ struct novfs_schandle session; -+ struct novfs_data_list dllst[2]; -+ struct inode *inode = file->f_dentry->d_inode; -+ struct page *page; -+ pgoff_t index; -+ unsigned from, to; -+ SC_INITIALIZE(session); -+ -+ index = pos >> PAGE_CACHE_SHIFT; -+ from = pos & (PAGE_CACHE_SIZE - 1); -+ to = from + len; -+ -+ page = grab_cache_page_write_begin(mapping, index, flags); -+ if (!page) -+ return -ENOMEM; -+ -+ *pagep = page; -+ -+ DbgPrint("File=0x%p Page=0x%p offset=0x%llx From=%u To=%u " -+ "filesize=%lld\n", file, page, offset, from, to, -+ i_size_read(file->f_dentry->d_inode)); -+ if (!PageUptodate(page)) { -+ /* -+ * Check to see if whole page -+ */ -+ if ((to == PAGE_CACHE_SIZE) && (from == 0)) { -+ SetPageUptodate(page); -+ } -+ -+ /* -+ * Check to see if we can read page. -+ */ -+ else if ((file->f_flags & O_ACCMODE) != O_WRONLY) { -+ /* -+ * Get session. 
-+ */ -+ if (file->f_dentry && file->f_dentry->d_inode) { -+ if (file->f_dentry->d_inode->i_private) { -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) -+ inode-> -+ i_private)-> -+ Scope); -+ if (0 == SC_PRESENT(session)) { -+ ((struct inode_data *) inode-> -+ i_private)->Scope = -+ novfs_get_scope(file->f_dentry); -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)->Scope); -+ } -+ } -+ } -+ -+ page_cache_get(page); -+ -+ len = i_size_read(inode) - offset; -+ if (len > PAGE_CACHE_SIZE) { -+ len = PAGE_CACHE_SIZE; -+ } -+ -+ if (len) { -+ /* -+ * Read page from server. -+ */ -+ -+ dllst[1].page = page; -+ dllst[1].offset = 0; -+ dllst[1].len = len; -+ dllst[1].rwflag = DLWRITE; -+ -+ DbgPrint("calling novfs_Read_Pages %lld", -+ offset); -+ novfs_read_pages(file->private_data, dllst, 2, -+ &len, &offset, session); -+ -+ /* -+ * Zero unnsed page. -+ */ -+ } -+ -+ if (len < PAGE_CACHE_SIZE) { -+ char *adr = kmap_atomic(page, KM_USER0); -+ memset(adr + len, 0, PAGE_CACHE_SIZE - len); -+ kunmap_atomic(adr, KM_USER0); -+ } -+ } else { -+ /* -+ * Zero section of memory that not going to be used. 
-+ */ -+ char *adr = kmap_atomic(page, KM_USER0); -+ memset(adr, 0, from); -+ memset(adr + to, 0, PAGE_CACHE_SIZE - to); -+ kunmap_atomic(adr, KM_USER0); -+ -+ DbgPrint("memset 0x%p", adr); -+ } -+ flush_dcache_page(page); -+ SetPageUptodate(page); -+ } -+// DbgPrint("return %d", retVal); -+ return (retVal); -+} -+ -+int novfs_a_write_end(struct file *file, struct address_space *mapping, -+ loff_t pos, unsigned len, unsigned copied, -+ struct page *page, void *fsdata) -+{ -+ int retCode = 0; -+ struct inode *inode = page->mapping->host; -+ loff_t offset = pos; -+ struct novfs_schandle session; -+ struct inode_data *id; -+ struct novfs_data_list dlst[1]; -+ pgoff_t index; -+ unsigned from, to; -+ SC_INITIALIZE(session); -+ -+ index = pos >> PAGE_CACHE_SHIFT; -+ from = pos & (PAGE_CACHE_SIZE - 1); -+ to = from + len; -+ -+ -+ DbgPrint("File=0x%p Page=0x%p offset=0x%x To=%u filesize=%lld", -+ file, page, offset, to, i_size_read(file->f_dentry->d_inode)); -+ if (file->f_dentry->d_inode -+ && (id = file->f_dentry->d_inode->i_private)) { -+ session = novfs_scope_get_sessionId(id->Scope); -+ if (0 == SC_PRESENT(session)) { -+ id->Scope = novfs_get_scope(file->f_dentry); -+ session = novfs_scope_get_sessionId(id->Scope); -+ } -+ -+ /* -+ * Setup file handle -+ */ -+ id->FileHandle = file->private_data; -+ -+ if (pos > inode->i_size) { -+ i_size_write(inode, pos); -+ } -+ -+ if (!PageUptodate(page)) { -+ pos = -+ ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset; -+ -+ if (to < offset) { -+ return (retCode); -+ } -+ dlst[0].page = page; -+ dlst[0].offset = (void *)(unsigned long) offset; -+ dlst[0].len = len; -+ dlst[0].rwflag = DLREAD; -+ -+ retCode = -+ novfs_write_pages(id->FileHandle, dlst, 1, len, pos, -+ session); -+ -+ } else { -+ set_page_dirty(page); -+ } -+ } -+ -+ return (retCode); -+} -+ -+/*++======================================================================*/ -+ssize_t novfs_a_direct_IO(int rw, struct kiocb * kiocb, -+ const struct iovec * iov, -+ loff_t 
offset, unsigned long nr_segs) -+/* -+ * -+ * Notes: This is a dummy function so that we can allow a file -+ * to get the direct IO flag set. novfs_f_read and -+ * novfs_f_write will do the work. Maybe not the best -+ * way to do but it was the easiest to implement. -+ * -+ *========================================================================*/ -+{ -+ return (-EIO); -+} -+ -+/*++======================================================================*/ -+int novfs_i_create(struct inode *dir, struct dentry *dentry, int mode, -+ struct nameidata *nd) -+{ -+ char *path, *buf; -+ struct novfs_entry_info info; -+ void *handle; -+ struct novfs_schandle session; -+ int retCode = -EACCES; -+ -+ DbgPrint("mode=0%o flags=0%o %.*s", mode, -+ nd->NDOPENFLAGS, dentry->d_name.len, dentry->d_name.name); -+ -+ if (IS_ROOT(dentry) || /* Root */ -+ IS_ROOT(dentry->d_parent) || /* User */ -+ IS_ROOT(dentry->d_parent->d_parent) || /* Server */ -+ IS_ROOT(dentry->d_parent->d_parent->d_parent)) { /* Volume */ -+ return (-EACCES); -+ } -+ -+ if (mode | S_IFREG) { -+ if (dir->i_private) { -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) dir->i_private)-> -+ Scope); -+ if (0 == SC_PRESENT(session)) { -+ ((struct inode_data *) dir->i_private)->Scope = -+ novfs_get_scope(dentry); -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) dir-> -+ i_private)->Scope); -+ } -+ -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = -+ novfs_dget_path(dentry, buf, -+ PATH_LENGTH_BUFFER); -+ if (path) { -+ retCode = -+ novfs_open_file(path, -+ nd-> -+ NDOPENFLAGS | -+ O_RDWR, &info, -+ &handle, session); -+ if (!retCode && handle) { -+ novfs_close_file(handle, -+ session); -+ if (!novfs_i_mknod -+ (dir, dentry, -+ mode | S_IFREG, 0)) { -+ if (dentry->d_inode) { -+ ((struct inode_data *) -+ dentry-> -+ d_inode-> -+ i_private)-> -+ Flags |= UPDATE_INODE; -+ } -+ } -+ } -+ } -+ kfree(buf); -+ } -+ } -+ } -+ return (retCode); -+} -+ -+void update_inode(struct 
inode *Inode, struct novfs_entry_info *Info) -+{ -+ static char dbuf[128]; -+ -+ DbgPrint("Inode=0x%p I_ino=%d", Inode, Inode->i_ino); -+ -+ DbgPrint("atime=%s", ctime_r(&Info->atime.tv_sec, dbuf)); -+ DbgPrint("ctime=%s", ctime_r(&Info->ctime.tv_sec, dbuf)); -+ DbgPrint("mtime=%s %d", ctime_r(&Info->mtime.tv_sec, dbuf), -+ Info->mtime.tv_nsec); -+ DbgPrint("size=%lld", Info->size); -+ DbgPrint("mode=0%o", Info->mode); -+ -+ if (Inode && -+ ((Inode->i_size != Info->size) || -+ (Inode->i_mtime.tv_sec != Info->mtime.tv_sec) || -+ (Inode->i_mtime.tv_nsec != Info->mtime.tv_nsec))) { -+ DbgPrint ("calling invalidate_remote_inode sz %d %d", -+ Inode->i_size, Info->size); -+ DbgPrint ("calling invalidate_remote_inode sec %d %d", -+ Inode->i_mtime.tv_sec, Info->mtime.tv_sec); -+ DbgPrint ("calling invalidate_remote_inode ns %d %d", -+ Inode->i_mtime.tv_nsec, Info->mtime.tv_nsec); -+ -+ if (Inode && Inode->i_mapping) { -+ invalidate_remote_inode(Inode); -+ } -+ } -+ -+ Inode->i_mode = Info->mode; -+ Inode->i_size = Info->size; -+ Inode->i_atime = Info->atime; -+ Inode->i_ctime = Info->ctime; -+ Inode->i_mtime = Info->mtime; -+ -+ if (Inode->i_size && Inode->i_sb->s_blocksize) { -+ Inode->i_blocks = -+ (unsigned long) (Info->size >> (loff_t) Inode->i_blkbits); -+ Inode->i_bytes = Info->size & (Inode->i_sb->s_blocksize - 1); -+ -+ DbgPrint("i_sb->s_blocksize=%d", Inode->i_sb->s_blocksize); -+ DbgPrint("i_blkbits=%d", Inode->i_blkbits); -+ DbgPrint("i_blocks=%d", Inode->i_blocks); -+ DbgPrint("i_bytes=%d", Inode->i_bytes); -+ } -+} -+ -+struct dentry *novfs_i_lookup(struct inode *dir, struct dentry *dentry, -+ struct nameidata *nd) -+{ -+ struct dentry *retVal = ERR_PTR(-ENOENT); -+ struct dentry *parent; -+ struct novfs_entry_info *info = NULL; -+ struct inode_data *id; -+ struct inode *inode = NULL; -+ uid_t uid = current_euid(); -+ ino_t ino = 0; -+ struct qstr name; -+ char *buf; -+ -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ char *path; -+ path = 
novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); -+ if (path) { -+ DbgPrint("dir 0x%p %d hash %d inode 0x%0p %s", -+ dir, dir->i_ino, dentry->d_name.hash, -+ dentry->d_inode, path); -+ } -+ kfree(buf); -+ } else { -+ DbgPrint("dir 0x%p %d name %.*s hash %d inode 0x%0p", -+ dir, dir->i_ino, dentry->d_name.len, dentry->d_name.name, -+ dentry->d_name.hash, dentry->d_inode); -+ } -+ -+ if ((dentry->d_name.len == 7) -+ && (0 == strncmp(dentry->d_name.name, " !xover", 7))) { -+ dentry->d_op = &novfs_dentry_operations; -+ igrab(dir); -+ d_add(dentry, dir); -+ return NULL; -+ } -+ if ((dentry->d_name.len == 7) -+ && (0 == strncmp(dentry->d_name.name, "z!xover", 7))) { -+ dentry->d_op = &novfs_dentry_operations; -+ igrab(dir); -+ d_add(dentry, dir); -+ return NULL; -+ } -+ -+ if (dir && (id = dir->i_private)) { -+ retVal = 0; -+ if (IS_ROOT(dentry)) { -+ DbgPrint("Root entry=0x%p", novfs_root); -+ inode = novfs_root->d_inode; -+ return (0); -+ } else { -+ info = -+ kmalloc(sizeof(struct novfs_entry_info) + -+ PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (info) { -+ if (NULL == -+ (retVal = -+ ERR_PTR(verify_dentry(dentry, 1)))) { -+ name.name = dentry->d_name.name; -+ name.len = dentry->d_name.len; -+ name.hash = novfs_internal_hash(&name); -+ -+ if (novfs_lock_inode_cache(dir)) { -+ if (!novfs_get_entry -+ (dir, &name, &ino, info)) { -+ inode = -+ ilookup(dentry-> -+ d_sb, ino); -+ if (inode) { -+ update_inode -+ (inode, -+ info); -+ } -+ } -+ novfs_unlock_inode_cache(dir); -+ } -+ -+ if (!inode && ino) { -+ uid = novfs_scope_get_uid(id->Scope); -+ if (novfs_lock_inode_cache(dir)) { -+ inode = novfs_get_inode (dentry->d_sb, info->mode, 0, uid, ino, &name); -+ if (inode) { -+ if (!novfs_get_entry(dir, &dentry->d_name, &ino, info)) { -+ update_inode -+ (inode, -+ info); -+ } -+ } -+ novfs_unlock_inode_cache -+ (dir); -+ } -+ } -+ } -+ } -+ } -+ } -+ -+ if (!retVal) { -+ dentry->d_op = &novfs_dentry_operations; -+ if (inode) { -+ parent = dget_parent(dentry); -+ 
novfs_d_add(dentry->d_parent, dentry, inode, 1); -+ dput(parent); -+ } else { -+ d_add(dentry, inode); -+ } -+ } -+ -+ if (info) -+ kfree(info); -+ -+ DbgPrint("inode=0x%p dentry->d_inode=0x%p return=0x%p", -+ dir, dentry->d_inode, retVal); -+ -+ return (retVal); -+} -+ -+int novfs_i_unlink(struct inode *dir, struct dentry *dentry) -+{ -+ int retCode = -ENOENT; -+ struct inode *inode; -+ struct novfs_schandle session; -+ char *path, *buf; -+ uint64_t t64; -+ -+ DbgPrint("dir=0x%p dir->i_ino=%d %.*s", dir, -+ dir->i_ino, dentry->d_name.len, dentry->d_name.name); -+ DbgPrint("IS_ROOT(dentry)=%d", IS_ROOT(dentry)); -+ DbgPrint("IS_ROOT(dentry->d_parent)=%d", -+ IS_ROOT(dentry->d_parent)); -+ DbgPrint("IS_ROOT(dentry->d_parent->d_parent)=%d", -+ IS_ROOT(dentry->d_parent->d_parent)); -+ DbgPrint("IS_ROOT(dentry->d_parent->d_parent->d_parent)=%d", -+ IS_ROOT(dentry->d_parent->d_parent->d_parent)); -+ -+ if (IS_ROOT(dentry) || /* Root */ -+ IS_ROOT(dentry->d_parent) || /* User */ -+ (!IS_ROOT(dentry->d_parent->d_parent) && /* Server */ -+ IS_ROOT(dentry->d_parent->d_parent->d_parent))) { /* Volume */ -+ return (-EACCES); -+ } -+ -+ inode = dentry->d_inode; -+ if (inode) { -+ DbgPrint("dir=0x%p dir->i_ino=%d inode=0x%p ino=%d", -+ dir, dir->i_ino, inode, inode->i_ino); -+ if (inode->i_private) { -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ if (0 == SC_PRESENT(session)) { -+ ((struct inode_data *) inode->i_private)->Scope = -+ novfs_get_scope(dentry); -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ } -+ -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = -+ novfs_dget_path(dentry, buf, -+ PATH_LENGTH_BUFFER); -+ if (path) { -+ DbgPrint("path %s mode 0%o", -+ path, inode->i_mode); -+ if (IS_ROOT(dentry->d_parent->d_parent)) { -+ retCode = novfs_daemon_logout(&dentry->d_name, &session); -+ } else { -+ retCode = -+ novfs_delete(path, -+ 
S_ISDIR(inode->i_mode), session); -+ if (retCode) { -+ struct iattr ia; -+ memset(&ia, 0, sizeof(ia)); -+ ia.ia_valid = ATTR_MODE; -+ ia.ia_mode = S_IRWXU; -+ novfs_set_attr(path, &ia, session); -+ retCode = novfs_delete(path, S_ISDIR(inode->i_mode), session); -+ } -+ } -+ if (!retCode || IS_DEADDIR(inode)) { -+ novfs_remove_inode_entry(dir, -+ &dentry-> -+ d_name, -+ 0); -+ dentry->d_time = 0; -+ t64 = 0; -+ novfs_scope_set_userspace(&t64, &t64, -+ &t64, &t64); -+ retCode = 0; -+ } -+ } -+ kfree(buf); -+ } -+ } -+ } -+ -+ DbgPrint("retCode 0x%x", retCode); -+ return (retCode); -+} -+ -+int novfs_i_mkdir(struct inode *dir, struct dentry *dentry, int mode) -+{ -+ char *path, *buf; -+ struct novfs_schandle session; -+ int retCode = 0; -+ struct inode *inode; -+ struct novfs_entry_info info; -+ uid_t uid; -+ -+ DbgPrint("dir=0x%p ino=%d dentry=0x%p %.*s mode=0%lo", -+ dir, dir->i_ino, dentry, dentry->d_name.len, -+ dentry->d_name.name, mode); -+ -+ if (IS_ROOT(dentry) || /* Root */ -+ IS_ROOT(dentry->d_parent) || /* User */ -+ IS_ROOT(dentry->d_parent->d_parent) || /* Server */ -+ IS_ROOT(dentry->d_parent->d_parent->d_parent)) { /* Volume */ -+ return (-EACCES); -+ } -+ -+ mode |= S_IFDIR; -+ mode &= (S_IFMT | S_IRWXU); -+ if (dir->i_private) { -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) dir->i_private)->Scope); -+ if (0 == SC_PRESENT(session)) { -+ ((struct inode_data *) dir->i_private)->Scope = -+ novfs_get_scope(dentry); -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) dir->i_private)-> -+ Scope); -+ } -+ -+ uid = novfs_scope_get_uid(((struct inode_data *) dir->i_private)->Scope); -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); -+ if (path) { -+ DbgPrint("path %s", path); -+ retCode = -+ novfs_create(path, S_ISDIR(mode), session); -+ if (!retCode) { -+ retCode = -+ novfs_get_file_info(path, &info, -+ session); -+ if (!retCode) { -+ retCode = -+ 
novfs_i_mknod(dir, dentry, -+ mode, 0); -+ inode = dentry->d_inode; -+ if (inode) { -+ update_inode(inode, -+ &info); -+ ((struct inode_data *) inode-> -+ i_private)->Flags &= -+ ~UPDATE_INODE; -+ -+ dentry->d_time = -+ jiffies + -+ (novfs_update_timeout -+ * HZ); -+ -+ novfs_lock_inode_cache -+ (dir); -+ if (novfs_update_entry -+ (dir, -+ &dentry->d_name, 0, -+ &info)) { -+ novfs_add_inode_entry -+ (dir, -+ &dentry-> -+ d_name, -+ inode-> -+ i_ino, -+ &info); -+ } -+ novfs_unlock_inode_cache -+ (dir); -+ } -+ -+ } -+ } -+ } -+ kfree(buf); -+ } -+ } -+ -+ return (retCode); -+} -+ -+int novfs_i_rmdir(struct inode *inode, struct dentry *dentry) -+{ -+ return (novfs_i_unlink(inode, dentry)); -+} -+ -+int novfs_i_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) -+{ -+ struct inode *inode = NULL; -+ int retCode = -EACCES; -+ uid_t uid; -+ struct dentry *parent; -+ -+ if (IS_ROOT(dentry) || /* Root */ -+ IS_ROOT(dentry->d_parent) || /* User */ -+ IS_ROOT(dentry->d_parent->d_parent) || /* Server */ -+ IS_ROOT(dentry->d_parent->d_parent->d_parent)) { /* Volume */ -+ return (-EACCES); -+ } -+ -+ if (((struct inode_data *) dir->i_private)) { -+ uid = novfs_scope_get_uid(((struct inode_data *) dir->i_private)->Scope); -+ if (mode & (S_IFREG | S_IFDIR)) { -+ inode = -+ novfs_get_inode(dir->i_sb, mode, dev, uid, 0, &dentry->d_name); -+ } -+ } -+ if (inode) { -+ struct novfs_entry_info info; -+ -+ dentry->d_op = &novfs_dentry_operations; -+ parent = dget_parent(dentry); -+ novfs_d_add(parent, dentry, inode, 0); -+ memset(&info, 0, sizeof(info)); -+ info.mode = inode->i_mode; -+ novfs_lock_inode_cache(dir); -+ novfs_add_inode_entry(dir, &dentry->d_name, inode->i_ino, -+ &info); -+ novfs_unlock_inode_cache(dir); -+ -+ dput(parent); -+ -+ retCode = 0; -+ } -+ DbgPrint("return 0x%x", retCode); -+ return retCode; -+} -+ -+int novfs_i_rename(struct inode *odir, struct dentry *od, struct inode *ndir, -+ struct dentry *nd) -+{ -+ int retCode = -ENOTEMPTY; -+ char 
*newpath, *newbuf, *newcon; -+ char *oldpath, *oldbuf, *oldcon; -+ struct qstr newname, oldname; -+ struct novfs_entry_info *info = NULL; -+ int oldlen, newlen; -+ struct novfs_schandle session; -+ ino_t ino; -+ -+ if (IS_ROOT(od) || /* Root */ -+ IS_ROOT(od->d_parent) || /* User */ -+ IS_ROOT(od->d_parent->d_parent) || /* Server */ -+ IS_ROOT(od->d_parent->d_parent->d_parent)) { /* Volume */ -+ return (-EACCES); -+ } -+ -+ DbgPrint("odir=0x%p ino=%d ndir=0x%p ino=%d", odir, -+ odir->i_ino, ndir, ndir->i_ino); -+ -+ oldbuf = kmalloc(PATH_LENGTH_BUFFER * 2, GFP_KERNEL); -+ newbuf = oldbuf + PATH_LENGTH_BUFFER; -+ if (oldbuf && newbuf) { -+ oldpath = novfs_dget_path(od, oldbuf, PATH_LENGTH_BUFFER); -+ newpath = novfs_dget_path(nd, newbuf, PATH_LENGTH_BUFFER); -+ if (oldpath && newpath) { -+ oldlen = PATH_LENGTH_BUFFER - (int)(oldpath - oldbuf); -+ newlen = PATH_LENGTH_BUFFER - (int)(newpath - newbuf); -+ -+ DbgPrint("od=0x%p od->inode=0x%p od->inode->i_ino=%d %s", -+ od, od->d_inode, od->d_inode->i_ino, oldpath); -+ if (nd->d_inode) { -+ DbgPrint("nd=0x%p nd->inode=0x%p nd->inode->i_ino=%d %s", -+ nd, nd->d_inode, nd->d_inode->i_ino, -+ newpath); -+ } else { -+ DbgPrint("nd=0x%p nd->inode=0x%p %s", -+ nd, nd->d_inode, newpath); -+ } -+ -+ /* -+ * Check to see if two different servers or different volumes -+ */ -+ newcon = strchr(newpath + 1, '\\'); -+ oldcon = strchr(oldpath + 1, '\\'); -+ DbgPrint("newcon=0x%p newpath=0x%p", newcon, newpath); -+ DbgPrint("oldcon=0x%p oldpath=0x%p", oldcon, oldpath); -+ retCode = -EXDEV; -+ if (newcon && oldcon -+ && ((int)(newcon - newpath) == -+ (int)(oldcon - oldpath))) { -+ newcon = strchr(newcon + 1, '\\'); -+ oldcon = strchr(oldcon + 1, '\\'); -+ DbgPrint("2; newcon=0x%p newpath=0x%p", -+ newcon, newpath); -+ DbgPrint("2; oldcon=0x%p oldpath=0x%p", -+ oldcon, oldpath); -+ if (newcon && oldcon && -+ ((int)(newcon - newpath) == (int)(oldcon - oldpath))) { -+ newname.name = newpath; -+ newname.len = (int)(newcon - newpath); -+ 
newname.hash = 0; -+ -+ oldname.name = oldpath; -+ oldname.len = (int)(oldcon - oldpath); -+ oldname.hash = 0; -+ if (!novfs_d_strcmp(&newname, &oldname)) { -+ -+ if (od->d_inode -+ && od->d_inode->i_private) { -+ -+ if (nd->d_inode -+ && nd->d_inode-> -+ i_private) { -+ session = -+ novfs_scope_get_sessionId -+ (((struct inode_data *) ndir->i_private)->Scope); -+ if (0 == -+ SC_PRESENT -+ (session)) { -+ ((struct inode_data *) ndir->i_private)->Scope = novfs_get_scope(nd); -+ session -+ = -+ novfs_scope_get_sessionId -+ (((struct inode_data *) ndir->i_private)->Scope); -+ } -+ -+ retCode = -+ novfs_delete(newpath, S_ISDIR(nd->d_inode->i_mode), session); -+ if (retCode) { -+ struct iattr ia; -+ memset(&ia, 0, sizeof(ia)); -+ ia.ia_valid = ATTR_MODE; -+ ia.ia_mode = S_IRWXU; -+ novfs_set_attr(newpath, &ia, session); -+ retCode = novfs_delete(newpath, S_ISDIR(nd->d_inode->i_mode), session); -+ } -+ -+ } -+ -+ session = novfs_scope_get_sessionId(((struct inode_data *) ndir->i_private)->Scope); -+ if (0 == SC_PRESENT(session)) { -+ ((struct inode_data *)ndir->i_private)->Scope = novfs_get_scope(nd); -+ session = novfs_scope_get_sessionId(((struct inode_data *) ndir->i_private)->Scope); -+ } -+ retCode = novfs_rename_file(S_ISDIR(od->d_inode->i_mode), oldpath, oldlen - 1, newpath, newlen - 1, session); -+ -+ if (!retCode) { -+ info = (struct novfs_entry_info *) oldbuf; -+ od->d_time = 0; -+ novfs_remove_inode_entry(odir, &od->d_name, 0); -+ novfs_remove_inode_entry(ndir, &nd->d_name, 0); -+ novfs_get_file_info(newpath, info, session); -+ nd->d_time = jiffies + (novfs_update_timeout * HZ); -+ -+ if (od->d_inode && od->d_inode->i_ino) { -+ ino = od->d_inode-> i_ino; -+ } else { -+ ino = (ino_t)atomic_inc_return(&novfs_Inode_Number); -+ } -+ novfs_add_inode_entry(ndir, &nd->d_name, ino, info); -+ } -+ } -+ } -+ } -+ } -+ } -+ } -+ -+ if (oldbuf) -+ kfree(oldbuf); -+ -+ DbgPrint("return %d", retCode); -+ return (retCode); -+} -+ -+ -+int novfs_i_setattr(struct dentry 
*dentry, struct iattr *attr) -+{ -+ char *path, *buf; -+ struct inode *inode = dentry->d_inode; -+ char atime_buf[32]; -+ char mtime_buf[32]; -+ char ctime_buf[32]; -+ unsigned int ia_valid = attr->ia_valid; -+ struct novfs_schandle session; -+ int retVal = 0; -+ struct iattr mattr; -+ -+ if (IS_ROOT(dentry) || /* Root */ -+ IS_ROOT(dentry->d_parent) || /* User */ -+ IS_ROOT(dentry->d_parent->d_parent) || /* Server */ -+ IS_ROOT(dentry->d_parent->d_parent->d_parent)) { /* Volume */ -+ return (-EACCES); -+ } -+ -+ if (inode && inode->i_private) { -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> -+ Scope); -+ if (0 == SC_PRESENT(session)) { -+ ((struct inode_data *) inode->i_private)->Scope = -+ novfs_get_scope(dentry); -+ session = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ } -+ -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); -+ if (path) { -+ strcpy(atime_buf, "Unspecified"); -+ strcpy(mtime_buf, "Unspecified"); -+ strcpy(ctime_buf, "Unspecified"); -+ if (attr->ia_valid & ATTR_ATIME) { -+ ctime_r(&attr->ia_atime.tv_sec, -+ atime_buf); -+ } -+ if (attr->ia_valid & ATTR_MTIME) { -+ ctime_r(&attr->ia_mtime.tv_sec, -+ mtime_buf); -+ } -+ if (attr->ia_valid & ATTR_CTIME) { -+ ctime_r(&attr->ia_ctime.tv_sec, -+ ctime_buf); -+ } -+ /* Removed for Bug 132374. 
jlt */ -+ __DbgPrint("%s: %s\n" -+ " ia_valid: 0x%x\n" -+ " ia_mode: 0%o\n" -+ " ia_uid: %d\n" -+ " ia_gid: %d\n" -+ " ia_size: %lld\n" -+ " ia_atime: %s\n" -+ " ia_mtime: %s\n" -+ " ia_ctime: %s\n", __func__, -+ path, -+ attr->ia_valid, -+ attr->ia_mode, -+ attr->ia_uid, -+ attr->ia_gid, -+ attr->ia_size, -+ atime_buf, mtime_buf, ctime_buf); -+ -+ if ((attr->ia_valid & ATTR_FILE) -+ && (attr->ia_valid & ATTR_SIZE)) { -+ memcpy(&mattr, attr, sizeof(mattr)); -+ mattr.ia_valid &= -+ ~(ATTR_FILE | ATTR_SIZE); -+ attr = &mattr; -+ ia_valid = attr->ia_valid; -+#if 0 // thanks to vfs changes in our tree... -+ retVal = -+ novfs_trunc_ex(attr-> -+ ia_file-> -+ private_data, -+ attr-> -+ ia_size, -+ session); -+ if (!retVal) { -+ inode->i_size = attr->ia_size; -+ ((struct inode_data *) inode-> -+ i_private)->Flags |= -+ UPDATE_INODE; -+ } -+#endif -+ } -+ -+ if (ia_valid -+ && !(retVal = -+ novfs_set_attr(path, attr, session))) { -+ ((struct inode_data *) inode->i_private)-> -+ Flags |= UPDATE_INODE; -+ -+ if (ia_valid & ATTR_ATIME) -+ inode->i_atime = attr->ia_atime; -+ if (ia_valid & ATTR_MTIME) -+ inode->i_mtime = attr->ia_mtime; -+ if (ia_valid & ATTR_CTIME) -+ inode->i_ctime = attr->ia_ctime; -+ if (ia_valid & ATTR_MODE) { -+ inode->i_mode = -+ attr-> -+ ia_mode & (S_IFMT | -+ S_IRWXU); -+ } -+ } -+ } -+ } -+ kfree(buf); -+ } -+ DbgPrint("return 0x%x", retVal); -+ -+ return (retVal); -+} -+ -+int novfs_i_getattr(struct vfsmount *mnt, struct dentry *dentry, -+ struct kstat *kstat) -+{ -+ int retCode = 0; -+ char atime_buf[32]; -+ char mtime_buf[32]; -+ char ctime_buf[32]; -+ struct inode *inode = dentry->d_inode; -+ -+ struct novfs_entry_info info; -+ char *path, *buf; -+ struct novfs_schandle session; -+ struct inode_data *id; -+ -+ if (!IS_ROOT(dentry) && !IS_ROOT(dentry->d_parent)) { -+ SC_INITIALIZE(session); -+ id = dentry->d_inode->i_private; -+ -+ if (id && (id->Flags & UPDATE_INODE)) { -+ session = novfs_scope_get_sessionId(id->Scope); -+ -+ if (0 == 
SC_PRESENT(session)) { -+ id->Scope = novfs_get_scope(dentry); -+ session = novfs_scope_get_sessionId(id->Scope); -+ } -+ -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = -+ novfs_dget_path(dentry, buf, -+ PATH_LENGTH_BUFFER); -+ if (path) { -+ retCode = -+ novfs_get_file_info(path, &info, -+ session); -+ if (!retCode) { -+ update_inode(inode, &info); -+ id->Flags &= ~UPDATE_INODE; -+ } -+ } -+ kfree(buf); -+ } -+ } -+ } -+ -+ kstat->ino = inode->i_ino; -+ kstat->dev = inode->i_sb->s_dev; -+ kstat->mode = inode->i_mode; -+ kstat->nlink = inode->i_nlink; -+ kstat->uid = inode->i_uid; -+ kstat->gid = inode->i_gid; -+ kstat->rdev = inode->i_rdev; -+ kstat->size = i_size_read(inode); -+ kstat->atime = inode->i_atime; -+ kstat->mtime = inode->i_mtime; -+ kstat->ctime = inode->i_ctime; -+ kstat->blksize = inode->i_sb->s_blocksize; -+ kstat->blocks = inode->i_blocks; -+ if (inode->i_bytes) { -+ kstat->blocks++; -+ } -+ ctime_r(&kstat->atime.tv_sec, atime_buf); -+ ctime_r(&kstat->mtime.tv_sec, mtime_buf); -+ ctime_r(&kstat->ctime.tv_sec, ctime_buf); -+ -+ __DbgPrint("%s: 0x%x 0x%p <%.*s>\n" -+ " ino: %d\n" -+ " dev: 0x%x\n" -+ " mode: 0%o\n" -+ " nlink: 0x%x\n" -+ " uid: 0x%x\n" -+ " gid: 0x%x\n" -+ " rdev: 0x%x\n" -+ " size: 0x%llx\n" -+ " atime: %s\n" -+ " mtime: %s\n" -+ " ctime: %s\n" -+ " blksize: 0x%x\n" -+ " blocks: 0x%x\n", __func__, -+ retCode, dentry, dentry->d_name.len, dentry->d_name.name, -+ kstat->ino, -+ kstat->dev, -+ kstat->mode, -+ kstat->nlink, -+ kstat->uid, -+ kstat->gid, -+ kstat->rdev, -+ kstat->size, -+ atime_buf, -+ mtime_buf, ctime_buf, kstat->blksize, kstat->blocks); -+ return (retCode); -+} -+ -+ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer, -+ size_t buffer_size) -+{ -+ struct inode *inode = dentry->d_inode; -+ struct novfs_schandle sessionId; -+ char *path, *buf, *bufRead; -+ ssize_t dataLen; -+ -+ int retxcode = 0; -+ -+ SC_INITIALIZE(sessionId); -+ -+ DbgPrint("Ian"); /*%.*s\n", 
dentry->d_name.len, dentry->d_name.name); */ -+ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", -+ dentry->d_name.len, dentry->d_name.name); -+ DbgPrint("name %s", name); -+ DbgPrint("size %u", buffer_size); -+ -+ if (inode && inode->i_private) { -+ sessionId = -+ novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> -+ Scope); -+ DbgPrint("SessionId = %u", sessionId); -+ //if (0 == sessionId) -+ if (0 == SC_PRESENT(sessionId)) { -+ ((struct inode_data *) inode->i_private)->Scope = -+ novfs_get_scope(dentry); -+ sessionId = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ DbgPrint("SessionId = %u", sessionId); -+ } -+ } -+ -+ dataLen = 0; -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); -+ if (path) { -+ bufRead = kmalloc(XA_BUFFER, GFP_KERNEL); -+ if (bufRead) { -+ retxcode = -+ novfs_getx_file_info(path, name, bufRead, -+ XA_BUFFER, &dataLen, -+ sessionId); -+ DbgPrint("after novfs_GetX_File_Info retxcode = %d", -+ retxcode); -+ if (!retxcode) { -+ novfs_dump(64, bufRead); -+ if (buffer_size != 0) { -+ if (buffer_size >= dataLen) { -+ memcpy(buffer, bufRead, -+ dataLen); -+ } else { -+ DbgPrint("(!!!) not enough buffer_size. 
buffer_size = %d, dataLen = %d", -+ buffer_size, -+ dataLen); -+ retxcode = -ERANGE; -+ } -+ } -+ -+ if (bufRead) { -+ kfree(bufRead); -+ } -+ } -+ } -+ } -+ kfree(buf); -+ } -+ -+ if (retxcode) { -+ dataLen = retxcode; -+ } else { -+ if ((buffer_size > 0) && (buffer_size < dataLen)) { -+ dataLen = -ERANGE; -+ } -+ } -+ -+ return (dataLen); -+} -+ -+int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value, -+ size_t value_size, int flags) -+{ -+ -+ struct inode *inode = dentry->d_inode; -+ struct novfs_schandle sessionId; -+ char *path, *buf; -+ unsigned long bytesWritten = 0; -+ int retError = 0; -+ int retxcode = 0; -+ -+ SC_INITIALIZE(sessionId); -+ -+ DbgPrint("Ian"); /*%.*s\n", dentry->d_name.len, dentry->d_name.name); */ -+ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", -+ dentry->d_name.len, dentry->d_name.name); -+ DbgPrint("name %s", name); -+ DbgPrint("value_size %u", value_size); -+ DbgPrint("flags %d", flags); -+ -+ if (inode && inode->i_private) { -+ sessionId = -+ novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> -+ Scope); -+ DbgPrint("SessionId = %u", sessionId); -+ //if (0 == sessionId) -+ if (0 == SC_PRESENT(sessionId)) { -+ ((struct inode_data *) inode->i_private)->Scope = -+ novfs_get_scope(dentry); -+ sessionId = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ DbgPrint("SessionId = %u", sessionId); -+ } -+ } -+ -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); -+ if (path) { -+ retxcode = -+ novfs_setx_file_info(path, name, value, value_size, -+ &bytesWritten, flags, -+ sessionId); -+ if (!retxcode) { -+ DbgPrint("bytesWritten = %u", bytesWritten); -+ } -+ } -+ kfree(buf); -+ } -+ -+ if (retxcode) { -+ retError = retxcode; -+ } -+ -+ if (bytesWritten < value_size) { -+ retError = retxcode; -+ } -+ return (retError); -+} -+ -+ssize_t novfs_i_listxattr(struct dentry *dentry, char 
*buffer, size_t buffer_size) -+{ -+ struct inode *inode = dentry->d_inode; -+ struct novfs_schandle sessionId; -+ char *path, *buf, *bufList; -+ ssize_t dataLen; -+ int retxcode = 0; -+ -+ SC_INITIALIZE(sessionId); -+ -+ DbgPrint("Ian"); //%.*s\n", dentry->d_name.len, dentry->d_name.name); -+ DbgPrint("dentry->d_name.len %u, dentry->d_name.name %s", -+ dentry->d_name.len, dentry->d_name.name); -+ DbgPrint("size %u", buffer_size); -+ -+ if (inode && inode->i_private) { -+ sessionId = -+ novfs_scope_get_sessionId(((struct inode_data *) inode->i_private)-> -+ Scope); -+ DbgPrint("SessionId = %u", sessionId); -+ //if (0 == sessionId) -+ if (0 == SC_PRESENT(sessionId)) { -+ ((struct inode_data *) inode->i_private)->Scope = -+ novfs_get_scope(dentry); -+ sessionId = -+ novfs_scope_get_sessionId(((struct inode_data *) inode-> -+ i_private)->Scope); -+ DbgPrint("SessionId = %u", sessionId); -+ } -+ } -+ -+ dataLen = 0; -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = novfs_dget_path(dentry, buf, PATH_LENGTH_BUFFER); -+ if (path) { -+ bufList = kmalloc(XA_BUFFER, GFP_KERNEL); -+ if (bufList) { -+ retxcode = -+ novfs_listx_file_info(path, bufList, -+ XA_BUFFER, &dataLen, -+ sessionId); -+ -+ novfs_dump(64, bufList); -+ if (buffer_size != 0) { -+ if (buffer_size >= dataLen) { -+ memcpy(buffer, bufList, -+ dataLen); -+ } else { -+ DbgPrint("(!!!) not enough buffer_size. 
buffer_size = %d, dataLen = %d", -+ buffer_size, dataLen); -+ retxcode = -1; -+ } -+ } -+ -+ if (bufList) { -+ kfree(bufList); -+ } -+ } -+ -+ } -+ kfree(buf); -+ } -+ -+ if (retxcode) { -+ dataLen = -1; -+ } else { -+ -+ if ((buffer_size > 0) && (buffer_size < dataLen)) { -+ dataLen = -ERANGE; -+ } -+ } -+ return (dataLen); -+} -+ -+int novfs_i_revalidate(struct dentry *dentry) -+{ -+ -+ DbgPrint("name %.*s", dentry->d_name.len, dentry->d_name.name); -+ -+ return (0); -+} -+ -+void novfs_read_inode(struct inode *inode) -+{ -+ DbgPrint("0x%p %d", inode, inode->i_ino); -+} -+ -+void novfs_write_inode(struct inode *inode) -+{ -+ DbgPrint("Inode=0x%p Ino=%d", inode, inode->i_ino); -+} -+ -+int novfs_notify_change(struct dentry *dentry, struct iattr *attr) -+{ -+ struct inode *inode = dentry->d_inode; -+ -+ DbgPrint("Dentry=0x%p Name=%.*s Inode=0x%p Ino=%d ia_valid=0x%x", -+ dentry, dentry->d_name.len, dentry->d_name.name, inode, -+ inode->i_ino, attr->ia_valid); -+ return (0); -+} -+ -+void novfs_clear_inode(struct inode *inode) -+{ -+ InodeCount--; -+ -+ if (inode->i_private) { -+ struct inode_data *id = inode->i_private; -+ -+ DbgPrint("inode=0x%p ino=%d Scope=0x%p Name=%s", -+ inode, inode->i_ino, id->Scope, id->Name); -+ -+ novfs_free_inode_cache(inode); -+ -+ down(&InodeList_lock); -+ list_del(&id->IList); -+ up(&InodeList_lock); -+ -+ kfree(inode->i_private); -+ inode->i_private = NULL; -+ -+ remove_inode_hash(inode); -+ -+ } else { -+ DbgPrint("inode=0x%p ino=%d", inode, inode->i_ino); -+ } -+} -+ -+/* Called when /proc/mounts is read */ -+int novfs_show_options(struct seq_file *s, struct vfsmount *m) -+{ -+ char *buf, *path, *tmp; -+ -+ buf = kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ struct path my_path; -+ my_path.mnt = m; -+ my_path.dentry = m->mnt_root; -+ path = d_path(&my_path, buf, PATH_LENGTH_BUFFER); -+ if (path) { -+ if (!novfs_current_mnt -+ || (novfs_current_mnt -+ && strcmp(novfs_current_mnt, path))) { -+ DbgPrint("%.*s %.*s %s", -+ 
m->mnt_root->d_name.len, -+ m->mnt_root->d_name.name, -+ m->mnt_mountpoint->d_name.len, -+ m->mnt_mountpoint->d_name.name, path); -+ tmp = kmalloc(PATH_LENGTH_BUFFER - -+ (int)(path - buf), -+ GFP_KERNEL); -+ if (tmp) { -+ strcpy(tmp, path); -+ path = novfs_current_mnt; -+ novfs_current_mnt = tmp; -+ novfs_daemon_set_mnt_point(novfs_current_mnt); -+ -+ if (path) { -+ kfree(path); -+ } -+ } -+ } -+ } -+ kfree(buf); -+ } -+ return (0); -+} -+ -+/* Called when statfs(2) system called. */ -+int novfs_statfs(struct dentry *de, struct kstatfs *buf) -+{ -+ uint64_t td, fd, te, fe; -+ struct super_block *sb = de->d_sb; -+ -+ DbgPrint(""); -+ -+ td = fd = te = fe = 0; -+ -+ novfs_scope_get_userspace(&td, &fd, &te, &fe); -+ -+ DbgPrint("td=%llu", td); -+ DbgPrint("fd=%llu", fd); -+ DbgPrint("te=%llu", te); -+ DbgPrint("fe=%llu", fd); -+ /* fix for Nautilus */ -+ if (sb->s_blocksize == 0) -+ sb->s_blocksize = 4096; -+ -+ buf->f_type = sb->s_magic; -+ buf->f_bsize = sb->s_blocksize; -+ buf->f_namelen = NW_MAX_PATH_LENGTH; -+ buf->f_blocks = -+ (sector_t) (td + -+ (uint64_t) (sb->s_blocksize - -+ 1)) >> (uint64_t) sb->s_blocksize_bits; -+ buf->f_bfree = (sector_t) fd >> (uint64_t) sb->s_blocksize_bits; -+ buf->f_bavail = (sector_t) buf->f_bfree; -+ buf->f_files = (sector_t) te; -+ buf->f_ffree = (sector_t) fe; -+ buf->f_frsize = sb->s_blocksize; -+ if (te > 0xffffffff) -+ buf->f_files = 0xffffffff; -+ -+ if (fe > 0xffffffff) -+ buf->f_ffree = 0xffffffff; -+ -+ DbgPrint("f_type: 0x%x", buf->f_type); -+ DbgPrint("f_bsize: %u", buf->f_bsize); -+ DbgPrint("f_namelen: %d", buf->f_namelen); -+ DbgPrint("f_blocks: %llu", buf->f_blocks); -+ DbgPrint("f_bfree: %llu", buf->f_bfree); -+ DbgPrint("f_bavail: %llu", buf->f_bavail); -+ DbgPrint("f_files: %llu", buf->f_files); -+ DbgPrint("f_ffree: %llu", buf->f_ffree); -+ DbgPrint("f_frsize: %u", buf->f_frsize); -+ -+ return 0; -+} -+ -+struct inode *novfs_get_inode(struct super_block *sb, int mode, int dev, -+ uid_t Uid, ino_t ino, struct 
qstr *name) -+{ -+ struct inode *inode = new_inode(sb); -+ -+ if (inode) { -+ InodeCount++; -+ inode->i_mode = mode; -+ inode->i_uid = Uid; -+ inode->i_gid = 0; -+ inode->i_blkbits = sb->s_blocksize_bits; -+ inode->i_blocks = 0; -+ inode->i_rdev = 0; -+ inode->i_ino = (ino) ? ino : (ino_t)atomic_inc_return(&novfs_Inode_Number); -+ if (novfs_page_cache) { -+ inode->i_mapping->a_ops = &novfs_aops; -+ } else { -+ inode->i_mapping->a_ops = &novfs_nocache_aops; -+ } -+ inode->i_mapping->backing_dev_info = &novfs_backing_dev_info; -+ inode->i_atime.tv_sec = 0; -+ inode->i_atime.tv_nsec = 0; -+ inode->i_mtime = inode->i_ctime = inode->i_atime; -+ -+ DbgPrint("Inode=0x%p I_ino=%d len=%d", -+ inode, inode->i_ino, name->len); -+ -+ if (NULL != -+ (inode->i_private = -+ kmalloc(sizeof(struct inode_data) + name->len, -+ GFP_KERNEL))) { -+ struct inode_data *id; -+ id = inode->i_private; -+ -+ DbgPrint("i_private 0x%p", id); -+ -+ id->Scope = NULL; -+ id->Flags = 0; -+ id->Inode = inode; -+ -+ id->cntDC = 1; -+ -+ INIT_LIST_HEAD(&id->DirCache); -+ init_MUTEX(&id->DirCacheLock); -+ -+ id->FileHandle = 0; -+ id->CacheFlag = 0; -+ -+ down(&InodeList_lock); -+ -+ list_add_tail(&id->IList, &InodeList); -+ up(&InodeList_lock); -+ -+ id->Name[0] = '\0'; -+ -+ memcpy(id->Name, name->name, name->len); -+ id->Name[name->len] = '\0'; -+ -+ DbgPrint("name %s", id->Name); -+ } -+ -+ insert_inode_hash(inode); -+ -+ switch (mode & S_IFMT) { -+ -+ case S_IFREG: -+ inode->i_op = &novfs_file_inode_operations; -+ inode->i_fop = &novfs_file_operations; -+ break; -+ -+ case S_IFDIR: -+ inode->i_op = &novfs_inode_operations; -+ inode->i_fop = &novfs_dir_operations; -+ inode->i_blkbits = 0; -+ break; -+ -+ default: -+ init_special_inode(inode, mode, dev); -+ break; -+ } -+ -+ DbgPrint("size=%lld", inode->i_size); -+ DbgPrint("mode=0%o", inode->i_mode); -+ DbgPrint("i_sb->s_blocksize=%d", inode->i_sb->s_blocksize); -+ DbgPrint("i_blkbits=%d", inode->i_blkbits); -+ DbgPrint("i_blocks=%d", 
inode->i_blocks); -+ DbgPrint("i_bytes=%d", inode->i_bytes); -+ } -+ -+ DbgPrint("0x%p %d", inode, inode->i_ino); -+ return (inode); -+} -+ -+int novfs_fill_super(struct super_block *SB, void *Data, int Silent) -+{ -+ struct inode *inode; -+ struct dentry *server, *tree; -+ struct qstr name; -+ struct novfs_entry_info info; -+ -+ SB->s_blocksize = PAGE_CACHE_SIZE; -+ SB->s_blocksize_bits = PAGE_CACHE_SHIFT; -+ SB->s_maxbytes = 0xFFFFFFFFFFFFFFFFULL; /* Max file size */ -+ SB->s_op = &novfs_ops; -+ SB->s_flags |= (MS_NODIRATIME | MS_NODEV | MS_POSIXACL); -+ SB->s_magic = NOVFS_MAGIC; -+ -+ name.len = 1; -+ name.name = "/"; -+ -+ inode = novfs_get_inode(SB, S_IFDIR | 0777, 0, 0, 0, &name); -+ if (!inode) { -+ return (-ENOMEM); -+ } -+ -+ novfs_root = d_alloc_root(inode); -+ -+ if (!novfs_root) { -+ iput(inode); -+ return (-ENOMEM); -+ } -+ novfs_root->d_time = jiffies + (novfs_update_timeout * HZ); -+ -+ inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME; -+ -+ SB->s_root = novfs_root; -+ -+ DbgPrint("root 0x%p", novfs_root); -+ -+ if (novfs_root) { -+ novfs_root->d_op = &novfs_dentry_operations; -+ -+ name.name = SERVER_DIRECTORY_NAME; -+ name.len = strlen(SERVER_DIRECTORY_NAME); -+ name.hash = novfs_internal_hash(&name); -+ -+ inode = novfs_get_inode(SB, S_IFDIR | 0777, 0, 0, 0, &name); -+ if (inode) { -+ info.mode = inode->i_mode; -+ info.namelength = 0; -+ inode->i_size = info.size = 0; -+ inode->i_uid = info.uid = 0; -+ inode->i_gid = info.gid = 0; -+ inode->i_atime = info.atime = -+ inode->i_ctime = info.ctime = -+ inode->i_mtime = info.mtime = CURRENT_TIME; -+ -+ server = d_alloc(novfs_root, &name); -+ if (server) { -+ server->d_op = &novfs_dentry_operations; -+ server->d_time = 0xffffffff; -+ d_add(server, inode); -+ DbgPrint("d_add %s 0x%p", -+ SERVER_DIRECTORY_NAME, server); -+ novfs_add_inode_entry(novfs_root->d_inode, -+ &name, inode->i_ino, -+ &info); -+ } -+ } -+ -+ name.name = TREE_DIRECTORY_NAME; -+ name.len = strlen(TREE_DIRECTORY_NAME); 
-+ name.hash = novfs_internal_hash(&name); -+ -+ inode = novfs_get_inode(SB, S_IFDIR | 0777, 0, 0, 0, &name); -+ if (inode) { -+ info.mode = inode->i_mode; -+ info.namelength = 0; -+ inode->i_size = info.size = 0; -+ inode->i_uid = info.uid = 0; -+ inode->i_gid = info.gid = 0; -+ inode->i_atime = info.atime = -+ inode->i_ctime = info.ctime = -+ inode->i_mtime = info.mtime = CURRENT_TIME; -+ tree = d_alloc(novfs_root, &name); -+ if (tree) { -+ tree->d_op = &novfs_dentry_operations; -+ tree->d_time = 0xffffffff; -+ -+ d_add(tree, inode); -+ DbgPrint("d_add %s 0x%p", -+ TREE_DIRECTORY_NAME, tree); -+ novfs_add_inode_entry(novfs_root->d_inode, -+ &name, inode->i_ino, -+ &info); -+ } -+ } -+ } -+ -+ return (0); -+} -+ -+static int novfs_get_sb(struct file_system_type *Fstype, int Flags, -+ const char *Dev_name, void *Data, struct vfsmount *Mnt) -+{ -+ DbgPrint("Fstype=0x%x Dev_name=%s", Fstype, Dev_name); -+ return get_sb_nodev(Fstype, Flags, Data, novfs_fill_super, Mnt); -+} -+ -+static void novfs_kill_sb(struct super_block *super) -+{ -+ shrink_dcache_sb(super); -+ kill_litter_super(super); -+} -+ -+ssize_t novfs_Control_read(struct file *file, char *buf, size_t nbytes, -+ loff_t * ppos) -+{ -+ ssize_t retval = 0; -+ -+ DbgPrint("kernel_locked 0x%x", kernel_locked()); -+ -+ return retval; -+} -+ -+ssize_t novfs_Control_write(struct file * file, const char *buf, size_t nbytes, -+ loff_t * ppos) -+{ -+ ssize_t retval = 0; -+ -+ DbgPrint("kernel_locked 0x%x", kernel_locked()); -+ if (buf && nbytes) { -+ } -+ -+ return (retval); -+} -+ -+int novfs_Control_ioctl(struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg) -+{ -+ int retval = 0; -+ -+ DbgPrint("kernel_locked 0x%x", kernel_locked()); -+ -+ return (retval); -+} -+ -+static struct file_system_type novfs_fs_type = { -+ .name = "novfs", -+ .get_sb = novfs_get_sb, -+ .kill_sb = novfs_kill_sb, -+ .owner = THIS_MODULE, -+}; -+ -+int __init init_novfs(void) -+{ -+ int retCode; -+ -+ lastDir[0] = 
0; -+ lastTime = get_nanosecond_time(); -+ -+ inHAX = 0; -+ inHAXTime = get_nanosecond_time(); -+ -+ retCode = novfs_proc_init(); -+ -+ novfs_profile_init(); -+ -+ if (!retCode) { -+ DbgPrint("%s %s %s", __DATE__, __TIME__, NOVFS_VERSION_STRING); -+ novfs_daemon_queue_init(); -+ novfs_scope_init(); -+ retCode = register_filesystem(&novfs_fs_type); -+ if (retCode) { -+ novfs_proc_exit(); -+ novfs_daemon_queue_exit(); -+ novfs_scope_exit(); -+ } -+ } -+ return (retCode); -+} -+ -+void __exit exit_novfs(void) -+{ -+ novfs_scope_exit(); -+ novfs_daemon_queue_exit(); -+ novfs_profile_exit(); -+ novfs_proc_exit(); -+ unregister_filesystem(&novfs_fs_type); -+ -+ if (novfs_current_mnt) { -+ kfree(novfs_current_mnt); -+ novfs_current_mnt = NULL; -+ } -+} -+ -+int novfs_lock_inode_cache(struct inode *i) -+{ -+ struct inode_data *id; -+ int retVal = 0; -+ -+ DbgPrint("0x%p", i); -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ down(&id->DirCacheLock); -+ retVal = 1; -+ } -+ DbgPrint("return %d", retVal); -+ return (retVal); -+} -+ -+void novfs_unlock_inode_cache(struct inode *i) -+{ -+ struct inode_data *id; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ up(&id->DirCacheLock); -+ } -+} -+ -+int novfs_enumerate_inode_cache(struct inode *i, struct list_head **iteration, -+ ino_t * ino, struct novfs_entry_info *info) -+/* -+ * Arguments: struct inode *i - pointer to directory inode -+ * -+ * Returns: 0 - item found -+ * -1 - done -+ * -+ * Abstract: Unlocks inode cache. -+ * -+ * Notes: DirCacheLock should be held before calling this routine. 
-+ *========================================================================*/ -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ struct list_head *l = NULL; -+ int retVal = -1; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ if ((NULL == iteration) || (NULL == *iteration)) { -+ l = id->DirCache.next; -+ } else { -+ l = *iteration; -+ } -+ -+ if (l == &id->DirCache) { -+ l = NULL; -+ } else { -+ dc = list_entry(l, struct novfs_dir_cache, list); -+ -+ *ino = dc->ino; -+ info->type = 0; -+ info->mode = dc->mode; -+ info->size = dc->size; -+ info->atime = dc->atime; -+ info->mtime = dc->mtime; -+ info->ctime = dc->ctime; -+ info->namelength = dc->nameLen; -+ memcpy(info->name, dc->name, dc->nameLen); -+ info->name[dc->nameLen] = '\0'; -+ retVal = 0; -+ -+ l = l->next; -+ } -+ } -+ *iteration = l; -+ return (retVal); -+} -+ -+/* DirCacheLock should be held before calling this routine. */ -+int novfs_get_entry(struct inode *i, struct qstr *name, ino_t * ino, -+ struct novfs_entry_info *info) -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ int retVal = -1; -+ char *n = ""; -+ int nl = 6; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ if (name && name->len) { -+ n = (char *)name->name; -+ nl = name->len; -+ } -+ -+ dc = novfs_lookup_inode_cache(i, name, *ino); -+ if (dc) { -+ dc->flags |= ENTRY_VALID; -+ retVal = 0; -+ *ino = dc->ino; -+ info->type = 0; -+ info->mode = dc->mode; -+ info->size = dc->size; -+ info->atime = dc->atime; -+ info->mtime = dc->mtime; -+ info->ctime = dc->ctime; -+ info->namelength = dc->nameLen; -+ memcpy(info->name, dc->name, dc->nameLen); -+ info->name[dc->nameLen] = '\0'; -+ retVal = 0; -+ } -+ -+ DbgPrint("inode: 0x%p; name: %.*s; ino: %d\n", i, nl, n, *ino); -+ } -+ DbgPrint("return %d", retVal); -+ return (retVal); -+} -+ -+ /*DirCacheLock should be held before calling this routine. 
*/ -+int novfs_get_entry_by_pos(struct inode *i, loff_t pos, ino_t * ino, -+ struct novfs_entry_info *info) -+{ -+ int retVal = -1; -+ loff_t count = 0; -+ loff_t i_pos = pos - 2; -+ struct list_head *inter = NULL; -+ while (!novfs_enumerate_inode_cache(i, &inter, ino, info)) { -+ DbgPrint("info->name = %s", info->name); -+ if (count == i_pos) { -+ retVal = 0; -+ break; -+ } else -+ count++; -+ } -+ -+ return retVal; -+} -+ -+/* DirCacheLock should be held before calling this routine. */ -+int novfs_get_entry_time(struct inode *i, struct qstr *name, ino_t * ino, -+ struct novfs_entry_info *info, u64 * EntryTime) -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ int retVal = -1; -+ char *n = ""; -+ int nl = 6; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ if (name && name->len) { -+ n = (char *)name->name; -+ nl = name->len; -+ } -+ DbgPrint("inode: 0x%p; name: %.*s; ino: %d", i, nl, n, *ino); -+ -+ dc = novfs_lookup_inode_cache(i, name, *ino); -+ if (dc) { -+ retVal = 0; -+ *ino = dc->ino; -+ info->type = 0; -+ info->mode = dc->mode; -+ info->size = dc->size; -+ info->atime = dc->atime; -+ info->mtime = dc->mtime; -+ info->ctime = dc->ctime; -+ info->namelength = dc->nameLen; -+ memcpy(info->name, dc->name, dc->nameLen); -+ info->name[dc->nameLen] = '\0'; -+ if (EntryTime) { -+ *EntryTime = dc->jiffies; -+ } -+ retVal = 0; -+ } -+ } -+ DbgPrint("return %d", retVal); -+ return (retVal); -+} -+ -+/* -+ * Abstract: This routine will return the first entry on the list -+ * and then remove it. -+ * -+ * Notes: DirCacheLock should be held before calling this routine. 
-+ * -+ */ -+int novfs_get_remove_entry(struct inode *i, ino_t * ino, struct novfs_entry_info *info) -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ struct list_head *l = NULL; -+ int retVal = -1; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ l = id->DirCache.next; -+ -+ if (l != &id->DirCache) { -+ dc = list_entry(l, struct novfs_dir_cache, list); -+ -+ *ino = dc->ino; -+ info->type = 0; -+ info->mode = dc->mode; -+ info->size = dc->size; -+ info->atime = dc->atime; -+ info->mtime = dc->mtime; -+ info->ctime = dc->ctime; -+ info->namelength = dc->nameLen; -+ memcpy(info->name, dc->name, dc->nameLen); -+ info->name[dc->nameLen] = '\0'; -+ retVal = 0; -+ -+ list_del(&dc->list); -+ kfree(dc); -+ DCCount--; -+ -+ id->cntDC--; -+ } -+ } -+ return (retVal); -+} -+ -+/* -+ * Abstract: Marks all entries in the directory cache as invalid. -+ * -+ * Notes: DirCacheLock should be held before calling this routine. -+ * -+ *========================================================================*/ -+void novfs_invalidate_inode_cache(struct inode *i) -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ struct list_head *l; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ list_for_each(l, &id->DirCache) { -+ dc = list_entry(l, struct novfs_dir_cache, list); -+ dc->flags &= ~ENTRY_VALID; -+ } -+ } -+} -+ -+/*++======================================================================*/ -+struct novfs_dir_cache *novfs_lookup_inode_cache(struct inode *i, struct qstr *name, -+ ino_t ino) -+/* -+ * Returns: struct novfs_dir_cache entry if match -+ * NULL - if there is no match. -+ * -+ * Abstract: Checks a inode directory to see if there are any enties -+ * matching name or ino. If name is specified then ino is -+ * not used. ino is use if name is not specified. -+ * -+ * Notes: DirCacheLock should be held before calling this routine. 
-+ * -+ *========================================================================*/ -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc, *retVal = NULL; -+ struct list_head *l; -+ char *n = ""; -+ int nl = 6; -+ int hash = 0; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ if (name && name->name) { -+ nl = name->len; -+ n = (char *)name->name; -+ hash = name->hash; -+ } -+ DbgPrint("inode: 0x%p; name: %.*s; hash: 0x%x;\n" -+ " len: %d; ino: %d", i, nl, n, hash, nl, ino); -+ -+ list_for_each(l, &id->DirCache) { -+ dc = list_entry(l, struct novfs_dir_cache, list); -+ if (name) { -+ -+/* DbgPrint("novfs_lookup_inode_cache: 0x%p\n" \ -+ " ino: %d\n" \ -+ " hash: 0x%x\n" \ -+ " len: %d\n" \ -+ " name: %.*s\n", -+ dc, dc->ino, dc->hash, dc->nameLen, dc->nameLen, dc->name); -+*/ -+ if ((name->hash == dc->hash) && -+ (name->len == dc->nameLen) && -+ (0 == -+ memcmp(name->name, dc->name, name->len))) { -+ retVal = dc; -+ break; -+ } -+ } else { -+ if (ino == dc->ino) { -+ retVal = dc; -+ break; -+ } -+ } -+ } -+ } -+ -+ DbgPrint("return 0x%p", retVal); -+ return (retVal); -+} -+ -+/* -+ * Checks a inode directory to see if there are any enties matching name -+ * or ino. If entry is found the valid bit is set. -+ * -+ * DirCacheLock should be held before calling this routine. -+ */ -+int novfs_lookup_validate(struct inode *i, struct qstr *name, ino_t ino) -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ int retVal = -1; -+ char *n = ""; -+ int nl = 6; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ if (name && name->len) { -+ n = (char *)name->name; -+ nl = name->len; -+ } -+ DbgPrint("inode: 0x%p; name: %.*s; ino: %d", i, nl, n, ino); -+ -+ dc = novfs_lookup_inode_cache(i, name, ino); -+ if (dc) { -+ dc->flags |= ENTRY_VALID; -+ retVal = 0; -+ } -+ } -+ return (retVal); -+} -+ -+/* -+ * Added entry to directory cache. -+ * -+ * DirCacheLock should be held before calling this routine. 
-+ */ -+int novfs_add_inode_entry(struct inode *i, -+ struct qstr *name, ino_t ino, struct novfs_entry_info *info) -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *new; -+ int retVal = -ENOMEM; -+ struct novfs_dir_cache *todel; -+ struct list_head *todeltmp; -+ -+ //SClark -+ DbgPrint("i: %p", i); -+ if ((id = i->i_private)) { -+ DbgPrint("i->i_private: %p", id); -+ if (id->DirCache.next) -+ DbgPrint("id->DirCache.next: %p", id->DirCache.next); -+ } -+ //SClark -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ new = kmalloc(sizeof(struct novfs_dir_cache) + name->len, GFP_KERNEL); -+ if (new) { -+ id->cntDC++; -+ -+ DCCount++; -+ DbgPrint("inode: 0x%p; id: 0x%p; DC: 0x%p; new: 0x%p; " -+ "name: %.*s; ino: %d; size: %lld; mode: 0x%x", -+ i, id, &id->DirCache, new, name->len, -+ name->name, ino, info->size, info->mode); -+ -+ retVal = 0; -+ new->flags = ENTRY_VALID; -+ new->jiffies = get_jiffies_64(); -+ new->size = info->size; -+ new->mode = info->mode; -+ new->atime = info->atime; -+ new->mtime = info->mtime; -+ new->ctime = info->ctime; -+ new->ino = ino; -+ new->hash = name->hash; -+ new->nameLen = name->len; -+ memcpy(new->name, name->name, name->len); -+ new->name[new->nameLen] = '\0'; -+ list_add(&new->list, &id->DirCache); -+ -+ if (id->cntDC > 20) { -+ todeltmp = id->DirCache.prev; -+ todel = list_entry(todeltmp, struct novfs_dir_cache, list); -+ -+ list_del(&todel->list); -+ -+ kfree(todel); -+ -+ DCCount--; -+ id->cntDC--; -+ } -+ -+ } -+ } -+ return (retVal); -+} -+ -+/* -+ * DirCacheLock should be held before calling this routine. 
-+ */ -+int novfs_update_entry(struct inode *i, struct qstr *name, ino_t ino, -+ struct novfs_entry_info *info) -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ int retVal = -1; -+ char *n = ""; -+ int nl = 6; -+ char atime_buf[32]; -+ char mtime_buf[32]; -+ char ctime_buf[32]; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ -+ if (name && name->len) { -+ n = (char *)name->name; -+ nl = name->len; -+ } -+ ctime_r(&info->atime.tv_sec, atime_buf); -+ ctime_r(&info->mtime.tv_sec, mtime_buf); -+ ctime_r(&info->ctime.tv_sec, ctime_buf); -+ DbgPrint("inode: 0x%p; name: %.*s; ino: %d; size: %lld; " -+ "atime: %s; mtime: %s; ctime: %s", -+ i, nl, n, ino, info->size, atime_buf, mtime_buf, -+ ctime_buf); -+ -+ dc = novfs_lookup_inode_cache(i, name, ino); -+ if (dc) { -+ retVal = 0; -+ dc->flags = ENTRY_VALID; -+ dc->jiffies = get_jiffies_64(); -+ dc->size = info->size; -+ dc->mode = info->mode; -+ dc->atime = info->atime; -+ dc->mtime = info->mtime; -+ dc->ctime = info->ctime; -+ -+ ctime_r(&dc->atime.tv_sec, atime_buf); -+ ctime_r(&dc->mtime.tv_sec, mtime_buf); -+ ctime_r(&dc->ctime.tv_sec, ctime_buf); -+ DbgPrint("entry: 0x%p; flags: 0x%x; jiffies: %lld; " -+ "ino: %d; size: %lld; mode: 0%o; atime: %s; " -+ "mtime: %s %d; ctime: %s; hash: 0x%x; " -+ " nameLen: %d; name: %s", -+ dc, dc->flags, dc->jiffies, dc->ino, dc->size, -+ dc->mode, atime_buf, mtime_buf, -+ dc->mtime.tv_nsec, ctime_buf, dc->hash, -+ dc->nameLen, dc->name); -+ } -+ } -+ DbgPrint("return %d", retVal); -+ return (retVal); -+} -+ -+/* -+ * Removes entry from directory cache. You can specify a name -+ * or an inode number. -+ * -+ * DirCacheLock should be held before calling this routine. 
-+ */ -+void novfs_remove_inode_entry(struct inode *i, struct qstr *name, ino_t ino) -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ char *n = ""; -+ int nl = 6; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ dc = novfs_lookup_inode_cache(i, name, ino); -+ if (dc) { -+ if (name && name->name) { -+ nl = name->len; -+ n = (char *)name->name; -+ } -+ DbgPrint("inode: 0x%p; id: 0x%p; DC: 0x%p; " -+ "name: %.*s; ino: %d entry: 0x%p " -+ "[name: %.*s; ino: %d; next: 0x%p; " -+ "prev: 0x%p]", -+ i, id, &id->DirCache, nl, n, ino, dc, -+ dc->nameLen, dc->name, dc->ino, dc->list.next, -+ dc->list.prev); -+ list_del(&dc->list); -+ kfree(dc); -+ DCCount--; -+ -+ id->cntDC--; -+ } -+ } -+} -+ -+/* -+ * Frees all invalid entries in the directory cache. -+ * -+ * DirCacheLock should be held before calling this routine. -+ */ -+void novfs_free_invalid_entries(struct inode *i) -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ struct list_head *l; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ list_for_each(l, &id->DirCache) { -+ dc = list_entry(l, struct novfs_dir_cache, list); -+ if (0 == (dc->flags & ENTRY_VALID)) { -+ DbgPrint("inode: 0x%p; id: 0x%p; entry: 0x%p; " -+ "name: %.*s; ino: %d", -+ i, id, dc, dc->nameLen, dc->name, -+ dc->ino); -+ l = l->prev; -+ list_del(&dc->list); -+ kfree(dc); -+ DCCount--; -+ -+ id->cntDC--; -+ } -+ } -+ } -+} -+ -+/* -+ * Frees all entries in the inode cache. -+ * -+ * DirCacheLock should be held before calling this routine. 
-+ */ -+void novfs_free_inode_cache(struct inode *i) -+{ -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ struct list_head *l; -+ -+ if (i && (id = i->i_private) && id->DirCache.next) { -+ list_for_each(l, &id->DirCache) { -+ dc = list_entry(l, struct novfs_dir_cache, list); -+ l = l->prev; -+ list_del(&dc->list); -+ kfree(dc); -+ DCCount--; -+ -+ id->cntDC--; -+ } -+ } -+} -+ -+void novfs_dump_inode(void *pf) -+{ -+ struct inode *inode; -+ void (*pfunc) (char *Fmt, ...) = pf; -+ struct inode_data *id; -+ struct novfs_dir_cache *dc; -+ struct list_head *il, *l; -+ char atime_buf[32]; -+ char mtime_buf[32]; -+ char ctime_buf[32]; -+ unsigned long icnt = 0, dccnt = 0; -+ -+ down(&InodeList_lock); -+ list_for_each(il, &InodeList) { -+ id = list_entry(il, struct inode_data, IList); -+ inode = id->Inode; -+ if (inode) { -+ icnt++; -+ -+ pfunc("Inode=0x%p I_ino=%d\n", inode, inode->i_ino); -+ -+ pfunc(" atime=%s\n", -+ ctime_r(&inode->i_atime.tv_sec, atime_buf)); -+ pfunc(" ctime=%s\n", -+ ctime_r(&inode->i_mtime.tv_sec, atime_buf)); -+ pfunc(" mtime=%s\n", -+ ctime_r(&inode->i_ctime.tv_sec, atime_buf)); -+ pfunc(" size=%lld\n", inode->i_size); -+ pfunc(" mode=0%o\n", inode->i_mode); -+ pfunc(" count=0%o\n", atomic_read(&inode->i_count)); -+ } -+ -+ pfunc(" nofs_inode_data: 0x%p Name=%s Scope=0x%p\n", id, id->Name, -+ id->Scope); -+ -+ if (id->DirCache.next) { -+ list_for_each(l, &id->DirCache) { -+ dccnt++; -+ dc = list_entry(l, struct novfs_dir_cache, -+ list); -+ ctime_r(&dc->atime.tv_sec, atime_buf); -+ ctime_r(&dc->mtime.tv_sec, mtime_buf); -+ ctime_r(&dc->ctime.tv_sec, ctime_buf); -+ -+ pfunc(" Cache Entry: 0x%p\n" -+ " flags: 0x%x\n" -+ " jiffies: %llu\n" -+ " ino: %u\n" -+ " size: %llu\n" -+ " mode: 0%o\n" -+ " atime: %s\n" -+ " mtime: %s\n" -+ " ctime: %s\n" -+ " hash: 0x%x\n" -+ " len: %d\n" -+ " name: %s\n", -+ dc, dc->flags, dc->jiffies, -+ dc->ino, dc->size, dc->mode, -+ atime_buf, mtime_buf, ctime_buf, -+ dc->hash, dc->nameLen, dc->name); -+ } -+ 
} -+ } -+ up(&InodeList_lock); -+ -+ pfunc("Inodes: %d(%d) DirCache: %d(%d)\n", InodeCount, icnt, DCCount, -+ dccnt); -+ -+} -+ -+module_init(init_novfs); -+module_exit(exit_novfs); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Novell Inc."); -+MODULE_DESCRIPTION("Novell NetWare Client for Linux"); -+MODULE_VERSION(NOVFS_VERSION_STRING); ---- /dev/null -+++ b/fs/novfs/nwcapi.c -@@ -0,0 +1,2202 @@ -+/* -+ * Novell NCP Redirector for Linux -+ * Author: James Turner/Richard Williams -+ * -+ * This file contains functions used to interface to the library interface of -+ * the daemon. -+ * -+ * Copyright (C) 2005 Novell, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "nwcapi.h" -+#include "nwerror.h" -+#include "vfs.h" -+#include "commands.h" -+ -+#ifndef strlen_user -+#define strlen_user(str) strnlen_user(str, ~0UL >> 1) -+#endif -+ -+static void GetUserData(struct nwc_scan_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply); -+static void GetConnData(struct nwc_get_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply); -+ -+/*++======================================================================*/ -+int novfs_open_conn_by_name(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwd_open_conn_by_name *openConn, *connReply; -+ struct nwc_open_conn_by_name ocbn; -+ int retCode = 0; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ char *data; -+ -+ cpylen = copy_from_user(&ocbn, pdata->reqData, sizeof(ocbn)); -+ datalen = 
sizeof(*openConn) + strlen_user(ocbn.pName->pString) + strlen_user(ocbn.pServiceType); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_OPEN_CONN_BY_NAME; -+ -+ cmd->dataLen = datalen; -+ openConn = (struct nwd_open_conn_by_name *) cmd->data; -+ -+ openConn->nameLen = strlen_user(ocbn.pName->pString); -+ openConn->serviceLen = strlen_user(ocbn.pServiceType); -+ openConn->uConnFlags = ocbn.uConnFlags; -+ openConn->ConnHandle = Uint32toHandle(ocbn.ConnHandle); -+ data = (char *)openConn; -+ data += sizeof(*openConn); -+ openConn->oName = sizeof(*openConn); -+ -+ openConn->oServiceType = openConn->oName + openConn->nameLen; -+ cpylen = -+ copy_from_user(data, ocbn.pName->pString, -+ openConn->nameLen); -+ data += openConn->nameLen; -+ cpylen = -+ copy_from_user(data, ocbn.pServiceType, -+ openConn->serviceLen); -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ /* -+ * we got reply data from the daemon -+ */ -+ connReply = (struct nwd_open_conn_by_name *) reply->data; -+ retCode = reply->Reply.ErrorCode; -+ if (!retCode) { -+ /* -+ * we got valid data. 
-+ */ -+ connReply = (struct nwd_open_conn_by_name *) reply->data; -+ ocbn.RetConnHandle = HandletoUint32(connReply->newConnHandle); -+ *Handle = connReply->newConnHandle; -+ -+ cpylen = copy_to_user(pdata->reqData, &ocbn, sizeof(ocbn)); -+ DbgPrint("New Conn Handle = %X", connReply->newConnHandle); -+ } -+ kfree(reply); -+ } -+ -+ kfree(cmd); -+ return ((int)retCode); -+ -+} -+ -+int novfs_open_conn_by_addr(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwd_open_conn_by_addr *openConn, *connReply; -+ struct nwc_open_conn_by_addr ocba; -+ struct nwc_tran_addr tranAddr; -+ int retCode = 0; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ char addr[MAX_ADDRESS_LENGTH]; -+ -+ cpylen = copy_from_user(&ocba, pdata->reqData, sizeof(ocba)); -+ datalen = sizeof(*openConn); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_OPEN_CONN_BY_ADDRESS; -+ cmd->dataLen = datalen; -+ openConn = (struct nwd_open_conn_by_addr *) cmd->data; -+ -+ cpylen = -+ copy_from_user(&tranAddr, ocba.pTranAddr, sizeof(tranAddr)); -+ -+ DbgPrint("tranAddr"); -+ novfs_dump(sizeof(tranAddr), &tranAddr); -+ -+ openConn->TranAddr.uTransportType = tranAddr.uTransportType; -+ openConn->TranAddr.uAddressLength = tranAddr.uAddressLength; -+ memset(addr, 0xcc, sizeof(addr) - 1); -+ -+ cpylen = -+ copy_from_user(addr, tranAddr.puAddress, -+ tranAddr.uAddressLength); -+ -+ DbgPrint("addr"); -+ novfs_dump(sizeof(addr), addr); -+ -+ openConn->TranAddr.oAddress = *(unsigned int *) (&addr[2]); -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ /* -+ * we got reply data from the daemon -+ */ 
-+ connReply = (struct nwd_open_conn_by_addr *) reply->data; -+ retCode = reply->Reply.ErrorCode; -+ if (!retCode) { -+ /* -+ * we got valid data. -+ */ -+ connReply = (struct nwd_open_conn_by_addr *) reply->data; -+ ocba.ConnHandle = -+ HandletoUint32(connReply->ConnHandle); -+ *Handle = connReply->ConnHandle; -+ cpylen = -+ copy_to_user(pdata->reqData, &ocba, -+ sizeof(ocba)); -+ DbgPrint("New Conn Handle = %X", connReply->ConnHandle); -+ } -+ kfree(reply); -+ } -+ -+ kfree(cmd); -+ -+ return (retCode); -+ -+} -+ -+int novfs_open_conn_by_ref(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwd_open_conn_by_ref *openConn; -+ struct nwc_open_conn_by_ref ocbr; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ -+ cpylen = copy_from_user(&ocbr, pdata->reqData, sizeof(ocbr)); -+ datalen = sizeof(*openConn); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_OPEN_CONN_BY_REFERENCE; -+ cmd->dataLen = datalen; -+ openConn = (struct nwd_open_conn_by_ref *) cmd->data; -+ -+ openConn->uConnReference = -+ (void *) (unsigned long) ocbr.uConnReference; -+ openConn->uConnFlags = ocbr.uConnFlags; -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ /* -+ * we got reply data from the daemon -+ */ -+ openConn = (struct nwd_open_conn_by_ref *) reply->data; -+ retCode = reply->Reply.ErrorCode; -+ if (!retCode) { -+ /* -+ * we got valid data. 
-+ */ -+ ocbr.ConnHandle = -+ HandletoUint32(openConn->ConnHandle); -+ *Handle = openConn->ConnHandle; -+ -+ cpylen = -+ copy_to_user(pdata->reqData, &ocbr, -+ sizeof(ocbr)); -+ DbgPrint("New Conn Handle = %X", openConn->ConnHandle); -+ } -+ kfree(reply); -+ } -+ -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_raw_send(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct nwc_request xRequest; -+ struct nwc_frag *frag, *cFrag, *reqFrag; -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen, totalLen; -+ unsigned int x; -+ struct nwd_ncp_req *ncpData; -+ struct nwd_ncp_rep *ncpReply; -+ unsigned char *reqData; -+ unsigned long actualReplyLength = 0; -+ -+ DbgPrint("[XPLAT] Process Raw NCP Send"); -+ cpylen = copy_from_user(&xRequest, pdata->reqData, sizeof(xRequest)); -+ -+ /* -+ * Figure out the length of the request -+ */ -+ frag = -+ kmalloc(xRequest.uNumReplyFrags * sizeof(struct nwc_frag), GFP_KERNEL); -+ -+ DbgPrint("[XPLAT RawNCP] - Reply Frag Count 0x%X", -+ xRequest.uNumReplyFrags); -+ -+ if (!frag) -+ return (retCode); -+ -+ cpylen = -+ copy_from_user(frag, xRequest.pReplyFrags, -+ xRequest.uNumReplyFrags * sizeof(struct nwc_frag)); -+ totalLen = 0; -+ -+ cFrag = frag; -+ for (x = 0; x < xRequest.uNumReplyFrags; x++) { -+ DbgPrint("[XPLAT - RawNCP] - Frag Len = %d", cFrag->uLength); -+ totalLen += cFrag->uLength; -+ cFrag++; -+ } -+ -+ DbgPrint("[XPLAT - RawNCP] - totalLen = %d", totalLen); -+ datalen = 0; -+ reqFrag = -+ kmalloc(xRequest.uNumRequestFrags * sizeof(struct nwc_frag), -+ GFP_KERNEL); -+ if (!reqFrag) { -+ kfree(frag); -+ return (retCode); -+ } -+ -+ cpylen = -+ copy_from_user(reqFrag, xRequest.pRequestFrags, -+ xRequest.uNumRequestFrags * sizeof(struct nwc_frag)); -+ cFrag = reqFrag; -+ for (x = 0; x < xRequest.uNumRequestFrags; x++) { -+ datalen += cFrag->uLength; -+ cFrag++; -+ } -+ -+ /* -+ * Allocate the cmd 
Request -+ */ -+ cmdlen = datalen + sizeof(*cmd) + sizeof(*ncpData); -+ DbgPrint("[XPLAT RawNCP] - Frag Count 0x%X", -+ xRequest.uNumRequestFrags); -+ DbgPrint("[XPLAT RawNCP] - Total Command Data Len = %x", cmdlen); -+ -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_RAW_NCP_REQUEST; -+ -+ /* -+ * build the NCP Request -+ */ -+ cmd->dataLen = cmdlen - sizeof(*cmd); -+ ncpData = (struct nwd_ncp_req *) cmd->data; -+ ncpData->replyLen = totalLen; -+ ncpData->requestLen = datalen; -+ ncpData->ConnHandle = (void *) (unsigned long) xRequest.ConnHandle; -+ ncpData->function = xRequest.uFunction; -+ -+ reqData = ncpData->data; -+ cFrag = reqFrag; -+ -+ for (x = 0; x < xRequest.uNumRequestFrags; x++) { -+ cpylen = -+ copy_from_user(reqData, cFrag->pData, -+ cFrag->uLength); -+ reqData += cFrag->uLength; -+ cFrag++; -+ } -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ DbgPrint("RawNCP - reply = %x", reply); -+ DbgPrint("RawNCP - retCode = %x", retCode); -+ -+ if (reply) { -+ /* -+ * we got reply data from the daemon -+ */ -+ ncpReply = (struct nwd_ncp_rep *) reply->data; -+ retCode = reply->Reply.ErrorCode; -+ -+ DbgPrint("RawNCP - Reply Frag Count 0x%X", -+ xRequest.uNumReplyFrags); -+ -+ /* -+ * We need to copy the reply frags to the packet. 
-+ */ -+ reqData = ncpReply->data; -+ cFrag = frag; -+ -+ totalLen = ncpReply->replyLen; -+ for (x = 0; x < xRequest.uNumReplyFrags; x++) { -+ -+ DbgPrint("RawNCP - Copy Frag %d: 0x%X", x, -+ cFrag->uLength); -+ -+ datalen = -+ min((unsigned long) cFrag->uLength, totalLen); -+ -+ cpylen = -+ copy_to_user(cFrag->pData, reqData, -+ datalen); -+ totalLen -= datalen; -+ reqData += datalen; -+ actualReplyLength += datalen; -+ -+ cFrag++; -+ } -+ -+ kfree(reply); -+ } else { -+ retCode = -EIO; -+ } -+ -+ kfree(cmd); -+ xRequest.uActualReplyLength = actualReplyLength; -+ cpylen = copy_to_user(pdata->reqData, &xRequest, sizeof(xRequest)); -+ -+ kfree(reqFrag); -+ kfree(frag); -+ -+ return (retCode); -+} -+ -+int novfs_conn_close(struct novfs_xplat *pdata, void ** Handle, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_close_conn cc; -+ struct nwd_close_conn *nwdClose; -+ int retCode = 0; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ -+ cpylen = copy_from_user(&cc, pdata->reqData, sizeof(cc)); -+ -+ datalen = sizeof(*nwdClose); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_CLOSE_CONN; -+ -+ nwdClose = (struct nwd_close_conn *) cmd->data; -+ cmd->dataLen = sizeof(*nwdClose); -+ *Handle = nwdClose->ConnHandle = Uint32toHandle(cc.ConnHandle); -+ -+ /* -+ * send the request -+ */ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, 0); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_sys_conn_close(struct novfs_xplat *pdata, unsigned long *Handle, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct 
novfs_xplat_call_reply *reply; -+ struct nwc_close_conn cc; -+ struct nwd_close_conn *nwdClose; -+ unsigned int retCode = 0; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ -+ cpylen = copy_from_user(&cc, pdata->reqData, sizeof(cc)); -+ -+ datalen = sizeof(*nwdClose); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_SYS_CLOSE_CONN; -+ -+ nwdClose = (struct nwd_close_conn *) cmd->data; -+ cmd->dataLen = sizeof(*nwdClose); -+ nwdClose->ConnHandle = (void *) (unsigned long) cc.ConnHandle; -+ *Handle = (unsigned long) cc.ConnHandle; -+ -+ /* -+ * send the request -+ */ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, (void **)&reply, &replylen, 0); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_login_id(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct nwc_login_id lgn, *plgn; -+ int retCode = -ENOMEM; -+ struct ncl_string server; -+ struct ncl_string username; -+ struct ncl_string password; -+ unsigned long cpylen; -+ struct nwc_string nwcStr; -+ -+ cpylen = copy_from_user(&lgn, pdata->reqData, sizeof(lgn)); -+ -+ DbgPrint(""); -+ novfs_dump(sizeof(lgn), &lgn); -+ -+ cpylen = copy_from_user(&nwcStr, lgn.pDomainName, sizeof(nwcStr)); -+ DbgPrint("DomainName\n"); -+ novfs_dump(sizeof(nwcStr), &nwcStr); -+ -+ if ((server.buffer = kmalloc(nwcStr.DataLen, GFP_KERNEL))) { -+ server.type = nwcStr.DataType; -+ server.len = nwcStr.DataLen; -+ if (!copy_from_user((void *)server.buffer, nwcStr.pBuffer, server.len)) { -+ DbgPrint("Server"); -+ novfs_dump(server.len, server.buffer); -+ -+ cpylen = copy_from_user(&nwcStr, lgn.pObjectName, sizeof(nwcStr)); -+ DbgPrint("ObjectName"); -+ novfs_dump(sizeof(nwcStr), &nwcStr); -+ -+ if 
((username.buffer = kmalloc(nwcStr.DataLen, GFP_KERNEL))) { -+ username.type = nwcStr.DataType; -+ username.len = nwcStr.DataLen; -+ if (!copy_from_user((void *)username.buffer, nwcStr.pBuffer, username.len)) { -+ DbgPrint("User"); -+ novfs_dump(username.len, username.buffer); -+ -+ cpylen = copy_from_user(&nwcStr, lgn.pPassword, sizeof(nwcStr)); -+ DbgPrint("Password"); -+ novfs_dump(sizeof(nwcStr), &nwcStr); -+ -+ if ((password.buffer = kmalloc(nwcStr.DataLen, GFP_KERNEL))) { -+ password.type = nwcStr.DataType; -+ password.len = nwcStr.DataLen; -+ if (!copy_from_user((void *)password.buffer, nwcStr.pBuffer, password.len)) { -+ retCode = novfs_do_login(&server, &username, &password, (void **)&lgn.AuthenticationId, &Session); -+ if (retCode) { -+ lgn.AuthenticationId = 0; -+ } -+ -+ plgn = (struct nwc_login_id *)pdata->reqData; -+ cpylen = copy_to_user(&plgn->AuthenticationId, &lgn.AuthenticationId, sizeof(plgn->AuthenticationId)); -+ } -+ memset(password.buffer, 0, password.len); -+ kfree(password.buffer); -+ } -+ } -+ memset(username.buffer, 0, username.len); -+ kfree(username.buffer); -+ } -+ } -+ kfree(server.buffer); -+ } -+ return (retCode); -+} -+ -+int novfs_auth_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct nwc_auth_with_id pauth; -+ struct nwc_auth_wid *pDauth; -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ -+ datalen = sizeof(*pDauth); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_AUTHENTICATE_CONN_WITH_ID; -+ -+ cpylen = copy_from_user(&pauth, pdata->reqData, sizeof(pauth)); -+ -+ pDauth = (struct nwc_auth_wid *) cmd->data; -+ cmd->dataLen = datalen; -+ pDauth->AuthenticationId = 
pauth.AuthenticationId; -+ pDauth->ConnHandle = (void *) (unsigned long) pauth.ConnHandle; -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ return (retCode); -+} -+ -+int novfs_license_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_license_conn lisc; -+ struct nwc_lisc_id * pDLisc; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ -+ datalen = sizeof(*pDLisc); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_LICENSE_CONN; -+ -+ cpylen = copy_from_user(&lisc, pdata->reqData, sizeof(lisc)); -+ -+ pDLisc = (struct nwc_lisc_id *) cmd->data; -+ cmd->dataLen = datalen; -+ pDLisc->ConnHandle = (void *) (unsigned long) lisc.ConnHandle; -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+} -+ -+int novfs_logout_id(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_lo_id logout, *pDLogout; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ -+ datalen = sizeof(*pDLogout); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_LOGOUT_IDENTITY; -+ -+ cpylen = -+ 
copy_from_user(&logout, pdata->reqData, sizeof(logout)); -+ -+ pDLogout = (struct nwc_lo_id *) cmd->data; -+ cmd->dataLen = datalen; -+ pDLogout->AuthenticationId = logout.AuthenticationId; -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+} -+ -+int novfs_unlicense_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_unlic_conn *pUconn, ulc; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ -+ cpylen = copy_from_user(&ulc, pdata->reqData, sizeof(ulc)); -+ datalen = sizeof(*pUconn); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_UNLICENSE_CONN; -+ cmd->dataLen = datalen; -+ pUconn = (struct nwc_unlic_conn *) cmd->data; -+ -+ pUconn->ConnHandle = (void *) (unsigned long) ulc.ConnHandle; -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ /* -+ * we got reply data from the daemon -+ */ -+ retCode = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ return (retCode); -+ -+} -+ -+int novfs_unauthenticate(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_unauthenticate auth, *pDAuth; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ -+ datalen = sizeof(*pDAuth); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = (struct novfs_xplat_call_request *)kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ 
cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_UNAUTHENTICATE_CONN; -+ -+ cpylen = copy_from_user(&auth, pdata->reqData, sizeof(auth)); -+ -+ pDAuth = (struct nwc_unauthenticate *) cmd->data; -+ cmd->dataLen = datalen; -+ pDAuth->AuthenticationId = auth.AuthenticationId; -+ pDAuth->ConnHandle = (void *) (unsigned long) auth.ConnHandle; -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_get_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_get_conn_info connInfo; -+ struct nwd_conn_info *pDConnInfo; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, replylen, cpylen; -+ -+ cmdlen = sizeof(*cmd) + sizeof(*pDConnInfo); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ cpylen = -+ copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_get_conn_info)); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_GET_CONN_INFO; -+ -+ pDConnInfo = (struct nwd_conn_info *) cmd->data; -+ -+ pDConnInfo->ConnHandle = (void *) (unsigned long) connInfo.ConnHandle; -+ pDConnInfo->uInfoLevel = connInfo.uInfoLevel; -+ pDConnInfo->uInfoLength = connInfo.uInfoLength; -+ cmd->dataLen = sizeof(*pDConnInfo); -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ if (!retCode) { -+ GetConnData(&connInfo, cmd, reply); -+ } -+ -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_set_conn_info(struct 
novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_set_conn_info connInfo; -+ struct nwd_set_conn_info *pDConnInfo; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, replylen, cpylen; -+ -+ cmdlen = sizeof(*cmd) + sizeof(*pDConnInfo); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ cpylen = -+ copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_set_conn_info)); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_SET_CONN_INFO; -+ -+ pDConnInfo = (struct nwd_set_conn_info *) cmd->data; -+ -+ pDConnInfo->ConnHandle = (void *) (unsigned long) connInfo.ConnHandle; -+ pDConnInfo->uInfoLevel = connInfo.uInfoLevel; -+ pDConnInfo->uInfoLength = connInfo.uInfoLength; -+ cmd->dataLen = sizeof(*pDConnInfo); -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_get_id_info(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_get_id_info qidInfo, *gId; -+ struct nwd_get_id_info *idInfo; -+ struct nwc_string xferStr; -+ char *str; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, replylen, cpylen; -+ -+ cmdlen = sizeof(*cmd) + sizeof(*idInfo); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ cpylen = copy_from_user(&qidInfo, pdata->reqData, sizeof(qidInfo)); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_GET_IDENTITY_INFO; -+ -+ idInfo = (struct nwd_get_id_info *) cmd->data; -+ -+ idInfo->AuthenticationId = 
qidInfo.AuthenticationId; -+ cmd->dataLen = sizeof(*idInfo); -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ -+ if (!reply->Reply.ErrorCode) { -+ /* -+ * Save the return info to the user structure. -+ */ -+ gId = pdata->reqData; -+ idInfo = (struct nwd_get_id_info *) reply->data; -+ cpylen = -+ copy_to_user(&gId->AuthenticationId, -+ &idInfo->AuthenticationId, -+ sizeof(idInfo-> -+ AuthenticationId)); -+ cpylen = -+ copy_to_user(&gId->AuthType, -+ &idInfo->AuthType, -+ sizeof(idInfo->AuthType)); -+ cpylen = -+ copy_to_user(&gId->IdentityFlags, -+ &idInfo->IdentityFlags, -+ sizeof(idInfo->IdentityFlags)); -+ cpylen = -+ copy_to_user(&gId->NameType, -+ &idInfo->NameType, -+ sizeof(idInfo->NameType)); -+ cpylen = -+ copy_to_user(&gId->ObjectType, -+ &idInfo->ObjectType, -+ sizeof(idInfo->ObjectType)); -+ -+ cpylen = -+ copy_from_user(&xferStr, gId->pDomainName, -+ sizeof(struct nwc_string)); -+ str = -+ (char *)((char *)reply->data + -+ idInfo->pDomainNameOffset); -+ cpylen = -+ copy_to_user(xferStr.pBuffer, str, -+ idInfo->domainLen); -+ xferStr.DataType = NWC_STRING_TYPE_ASCII; -+ xferStr.DataLen = idInfo->domainLen; -+ cpylen = -+ copy_to_user(gId->pDomainName, &xferStr, -+ sizeof(struct nwc_string)); -+ -+ cpylen = -+ copy_from_user(&xferStr, gId->pObjectName, -+ sizeof(struct nwc_string)); -+ str = -+ (char *)((char *)reply->data + -+ idInfo->pObjectNameOffset); -+ cpylen = -+ copy_to_user(xferStr.pBuffer, str, -+ idInfo->objectLen); -+ xferStr.DataLen = idInfo->objectLen - 1; -+ xferStr.DataType = NWC_STRING_TYPE_ASCII; -+ cpylen = -+ copy_to_user(gId->pObjectName, &xferStr, -+ sizeof(struct nwc_string)); -+ } -+ -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+} -+ -+int novfs_scan_conn_info(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply 
*reply; -+ struct nwc_scan_conn_info connInfo, *rInfo; -+ struct nwd_scan_conn_info *pDConnInfo; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, replylen, cpylen; -+ unsigned char *localData; -+ -+ cpylen = -+ copy_from_user(&connInfo, pdata->reqData, sizeof(struct nwc_scan_conn_info)); -+ -+ cmdlen = sizeof(*cmd) + sizeof(*pDConnInfo) + connInfo.uScanInfoLen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_SCAN_CONN_INFO; -+ -+ pDConnInfo = (struct nwd_scan_conn_info *) cmd->data; -+ -+ DbgPrint("Input Data"); -+ __DbgPrint(" connInfo.uScanIndex = 0x%X\n", connInfo.uScanIndex); -+ __DbgPrint(" connInfo.uConnectionReference = 0x%X\n", -+ connInfo.uConnectionReference); -+ __DbgPrint(" connInfo.uScanInfoLevel = 0x%X\n", -+ connInfo.uScanInfoLevel); -+ __DbgPrint(" connInfo.uScanInfoLen = 0x%X\n", -+ connInfo.uScanInfoLen); -+ __DbgPrint(" connInfo.uReturnInfoLength = 0x%X\n", -+ connInfo.uReturnInfoLength); -+ __DbgPrint(" connInfo.uReturnInfoLevel = 0x%X\n", -+ connInfo.uReturnInfoLevel); -+ __DbgPrint(" connInfo.uScanFlags = 0x%X\n", connInfo.uScanFlags); -+ -+ pDConnInfo->uScanIndex = connInfo.uScanIndex; -+ pDConnInfo->uConnectionReference = -+ connInfo.uConnectionReference; -+ pDConnInfo->uScanInfoLevel = connInfo.uScanInfoLevel; -+ pDConnInfo->uScanInfoLen = connInfo.uScanInfoLen; -+ pDConnInfo->uReturnInfoLength = connInfo.uReturnInfoLength; -+ pDConnInfo->uReturnInfoLevel = connInfo.uReturnInfoLevel; -+ pDConnInfo->uScanFlags = connInfo.uScanFlags; -+ -+ if (pDConnInfo->uScanInfoLen) { -+ localData = (unsigned char *) pDConnInfo; -+ pDConnInfo->uScanConnInfoOffset = sizeof(*pDConnInfo); -+ localData += pDConnInfo->uScanConnInfoOffset; -+ cpylen = -+ copy_from_user(localData, connInfo.pScanConnInfo, -+ connInfo.uScanInfoLen); -+ } else { -+ pDConnInfo->uScanConnInfoOffset = 0; -+ } 
-+ -+ cmd->dataLen = sizeof(*pDConnInfo); -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ DbgPrint("Reply recieved"); -+ __DbgPrint(" NextIndex = %x\n", connInfo.uScanIndex); -+ __DbgPrint(" ErrorCode = %x\n", reply->Reply.ErrorCode); -+ __DbgPrint(" data = %p\n", reply->data); -+ -+ pDConnInfo = (struct nwd_scan_conn_info *) reply->data; -+ retCode = (unsigned long) reply->Reply.ErrorCode; -+ if (!retCode) { -+ GetUserData(&connInfo, cmd, reply); -+ rInfo = (struct nwc_scan_conn_info *) pdata->repData; -+ cpylen = -+ copy_to_user(pdata->repData, -+ &pDConnInfo->uScanIndex, -+ sizeof(pDConnInfo-> -+ uScanIndex)); -+ cpylen = -+ copy_to_user(&rInfo->uConnectionReference, -+ &pDConnInfo-> -+ uConnectionReference, -+ sizeof(pDConnInfo-> -+ uConnectionReference)); -+ } else { -+ unsigned long x; -+ -+ x = 0; -+ rInfo = (struct nwc_scan_conn_info *) pdata->reqData; -+ cpylen = -+ copy_to_user(&rInfo->uConnectionReference, -+ &x, -+ sizeof(rInfo-> -+ uConnectionReference)); -+ } -+ -+ kfree(reply); -+ } else { -+ retCode = -EIO; -+ } -+ kfree(cmd); -+ return (retCode); -+} -+ -+/* -+ * Copies the user data out of the scan conn info call. 
-+ */ -+static void GetUserData(struct nwc_scan_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply) -+{ -+ unsigned long uLevel; -+ struct nwd_scan_conn_info *pDConnInfo; -+ -+ unsigned char *srcData = NULL; -+ unsigned long dataLen = 0, cpylen; -+ -+ pDConnInfo = (struct nwd_scan_conn_info *) reply->data; -+ uLevel = pDConnInfo->uReturnInfoLevel; -+ DbgPrint("uLevel = %d, reply = 0x%p, reply->data = 0x%X", -+ uLevel, reply, reply->data); -+ -+ switch (uLevel) { -+ case NWC_CONN_INFO_RETURN_ALL: -+ case NWC_CONN_INFO_NDS_STATE: -+ case NWC_CONN_INFO_MAX_PACKET_SIZE: -+ case NWC_CONN_INFO_LICENSE_STATE: -+ case NWC_CONN_INFO_PUBLIC_STATE: -+ case NWC_CONN_INFO_SERVICE_TYPE: -+ case NWC_CONN_INFO_DISTANCE: -+ case NWC_CONN_INFO_SERVER_VERSION: -+ case NWC_CONN_INFO_AUTH_ID: -+ case NWC_CONN_INFO_SUSPENDED: -+ case NWC_CONN_INFO_WORKGROUP_ID: -+ case NWC_CONN_INFO_SECURITY_STATE: -+ case NWC_CONN_INFO_CONN_NUMBER: -+ case NWC_CONN_INFO_USER_ID: -+ case NWC_CONN_INFO_BCAST_STATE: -+ case NWC_CONN_INFO_CONN_REF: -+ case NWC_CONN_INFO_AUTH_STATE: -+ case NWC_CONN_INFO_TREE_NAME: -+ case NWC_CONN_INFO_SERVER_NAME: -+ case NWC_CONN_INFO_VERSION: -+ srcData = (unsigned char *) pDConnInfo; -+ srcData += pDConnInfo->uReturnConnInfoOffset; -+ dataLen = pDConnInfo->uReturnInfoLength; -+ break; -+ -+ case NWC_CONN_INFO_TRAN_ADDR: -+ { -+ unsigned char *dstData = connInfo->pReturnConnInfo; -+ struct nwc_tran_addr tranAddr; -+ -+ srcData = (unsigned char *) reply->data; -+ dataLen = reply->dataLen; -+ -+ DbgPrint("NWC_CONN_INFO_TRAN_ADDR 0x%p -> 0x%p :: 0x%X", -+ srcData, connInfo->pReturnConnInfo, dataLen); -+ -+ cpylen = -+ copy_from_user(&tranAddr, dstData, -+ sizeof(tranAddr)); -+ -+ srcData += -+ ((struct nwd_scan_conn_info *) srcData)-> -+ uReturnConnInfoOffset; -+ -+ tranAddr.uTransportType = -+ ((struct nwd_tran_addr *) srcData)->uTransportType; -+ tranAddr.uAddressLength = -+ ((struct tagNwdTranAddrEx *) 
srcData)->uAddressLength; -+ -+ cpylen = -+ copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); -+ cpylen = -+ copy_to_user(tranAddr.puAddress, -+ ((struct tagNwdTranAddrEx *) srcData)->Buffer, -+ ((struct tagNwdTranAddrEx *) srcData)-> -+ uAddressLength); -+ dataLen = 0; -+ break; -+ } -+ case NWC_CONN_INFO_RETURN_NONE: -+ case NWC_CONN_INFO_TREE_NAME_UNICODE: -+ case NWC_CONN_INFO_SERVER_NAME_UNICODE: -+ case NWC_CONN_INFO_LOCAL_TRAN_ADDR: -+ case NWC_CONN_INFO_ALTERNATE_ADDR: -+ case NWC_CONN_INFO_SERVER_GUID: -+ default: -+ break; -+ } -+ -+ if (srcData && dataLen) { -+ DbgPrint("Copy Data 0x%p -> 0x%p :: 0x%X", -+ srcData, connInfo->pReturnConnInfo, dataLen); -+ cpylen = -+ copy_to_user(connInfo->pReturnConnInfo, srcData, dataLen); -+ } -+ -+ return; -+} -+ -+/* -+ * Copies the user data out of the scan conn info call. -+ */ -+static void GetConnData(struct nwc_get_conn_info * connInfo, struct novfs_xplat_call_request *cmd, struct novfs_xplat_call_reply *reply) -+{ -+ unsigned long uLevel; -+ struct nwd_conn_info * pDConnInfo; -+ -+ unsigned char *srcData = NULL; -+ unsigned long dataLen = 0, cpylen; -+ -+ pDConnInfo = (struct nwd_conn_info *) cmd->data; -+ uLevel = pDConnInfo->uInfoLevel; -+ -+ switch (uLevel) { -+ case NWC_CONN_INFO_RETURN_ALL: -+ srcData = (unsigned char *) reply->data; -+ dataLen = reply->dataLen; -+ break; -+ -+ case NWC_CONN_INFO_RETURN_NONE: -+ dataLen = 0; -+ break; -+ -+ case NWC_CONN_INFO_TRAN_ADDR: -+ { -+ unsigned char *dstData = connInfo->pConnInfo; -+ struct nwc_tran_addr tranAddr; -+ -+ srcData = (unsigned char *) reply->data; -+ -+ cpylen = -+ copy_from_user(&tranAddr, dstData, -+ sizeof(tranAddr)); -+ tranAddr.uTransportType = -+ ((struct tagNwdTranAddrEx *) srcData)->uTransportType; -+ tranAddr.uAddressLength = -+ ((struct tagNwdTranAddrEx *) srcData)->uAddressLength; -+ -+ cpylen = -+ copy_to_user(dstData, &tranAddr, sizeof(tranAddr)); -+ cpylen = -+ copy_to_user(tranAddr.puAddress, -+ ((struct tagNwdTranAddrEx *) 
srcData)->Buffer, -+ ((struct tagNwdTranAddrEx *) srcData)-> -+ uAddressLength); -+ dataLen = 0; -+ break; -+ } -+ case NWC_CONN_INFO_NDS_STATE: -+ case NWC_CONN_INFO_MAX_PACKET_SIZE: -+ case NWC_CONN_INFO_LICENSE_STATE: -+ case NWC_CONN_INFO_PUBLIC_STATE: -+ case NWC_CONN_INFO_SERVICE_TYPE: -+ case NWC_CONN_INFO_DISTANCE: -+ case NWC_CONN_INFO_SERVER_VERSION: -+ case NWC_CONN_INFO_AUTH_ID: -+ case NWC_CONN_INFO_SUSPENDED: -+ case NWC_CONN_INFO_WORKGROUP_ID: -+ case NWC_CONN_INFO_SECURITY_STATE: -+ case NWC_CONN_INFO_CONN_NUMBER: -+ case NWC_CONN_INFO_USER_ID: -+ case NWC_CONN_INFO_BCAST_STATE: -+ case NWC_CONN_INFO_CONN_REF: -+ case NWC_CONN_INFO_AUTH_STATE: -+ case NWC_CONN_INFO_VERSION: -+ case NWC_CONN_INFO_SERVER_NAME: -+ case NWC_CONN_INFO_TREE_NAME: -+ srcData = (unsigned char *) reply->data; -+ dataLen = reply->dataLen; -+ break; -+ -+ case NWC_CONN_INFO_TREE_NAME_UNICODE: -+ case NWC_CONN_INFO_SERVER_NAME_UNICODE: -+ break; -+ -+ case NWC_CONN_INFO_LOCAL_TRAN_ADDR: -+ break; -+ -+ case NWC_CONN_INFO_ALTERNATE_ADDR: -+ break; -+ -+ case NWC_CONN_INFO_SERVER_GUID: -+ break; -+ -+ default: -+ break; -+ } -+ -+ if (srcData && dataLen) { -+ cpylen = -+ copy_to_user(connInfo->pConnInfo, srcData, -+ connInfo->uInfoLength); -+ } -+ -+ return; -+} -+ -+int novfs_get_daemon_ver(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwd_get_reqversion *pDVersion; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ -+ datalen = sizeof(*pDVersion); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_GET_REQUESTER_VERSION; -+ cmdlen = sizeof(*cmd); -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, 
&replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ pDVersion = (struct nwd_get_reqversion *) reply->data; -+ cpylen = -+ copy_to_user(pDVersion, pdata->reqData, -+ sizeof(*pDVersion)); -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_get_preferred_DS_tree(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwd_get_pref_ds_tree *pDGetTree; -+ struct nwc_get_pref_ds_tree xplatCall, *p; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ unsigned char *dPtr; -+ -+ cpylen = -+ copy_from_user(&xplatCall, pdata->reqData, -+ sizeof(struct nwc_get_pref_ds_tree)); -+ datalen = sizeof(*pDGetTree) + xplatCall.uTreeLength; -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_GET_PREFERRED_DS_TREE; -+ cmdlen = sizeof(*cmd); -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ if (!retCode) { -+ pDGetTree = -+ (struct nwd_get_pref_ds_tree *) reply->data; -+ dPtr = -+ reply->data + pDGetTree->DsTreeNameOffset; -+ p = (struct nwc_get_pref_ds_tree *) pdata->reqData; -+ -+ DbgPrint("Reply recieved"); -+ __DbgPrint(" TreeLen = %x\n", -+ pDGetTree->uTreeLength); -+ __DbgPrint(" TreeName = %s\n", dPtr); -+ -+ cpylen = -+ copy_to_user(p, &pDGetTree->uTreeLength, 4); -+ cpylen = -+ copy_to_user(xplatCall.pDsTreeName, dPtr, -+ pDGetTree->uTreeLength); -+ } -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_set_preferred_DS_tree(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct 
novfs_xplat_call_reply *reply; -+ struct nwd_set_pref_ds_tree *pDSetTree; -+ struct nwc_set_pref_ds_tree xplatCall; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ unsigned char *dPtr; -+ -+ cpylen = -+ copy_from_user(&xplatCall, pdata->reqData, -+ sizeof(struct nwc_set_pref_ds_tree)); -+ datalen = sizeof(*pDSetTree) + xplatCall.uTreeLength; -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_SET_PREFERRED_DS_TREE; -+ -+ pDSetTree = (struct nwd_set_pref_ds_tree *) cmd->data; -+ pDSetTree->DsTreeNameOffset = sizeof(*pDSetTree); -+ pDSetTree->uTreeLength = xplatCall.uTreeLength; -+ -+ dPtr = cmd->data + sizeof(*pDSetTree); -+ cpylen = -+ copy_from_user(dPtr, xplatCall.pDsTreeName, -+ xplatCall.uTreeLength); -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_set_default_ctx(struct novfs_xplat *pdata, -+ struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_set_def_name_ctx xplatCall; -+ struct nwd_set_def_name_ctx * pDSet; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, datalen, replylen, cpylen; -+ unsigned char *dPtr; -+ -+ cpylen = -+ copy_from_user(&xplatCall, pdata->reqData, -+ sizeof(struct nwc_set_def_name_ctx)); -+ datalen = -+ sizeof(*pDSet) + xplatCall.uTreeLength + xplatCall.uNameLength; -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = 
NWC_SET_DEFAULT_NAME_CONTEXT; -+ cmd->dataLen = -+ sizeof(struct nwd_set_def_name_ctx) + -+ xplatCall.uTreeLength + xplatCall.uNameLength; -+ -+ pDSet = (struct nwd_set_def_name_ctx *) cmd->data; -+ dPtr = cmd->data; -+ -+ pDSet->TreeOffset = sizeof(struct nwd_set_def_name_ctx); -+ pDSet->uTreeLength = xplatCall.uTreeLength; -+ pDSet->NameContextOffset = -+ pDSet->TreeOffset + xplatCall.uTreeLength; -+ pDSet->uNameLength = xplatCall.uNameLength; -+ -+ //sgled cpylen = copy_from_user(dPtr+pDSet->TreeOffset, xplatCall.pTreeName, xplatCall.uTreeLength); -+ cpylen = copy_from_user(dPtr + pDSet->TreeOffset, xplatCall.pDsTreeName, xplatCall.uTreeLength); //sgled -+ cpylen = -+ copy_from_user(dPtr + pDSet->NameContextOffset, -+ xplatCall.pNameContext, -+ xplatCall.uNameLength); -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_get_default_ctx(struct novfs_xplat *pdata, -+ struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_get_def_name_ctx xplatCall; -+ struct nwd_get_def_name_ctx * pGet; -+ char *dPtr; -+ int retCode = -ENOMEM; -+ unsigned long cmdlen, replylen, cpylen; -+ -+ cpylen = -+ copy_from_user(&xplatCall, pdata->reqData, -+ sizeof(struct nwc_get_def_name_ctx)); -+ cmdlen = -+ sizeof(*cmd) + sizeof(struct nwd_get_def_name_ctx ) + -+ xplatCall.uTreeLength; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_GET_DEFAULT_NAME_CONTEXT; -+ cmd->dataLen = -+ sizeof(struct nwd_get_def_name_ctx) + xplatCall.uTreeLength; -+ -+ pGet = (struct nwd_get_def_name_ctx *) cmd->data; -+ dPtr = cmd->data; -+ -+ pGet->TreeOffset = 
sizeof(struct nwd_get_def_name_ctx ); -+ pGet->uTreeLength = xplatCall.uTreeLength; -+ -+ //sgled cpylen = copy_from_user( dPtr + pGet->TreeOffset, xplatCall.pTreeName, xplatCall.uTreeLength); -+ cpylen = copy_from_user(dPtr + pGet->TreeOffset, xplatCall.pDsTreeName, xplatCall.uTreeLength); //sgled -+ dPtr[pGet->TreeOffset + pGet->uTreeLength] = 0; -+ -+ retCode = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ retCode = reply->Reply.ErrorCode; -+ if (!retCode) { -+ pGet = (struct nwd_get_def_name_ctx *) reply->data; -+ -+ DbgPrint("retCode=0x%x uNameLength1=%d uNameLength2=%d", -+ retCode, pGet->uNameLength, -+ xplatCall.uNameLength); -+ if (xplatCall.uNameLength < pGet->uNameLength) { -+ pGet->uNameLength = -+ xplatCall.uNameLength; -+ retCode = NWE_BUFFER_OVERFLOW; -+ } -+ dPtr = (char *)pGet + pGet->NameContextOffset; -+ cpylen = -+ copy_to_user(xplatCall.pNameContext, dPtr, -+ pGet->uNameLength); -+ } -+ -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (retCode); -+ -+} -+ -+int novfs_query_feature(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct nwc_query_feature xpCall; -+ int status = 0; -+ unsigned long cpylen; -+ -+ cpylen = -+ copy_from_user(&xpCall, pdata->reqData, sizeof(struct nwc_query_feature)); -+ switch (xpCall.Feature) { -+ case NWC_FEAT_NDS: -+ case NWC_FEAT_NDS_MTREE: -+ case NWC_FEAT_PRN_CAPTURE: -+ case NWC_FEAT_NDS_RESOLVE: -+ -+ status = NWE_REQUESTER_FAILURE; -+ -+ } -+ return (status); -+} -+ -+int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, -+ struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_get_tree_monitored_conn_ref xplatCall, *p; -+ struct nwd_get_tree_monitored_conn_ref *pDConnRef; -+ char *dPtr; -+ unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; -+ -+ cpylen = -+ copy_from_user(&xplatCall, pdata->reqData, -+ sizeof(struct 
nwc_get_tree_monitored_conn_ref)); -+ datalen = sizeof(*pDConnRef) + xplatCall.pTreeName->DataLen; -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_GET_TREE_MONITORED_CONN_REF; -+ -+ pDConnRef = (struct nwd_get_tree_monitored_conn_ref *) cmd->data; -+ pDConnRef->TreeName.boffset = sizeof(*pDConnRef); -+ pDConnRef->TreeName.len = xplatCall.pTreeName->DataLen; -+ pDConnRef->TreeName.type = xplatCall.pTreeName->DataType; -+ -+ dPtr = cmd->data + sizeof(*pDConnRef); -+ cpylen = -+ copy_from_user(dPtr, xplatCall.pTreeName->pBuffer, -+ pDConnRef->TreeName.len); -+ status = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ pDConnRef = (struct nwd_get_tree_monitored_conn_ref *) reply->data; -+ dPtr = reply->data + pDConnRef->TreeName.boffset; -+ p = (struct nwc_get_tree_monitored_conn_ref *) pdata->reqData; -+ cpylen = -+ copy_to_user(&p->uConnReference, -+ &pDConnRef->uConnReference, 4); -+ -+ status = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (status); -+} -+ -+int novfs_enum_ids(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_enum_ids xplatCall, *eId; -+ struct nwd_enum_ids *pEnum; -+ struct nwc_string xferStr; -+ char *str; -+ unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; -+ -+ cpylen = -+ copy_from_user(&xplatCall, pdata->reqData, -+ sizeof(struct nwc_enum_ids)); -+ datalen = sizeof(*pEnum); -+ cmdlen = datalen + sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ 
cmd->NwcCommand = NWC_ENUMERATE_IDENTITIES; -+ -+ DbgPrint("Send Request"); -+ __DbgPrint(" iterator = %x\n", xplatCall.Iterator); -+ __DbgPrint(" cmdlen = %d\n", cmdlen); -+ -+ pEnum = (struct nwd_enum_ids *) cmd->data; -+ pEnum->Iterator = xplatCall.Iterator; -+ status = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ status = reply->Reply.ErrorCode; -+ -+ eId = pdata->repData; -+ pEnum = (struct nwd_enum_ids *) reply->data; -+ cpylen = -+ copy_to_user(&eId->Iterator, &pEnum->Iterator, -+ sizeof(pEnum->Iterator)); -+ DbgPrint("[XPLAT NWCAPI] Found AuthId 0x%X", -+ pEnum->AuthenticationId); -+ cpylen = -+ copy_to_user(&eId->AuthenticationId, -+ &pEnum->AuthenticationId, -+ sizeof(pEnum->AuthenticationId)); -+ cpylen = -+ copy_to_user(&eId->AuthType, &pEnum->AuthType, -+ sizeof(pEnum->AuthType)); -+ cpylen = -+ copy_to_user(&eId->IdentityFlags, -+ &pEnum->IdentityFlags, -+ sizeof(pEnum->IdentityFlags)); -+ cpylen = -+ copy_to_user(&eId->NameType, &pEnum->NameType, -+ sizeof(pEnum->NameType)); -+ cpylen = -+ copy_to_user(&eId->ObjectType, &pEnum->ObjectType, -+ sizeof(pEnum->ObjectType)); -+ -+ if (!status) { -+ cpylen = -+ copy_from_user(&xferStr, eId->pDomainName, -+ sizeof(struct nwc_string)); -+ str = -+ (char *)((char *)reply->data + -+ pEnum->domainNameOffset); -+ DbgPrint("[XPLAT NWCAPI] Found Domain %s", -+ str); -+ cpylen = -+ copy_to_user(xferStr.pBuffer, str, -+ pEnum->domainNameLen); -+ xferStr.DataType = NWC_STRING_TYPE_ASCII; -+ xferStr.DataLen = pEnum->domainNameLen - 1; -+ cpylen = -+ copy_to_user(eId->pDomainName, &xferStr, -+ sizeof(struct nwc_string)); -+ -+ cpylen = -+ copy_from_user(&xferStr, eId->pObjectName, -+ sizeof(struct nwc_string)); -+ str = -+ (char *)((char *)reply->data + -+ pEnum->objectNameOffset); -+ DbgPrint("[XPLAT NWCAPI] Found User %s", str); -+ cpylen = -+ copy_to_user(xferStr.pBuffer, str, -+ pEnum->objectNameLen); -+ xferStr.DataType = 
NWC_STRING_TYPE_ASCII; -+ xferStr.DataLen = pEnum->objectNameLen - 1; -+ cpylen = -+ copy_to_user(eId->pObjectName, &xferStr, -+ sizeof(struct nwc_string)); -+ } -+ -+ kfree(reply); -+ -+ } -+ kfree(cmd); -+ return (status); -+} -+ -+int novfs_change_auth_key(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_change_key xplatCall; -+ struct nwd_change_key *pNewKey; -+ struct nwc_string xferStr; -+ char *str; -+ unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; -+ -+ cpylen = -+ copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_change_key)); -+ -+ datalen = -+ sizeof(struct nwd_change_key) + xplatCall.pDomainName->DataLen + -+ xplatCall.pObjectName->DataLen + xplatCall.pNewPassword->DataLen + -+ xplatCall.pVerifyPassword->DataLen; -+ -+ cmdlen = sizeof(*cmd) + datalen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ pNewKey = (struct nwd_change_key *) cmd->data; -+ cmd->dataLen = datalen; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_CHANGE_KEY; -+ -+ pNewKey->NameType = xplatCall.NameType; -+ pNewKey->ObjectType = xplatCall.ObjectType; -+ pNewKey->AuthType = xplatCall.AuthType; -+ str = (char *)pNewKey; -+ -+ /* -+ * Get the tree name -+ */ -+ str += sizeof(*pNewKey); -+ cpylen = -+ copy_from_user(&xferStr, xplatCall.pDomainName, -+ sizeof(struct nwc_string)); -+ pNewKey->domainNameOffset = sizeof(*pNewKey); -+ cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); -+ pNewKey->domainNameLen = xferStr.DataLen; -+ -+ /* -+ * Get the User Name -+ */ -+ str += pNewKey->domainNameLen; -+ cpylen = -+ copy_from_user(&xferStr, xplatCall.pObjectName, -+ sizeof(struct nwc_string)); -+ pNewKey->objectNameOffset = -+ pNewKey->domainNameOffset + pNewKey->domainNameLen; -+ cpylen = copy_from_user(str, 
xferStr.pBuffer, xferStr.DataLen); -+ pNewKey->objectNameLen = xferStr.DataLen; -+ -+ /* -+ * Get the New Password -+ */ -+ str += pNewKey->objectNameLen; -+ cpylen = -+ copy_from_user(&xferStr, xplatCall.pNewPassword, -+ sizeof(struct nwc_string)); -+ pNewKey->newPasswordOffset = -+ pNewKey->objectNameOffset + pNewKey->objectNameLen; -+ cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); -+ pNewKey->newPasswordLen = xferStr.DataLen; -+ -+ /* -+ * Get the Verify Password -+ */ -+ str += pNewKey->newPasswordLen; -+ cpylen = -+ copy_from_user(&xferStr, xplatCall.pVerifyPassword, -+ sizeof(struct nwc_string)); -+ pNewKey->verifyPasswordOffset = -+ pNewKey->newPasswordOffset + pNewKey->newPasswordLen; -+ cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); -+ pNewKey->verifyPasswordLen = xferStr.DataLen; -+ -+ status = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ status = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ memset(cmd, 0, cmdlen); -+ -+ kfree(cmd); -+ return (status); -+} -+ -+int novfs_set_pri_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_set_primary_conn xplatCall; -+ struct nwd_set_primary_conn *pConn; -+ unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; -+ -+ cpylen = -+ copy_from_user(&xplatCall, pdata->reqData, -+ sizeof(struct nwc_set_primary_conn)); -+ -+ datalen = sizeof(struct nwd_set_primary_conn); -+ cmdlen = sizeof(*cmd) + datalen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ pConn = (struct nwd_set_primary_conn *) cmd->data; -+ cmd->dataLen = datalen; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_SET_PRIMARY_CONN; -+ pConn->ConnHandle = (void *) (unsigned long) 
xplatCall.ConnHandle; -+ status = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ -+ if (reply) { -+ status = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (status); -+} -+ -+int novfs_get_pri_conn(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request cmd; -+ struct novfs_xplat_call_reply *reply; -+ unsigned long status = -ENOMEM, cmdlen, replylen, cpylen; -+ -+ cmdlen = (unsigned long) (&((struct novfs_xplat_call_request *) 0)->data); -+ -+ cmd.dataLen = 0; -+ cmd.Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd.Command.SequenceNumber = 0; -+ cmd.Command.SessionId = Session; -+ cmd.NwcCommand = NWC_GET_PRIMARY_CONN; -+ -+ status = -+ Queue_Daemon_Command((void *)&cmd, cmdlen, NULL, 0, (void **)&reply, -+ &replylen, INTERRUPTIBLE); -+ -+ if (reply) { -+ status = reply->Reply.ErrorCode; -+ if (!status) { -+ cpylen = -+ copy_to_user(pdata->repData, reply->data, -+ sizeof(unsigned long)); -+ } -+ -+ kfree(reply); -+ } -+ -+ return (status); -+} -+ -+int novfs_set_map_drive(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ unsigned long status = 0, datalen, cmdlen, replylen; -+ struct nwc_map_drive_ex symInfo; -+ -+ DbgPrint(""); -+ cmdlen = sizeof(*cmd); -+ if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo))) -+ return -EFAULT; -+ datalen = sizeof(symInfo) + symInfo.dirPathOffsetLength + -+ symInfo.linkOffsetLength; -+ -+ __DbgPrint(" cmdlen = %d\n", cmdlen); -+ __DbgPrint(" dataLen = %d\n", datalen); -+ __DbgPrint(" symInfo.dirPathOffsetLength = %d\n", -+ symInfo.dirPathOffsetLength); -+ __DbgPrint(" symInfo.linkOffsetLength = %d\n", symInfo.linkOffsetLength); -+ __DbgPrint(" pdata->datalen = %d\n", pdata->reqLen); -+ -+ novfs_dump(sizeof(symInfo), &symInfo); -+ -+ cmdlen += datalen; -+ -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if 
(!cmd) -+ return -ENOMEM; -+ -+ cmd->dataLen = datalen; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_MAP_DRIVE; -+ -+ if (copy_from_user(cmd->data, pdata->reqData, datalen)) { -+ kfree(cmd); -+ return -EFAULT; -+ } -+ status = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ -+ if (reply) { -+ status = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (status); -+ -+} -+ -+int novfs_unmap_drive(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ unsigned long status = 0, datalen, cmdlen, replylen, cpylen; -+ struct nwc_unmap_drive_ex symInfo; -+ -+ DbgPrint(""); -+ -+ cpylen = copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo)); -+ cmdlen = sizeof(*cmd); -+ datalen = sizeof(symInfo) + symInfo.linkLen; -+ -+ cmdlen += datalen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->dataLen = datalen; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_UNMAP_DRIVE; -+ -+ cpylen = copy_from_user(cmd->data, pdata->reqData, datalen); -+ status = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ -+ if (reply) { -+ status = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (status); -+} -+ -+int novfs_enum_drives(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ unsigned long status = 0, cmdlen, replylen, cpylen; -+ unsigned long offset; -+ char *cp; -+ -+ DbgPrint(""); -+ -+ cmdlen = sizeof(*cmd); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->dataLen = 0; -+ 
cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_ENUMERATE_DRIVES; -+ status = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ -+ if (reply) { -+ status = reply->Reply.ErrorCode; -+ DbgPrint("Status Code = 0x%X", status); -+ if (!status) { -+ offset = -+ sizeof(((struct nwc_get_mapped_drives *) pdata-> -+ repData)->MapBuffLen); -+ cp = reply->data; -+ replylen = -+ ((struct nwc_get_mapped_drives *) pdata->repData)-> -+ MapBuffLen; -+ cpylen = -+ copy_to_user(pdata->repData, cp, offset); -+ cp += offset; -+ cpylen = -+ copy_to_user(((struct nwc_get_mapped_drives *) pdata-> -+ repData)->MapBuffer, cp, -+ min(replylen - offset, -+ reply->dataLen - offset)); -+ } -+ -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (status); -+} -+ -+int novfs_get_bcast_msg(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ unsigned long cmdlen, replylen; -+ int status = 0x8866, cpylen; -+ struct nwc_get_bcast_notification msg; -+ struct nwd_get_bcast_notification *dmsg; -+ -+ cmdlen = sizeof(*cmd) + sizeof(*dmsg); -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cpylen = copy_from_user(&msg, pdata->reqData, sizeof(msg)); -+ cmd->dataLen = sizeof(*dmsg); -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ -+ cmd->NwcCommand = NWC_GET_BROADCAST_MESSAGE; -+ dmsg = (struct nwd_get_bcast_notification *) cmd->data; -+ dmsg->uConnReference = (void *) (unsigned long) msg.uConnReference; -+ -+ status = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ -+ if (reply) { -+ status = reply->Reply.ErrorCode; -+ -+ if (!status) { -+ char *cp = pdata->repData; -+ -+ dmsg = -+ (struct 
nwd_get_bcast_notification *) reply->data; -+ if (pdata->repLen < dmsg->messageLen) { -+ dmsg->messageLen = pdata->repLen; -+ } -+ msg.messageLen = dmsg->messageLen; -+ cpylen = -+ offsetof(struct -+ nwc_get_bcast_notification, -+ message); -+ cp += cpylen; -+ cpylen = -+ copy_to_user(pdata->repData, &msg, cpylen); -+ cpylen = -+ copy_to_user(cp, dmsg->message, -+ msg.messageLen); -+ } else { -+ msg.messageLen = 0; -+ msg.message[0] = 0; -+ cpylen = offsetof(struct -+ nwc_get_bcast_notification, -+ message); -+ cpylen = -+ copy_to_user(pdata->repData, &msg, -+ sizeof(msg)); -+ } -+ -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (status); -+} -+ -+int novfs_set_key_value(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_set_key xplatCall; -+ struct nwd_set_key *pNewKey; -+ struct nwc_string cstrObjectName, cstrPassword; -+ char *str; -+ unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; -+ -+ cpylen = copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_set_key)); -+ cpylen = -+ copy_from_user(&cstrObjectName, xplatCall.pObjectName, -+ sizeof(struct nwc_string)); -+ cpylen = -+ copy_from_user(&cstrPassword, xplatCall.pNewPassword, -+ sizeof(struct nwc_string)); -+ -+ datalen = -+ sizeof(struct nwd_set_key ) + cstrObjectName.DataLen + cstrPassword.DataLen; -+ -+ cmdlen = sizeof(*cmd) + datalen; -+ cmd = kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ pNewKey = (struct nwd_set_key *) cmd->data; -+ cmd->dataLen = datalen; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_SET_KEY; -+ -+ pNewKey->ObjectType = xplatCall.ObjectType; -+ pNewKey->AuthenticationId = xplatCall.AuthenticationId; -+ pNewKey->ConnHandle = (void *) (unsigned long) xplatCall.ConnHandle; -+ str = (char *)pNewKey; -+ -+ /* -+ * Get the User Name -+ 
*/ -+ str += sizeof(struct nwd_set_key ); -+ cpylen = -+ copy_from_user(str, cstrObjectName.pBuffer, -+ cstrObjectName.DataLen); -+ -+ str += pNewKey->objectNameLen = cstrObjectName.DataLen; -+ pNewKey->objectNameOffset = sizeof(struct nwd_set_key ); -+ -+ /* -+ * Get the Verify Password -+ */ -+ cpylen = -+ copy_from_user(str, cstrPassword.pBuffer, -+ cstrPassword.DataLen); -+ -+ pNewKey->newPasswordLen = cstrPassword.DataLen; -+ pNewKey->newPasswordOffset = -+ pNewKey->objectNameOffset + pNewKey->objectNameLen; -+ -+ status = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ status = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (status); -+} -+ -+int novfs_verify_key_value(struct novfs_xplat *pdata, struct novfs_schandle Session) -+{ -+ struct novfs_xplat_call_request *cmd; -+ struct novfs_xplat_call_reply *reply; -+ struct nwc_verify_key xplatCall; -+ struct nwd_verify_key *pNewKey; -+ struct nwc_string xferStr; -+ char *str; -+ unsigned long status = -ENOMEM, cmdlen, datalen, replylen, cpylen; -+ -+ cpylen = -+ copy_from_user(&xplatCall, pdata->reqData, sizeof(struct nwc_verify_key)); -+ -+ datalen = -+ sizeof(struct nwd_verify_key) + xplatCall.pDomainName->DataLen + -+ xplatCall.pObjectName->DataLen + xplatCall.pVerifyPassword->DataLen; -+ -+ cmdlen = sizeof(*cmd) + datalen; -+ cmd = (struct novfs_xplat_call_request *)kmalloc(cmdlen, GFP_KERNEL); -+ -+ if (!cmd) -+ return -ENOMEM; -+ -+ pNewKey = (struct nwd_verify_key *) cmd->data; -+ cmd->dataLen = datalen; -+ cmd->Command.CommandType = VFS_COMMAND_XPLAT_CALL; -+ cmd->Command.SequenceNumber = 0; -+ cmd->Command.SessionId = Session; -+ cmd->NwcCommand = NWC_VERIFY_KEY; -+ -+ pNewKey->NameType = xplatCall.NameType; -+ pNewKey->ObjectType = xplatCall.ObjectType; -+ pNewKey->AuthType = xplatCall.AuthType; -+ str = (char *)pNewKey; -+ -+ /* -+ * Get the tree name -+ */ -+ str += sizeof(*pNewKey); -+ cpylen = -+ 
copy_from_user(&xferStr, xplatCall.pDomainName, -+ sizeof(struct nwc_string)); -+ pNewKey->domainNameOffset = sizeof(*pNewKey); -+ cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); -+ pNewKey->domainNameLen = xferStr.DataLen; -+ -+ /* -+ * Get the User Name -+ */ -+ str += pNewKey->domainNameLen; -+ cpylen = -+ copy_from_user(&xferStr, xplatCall.pObjectName, -+ sizeof(struct nwc_string)); -+ pNewKey->objectNameOffset = -+ pNewKey->domainNameOffset + pNewKey->domainNameLen; -+ cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); -+ pNewKey->objectNameLen = xferStr.DataLen; -+ -+ /* -+ * Get the Verify Password -+ */ -+ str += pNewKey->objectNameLen; -+ cpylen = -+ copy_from_user(&xferStr, xplatCall.pVerifyPassword, -+ sizeof(struct nwc_string)); -+ pNewKey->verifyPasswordOffset = -+ pNewKey->objectNameOffset + pNewKey->objectNameLen; -+ cpylen = copy_from_user(str, xferStr.pBuffer, xferStr.DataLen); -+ pNewKey->verifyPasswordLen = xferStr.DataLen; -+ -+ status = -+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0, -+ (void **)&reply, &replylen, -+ INTERRUPTIBLE); -+ if (reply) { -+ status = reply->Reply.ErrorCode; -+ kfree(reply); -+ } -+ kfree(cmd); -+ return (status); -+} ---- /dev/null -+++ b/fs/novfs/nwcapi.h -@@ -0,0 +1,1416 @@ -+/* -+ * NetWare Redirector for Linux -+ * Author: Sheffer Clark -+ * -+ * This file contains all typedefs and constants for the NetWare Client APIs. -+ * -+ * Copyright (C) 2005 Novell, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. 
-+ */ -+#ifndef __NWCLNX_H__ -+#define __NWCLNX_H__ -+ -+#if 0 //sgled hack -+#else //sgled hack (up to endif) -+ -+#define NW_MAX_TREE_NAME_LEN 33 -+#define NW_MAX_SERVICE_TYPE_LEN 49 -+/* Transport Type - (nuint32 value) */ -+#define NWC_TRAN_TYPE_IPX 0x0001 -+#define NWC_TRAN_TYPE_DDP 0x0003 -+#define NWC_TRAN_TYPE_ASP 0x0004 -+#define NWC_TRAN_TYPE_UDP 0x0008 -+#define NWC_TRAN_TYPE_TCP 0x0009 -+#define NWC_TRAN_TYPE_UDP6 0x000A -+#define NWC_TRAN_TYPE_TCP6 0x000B -+#define NWC_TRAN_TYPE_WILD 0x8000 -+ -+// -+// DeviceIoControl requests for the NetWare Redirector -+// -+// Macro definition for defining DeviceIoControl function control codes. -+// The function codes 0 - 2047 are reserved for Microsoft. -+// Function codes 2048 - 4096 are reserved for customers. -+// The NetWare Redirector will use codes beginning at 3600. -+// -+// METHOD_NEITHER User buffers will be passed directly from the application -+// to the file system. The redirector is responsible for either probing -+// and locking the buffers or using a try - except around access of the -+// buffers. 
-+ -+#define BASE_REQ_NUM 0x4a541000 -+ -+// Connection functions -+#define NWC_OPEN_CONN_BY_NAME (BASE_REQ_NUM + 0) -+#define NWC_OPEN_CONN_BY_ADDRESS (BASE_REQ_NUM + 1) -+#define NWC_OPEN_CONN_BY_REFERENCE (BASE_REQ_NUM + 2) -+#define NWC_CLOSE_CONN (BASE_REQ_NUM + 3) -+#define NWC_SYS_CLOSE_CONN (BASE_REQ_NUM + 4) -+#define NWC_GET_CONN_INFO (BASE_REQ_NUM + 5) -+#define NWC_SET_CONN_INFO (BASE_REQ_NUM + 6) -+#define NWC_SCAN_CONN_INFO (BASE_REQ_NUM + 7) -+#define NWC_MAKE_CONN_PERMANENT (BASE_REQ_NUM + 8) -+#define NWC_LICENSE_CONN (BASE_REQ_NUM + 9) -+#define NWC_UNLICENSE_CONN (BASE_REQ_NUM + 10) -+#define NWC_GET_NUM_CONNS (BASE_REQ_NUM + 11) -+#define NWC_GET_PREFERRED_SERVER (BASE_REQ_NUM + 12) -+#define NWC_SET_PREFERRED_SERVER (BASE_REQ_NUM + 13) -+#define NWC_GET_PRIMARY_CONN (BASE_REQ_NUM + 14) -+#define NWC_SET_PRIMARY_CONN (BASE_REQ_NUM + 15) -+ -+// Authentication functions -+#define NWC_CHANGE_KEY (BASE_REQ_NUM + 20) -+#define NWC_ENUMERATE_IDENTITIES (BASE_REQ_NUM + 21) -+#define NWC_GET_IDENTITY_INFO (BASE_REQ_NUM + 22) -+#define NWC_LOGIN_IDENTITY (BASE_REQ_NUM + 23) -+#define NWC_LOGOUT_IDENTITY (BASE_REQ_NUM + 24) -+#define NWC_SET_KEY (BASE_REQ_NUM + 25) -+#define NWC_VERIFY_KEY (BASE_REQ_NUM + 26) -+#define NWC_AUTHENTICATE_CONN_WITH_ID (BASE_REQ_NUM + 27) -+#define NWC_UNAUTHENTICATE_CONN (BASE_REQ_NUM + 28) -+ -+// Directory Services functions -+#define NWC_GET_DEFAULT_NAME_CONTEXT (BASE_REQ_NUM + 30) -+#define NWC_SET_DEFAULT_NAME_CONTEXT (BASE_REQ_NUM + 31) -+#define NWC_GET_PREFERRED_DS_TREE (BASE_REQ_NUM + 32) -+#define NWC_SET_PREFERRED_DS_TREE (BASE_REQ_NUM + 33) -+#define NWC_GET_TREE_MONITORED_CONN_REF (BASE_REQ_NUM + 34) -+#define NWC_NDS_RESOLVE_NAME_TO_ID (BASE_REQ_NUM + 35) -+ -+// NCP Request functions -+#define NWC_FRAGMENT_REQUEST (BASE_REQ_NUM + 40) -+#define NWC_NCP_ORDERED_REQUEST_ALL (BASE_REQ_NUM + 41) -+#define NWC_RAW_NCP_REQUEST (BASE_REQ_NUM + 42) -+#define NWC_RAW_NCP_REQUEST_ALL (BASE_REQ_NUM + 43) -+ -+// File 
Handle Conversion functions -+#define NWC_CONVERT_LOCAL_HANDLE (BASE_REQ_NUM + 50) -+#define NWC_CONVERT_NETWARE_HANDLE (BASE_REQ_NUM + 51) -+ -+// Misc. functions -+#define NWC_MAP_DRIVE (BASE_REQ_NUM + 60) -+#define NWC_UNMAP_DRIVE (BASE_REQ_NUM + 61) -+#define NWC_ENUMERATE_DRIVES (BASE_REQ_NUM + 62) -+ -+#define NWC_GET_REQUESTER_VERSION (BASE_REQ_NUM + 63) -+#define NWC_QUERY_FEATURE (BASE_REQ_NUM + 64) -+ -+#define NWC_GET_CONFIGURED_NSPS (BASE_REQ_NUM + 65) -+ -+#define NWC_GET_MOUNT_PATH (BASE_REQ_NUM + 66) -+ -+#define NWC_GET_BROADCAST_MESSAGE (BASE_REQ_NUM + 67) -+ -+#endif //sgled hack ------------------------------- -+ -+#define IOC_XPLAT 0x4a540002 -+ -+struct novfs_xplat { -+ int xfunction; -+ unsigned long reqLen; -+ void *reqData; -+ unsigned long repLen; -+ void *repData; -+ -+}; -+ -+#if 0 -+N_EXTERN_LIBRARY(NWRCODE) -+ NWCLnxReq -+ (nuint32 request, nptr pInBuf, nuint32 inLen, nptr pOutBuf, nuint32 outLen); -+#endif -+// -+// Network Name Format Type -+// -+ -+#define NWC_NAME_FORMAT_NDS 0x0001 -+#define NWC_NAME_FORMAT_BIND 0x0002 -+#define NWC_NAME_FORMAT_BDP 0x0004 -+#define NWC_NAME_FORMAT_NDS_TREE 0x0008 -+#define NWC_NAME_FORMAT_WILD 0x8000 -+ -+// -+// API String Types -+// -+ -+#define NWC_STRING_TYPE_ASCII 0x0001 // multi-byte, not really ascii -+#define NWC_STRING_TYPE_UNICODE 0x0002 -+#define NWC_STRING_TYPE_UTF8 0x0003 -+ -+// -+// Open Connection Flags -+// -+ -+#define NWC_OPEN_LICENSED 0x0001 -+#define NWC_OPEN_UNLICENSED 0x0002 -+#define NWC_OPEN_PRIVATE 0x0004 -+#define NWC_OPEN_PUBLIC 0x0008 -+#define NWC_OPEN_EXISTING_HANDLE 0x0010 -+#define NWC_OPEN_NO_HANDLE 0x0020 -+#define NWC_OPEN_PERMANENT 0x0040 -+#define NWC_OPEN_DISCONNECTED 0x0080 -+#define NWC_OPEN_NEAREST 0x0100 -+#define NWC_OPEN_IGNORE_CACHE 0x0200 -+ -+// -+// Close Connection Flags -+// -+ -+#define NWC_CLOSE_TEMPORARY 0x0000 -+#define NWC_CLOSE_PERMANENT 0x0001 -+ -+// -+// Connection Information Levels -+// -+ -+#define NWC_CONN_INFO_RETURN_ALL 0xFFFF 
-+#define NWC_CONN_INFO_RETURN_NONE 0x0000 -+#define NWC_CONN_INFO_VERSION 0x0001 -+#define NWC_CONN_INFO_AUTH_STATE 0x0002 -+#define NWC_CONN_INFO_BCAST_STATE 0x0003 -+#define NWC_CONN_INFO_CONN_REF 0x0004 -+#define NWC_CONN_INFO_TREE_NAME 0x0005 -+#define NWC_CONN_INFO_WORKGROUP_ID 0x0006 -+#define NWC_CONN_INFO_SECURITY_STATE 0x0007 -+#define NWC_CONN_INFO_CONN_NUMBER 0x0008 -+#define NWC_CONN_INFO_USER_ID 0x0009 -+#define NWC_CONN_INFO_SERVER_NAME 0x000A -+#define NWC_CONN_INFO_TRAN_ADDR 0x000B -+#define NWC_CONN_INFO_NDS_STATE 0x000C -+#define NWC_CONN_INFO_MAX_PACKET_SIZE 0x000D -+#define NWC_CONN_INFO_LICENSE_STATE 0x000E -+#define NWC_CONN_INFO_PUBLIC_STATE 0x000F -+#define NWC_CONN_INFO_SERVICE_TYPE 0x0010 -+#define NWC_CONN_INFO_DISTANCE 0x0011 -+#define NWC_CONN_INFO_SERVER_VERSION 0x0012 -+#define NWC_CONN_INFO_AUTH_ID 0x0013 -+#define NWC_CONN_INFO_SUSPENDED 0x0014 -+#define NWC_CONN_INFO_TREE_NAME_UNICODE 0x0015 -+#define NWC_CONN_INFO_SERVER_NAME_UNICODE 0x0016 -+#define NWC_CONN_INFO_LOCAL_TRAN_ADDR 0x0017 -+#define NWC_CONN_INFO_ALTERNATE_ADDR 0x0018 -+#define NWC_CONN_INFO_SERVER_GUID 0x0019 -+ -+#define NWC_CONN_INFO_MAX_LEVEL 0x0014 -+ -+// -+// Information Versions -+// -+ -+#define NWC_INFO_VERSION_1 0x0001 -+#define NWC_INFO_VERSION_2 0x0002 -+ -+// -+// Authentication State -+// -+ -+#define NWC_AUTH_TYPE_NONE 0x0000 -+#define NWC_AUTH_TYPE_BINDERY 0x0001 -+#define NWC_AUTH_TYPE_NDS 0x0002 -+#define NWC_AUTH_TYPE_PNW 0x0003 -+ -+#define NWC_AUTH_STATE_NONE 0x0000 -+#define NWC_AUTH_STATE_BINDERY 0x0001 -+#define NWC_AUTH_STATE_NDS 0x0002 -+#define NWC_AUTH_STATE_PNW 0x0003 -+ -+// -+// Authentication Flags -+// -+ -+#define NWC_AUTH_PRIVATE 0x00000004 -+#define NWC_AUTH_PUBLIC 0x00000008 -+ -+// -+// Broadcast State -+// -+ -+#define NWC_BCAST_PERMIT_ALL 0x0000 -+#define NWC_BCAST_PERMIT_SYSTEM 0x0001 -+#define NWC_BCAST_PERMIT_NONE 0x0002 -+#define NWC_BCAST_PERMIT_SYSTEM_POLLED 0x0003 -+#define NWC_BCAST_PERMIT_ALL_POLLED 0x0004 -+ -+// 
-+// Broadcast State -+// -+ -+#define NWC_NDS_NOT_CAPABLE 0x0000 -+#define NWC_NDS_CAPABLE 0x0001 -+ -+// -+// License State -+// -+ -+#define NWC_NOT_LICENSED 0x0000 -+#define NWC_CONNECTION_LICENSED 0x0001 -+#define NWC_HANDLE_LICENSED 0x0002 -+ -+// -+// Public State -+// -+ -+#define NWC_CONN_PUBLIC 0x0000 -+#define NWC_CONN_PRIVATE 0x0001 -+ -+// -+// Scan Connection Information Flags used -+// for finding connections by specific criteria -+// -+ -+#define NWC_MATCH_NOT_EQUALS 0x0000 -+#define NWC_MATCH_EQUALS 0x0001 -+#define NWC_RETURN_PUBLIC 0x0002 -+#define NWC_RETURN_PRIVATE 0x0004 -+#define NWC_RETURN_LICENSED 0x0008 -+#define NWC_RETURN_UNLICENSED 0x0010 -+ -+// -+// Authentication Types -+// -+ -+#define NWC_AUTHENT_BIND 0x0001 -+#define NWC_AUTHENT_NDS 0x0002 -+#define NWC_AUTHENT_PNW 0x0003 -+ -+// -+// Disconnected info -+// -+ -+#define NWC_SUSPENDED 0x0001 -+ -+// -+// Maximum object lengths -+// -+ -+#define MAX_DEVICE_LENGTH 16 -+#define MAX_NETWORK_NAME_LENGTH 1024 -+#define MAX_OBJECT_NAME_LENGTH 48 -+#define MAX_PASSWORD_LENGTH 128 -+#define MAX_SERVER_NAME_LENGTH 48 -+#define MAX_SERVICE_TYPE_LENGTH 48 -+#define MAX_TREE_NAME_LENGTH 32 -+#define MAX_ADDRESS_LENGTH 32 -+#define MAX_NAME_SERVICE_PROVIDERS 10 -+ -+// -+// Flags for the GetBroadcastMessage API -+// -+ -+#define MESSAGE_GET_NEXT_MESSAGE 1 -+#define MESSAGE_RECEIVED_FOR_CONNECTION 2 -+ -+// -+// This constant must always be equal to the last device -+// -+ -+#define DEVICE_LAST_DEVICE 0x00000003 -+ -+// -+// Defined feature set provided by requester -+// -+ -+#ifndef NWC_FEAT_PRIV_CONN -+#define NWC_FEAT_PRIV_CONN 1 -+#define NWC_FEAT_REQ_AUTH 2 -+#define NWC_FEAT_SECURITY 3 -+#define NWC_FEAT_NDS 4 -+#define NWC_FEAT_NDS_MTREE 5 -+#define NWC_FEAT_PRN_CAPTURE 6 -+#define NWC_FEAT_NDS_RESOLVE 7 -+#endif -+ -+//===[ Type definitions ]================================================== -+ -+ -+// -+// Structure for defining what a transport -+// address looks like -+// -+ -+struct 
nwc_tran_addr { -+ u32 uTransportType; -+ u32 uAddressLength; -+ unsigned char *puAddress; -+}; -+ -+ -+struct nwc_conn_string { -+ char *pString; -+ u32 uStringType; -+ u32 uNameFormatType; -+ -+}; -+ -+//#if defined(NTYPES_H) -+//typedef NWCString NwcString, *PNwcString; -+//#else -+struct nwc_string { -+ u32 DataType; -+ u32 BuffSize; -+ u32 DataLen; -+ void *pBuffer; -+ u32 CodePage; -+ u32 CountryCode; -+ -+}; -+//#endif -+ -+// -+// Definition of a fragment for the Raw NCP requests -+// -+ -+struct nwc_frag { -+ void *pData; -+ u32 uLength; -+ -+}; -+ -+// -+// Current connection information available for -+// enumeration using GetConnInfo and ScanConnInfo -+// -+ -+#define NW_INFO_BUFFER_SIZE NW_MAX_TREE_NAME_LEN + \ -+ NW_MAX_TREE_NAME_LEN + \ -+ NW_MAX_SERVICE_TYPE_LEN -+//++======================================================================= -+// API Name: NwcCloseConn -+// -+// Arguments In: ConnHandle - The handle to a connection that is -+// no longer needed. -+// -+// Arguments Out: NONE -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_CONN_INVALID -+// NWE_INVALID_OWNER -+// NWE_RESOURCE_LOCK -+// -+// Abstract: This API is used by an application that opened the -+// connection using one of the open connection calls -+// is finished using the connection. After it is closed, -+// the handle may no longer be used to access the -+// connection. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_close_conn { -+ u32 ConnHandle; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcConvertLocalFileHandle -+// -+// Arguments In: NONE -+// -+// Arguments Out: uConnReference - The connection reference associated -+// with the returned NetWare file handle. -+// -+// pNetWareFileHandle - The six byte NetWare file handle -+// associated with the given local file handle. 
-+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_RESOURCE_NOT_OWNED -+// -+// Abstract: This API is used to return the NetWare handle that -+// has been associated to a local file handle. -+// In addition to returning the NetWare file handle, -+// this API also returns the connection reference to -+// the connection that owns the file. -+// -+// Notes: This API does not create a new NetWare handle, it -+// only returns the existing handle associated to the -+// local handle. -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_convert_local_handle { -+ u32 uConnReference; -+ unsigned char NetWareHandle[6]; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcConvertNetWareHandle -+// -+// Arguments In: ConnHandle - The connection associated with the -+// NetWare file handle to convert. -+// -+// uAccessMode - The access rights to be used when -+// allocating the local file handle. -+// -+// pNetWareHandle - The NetWare handle that will be -+// bound to the new local handle being created. -+// -+// uFileSize - The current file size of the NetWare -+// file associated with the given NetWare file handle. -+// -+// Arguments Out: NONE -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_RESOURCE_NOT_OWNED -+// -+// Abstract: This API is used to convert a NetWare file handle -+// to a local file handle. -+// -+// The local handle must have been created previously -+// by doing a local open to \Special\$Special.net. -+// -+// Then an Ioctl to this function must be issued using the -+// handle returned from the special net open. -+// -+// Notes: After making this call, the NetWare file handle -+// should not be closed using the NetWare library -+// call, instead it should be closed using the local -+// operating system's close call. 
-+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+struct nwc_convert_netware_handle { -+ u32 ConnHandle; -+ u32 uAccessMode; -+ unsigned char NetWareHandle[6]; -+ u32 uFileSize; -+}; -+ -+ -+//++======================================================================= -+// API Name: NwcGetConnInfo -+// -+// Arguments In: ConnHandle - Connection handle for the connection to -+// get information on. -+// uInfoLevel - Specifies what information should be -+// returned. -+// uInfoLen - Length of the ConnInfo buffer. -+// -+// Arguments Out: pConnInfo - A pointer to a buffer to return connection -+// information in. If the caller is requesting all -+// information the pointer will be to a structure of -+// type NwcConnInfo. If the caller is requesting just -+// a single piece of information, the pointer is the -+// type of information being requested. -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_CONN_INVALID -+// NWE_INVALID_OWNER -+// NWE_RESOURCE_LOCK -+// NWE_STRING_TRANSLATION -+// -+// Abstract: This API returns connection information for the specified -+// connection. The requester can receive one piece of -+// information or the whole information structure. -+// Some of the entries in the NwcConnInfo structure are -+// pointers. The requester is responsible for supplying -+// valid pointers for any info specified to be returned. -+// If the requester does not want a piece of information -+// returned, a NULL pointer should be placed in the field. 
-+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_get_conn_info { -+ u32 ConnHandle; -+ u32 uInfoLevel; -+ u32 uInfoLength; -+ void *pConnInfo; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcGetDefaultNameContext -+// -+// Arguments In:: uTreeLength - Length of tree string. -+// -+// pDsTreeName - Pointer to tree string (multi-byte) -+// -+// pNameLength - On input, this is the length of the -+// name context buffer. On output, this is the actual -+// length of the name context string. -+// -+// Arguments Out: pNameContext - The buffer to copy the default name -+// context into (multi-byte). -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_BUFFER_OVERFLOW -+// NWE_OBJECT_NOT_FOUND -+// NWE_PARAM_INVALID -+// NWE_RESOURCE_LOCK -+// -+// Abstract: This API returns the default name context that -+// was previously set either by configuration or -+// by calling NwcSetDefaultNameContext. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_get_def_name_ctx { -+ u32 uTreeLength; -+ unsigned char *pDsTreeName; -+ u32 uNameLength; -+// unsigned short *pNameContext; -+ unsigned char *pNameContext; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcGetTreeMonitoredConnReference -+// -+// Arguments In: NONE -+// -+// Arguments Out: uConnReference - The connection reference associated -+// with the monitored connection. -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_OBJECT_NOT_FOUND -+// NWE_RESOURCE_LOCK -+// -+// Abstract: This call returns a connection reference to a -+// connection that is monitored. This connection -+// reference may be used to open the connection. 
-+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_get_tree_monitored_conn_ref { -+ struct nwc_string *pTreeName; -+ u32 uConnReference; -+ -+}; -+ -+ -+//++======================================================================= -+// API Name: NwcGetPreferredDsTree -+// -+// Arguments In: uTreeLength - On input, this is the length in bytes -+// of the DS tree name buffer. On output, this is the -+// actual length of the DS tree name string in bytes. -+// -+// Arguments Out: pDsTreeName - The buffer to copy the DS tree name into. -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_BUFFER_OVERFLOW -+// NWE_PARAM_INVALID -+// NWE_DS_PREFERRED_NOT_FOUND -+// NWE_RESOURCE_LOCK -+// -+// Abstract: This API returns the preferred DS tree name that was -+// previously set either by configuration or -+// by calling NwcSetPreferredDsTree. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+struct nwc_get_pref_ds_tree { -+ u32 uTreeLength; -+ unsigned char *pDsTreeName; -+}; -+ -+//++======================================================================= -+// API Name: NwcLicenseConn -+// -+// Arguments In: ConnHandle - An open connection handle that is in -+// an unlicensed state. -+// -+// Arguments Out: NONE -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_CONN_INVALID -+// NWE_HANDLE_ALREADY_LICENSED -+// -+// -+// Abstract: This API changes a connections state to licensed. -+// The licensed count will be incremented, and if -+// necessary, the license NCP will be sent. -+// If this handle is already in a licensed state, -+// an error will be returned. 
-+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_license_conn { -+ u32 ConnHandle; -+}; -+ -+ -+//++======================================================================= -+// API Name: NWCGetMappedDrives -+// -+// Arguments In: -+// Arguments Out: -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_BUFFER_OVERFLOW -+// -+// Abstract: This API returns the NetWare mapped drive info -+// per user. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_get_mapped_drives { -+ u32 MapBuffLen; // Buffer length (actual buffer size returned) -+ struct nwc_mapped_drive_buf *MapBuffer; // Pointer to map buffer -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcGetMountPath -+// -+// Arguments In: MountPathLen - Length of mount path buffer -+// including nul terminator. -+// -+// Arguments Out: MountPath - Pointer to mount path buffer -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_BUFFER_OVERFLOW -+// -+// Abstract: This API returns the mount point of the NOVFS file -+// system. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_get_mount_path { -+ u32 MountPathLen; -+ unsigned char *pMountPath; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcOpenConnByAddr -+// -+// Arguments In: pServiceType - The type of service required. -+// -+// uConnFlags - Specifies whether this connection -+// should be public or private. -+// -+// pTranAddress - Specifies the transport address of -+// the service to open a connection on. -+// a connection to. 
-+// -+// Arguments Out: ConnHandle - The new connection handle returned. -+// This handle may in turn be used for all requests -+// directed to this connection. -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_INSUFFICIENT_RESOURCES -+// NWE_TRAN_INVALID_TYPE -+// NWE_RESOURCE_LOCK -+// NWE_UNSUPPORTED_TRAN_TYPE -+// -+// Abstract: This API will create a service connection to -+// the service specified by the transport address. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_open_conn_by_addr { -+ char *pServiceType; -+ u32 uConnFlags; -+ struct nwc_tran_addr *pTranAddr; -+ u32 ConnHandle; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcOpenConnByName -+// -+// Arguments In: ConnHandle - The connection to use when resolving -+// a name. For instance, if the name is a bindery name -+// the requester will scan the bindery of the given -+// connection to retrieve the service's address. This -+// value can also be NULL if the caller doesn't care -+// which connection is used to resolve the address. -+// -+// pName - A pointer to the name of the service trying -+// to be connected to. This string is NULL terminated, -+// contains no wild cards, and is a maximum of 512 -+// characters long. -+// -+// pServiceType - The type of service required. -+// -+// uConnFlags - Specifies whether this connection -+// should be public or private. -+// -+// uTranType - Specifies the preferred or required -+// transport type to be used. -+// NWC_TRAN_TYPE_WILD may be ORed with the other values -+// or used alone. When ORed with another value, the -+// wild value indicates an unmarked alternative is -+// acceptable. When used alone, the current preferred -+// transport is used. -+// -+// Arguments Out: ConnHandle - The new connection handle returned. 
-+// This handle may in turn be used for all requests -+// directed to this connection. -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_BUFFER_OVERFLOW -+// NWE_INSUFFICIENT_RESOURCES -+// NWE_INVALID_STRING_TYPE -+// NWE_RESOURCE_LOCK -+// NWE_STRING_TRANSLATION -+// NWE_TRAN_INVALID_TYPE -+// NWE_UNSUPPORTED_TRAN_TYPE -+// -+// Abstract: This API will resolve the given name to a network -+// address then create a service connection to the -+// specified service. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_open_conn_by_name { -+ u32 ConnHandle; -+ struct nwc_conn_string *pName; -+ char *pServiceType; -+ u32 uConnFlags; -+ u32 uTranType; -+ u32 RetConnHandle; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcOpenConnByReference -+// -+// Arguments In: uConnReference - A reference handle which identifies -+// a valid connection that the caller wants to obtain -+// a connection handle to. A reference handle can be -+// used to get information about the connection without -+// actually getting a handle to it. A connection handle -+// must be used to make actual requests to that -+// connection. -+// -+// uConnFlags - Currently unused. -+// -+// Arguments Out: ConnHandle - The new connection handle returned. -+// This handle may in turn be used for all requests -+// directed to this connection. -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_CONN_INVALID -+// -+// Abstract: This API will open the connection associated with -+// the given connection reference. The connection -+// reference can be obtained by calling the -+// NwcScanConnInfo API. 
-+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_open_conn_by_ref { -+ u32 uConnReference; -+ u32 uConnFlags; -+ u32 ConnHandle; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcRawRequest -+// -+// Arguments In: ConnHandle - The connection handle of the connection -+// that the request is being directed to. -+// -+// uFunction - The NCP function that is being called. -+// -+// uNumRequestFrags - The number of fragments that the -+// request packet has been broken into. -+// -+// pRequestFrags - List of fragments that make up the -+// request packet. Each fragment includes the length -+// of the fragment data and a pointer to the data. -+// -+// uNumReplyFrags - The number of fragments the reply -+// packet has been broken into. -+// -+// Arguments Out: pReplyFrags - List of fragments that make up the -+// request packet. Each fragment includes the length -+// of the fragment data and a pointer to the data. -+// -+// uActualReplyLength - Total size of the reply packet -+// after any header and tail information is removed. -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_CONN_INVALID -+// -+// Abstract: API for sending raw NCP packets directly to a server. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_request { -+ u32 ConnHandle; -+ u32 uFunction; -+ u32 uNumRequestFrags; -+ struct nwc_frag *pRequestFrags; -+ u32 uNumReplyFrags; -+ struct nwc_frag *pReplyFrags; -+ u32 uActualReplyLength; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcScanConnInfo -+// -+// Arguments In: uScanIndex - The index to be used on the next -+// iteration of the scan. This value should be initially -+// set to zero. 
The output of this parameter will be -+// used in subsequent calls to this function. -+// -+// uScanInfoLevel - Describes the composition of the -+// pScanConnInfo pointer. If this parameter contains -+// NWC_CONN_INFO_RETURN_ALL, information for all -+// connections will be returned. -+// -+// uScanInfoLen - Lenght of pScanConnInfo buffer -+// -+// pScanConnInfo - This parameter is a pointer to -+// data that describes one piece of connection -+// information. The type of this data depends on -+// which level of information is being scanned for. -+// For instance, if the scan is being used to find all -+// connections with a particular authentication state, -+// pScanConnInfo would be a "pnuint" since -+// authentication state is described as nuint in the -+// NwcConnInfo structure. -+// -+// uScanFlag - This parameter tells whether to return -+// connection information for connections that match -+// the scan criteria or that do not match the scan -+// criteria. If the caller wants to find all the -+// connections that are not in the "NOVELL_INC" DS -+// tree, he would use the call as described below in -+// the description except the uScanFlag parameter would -+// have the value of NWC_MATCH_NOT_EQUALS. This flag -+// is also used to tell the requester whether to -+// return private or public, licensed or unlicensed -+// connections. -+// -+// uReturnInfoLevel - Specifies what information -+// should be returned. -+// -+// uReturnInfoLength - The size in bytes of pConnInfo. -+// -+// Arguments Out: uConnectionReference - Connection reference -+// associated with the information that is being -+// returned. -+// -+// pReturnConnInfo - A pointer to the NwcConnInfo -+// structure defined above. In some of the -+// structures within the union, there are pointers to -+// data to be returned. It is the responsibility of -+// the caller to provide pointers to valid memory -+// to copy this data into. 
-+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_RESOURCE_LOCK -+// NWE_CONN_INVALID -+// NWE_INVALID_LEVEL -+// NWE_STRING_TRANSLATION -+// NWE_INVALID_MATCH_DATA -+// NWE_MATCH_FAILED -+// NWE_BUFFER_OVERFLOW -+// NWE_NO_MORE_ENTRIES -+// -+// Abstract: This API is used to return connection information -+// for multiple connections. It will return one -+// piece or the full structure of connection information -+// for one connection at a time. This call is designed -+// to scan for connections based on any piece of -+// connection information as described in the -+// NwcConnInfo structure. For instance, if the caller -+// wants to scan for all connections in the DS tree -+// "NOVELL_INC", the call would be made with the -+// following paramters: -+// -+// uScanLevelInfo = NWC_CONN_INFO_TREE_NAME -+// pScanConnInfo = "NOVELL_INC" -+// uScanFlag = NWC_MATCH_EQUALS | -+// NWC_RETURN_PUBLIC | -+// NWC_RETURN_LICENSED -+// -+// The scan flag is used to tell if the scan is -+// supposed to return connections that match or don't -+// match. This design doesn't allow any other -+// conditions for this flag (such as greater than or -+// less than). -+// -+// If the caller specifies the uReturnInfoLevel = -+// NWC_CONN_INFO_RETURN_ALL, the full NwcConnInfo -+// structure is returned. The caller must supply -+// data for any pointers in the NwcConnInfo structure -+// (these include tree name, workgroup id, server name -+// and transport address). However if the caller -+// doesn't want to get a particular piece of info -+// that is expecting a pointer to some data, a NULL -+// pointer may be used to indicate to the requester -+// that it should not return that piece of information. 
-+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_scan_conn_info { -+ u32 uScanIndex; -+ u32 uScanInfoLevel; -+ u32 uScanInfoLen; -+ void *pScanConnInfo; -+ u32 uScanFlags; -+ u32 uReturnInfoLevel; -+ u32 uReturnInfoLength; -+ u32 uConnectionReference; -+ void *pReturnConnInfo; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcSetConnInfo -+// -+// Arguments In: ConnHandle - Connection handle for the connection to -+// set information on. -+// -+// uInfoLevel - Specifies what information should be set. -+// -+// uInfoLen - Length in bytes of the information being set. -+// -+// pConnInfo - Connection information to set. -+// -+// Arguments Out: NONE -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_RESOURCE_LOCK -+// NWE_CONN_INVALID -+// NWE_INVALID_LEVEL -+// -+// -+// Abstract: This API sets information in the connection associated -+// with the connection handle. -+// -+// Notes: At this time the only setable information levels are: -+// NWC_CONN_INFO_AUTH_STATE -+// NWC_CONN_INFO_BCAST_STATE -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_set_conn_info { -+ u32 ConnHandle; -+ u32 uInfoLevel; -+ u32 uInfoLength; -+ void *pConnInfo; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcSetDefaultNameContext -+// -+// Arguments In:: uTreeLength - Length of tree string. -+// -+// pDsTreeName - The tree string (multi-byte). -+// -+// uNameLength - The length in bytes of the name -+// context string. -+// -+// pNameContext - The string to be used as the default -+// name context (multi-byte). 
-+// -+// Arguments Out: NONE -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_PARAM_INVALID -+// NWE_RESOURCE_LOCK -+// NWE_STRING_TRANSLATION -+// -+// Abstract: This API sets the default name context. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_set_def_name_ctx { -+ u32 uTreeLength; -+ unsigned char *pDsTreeName; -+ u32 uNameLength; -+// unsined short *pNameContext; -+ unsigned char *pNameContext; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcSetPreferredDsTree -+// -+// Arguments In: uTreeLength - The length in bytes of the DS tree name. -+// -+// pDsTreeName - The string to be used as the preferred -+// DS tree name. -+// -+// Arguments Out: NONE -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_INSUFFICIENT_RESOURCES -+// NWE_RESOURCE_LOCK -+// -+// Abstract: This API sets the preferred DS tree name. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_set_pref_ds_tree { -+ u32 uTreeLength; -+ unsigned char *pDsTreeName; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcSetPrimaryConnection -+// -+// Arguments In: ConnHandle - Connection handle associated to the -+// connection reference which the caller wishes to set -+// as primary. -+// -+// Arguments Out: NONE -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// NWE_CONN_PRIMARY_NOT_SET -+// -+// Abstract: This API sets the primary connection according to -+// the connection handle passed in by the caller. 
-+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_set_primary_conn { -+ u32 ConnHandle; -+ -+}; -+ -+ -+//++======================================================================= -+// API Name: NwcQueryFeature -+// -+// Arguments In: Feature - The number associated with a particular -+// feature that the caller wants to know if the requester -+// is supporting -+// -+// Arguments Out: -+// -+// Returns: STATUS_SUCCESS -+// NWE_REQUESTER_FAILURE -+// NWE_ACCESS_VIOLATION -+// -+// Abstract: -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_query_feature { -+ u32 Feature; -+ -+}; -+ -+//++======================================================================= -+// API Name: NWCChangePassword -+// -+// Arguments In: -+// -+// Arguments Out: -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// -+// Abstract: -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_change_key { -+ struct nwc_string *pDomainName; -+ u32 AuthType; -+ struct nwc_string *pObjectName; -+ u32 NameType; -+ u16 ObjectType; -+ struct nwc_string *pVerifyPassword; -+ struct nwc_string *pNewPassword; -+ -+}; -+ -+//++======================================================================= -+// API Name: NWCEnumerateIdentities ` -+// -+// Arguments In: -+// -+// Arguments Out: -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// -+// Abstract: -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_enum_ids { -+ u32 Iterator; -+ struct nwc_string *pDomainName; -+ u32 AuthType; -+ struct nwc_string *pObjectName; -+ u32 NameType; -+ u16 ObjectType; -+ u32 
IdentityFlags; -+ u32 AuthenticationId; -+ -+}; -+ -+//++======================================================================= -+// API Name: NWCGetIdentityInfo -+// -+// Arguments In: -+// -+// Arguments Out: -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// -+// Abstract: -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_get_id_info { -+ u32 AuthenticationId; -+ struct nwc_string *pDomainName; -+ u32 AuthType; -+ struct nwc_string *pObjectName; -+ u32 NameType; -+ u16 ObjectType; -+ u32 IdentityFlags; -+ -+}; -+ -+//++======================================================================= -+// API Name: NWCLoginIdentity -+// -+// Arguments In: -+// -+// Arguments Out: -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// -+// Abstract: -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_login_id { -+ struct nwc_string *pDomainName; -+ u32 AuthType; -+ struct nwc_string *pObjectName; -+ u32 NameType; -+ u16 ObjectType; -+ u32 IdentityFlags; -+ struct nwc_string *pPassword; -+ u32 AuthenticationId; -+ -+}; -+ -+ -+//++======================================================================= -+// API Name: NWCSetPassword -+// -+// Arguments In: -+// -+// Arguments Out: -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// -+// Abstract: -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_set_key { -+ u32 ConnHandle; -+ u32 AuthenticationId; -+ struct nwc_string *pObjectName; -+ u16 ObjectType; -+ struct nwc_string *pNewPassword; -+ -+}; -+ -+//++======================================================================= -+// API Name: NWCVerifyPassword -+// -+// Arguments In: -+// -+// Arguments Out: -+// -+// 
Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// -+// Abstract: -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//++======================================================================= -+ -+struct nwc_verify_key { -+ struct nwc_string *pDomainName; -+ u32 AuthType; -+ struct nwc_string *pObjectName; -+ u32 NameType; -+ u16 ObjectType; -+ struct nwc_string *pVerifyPassword; -+ -+}; -+ -+//++======================================================================= -+// API Name: NwcAuthenticateWithId -+// -+// Arguments In: ConnHandle - The connection to be authenticated -+// -+// AuthenticationId - the authentication Id associated -+// to the information necessary to authenticate this -+// connection. -+// -+// Arguments Out: NONE -+// -+// Returns: STATUS_SUCCESS -+// NWE_ACCESS_VIOLATION -+// -+// Abstract: This API is used to authenticate a connection using -+// an authentication ID that has already been created. -+// -+// Notes: -+// -+// Environment: PASSIVE_LEVEL, LINUX -+// -+//=======================================================================-- -+ -+struct nwc_auth_with_id { -+ u32 ConnHandle; -+ u32 AuthenticationId; -+ -+}; -+ -+ -+struct nwc_unmap_drive_ex { -+// unsigned long connHdl; -+ unsigned int linkLen; -+ char linkData[1]; -+ -+}; -+ -+struct nwc_map_drive_ex { -+ u32 ConnHandle; -+ unsigned int localUid; -+ unsigned int linkOffsetLength; -+ unsigned int linkOffset; -+ unsigned int dirPathOffsetLength; -+ unsigned int dirPathOffset; -+}; -+ -+struct nwc_get_bcast_notification { -+ u32 uMessageFlags; -+ u32 uConnReference; -+ u32 messageLen; -+ char message[1]; -+}; -+ -+#endif /* __NWCLNX_H__ */ ---- /dev/null -+++ b/fs/novfs/nwerror.h -@@ -0,0 +1,658 @@ -+/* -+ * NetWare Redirector for Linux -+ * Author: Tom Buckley -+ * -+ * This file contains all return error codes. -+ * -+ * Copyright (C) 2005 Novell, Inc. 
-+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ */ -+#ifndef __NOVFS_ERROR_H -+#define __NOVFS_ERROR_H -+ -+ -+/* -+ * Network errors -+ * Decimal values at end of line are 32768 lower than actual -+ */ -+ -+#define SHELL_ERROR 0x8800 -+#define VLM_ERROR 0x8800 -+#define ALREADY_ATTACHED 0x8800 // 0 - Attach attempted to server with valid, existing connection -+#define INVALID_CONNECTION 0x8801 // 1 - Request attempted with invalid or non-attached connection handle -+#define DRIVE_IN_USE 0x8802 // 2 - OS/2 only (NOT USED) -+#define CANT_ADD_CDS 0x8803 // 3 - Map drive attempted but unable to add new current directory structure -+#define DRIVE_CANNOT_MAP 0x8803 -+#define BAD_DRIVE_BASE 0x8804 // 4 - Map drive attempted with invalid path specification -+#define NET_READ_ERROR 0x8805 // 5 - Attempt to receive from the selected transport failed -+#define NET_RECV_ERROR 0x8805 // 5 -+#define UNKNOWN_NET_ERROR 0x8806 // 6 - Network send attempted with an un-specific network error -+#define SERVER_INVALID_SLOT 0x8807 // 7 - Server request attempted with invalid server connection slot -+#define BAD_SERVER_SLOT 0x8807 // 7 -+#define NO_SERVER_SLOTS 0x8808 // 8 - Attach attempted to server with no connection slots available -+#define NET_WRITE_ERROR 0x8809 // 9 - Attempt to send on the selected transport failed -+#define CONNECTION_IN_ERROR_STATE 0x8809 // Client-32 -+#define NET_SEND_ERROR 0x8809 // 9 -+#define SERVER_NO_ROUTE 0x880A // 10 - Attempted to find route to server where no route exists -+#define BAD_LOCAL_TARGET 0x880B // 11 - OS/2 only -+#define TOO_MANY_REQ_FRAGS 0x880C // 12 - Attempted request with too many request fragments specified -+#define CONNECT_LIST_OVERFLOW 0x880D // 13 -+#define BUFFER_OVERFLOW 0x880E // 14 - Attempt to receive more 
data than the reply buffer had room for -+#define MORE_DATA_ERROR 0x880E // Client-32 -+#define NO_CONN_TO_SERVER 0x880F // 15 -+#define NO_CONNECTION_TO_SERVER 0x880F // 15 - Attempt to get connection for a server not connected -+#define NO_ROUTER_FOUND 0x8810 // 16 - OS/2 only -+#define BAD_FUNC_ERROR 0x8811 // 17 -+#define INVALID_SHELL_CALL 0x8811 // 17 - Attempted function call to non- existent or illegal function -+#define SCAN_COMPLETE 0x8812 -+#define LIP_RESIZE_ERROR 0x8812 // Client-32 -+#define UNSUPPORTED_NAME_FORMAT_TYPE 0x8813 -+#define INVALID_DIR_HANDLE 0x8813 // Client-32 -+#define HANDLE_ALREADY_LICENSED 0x8814 -+#define OUT_OF_CLIENT_MEMORY 0x8814 // Client-32 -+#define HANDLE_ALREADY_UNLICENSED 0x8815 -+#define PATH_NOT_OURS 0x8815 // Client-32 -+#define INVALID_NCP_PACKET_LENGTH 0x8816 -+#define PATH_IS_PRINT_DEVICE 0x8816 // Client-32 -+#define SETTING_UP_TIMEOUT 0x8817 -+#define PATH_IS_EXCLUDED_DEVICE 0x8817 // Client-32 -+#define SETTING_SIGNALS 0x8818 -+#define PATH_IS_INVALID 0x8818 // Client-32 -+#define SERVER_CONNECTION_LOST 0x8819 -+#define NOT_SAME_DEVICE 0x8819 // Client-32 -+#define OUT_OF_HEAP_SPACE 0x881A -+#define INVALID_SERVICE_REQUEST 0x881B -+#define INVALID_SEARCH_HANDLE 0x881B // Client-32 -+#define INVALID_TASK_NUMBER 0x881C -+#define INVALID_DEVICE_HANDLE 0x881C // Client-32 -+#define INVALID_MESSAGE_LENGTH 0x881D -+#define INVALID_SEM_HANDLE 0x881D // Client-32 -+#define EA_SCAN_DONE 0x881E -+#define INVALID_CFG_HANDLE 0x881E // Client-32 -+#define BAD_CONNECTION_NUMBER 0x881F -+#define INVALID_MOD_HANDLE 0x881F // Client-32 -+#define ASYN_FIRST_PASS 0x8820 -+#define INVALID_DEVICE_INDEX 0x8821 -+#define INVALID_CONN_HANDLE 0x8822 -+#define INVALID_QUEUE_ID 0x8823 -+#define INVALID_PDEVICE_HANDLE 0x8824 -+#define INVALID_JOB_HANDLE 0x8825 -+#define INVALID_ELEMENT_ID 0x8826 -+#define ALIAS_NOT_FOUND 0x8827 -+#define RESOURCE_SUSPENDED 0x8828 -+#define INVALID_QUEUE_SPECIFIED 0x8829 -+#define DEVICE_ALREADY_OPEN 0x882A 
-+#define JOB_ALREADY_OPEN 0x882B -+#define QUEUE_NAME_ID_MISMATCH 0x882C -+#define JOB_ALREADY_STARTED 0x882D -+#define SPECT_DAA_TYPE_NOT_SUPPORTED 0x882E -+#define INVALID_ENVIR_HANDLE 0x882F -+#define NOT_SAME_CONNECTION 0x8830 // 48 - Internal server request attempted accross different server connections -+#define PRIMARY_CONNECTION_NOT_SET 0x8831 // 49 - Attempt to retrieve default connection with no primary connection set -+#define NO_PRIMARY_SET 0x8831 // 49 -+#define KEYWORD_NOT_FOUND 0x8832 // Client-32 -+#define PRINT_CAPTURE_NOT_IN_PROGRESS 0x8832 // Client-32 -+#define NO_CAPTURE_SET 0x8832 // 50 -+#define NO_CAPTURE_IN_PROGRESS 0x8832 // 50 - Capture information requested on port with no capture in progress -+#define BAD_BUFFER_LENGTH 0x8833 // 51 -+#define INVALID_BUFFER_LENGTH 0x8833 // 51 - Used to indicate length which caller requested on a GetDNC or SetDNC was too large -+#define NO_USER_NAME 0x8834 // 52 -+#define NO_NETWARE_PRINT_SPOOLER 0x8835 // 53 - Capture requested without having the local print spooler installed -+#define INVALID_PARAMETER 0x8836 // 54 - Attempted function with an invalid function parameter specified -+#define CONFIG_FILE_OPEN_FAILED 0x8837 // 55 - OS/2 only -+#define NO_CONFIG_FILE 0x8838 // 56 - OS/2 only -+#define CONFIG_FILE_READ_FAILED 0x8839 // 57 - OS/2 only -+#define CONFIG_LINE_TOO_LONG 0x883A // 58 - OS/2 only -+#define CONFIG_LINES_IGNORED 0x883B // 59 - OS/2 only -+#define NOT_MY_RESOURCE 0x883C // 60 - Attempted request made with a parameter using foriegn resource -+#define DAEMON_INSTALLED 0x883D // 61 - OS/2 only -+#define SPOOLER_INSTALLED 0x883E // 62 - Attempted load of print spooler with print spooler already installed -+#define CONN_TABLE_FULL 0x883F // 63 -+#define CONNECTION_TABLE_FULL 0x883F // 63 - Attempted to allocate a connection handle with no more local connection table entries -+#define CONFIG_SECTION_NOT_FOUND 0x8840 // 64 - OS/2 only -+#define BAD_TRAN_TYPE 0x8841 // 65 -+#define 
INVALID_TRANSPORT_TYPE 0x8841 // 65 - Attempted function on a connection with an invalid transport selected -+#define TDS_TAG_IN_USE 0x8842 // 66 - OS/2 only -+#define TDS_OUT_OF_MEMORY 0x8843 // 67 - OS/2 only -+#define TDS_INVALID_TAG 0x8844 // 68 - Attempted TDS function with invalid tag -+#define TDS_WRITE_TRUNCATED 0x8845 // 69 - Attempted TDS write with buffer that exceeded buffer -+#define NO_CONNECTION_TO_DS 0x8846 // Client-32 -+#define NO_DIRECTORY_SERVICE_CONNECTION 0x8846 // 70 -+#define SERVICE_BUSY 0x8846 // 70 - Attempted request made to partially asynchronous function in busy state -+#define NO_SERVER_ERROR 0x8847 // 71 - Attempted connect failed to find any servers responding -+#define BAD_VLM_ERROR 0x8848 // 72 - Attempted function call to non-existant or not-loaded overlay -+#define NETWORK_DRIVE_IN_USE 0x8849 // 73 - Attempted map to network drive that was already mapped -+#define LOCAL_DRIVE_IN_USE 0x884A // 74 - Attempted map to local drive that was in use -+#define NO_DRIVES_AVAILABLE 0x884B // 75 - Attempted map to next available drive when none were available -+#define DEVICE_NOT_REDIRECTED 0x884C // 76 - The device is not redirected -+#define NO_MORE_SFT_ENTRIES 0x884D // 77 - Maximum number of files was reached -+#define UNLOAD_ERROR 0x884E // 78 - Attempted unload failed -+#define IN_USE_ERROR 0x884F // 79 - Attempted re-use of already in use connection entry -+#define TOO_MANY_REP_FRAGS 0x8850 // 80 - Attempted request with too many reply fragments specified -+#define TABLE_FULL 0x8851 // 81 - Attempted to add a name into the name table after it was full -+#ifndef SOCKET_NOT_OPEN -+#define SOCKET_NOT_OPEN 0x8852 // 82 - Listen was posted on unopened socket -+#endif -+#define MEM_MGR_ERROR 0x8853 // 83 - Attempted enhanced memory operation failed -+#define SFT3_ERROR 0x8854 // 84 - An SFT3 switch occured mid-transfer -+#define PREFERRED_NOT_FOUND 0x8855 // 85 - the preferred directory server was not established but another directory 
server was returned -+#define DEVICE_NOT_RECOGNIZED 0x8856 // 86 - used to determine if the device is not used by VISE so pass it on to the next redirector, if any. -+#define BAD_NET_TYPE 0x8857 // 87 - the network type (Bind/NDS) does not match the server version -+#define ERROR_OPENING_FILE 0x8858 // 88 - generic open failure error, invalid path, access denied, etc.. -+#define NO_PREFERRED_SPECIFIED 0x8859 // 89 - no preferred name specified -+#define ERROR_OPENING_SOCKET 0x885A // 90 - error opening a socket -+#define REQUESTER_FAILURE 0x885A // Client-32 -+#define RESOURCE_ACCESS_DENIED 0x885B // Client-32 -+#define SIGNATURE_LEVEL_CONFLICT 0x8861 -+#define NO_LOCK_FOUND 0x8862 // OS/2 - process lock on conn handle failed, process ID not recognized -+#define LOCK_TABLE_FULL 0x8863 // OS/2 - process lock on conn handle failed, process lock table full -+#define INVALID_MATCH_DATA 0x8864 -+#define MATCH_FAILED 0x8865 -+#define NO_MORE_ENTRIES 0x8866 -+#define INSUFFICIENT_RESOURCES 0x8867 -+#define STRING_TRANSLATION 0x8868 -+#define STRING_TRANSLATION_NEEDED 0x8868 // Client-32 -+#define ACCESS_VIOLATION 0x8869 -+#define NOT_AUTHENTICATED 0x886A -+#define INVALID_LEVEL 0x886B -+#define RESOURCE_LOCK_ERROR 0x886C -+#define INVALID_NAME_FORMAT 0x886D -+#define OBJECT_EXISTS 0x886E -+#define OBJECT_NOT_FOUND 0x886F -+#define UNSUPPORTED_TRAN_TYPE 0x8870 -+#define INVALID_STRING_TYPE 0x8871 -+#define INVALID_OWNER 0x8872 -+#define UNSUPPORTED_AUTHENTICATOR 0x8873 -+#define IO_PENDING 0x8874 -+#define INVALID_DRIVE_NUM 0x8875 -+#define SHELL_FAILURE 0x88FF -+#define VLM_FAILURE 0x88FF -+ -+#define SVC_ALREADY_REGISTERED 0x8880 // Client-32 -+#define SVC_REGISTRY_FULL 0x8881 // Client-32 -+#define SVC_NOT_REGISTERED 0x8882 // Client-32 -+#define OUT_OF_RESOURCES 0x8883 // Client-32 -+#define RESOLVE_SVC_FAILED 0x8884 // Client-32 -+#define CONNECT_FAILED 0x8885 // Client-32 -+#define PROTOCOL_NOT_BOUND 0x8886 // Client-32 -+#define AUTHENTICATION_FAILED 0x8887 // 
Client-32 -+#define INVALID_AUTHEN_HANDLE 0x8888 // Client-32 -+#define AUTHEN_HANDLE_ALREADY_EXISTS 0x8889 // Client-32 -+ -+#define DIFF_OBJECT_ALREADY_AUTHEN 0x8890 // Client-32 -+#define REQUEST_NOT_SERVICEABLE 0x8891 // Client-32 -+#define AUTO_RECONNECT_SO_REBUILD 0x8892 // Client-32 -+#define AUTO_RECONNECT_RETRY_REQUEST 0x8893 // Client-32 -+#define ASYNC_REQUEST_IN_USE 0x8894 // Client-32 -+#define ASYNC_REQUEST_CANCELED 0x8895 // Client-32 -+#define SESS_SVC_ALREADY_REGISTERED 0x8896 // Client-32 -+#define SESS_SVC_NOT_REGISTERED 0x8897 // Client-32 -+#define PREVIOUSLY_AUTHENTICATED 0x8899 // Client-32 -+#define RESOLVE_SVC_PARTIAL 0x889A // Client-32 -+#define NO_DEFAULT_SPECIFIED 0x889B // Client-32 -+#define HOOK_REQUEST_NOT_HANDLED 0x889C // Client-32 -+#define HOOK_REQUEST_BUSY 0x889D // Client-32 -+#define HOOK_REQUEST_QUEUED 0x889D // Client-32 -+#define AUTO_RECONNECT_SO_IGNORE 0x889E // Client-32 -+#define ASYNC_REQUEST_NOT_IN_USE 0x889F // Client-32 -+#define AUTO_RECONNECT_FAILURE 0x88A0 // Client-32 -+#define NET_ERROR_ABORT_APPLICATION 0x88A1 // Client-32 -+#define NET_ERROR_SUSPEND_APPLICATION 0x88A2 // Client-32 -+#define NET_ERROR_ABORTED_PROCESS_GROUP 0x88A3 // Client-32 -+#define NET_ERROR_PASSWORD_HAS_EXPIRED 0x88A5 // Client-32 -+#define NET_ERROR_NETWORK_INACTIVE 0x88A6 // Client-32 -+#define REPLY_TRUNCATED 0x88E6 // 230 NLM -+#define UTF8_CONVERSION_FAILED 0x88F0 // NWCALLS -+ -+/* -+ * Server Errors -+ */ -+ -+#define ERR_INSUFFICIENT_SPACE 0x8901 // 001 -+#define NLM_INVALID_CONNECTION 0x890A // 010 -+#define ERR_TIMEOUT 0x8910 // 016 - nlm connection timeout -+#define ERR_NO_MORE_ENTRY 0x8914 // 020 -+#define ERR_BUFFER_TOO_SMALL 0x8977 // 119 -+#define ERR_VOLUME_FLAG_NOT_SET 0x8978 // 120 the service requested, not avail. on the selected vol. 
-+#define ERR_NO_ITEMS_FOUND 0x8979 // 121 -+#define ERR_CONN_ALREADY_TEMP 0x897A // 122 -+#define ERR_CONN_ALREADY_LOGGED_IN 0x897B // 123 -+#define ERR_CONN_NOT_AUTHENTICATED 0x897C // 124 -+#define ERR_CONN_NOT_LOGGED_IN 0x897D // 125 -+#define NCP_BOUNDARY_CHECK_FAILED 0x897E // 126 -+#define ERR_LOCK_WAITING 0x897F // 127 -+#define ERR_LOCK_FAIL 0x8980 // 128 -+#define FILE_IN_USE_ERROR 0x8980 // 128 -+#define NO_MORE_FILE_HANDLES 0x8981 // 129 -+#define NO_OPEN_PRIVILEGES 0x8982 // 130 -+#define IO_ERROR_NETWORK_DISK 0x8983 // 131 -+#define ERR_AUDITING_HARD_IO_ERROR 0x8983 // 131 -+#define NO_CREATE_PRIVILEGES 0x8984 // 132 -+#define ERR_AUDITING_NOT_SUPV 0x8984 // 132 -+#define NO_CREATE_DELETE_PRIVILEGES 0x8985 // 133 -+#define CREATE_FILE_EXISTS_READ_ONLY 0x8986 // 134 -+#define WILD_CARDS_IN_CREATE_FILE_NAME 0x8987 // 135 -+#define CREATE_FILENAME_ERROR 0x8987 // 135 -+#define INVALID_FILE_HANDLE 0x8988 // 136 -+#define NO_SEARCH_PRIVILEGES 0x8989 // 137 -+#define NO_DELETE_PRIVILEGES 0x898A // 138 -+#define NO_RENAME_PRIVILEGES 0x898B // 139 -+#define NO_MODIFY_PRIVILEGES 0x898C // 140 -+#define SOME_FILES_AFFECTED_IN_USE 0x898D // 141 -+#define NO_FILES_AFFECTED_IN_USE 0x898E // 142 -+#define SOME_FILES_AFFECTED_READ_ONLY 0x898F // 143 -+#define NO_FILES_AFFECTED_READ_ONLY 0x8990 // 144 -+#define SOME_FILES_RENAMED_NAME_EXISTS 0x8991 // 145 -+#define NO_FILES_RENAMED_NAME_EXISTS 0x8992 // 146 -+#define NO_READ_PRIVILEGES 0x8993 // 147 -+#define NO_WRITE_PRIVILEGES_OR_READONLY 0x8994 // 148 -+#define FILE_DETACHED 0x8995 // 149 -+#define SERVER_OUT_OF_MEMORY 0x8996 // 150 -+#define ERR_TARGET_NOT_A_SUBDIRECTORY 0x8996 // 150 can be changed later (note written by server people). 
-+#define NO_DISK_SPACE_FOR_SPOOL_FILE 0x8997 // 151 -+#define ERR_AUDITING_NOT_ENABLED 0x8997 // 151 -+#define VOLUME_DOES_NOT_EXIST 0x8998 // 152 -+#define DIRECTORY_FULL 0x8999 // 153 -+#define RENAMING_ACROSS_VOLUMES 0x899A // 154 -+#define BAD_DIRECTORY_HANDLE 0x899B // 155 -+#define INVALID_PATH 0x899C // 156 -+#define NO_MORE_TRUSTEES 0x899C // 156 -+#define NO_MORE_DIRECTORY_HANDLES 0x899D // 157 -+#define INVALID_FILENAME 0x899E // 158 -+#define DIRECTORY_ACTIVE 0x899F // 159 -+#define DIRECTORY_NOT_EMPTY 0x89A0 // 160 -+#define DIRECTORY_IO_ERROR 0x89A1 // 161 -+#define READ_FILE_WITH_RECORD_LOCKED 0x89A2 // 162 -+#define ERR_TRANSACTION_RESTARTED 0x89A3 // 163 -+#define ERR_RENAME_DIR_INVALID 0x89A4 // 164 -+#define ERR_INVALID_OPENCREATE_MODE 0x89A5 // 165 -+#define ERR_ALREADY_IN_USE 0x89A6 // 166 -+#define ERR_AUDITING_ACTIVE 0x89A6 // 166 -+#define ERR_INVALID_RESOURCE_TAG 0x89A7 // 167 -+#define ERR_ACCESS_DENIED 0x89A8 // 168 -+#define ERR_AUDITING_NO_RIGHTS 0x89A8 // 168 -+#define ERR_LINK_IN_PATH 0x89A9 // 169 -+#define INVALID_DATA_TYPE 0x89AA // 170 -+#define INVALID_DATA_STREAM 0x89BE // 190 -+#define INVALID_NAME_SPACE 0x89BF // 191 -+#define NO_ACCOUNTING_PRIVILEGES 0x89C0 // 192 -+#define LOGIN_DENIED_NO_ACCOUNT_BALANCE 0x89C1 // 193 -+#define LOGIN_DENIED_NO_CREDIT 0x89C2 // 194 -+#define ERR_AUDITING_RECORD_SIZE 0x89C2 // 194 -+#define ERR_TOO_MANY_HOLDS 0x89C3 // 195 -+#define ACCOUNTING_DISABLED 0x89C4 // 196 -+#define INTRUDER_DETECTION_LOCK 0x89C5 // 197 -+#define NO_CONSOLE_OPERATOR 0x89C6 // 198 -+#define NO_CONSOLE_PRIVILEGES 0x89C6 // 198 -+#define ERR_Q_IO_FAILURE 0x89D0 // 208 -+#define ERR_NO_QUEUE 0x89D1 // 209 -+#define ERR_NO_Q_SERVER 0x89D2 // 210 -+#define ERR_NO_Q_RIGHTS 0x89D3 // 211 -+#define ERR_Q_FULL 0x89D4 // 212 -+#define ERR_NO_Q_JOB 0x89D5 // 213 -+#define ERR_NO_Q_JOB_RIGHTS 0x89D6 // 214 -+#define ERR_Q_IN_SERVICE 0x89D7 // 215 -+#define PASSWORD_NOT_UNIQUE 0x89D7 // 215 -+#define ERR_Q_NOT_ACTIVE 0x89D8 // 216 
-+#define PASSWORD_TOO_SHORT 0x89D8 // 216 -+#define ERR_Q_STN_NOT_SERVER 0x89D9 // 217 -+#define LOGIN_DENIED_NO_CONNECTION 0x89D9 // 217 -+#define ERR_MAXIMUM_LOGINS_EXCEEDED 0x89D9 // 217 -+#define ERR_Q_HALTED 0x89DA // 218 -+#define UNAUTHORIZED_LOGIN_TIME 0x89DA // 218 -+#define UNAUTHORIZED_LOGIN_STATION 0x89DB // 219 -+#define ERR_Q_MAX_SERVERS 0x89DB // 219 -+#define ACCOUNT_DISABLED 0x89DC // 220 -+#define PASSWORD_HAS_EXPIRED_NO_GRACE 0x89DE // 222 -+#define PASSWORD_HAS_EXPIRED 0x89DF // 223 -+#define E_NO_MORE_USERS 0x89E7 // 231 -+#define NOT_ITEM_PROPERTY 0x89E8 // 232 -+#define WRITE_PROPERTY_TO_GROUP 0x89E8 // 232 -+#define MEMBER_ALREADY_EXISTS 0x89E9 // 233 -+#define NO_SUCH_MEMBER 0x89EA // 234 -+#define NOT_GROUP_PROPERTY 0x89EB // 235 -+#define NO_SUCH_SEGMENT 0x89EC // 236 -+#define PROPERTY_ALREADY_EXISTS 0x89ED // 237 -+#define OBJECT_ALREADY_EXISTS 0x89EE // 238 -+#define INVALID_NAME 0x89EF // 239 -+#define WILD_CARD_NOT_ALLOWED 0x89F0 // 240 -+#define INVALID_BINDERY_SECURITY 0x89F1 // 241 -+#define NO_OBJECT_READ_PRIVILEGE 0x89F2 // 242 -+#define NO_OBJECT_RENAME_PRIVILEGE 0x89F3 // 243 -+#define NO_OBJECT_DELETE_PRIVILEGE 0x89F4 // 244 -+#define NO_OBJECT_CREATE_PRIVILEGE 0x89F5 // 245 -+#define NO_PROPERTY_DELETE_PRIVILEGE 0x89F6 // 246 -+#define NO_PROPERTY_CREATE_PRIVILEGE 0x89F7 // 247 -+#define NO_PROPERTY_WRITE_PRIVILEGE 0x89F8 // 248 -+#define NO_FREE_CONNECTION_SLOTS 0x89F9 // 249 -+#define NO_PROPERTY_READ_PRIVILEGE 0x89F9 // 249 -+#define NO_MORE_SERVER_SLOTS 0x89FA // 250 -+#define TEMP_REMAP_ERROR 0x89FA // 250 -+#define INVALID_PARAMETERS 0x89FB // 251 -+#define NO_SUCH_PROPERTY 0x89FB // 251 -+#define ERR_NCP_NOT_SUPPORTED 0x89FB // 251 -+#define INTERNET_PACKET_REQT_CANCELED 0x89FC // 252 -+#define UNKNOWN_FILE_SERVER 0x89FC // 252 -+#define MESSAGE_QUEUE_FULL 0x89FC // 252 -+#define NO_SUCH_OBJECT 0x89FC // 252 -+#define LOCK_COLLISION 0x89FD // 253 -+#define BAD_STATION_NUMBER 0x89FD // 253 -+#define 
INVALID_PACKET_LENGTH 0x89FD // 253 -+#define UNKNOWN_REQUEST 0x89FD // 253 -+#define BINDERY_LOCKED 0x89FE // 254 -+#define TRUSTEE_NOT_FOUND 0x89FE // 254 -+#define DIRECTORY_LOCKED 0x89FE // 254 -+#define INVALID_SEMAPHORE_NAME_LENGTH 0x89FE // 254 -+#define PACKET_NOT_DELIVERABLE 0x89FE // 254 -+#define SERVER_BINDERY_LOCKED 0x89FE // 254 -+#define SOCKET_TABLE_FULL 0x89FE // 254 -+#define SPOOL_DIRECTORY_ERROR 0x89FE // 254 -+#define SUPERVISOR_HAS_DISABLED_LOGIN 0x89FE // 254 -+#define TIMEOUT_FAILURE 0x89FE // 254 -+#define BAD_PRINTER_ERROR 0x89FF // 255 -+#define BAD_RECORD_OFFSET 0x89FF // 255 -+#define CLOSE_FCB_ERROR 0x89FF // 255 -+#define FILE_EXTENSION_ERROR 0x89FF // 255 -+#define FILE_NAME_ERROR 0x89FF // 255 -+#define HARDWARE_FAILURE 0x89FF // 255 -+#define INVALID_DRIVE_NUMBER 0x89FF // 255 -+#define DOS_INVALID_DRIVE 0x000F // 255 -+#define INVALID_INITIAL_SEMAPHORE_VALUE 0x89FF // 255 -+#define INVALID_SEMAPHORE_HANDLE 0x89FF // 255 -+#define IO_BOUND_ERROR 0x89FF // 255 -+#define NO_FILES_FOUND_ERROR 0x89FF // 255 -+#define NO_RESPONSE_FROM_SERVER 0x89FF // 255 -+#define NO_SUCH_OBJECT_OR_BAD_PASSWORD 0x89FF // 255 -+#define PATH_NOT_LOCATABLE 0x89FF // 255 -+#define QUEUE_FULL_ERROR 0x89FF // 255 -+#define REQUEST_NOT_OUTSTANDING 0x89FF // 255 -+#ifndef SOCKET_ALREADY_OPEN -+#define SOCKET_ALREADY_OPEN 0x89FF // 255 -+#endif -+#define LOCK_ERROR 0x89FF // 255 -+#ifndef FAILURE -+#define FAILURE 0x89FF // 255 Generic Failure -+#endif -+ -+#if 0 -+#define NOT_SAME_LOCAL_DRIVE 0x89F6 -+#define TARGET_DRIVE_NOT_LOCAL 0x89F7 -+#define ALREADY_ATTACHED_TO_SERVER 0x89F8 // 248 -+#define NOT_ATTACHED_TO_SERVER 0x89F8 -+#endif -+ -+/* -+ * Network errors -+ * Decimal values at end of line are 32768 lower than actual -+ */ -+#define NWE_ALREADY_ATTACHED 0x8800 // 0 - Attach attempted to server with valid, existing connection -+#define NWE_CONN_INVALID 0x8801 // 1 - Request attempted with invalid or non-attached connection handle -+#define 
NWE_DRIVE_IN_USE 0x8802 // 2 - OS/2 only (NOT USED) -+#define NWE_DRIVE_CANNOT_MAP 0x8803 // 3 - Map drive attempted but unable to add new current directory structure -+#define NWE_DRIVE_BAD_PATH 0x8804 // 4 - Map drive attempted with invalid path specification -+#define NWE_NET_RECEIVE 0x8805 // 5 - Attempt to receive from the selected transport failed -+#define NWE_NET_UNKNOWN 0x8806 // 6 - Network send attempted with an un-specific network error -+#define NWE_SERVER_BAD_SLOT 0x8807 // 7 - Server request attempted with invalid server connection slot -+#define NWE_SERVER_NO_SLOTS 0x8808 // 8 - Attach attempted to server with no connection slots available -+#define NWE_NET_SEND 0x8809 // 9 - Attempt to send on the selected transport failed -+#define NWE_SERVER_NO_ROUTE 0x880A // 10 - Attempted to find route to server where no route exists -+#define NWE_BAD_LOCAL_TARGET 0x880B // 11 - OS/2 only -+#define NWE_REQ_TOO_MANY_REQ_FRAGS 0x880C // 12 - Attempted request with too many request fragments specified -+#define NWE_CONN_LIST_OVERFLOW 0x880D // 13 -+#define NWE_BUFFER_OVERFLOW 0x880E // 14 - Attempt to receive more data than the reply buffer had room for -+#define NWE_SERVER_NO_CONN 0x880F // 15 - Attempt to get connection for a server not connected -+#define NWE_NO_ROUTER_FOUND 0x8810 // 16 - OS/2 only -+#define NWE_FUNCTION_INVALID 0x8811 // 17 - Attempted function call to non- existent or illegal function -+#define NWE_SCAN_COMPLETE 0x8812 -+#define NWE_UNSUPPORTED_NAME_FORMAT_TYP 0x8813 -+#define NWE_HANDLE_ALREADY_LICENSED 0x8814 -+#define NWE_HANDLE_ALREADY_UNLICENSED 0x8815 -+#define NWE_INVALID_NCP_PACKET_LENGTH 0x8816 -+#define NWE_SETTING_UP_TIMEOUT 0x8817 -+#define NWE_SETTING_SIGNALS 0x8818 -+#define NWE_SERVER_CONNECTION_LOST 0x8819 -+#define NWE_OUT_OF_HEAP_SPACE 0x881A -+#define NWE_INVALID_SERVICE_REQUEST 0x881B -+#define NWE_INVALID_TASK_NUMBER 0x881C -+#define NWE_INVALID_MESSAGE_LENGTH 0x881D -+#define NWE_EA_SCAN_DONE 0x881E -+#define 
NWE_BAD_CONNECTION_NUMBER 0x881F -+#define NWE_MULT_TREES_NOT_SUPPORTED 0x8820 // 32 - Attempt to open a connection to a DS tree other than the default tree -+#define NWE_CONN_NOT_SAME 0x8830 // 48 - Internal server request attempted across different server connections -+#define NWE_CONN_PRIMARY_NOT_SET 0x8831 // 49 - Attempt to retrieve default connection with no primary connection set -+#define NWE_PRN_CAPTURE_NOT_IN_PROGRESS 0x8832 // 50 - Capture information requested on port with no capture in progress -+#define NWE_BUFFER_INVALID_LEN 0x8833 // 51 - Used to indicate length which caller requested on a GetDNC or SetDNC was too large -+#define NWE_USER_NO_NAME 0x8834 // 52 -+#define NWE_PRN_NO_LOCAL_SPOOLER 0x8835 // 53 - Capture requested without having the local print spooler installed -+#define NWE_PARAM_INVALID 0x8836 // 54 - Attempted function with an invalid function parameter specified -+#define NWE_CFG_OPEN_FAILED 0x8837 // 55 - OS/2 only -+#define NWE_CFG_NO_FILE 0x8838 // 56 - OS/2 only -+#define NWE_CFG_READ_FAILED 0x8839 // 57 - OS/2 only -+#define NWE_CFG_LINE_TOO_LONG 0x883A // 58 - OS/2 only -+#define NWE_CFG_LINES_IGNORED 0x883B // 59 - OS/2 only -+#define NWE_RESOURCE_NOT_OWNED 0x883C // 60 - Attempted request made with a parameter using foriegn resource -+#define NWE_DAEMON_INSTALLED 0x883D // 61 - OS/2 only -+#define NWE_PRN_SPOOLER_INSTALLED 0x883E // 62 - Attempted load of print spooler with print spooler already installed -+#define NWE_CONN_TABLE_FULL 0x883F // 63 - Attempted to allocate a connection handle with no more local connection table entries -+#define NWE_CFG_SECTION_NOT_FOUND 0x8840 // 64 - OS/2 only -+#define NWE_TRAN_INVALID_TYPE 0x8841 // 65 - Attempted function on a connection with an invalid transport selected -+#define NWE_TDS_TAG_IN_USE 0x8842 // 66 - OS/2 only -+#define NWE_TDS_OUT_OF_MEMORY 0x8843 // 67 - OS/2 only -+#define NWE_TDS_INVALID_TAG 0x8844 // 68 - Attempted TDS function with invalid tag -+#define 
NWE_TDS_WRITE_TRUNCATED 0x8845 // 69 - Attempted TDS write with buffer that exceeded buffer -+#define NWE_DS_NO_CONN 0x8846 // 70 -+#define NWE_SERVICE_BUSY 0x8846 // 70 - Attempted request made to partially asynchronous function in busy state -+#define NWE_SERVER_NOT_FOUND 0x8847 // 71 - Attempted connect failed to find any servers responding -+#define NWE_VLM_INVALID 0x8848 // 72 - Attempted function call to non-existant or not-loaded overlay -+#define NWE_DRIVE_ALREADY_MAPPED 0x8849 // 73 - Attempted map to network drive that was already mapped -+#define NWE_DRIVE_LOCAL_IN_USE 0x884A // 74 - Attempted map to local drive that was in use -+#define NWE_DRIVE_NONE_AVAILABLE 0x884B // 75 - Attempted map to next available drive when none were available -+#define NWE_DEVICE_NOT_REDIRECTED 0x884C // 76 - The device is not redirected -+#define NWE_FILE_MAX_REACHED 0x884D // 77 - Maximum number of files was reached -+#define NWE_UNLOAD_FAILED 0x884E // 78 - Attempted unload failed -+#define NWE_CONN_IN_USE 0x884F // 79 - Attempted re-use of already in use connection entry -+#define NWE_REQ_TOO_MANY_REP_FRAGS 0x8850 // 80 - Attempted request with too many reply fragments specified -+#define NWE_NAME_TABLE_FULL 0x8851 // 81 - Attempted to add a name into the name table after it was full -+#define NWE_SOCKET_NOT_OPEN 0x8852 // 82 - Listen was posted on unopened socket -+#define NWE_MEMORY_MGR_ERROR 0x8853 // 83 - Attempted enhanced memory operation failed -+#define NWE_SFT3_ERROR 0x8854 // 84 - An SFT3 switch occured mid-transfer -+#define NWE_DS_PREFERRED_NOT_FOUND 0x8855 // 85 - the preferred directory server was not established but another directory server was returned -+#define NWE_DEVICE_NOT_RECOGNIZED 0x8856 // 86 - used to determine if the device is not used by VISE so pass it on to the next redirector, if any. 
-+#define NWE_NET_INVALID_TYPE 0x8857 // 87 - the network type (Bind/NDS) does not match the server version -+#define NWE_FILE_OPEN_FAILED 0x8858 // 88 - generic open failure error, invalid path, access denied, etc.. -+#define NWE_DS_PREFERRED_NOT_SPECIFIED 0x8859 // 89 - no preferred name specified -+#define NWE_SOCKET_OPEN_FAILED 0x885A // 90 - error opening a socket -+#define NWE_SIGNATURE_LEVEL_CONFLICT 0x8861 -+#define NWE_NO_LOCK_FOUND 0x8862 // OS/2 - process lock on conn handle failed, process ID not recognized -+#define NWE_LOCK_TABLE_FULL 0x8863 // OS/2 - process lock on conn handle failed, process lock table full -+#define NWE_INVALID_MATCH_DATA 0x8864 -+#define NWE_MATCH_FAILED 0x8865 -+#define NWE_NO_MORE_ENTRIES 0x8866 -+#define NWE_INSUFFICIENT_RESOURCES 0x8867 -+#define NWE_STRING_TRANSLATION 0x8868 -+#define NWE_ACCESS_VIOLATION 0x8869 -+#define NWE_NOT_AUTHENTICATED 0x886A -+#define NWE_INVALID_LEVEL 0x886B -+#define NWE_RESOURCE_LOCK 0x886C -+#define NWE_INVALID_NAME_FORMAT 0x886D -+#define NWE_OBJECT_EXISTS 0x886E -+#define NWE_OBJECT_NOT_FOUND 0x886F -+#define NWE_UNSUPPORTED_TRAN_TYPE 0x8870 -+#define NWE_INVALID_STRING_TYPE 0x8871 -+#define NWE_INVALID_OWNER 0x8872 -+#define NWE_UNSUPPORTED_AUTHENTICATOR 0x8873 -+#define NWE_IO_PENDING 0x8874 -+#define NWE_INVALID_DRIVE_NUMBER 0x8875 -+#define NWE_REPLY_TRUNCATED 0x88e6 // 230 NLM -+#define NWE_REQUESTER_FAILURE 0x88FF -+ -+/* -+ * Server Errors -+ */ -+#define NWE_INSUFFICIENT_SPACE 0x8901 // 001 -+#define NWE_INVALID_CONNECTION 0x890a // 010 - nlm invalid connection -+#define NWE_TIMEOUT 0x8910 // 016 - nlm connection timeout -+#define NWE_NO_MORE_ENTRY 0x8914 // 020 -+#define NWE_BUFFER_TOO_SMALL 0x8977 // 119 -+#define NWE_VOL_FLAG_NOT_SET 0x8978 // 120 the service requested, not avail. on the selected vol. 
-+#define NWE_NO_ITEMS_FOUND 0x8979 // 121 -+#define NWE_CONN_ALREADY_TEMP 0x897a // 122 -+#define NWE_CONN_ALREADY_LOGGED_IN 0x897b // 123 -+#define NWE_CONN_NOT_AUTHENTICATED 0x897c // 124 -+#define NWE_CONN_NOT_LOGGED_IN 0x897d // 125 -+#define NWE_NCP_BOUNDARY_CHECK_FAILED 0x897e // 126 -+#define NWE_LOCK_WAITING 0x897f // 127 -+#define NWE_LOCK_FAIL 0x8980 // 128 -+#define NWE_FILE_IN_USE 0x8980 // 128 -+#define NWE_FILE_NO_HANDLES 0x8981 // 129 -+#define NWE_FILE_NO_OPEN_PRIV 0x8982 // 130 -+#define NWE_DISK_IO_ERROR 0x8983 // 131 -+#define NWE_AUDITING_HARD_IO_ERROR 0x8983 // 131 -+#define NWE_FILE_NO_CREATE_PRIV 0x8984 // 132 -+#define NWE_AUDITING_NOT_SUPV 0x8984 // 132 -+#define NWE_FILE_NO_CREATE_DEL_PRIV 0x8985 // 133 -+#define NWE_FILE_EXISTS_READ_ONLY 0x8986 // 134 -+#define NWE_FILE_WILD_CARDS_IN_NAME 0x8987 // 135 -+#define NWE_FILE_INVALID_HANDLE 0x8988 // 136 -+#define NWE_FILE_NO_SRCH_PRIV 0x8989 // 137 -+#define NWE_FILE_NO_DEL_PRIV 0x898A // 138 -+#define NWE_FILE_NO_RENAME_PRIV 0x898B // 139 -+#define NWE_FILE_NO_MOD_PRIV 0x898C // 140 -+#define NWE_FILE_SOME_IN_USE 0x898D // 141 -+#define NWE_FILE_NONE_IN_USE 0x898E // 142 -+#define NWE_FILE_SOME_READ_ONLY 0x898F // 143 -+#define NWE_FILE_NONE_READ_ONLY 0x8990 // 144 -+#define NWE_FILE_SOME_RENAMED_EXIST 0x8991 // 145 -+#define NWE_FILE_NONE_RENAMED_EXIST 0x8992 // 146 -+#define NWE_FILE_NO_READ_PRIV 0x8993 // 147 -+#define NWE_FILE_NO_WRITE_PRIV 0x8994 // 148 -+#define NWE_FILE_READ_ONLY 0x8994 // 148 -+#define NWE_FILE_DETACHED 0x8995 // 149 -+#define NWE_SERVER_OUT_OF_MEMORY 0x8996 // 150 -+#define NWE_DIR_TARGET_INVALID 0x8996 // 150 -+#define NWE_DISK_NO_SPOOL_SPACE 0x8997 // 151 -+#define NWE_AUDITING_NOT_ENABLED 0x8997 // 151 -+#define NWE_VOL_INVALID 0x8998 // 152 -+#define NWE_DIR_FULL 0x8999 // 153 -+#define NWE_VOL_RENAMING_ACROSS 0x899A // 154 -+#define NWE_DIRHANDLE_INVALID 0x899B // 155 -+#define NWE_PATH_INVALID 0x899C // 156 -+#define NWE_TRUSTEES_NO_MORE 0x899C // 156 
-+#define NWE_DIRHANDLE_NO_MORE 0x899D // 157 -+#define NWE_FILE_NAME_INVALID 0x899E // 158 -+#define NWE_DIR_ACTIVE 0x899F // 159 -+#define NWE_DIR_NOT_EMPTY 0x89A0 // 160 -+#define NWE_DIR_IO_ERROR 0x89A1 // 161 -+#define NWE_FILE_IO_LOCKED 0x89A2 // 162 -+#define NWE_TTS_RANSACTION_RESTARTED 0x89A3 // 163 -+#define NWE_TTS_TRANSACTION_RESTARTED 0x89A3 // 163 -+#define NWE_DIR_RENAME_INVALID 0x89A4 // 164 -+#define NWE_FILE_OPENCREAT_MODE_INVALID 0x89A5 // 165 -+#define NWE_ALREADY_IN_USE 0x89A6 // 166 -+#define NWE_AUDITING_ACTIVE 0x89A6 // 166 -+#define NWE_RESOURCE_TAG_INVALID 0x89A7 // 167 -+#define NWE_ACCESS_DENIED 0x89A8 // 168 -+#define NWE_AUDITING_NO_RIGHTS 0x89A8 // 168 -+#define NWE_LINK_IN_PATH 0x89A9 // 169 -+#define NWE_INVALID_DATA_TYPE_FLAG 0x89AA // 170 (legacy vol with UTF8) -+#define NWE_DATA_STREAM_INVALID 0x89BE // 190 -+#define NWE_NAME_SPACE_INVALID 0x89BF // 191 -+#define NWE_ACCTING_NO_PRIV 0x89C0 // 192 -+#define NWE_ACCTING_NO_BALANCE 0x89C1 // 193 -+#define NWE_ACCTING_NO_CREDIT 0x89C2 // 194 -+#define NWE_AUDITING_RECORD_SIZE 0x89C2 // 194 -+#define NWE_ACCTING_TOO_MANY_HOLDS 0x89C3 // 195 -+#define NWE_ACCTING_DISABLED 0x89C4 // 196 -+#define NWE_LOGIN_LOCKOUT 0x89C5 // 197 -+#define NWE_CONSOLE_NO_PRIV 0x89C6 // 198 -+#define NWE_Q_IO_FAILURE 0x89D0 // 208 -+#define NWE_Q_NONE 0x89D1 // 209 -+#define NWE_Q_NO_SERVER 0x89D2 // 210 -+#define NWE_Q_NO_RIGHTS 0x89D3 // 211 -+#define NWE_Q_FULL 0x89D4 // 212 -+#define NWE_Q_NO_JOB 0x89D5 // 213 -+#define NWE_Q_NO_JOB_RIGHTS 0x89D6 // 214 -+#define NWE_PASSWORD_UNENCRYPTED 0x89D6 // 214 -+#define NWE_Q_IN_SERVICE 0x89D7 // 215 -+#define NWE_PASSWORD_NOT_UNIQUE 0x89D7 // 215 -+#define NWE_Q_NOT_ACTIVE 0x89D8 // 216 -+#define NWE_PASSWORD_TOO_SHORT 0x89D8 // 216 -+#define NWE_Q_STN_NOT_SERVER 0x89D9 // 217 -+#define NWE_LOGIN_NO_CONN 0x89D9 // 217 -+#define NWE_LOGIN_MAX_EXCEEDED 0x89D9 // 217 -+#define NWE_Q_HALTED 0x89DA // 218 -+#define NWE_LOGIN_UNAUTHORIZED_TIME 0x89DA // 218 
-+#define NWE_LOGIN_UNAUTHORIZED_STATION 0x89DB // 219 -+#define NWE_Q_MAX_SERVERS 0x89DB // 219 -+#define NWE_ACCT_DISABLED 0x89DC // 220 -+#define NWE_PASSWORD_INVALID 0x89DE // 222 -+#define NWE_PASSWORD_EXPIRED 0x89DF // 223 -+#define NWE_LOGIN_NO_CONN_AVAIL 0x89E0 // 224 -+#define NWE_E_NO_MORE_USERS 0x89E7 // 231 -+#define NWE_BIND_NOT_ITEM_PROP 0x89E8 // 232 -+#define NWE_BIND_WRITE_TO_GROUP_PROP 0x89E8 // 232 -+#define NWE_BIND_MEMBER_ALREADY_EXISTS 0x89E9 // 233 -+#define NWE_BIND_NO_SUCH_MEMBER 0x89EA // 234 -+#define NWE_BIND_NOT_GROUP_PROP 0x89EB // 235 -+#define NWE_BIND_NO_SUCH_SEGMENT 0x89EC // 236 -+#define NWE_BIND_PROP_ALREADY_EXISTS 0x89ED // 237 -+#define NWE_BIND_OBJ_ALREADY_EXISTS 0x89EE // 238 -+#define NWE_BIND_NAME_INVALID 0x89EF // 239 -+#define NWE_BIND_WILDCARD_INVALID 0x89F0 // 240 -+#define NWE_BIND_SECURITY_INVALID 0x89F1 // 241 -+#define NWE_BIND_OBJ_NO_READ_PRIV 0x89F2 // 242 -+#define NWE_BIND_OBJ_NO_RENAME_PRIV 0x89F3 // 243 -+#define NWE_BIND_OBJ_NO_DELETE_PRIV 0x89F4 // 244 -+#define NWE_BIND_OBJ_NO_CREATE_PRIV 0x89F5 // 245 -+#define NWE_BIND_PROP_NO_DELETE_PRIV 0x89F6 // 246 -+#define NWE_BIND_PROP_NO_CREATE_PRIV 0x89F7 // 247 -+#define NWE_BIND_PROP_NO_WRITE_PRIV 0x89F8 // 248 -+#define NWE_BIND_PROP_NO_READ_PRIV 0x89F9 // 249 -+#define NWE_NO_FREE_CONN_SLOTS 0x89F9 // 249 -+#define NWE_NO_MORE_SERVER_SLOTS 0x89FA // 250 -+#define NWE_TEMP_REMAP_ERROR 0x89FA // 250 -+#define NWE_PARAMETERS_INVALID 0x89FB // 251 -+#define NWE_BIND_NO_SUCH_PROP 0x89FB // 251 -+#define NWE_NCP_NOT_SUPPORTED 0x89FB // 251 -+#define NWE_INET_PACKET_REQ_CANCELED 0x89FC // 252 -+#define NWE_SERVER_UNKNOWN 0x89FC // 252 -+#define NWE_MSG_Q_FULL 0x89FC // 252 -+#define NWE_BIND_NO_SUCH_OBJ 0x89FC // 252 -+#define NWE_LOCK_COLLISION 0x89FD // 253 -+#define NWE_CONN_NUM_INVALID 0x89FD // 253 -+#define NWE_PACKET_LEN_INVALID 0x89FD // 253 -+#define NWE_UNKNOWN_REQ 0x89FD // 253 -+#define NWE_BIND_LOCKED 0x89FE // 254 -+#define NWE_TRUSTEE_NOT_FOUND 
0x89FE // 254 -+#define NWE_DIR_LOCKED 0x89FE // 254 -+#define NWE_SEM_INVALID_NAME_LEN 0x89FE // 254 -+#define NWE_PACKET_NOT_DELIVERABLE 0x89FE // 254 -+#define NWE_SOCKET_TABLE_FULL 0x89FE // 254 -+#define NWE_SPOOL_DIR_ERROR 0x89FE // 254 -+#define NWE_LOGIN_DISABLED_BY_SUPER 0x89FE // 254 -+#define NWE_TIMEOUT_FAILURE 0x89FE // 254 -+#define NWE_FILE_EXT 0x89FF // 255 -+#define NWE_FILE_NAME 0x89FF // 255 -+#define NWE_HARD_FAILURE 0x89FF // 255 -+#define NWE_FCB_CLOSE 0x89FF // 255 -+#define NWE_IO_BOUND 0x89FF // 255 -+#define NWE_BAD_SPOOL_PRINTER 0x89FF // 255 -+#define NWE_BAD_RECORD_OFFSET 0x89FF // 255 -+#define NWE_DRIVE_INVALID_NUM 0x89FF // 255 -+#define NWE_SEM_INVALID_INIT_VAL 0x89FF // 255 -+#define NWE_SEM_INVALID_HANDLE 0x89FF // 255 -+#define NWE_NO_FILES_FOUND_ERROR 0x89FF // 255 -+#define NWE_NO_RESPONSE_FROM_SERVER 0x89FF // 255 -+#define NWE_NO_OBJ_OR_BAD_PASSWORD 0x89FF // 255 -+#define NWE_PATH_NOT_LOCATABLE 0x89FF // 255 -+#define NWE_Q_FULL_ERROR 0x89FF // 255 -+#define NWE_REQ_NOT_OUTSTANDING 0x89FF // 255 -+#define NWE_SOCKET_ALREADY_OPEN 0x89FF // 255 -+#define NWE_LOCK_ERROR 0x89FF // 255 -+#define NWE_FAILURE 0x89FF // 255 Generic Failure -+ -+#endif /* __NOVFS_ERROR_H */ ---- /dev/null -+++ b/fs/novfs/proc.c -@@ -0,0 +1,149 @@ -+/* -+ * Novell NCP Redirector for Linux -+ * Author: James Turner -+ * -+ * This module contains functions that create the interface to the proc -+ * filesystem. -+ * -+ * Copyright (C) 2005 Novell, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. 
-+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "vfs.h" -+ -+struct proc_dir_entry *novfs_procfs_dir; -+struct proc_dir_entry *Novfs_Control; -+struct proc_dir_entry *Novfs_Library; -+struct proc_dir_entry *Novfs_Version; -+ -+static struct file_operations novfs_daemon_proc_fops; -+static struct file_operations novfs_lib_proc_fops; -+ -+/*===[ Code ]=============================================================*/ -+ -+static int Novfs_Get_Version(char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ char *buf, tbuf[48]; -+ int len = 0, i; -+ -+ if (!off) { -+ buf = page + off; -+ *start = buf; -+ len = sprintf(buf, "Novfs Version=%s\n", NOVFS_VERSION_STRING); -+ i = novfs_daemon_getversion(tbuf, sizeof(tbuf)); -+ if ((i > 0) && i < (count - len)) { -+ len += sprintf(buf + len, "Novfsd Version=%s\n", tbuf); -+ } -+ -+ if (novfs_current_mnt) { -+ i = strlen(novfs_current_mnt); -+ if ((i > 0) && i < (count - len)) { -+ len += -+ sprintf(buf + len, "Novfs mount=%s\n", -+ novfs_current_mnt); -+ } -+ } -+ DbgPrint("%s", buf); -+ } -+ *eof = 1; -+ return (len); -+} -+ -+int novfs_proc_init(void) -+{ -+ int retCode = 0; -+ -+ novfs_procfs_dir = proc_mkdir(MODULE_NAME, NULL); -+ if (novfs_procfs_dir) { -+ -+ Novfs_Control = create_proc_entry("Control", 0600, novfs_procfs_dir); -+ -+ if (Novfs_Control) { -+ Novfs_Control->size = 0; -+ memcpy(&novfs_daemon_proc_fops, -+ Novfs_Control->proc_fops, -+ sizeof(struct file_operations)); -+ -+ /* -+ * Setup our functions -+ */ -+ novfs_daemon_proc_fops.owner = THIS_MODULE; -+ novfs_daemon_proc_fops.open = novfs_daemon_open_control; -+ novfs_daemon_proc_fops.release = novfs_daemon_close_control; -+ novfs_daemon_proc_fops.read = novfs_daemon_cmd_send; -+ novfs_daemon_proc_fops.write = novfs_daemon_recv_reply; -+ novfs_daemon_proc_fops.ioctl = novfs_daemon_ioctl; -+ -+ Novfs_Control->proc_fops = &novfs_daemon_proc_fops; -+ } else { -+ remove_proc_entry(MODULE_NAME, NULL); -+ return (-ENOENT); -+ 
} -+ -+ Novfs_Library = create_proc_entry("Library", 0666, novfs_procfs_dir); -+ if (Novfs_Library) { -+ Novfs_Library->size = 0; -+ -+ /* -+ * Setup our file functions -+ */ -+ memcpy(&novfs_lib_proc_fops, Novfs_Library->proc_fops, -+ sizeof(struct file_operations)); -+ novfs_lib_proc_fops.owner = THIS_MODULE; -+ novfs_lib_proc_fops.open = novfs_daemon_lib_open; -+ novfs_lib_proc_fops.release = novfs_daemon_lib_close; -+ novfs_lib_proc_fops.read = novfs_daemon_lib_read; -+ novfs_lib_proc_fops.write = novfs_daemon_lib_write; -+ novfs_lib_proc_fops.llseek = novfs_daemon_lib_llseek; -+ novfs_lib_proc_fops.ioctl = novfs_daemon_lib_ioctl; -+ Novfs_Library->proc_fops = &novfs_lib_proc_fops; -+ } else { -+ remove_proc_entry("Control", novfs_procfs_dir); -+ remove_proc_entry(MODULE_NAME, NULL); -+ return (-ENOENT); -+ } -+ -+ Novfs_Version = -+ create_proc_read_entry("Version", 0444, novfs_procfs_dir, -+ Novfs_Get_Version, NULL); -+ if (Novfs_Version) { -+ Novfs_Version->size = 0; -+ } else { -+ remove_proc_entry("Library", novfs_procfs_dir); -+ remove_proc_entry("Control", novfs_procfs_dir); -+ remove_proc_entry(MODULE_NAME, NULL); -+ retCode = -ENOENT; -+ } -+ } else { -+ retCode = -ENOENT; -+ } -+ return (retCode); -+} -+ -+void novfs_proc_exit(void) -+{ -+ -+ DbgPrint("remove_proc_entry(Version, NULL)\n"); -+ remove_proc_entry("Version", novfs_procfs_dir); -+ -+ DbgPrint("remove_proc_entry(Control, NULL)\n"); -+ remove_proc_entry("Control", novfs_procfs_dir); -+ -+ DbgPrint("remove_proc_entry(Library, NULL)\n"); -+ remove_proc_entry("Library", novfs_procfs_dir); -+ -+ DbgPrint("remove_proc_entry(%s, NULL)\n", -+ MODULE_NAME); -+ remove_proc_entry(MODULE_NAME, NULL); -+ -+ DbgPrint("done\n"); -+} ---- /dev/null -+++ b/fs/novfs/profile.c -@@ -0,0 +1,704 @@ -+/* -+ * Novell NCP Redirector for Linux -+ * Author: James Turner -+ * -+ * This file contains a debugging code for the novfs VFS. -+ * -+ * Copyright (C) 2005 Novell, Inc. 
-+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "vfs.h" -+ -+/*===[ Manifest constants ]===============================================*/ -+#define DBGBUFFERSIZE (1024*1024*32) -+ -+/*===[ Type definitions ]=================================================*/ -+struct local_rtc_time { -+ int tm_sec; -+ int tm_min; -+ int tm_hour; -+ int tm_mday; -+ int tm_mon; -+ int tm_year; -+ int tm_wday; -+ int tm_yday; -+ int tm_isdst; -+}; -+ -+char *DbgPrintBuffer = NULL; -+char DbgPrintOn = 0; -+char DbgSyslogOn = 0; -+char DbgProfileOn = 0; -+ -+static unsigned long DbgPrintBufferOffset = 0; -+static unsigned long DbgPrintBufferReadOffset = 0; -+static unsigned long DbgPrintBufferSize = DBGBUFFERSIZE; -+ -+static struct file_operations Dbg_proc_file_operations; -+static struct file_operations dentry_proc_file_ops; -+static struct file_operations inode_proc_file_ops; -+ -+static struct proc_dir_entry *dbg_dir = NULL; -+static struct proc_dir_entry *dbg_file = NULL; -+static struct proc_dir_entry *dentry_file = NULL; -+static struct proc_dir_entry *inode_file = NULL; -+ -+static DECLARE_MUTEX(LocalPrint_lock); -+ -+static ssize_t User_proc_write_DbgBuffer(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) -+{ -+ ssize_t retval = nbytes; -+ u_char *lbuf, *p; -+ int i; -+ u_long cpylen; -+ -+ lbuf = kmalloc(nbytes + 1, GFP_KERNEL); -+ if (lbuf) { -+ cpylen = copy_from_user(lbuf, buf, nbytes); -+ -+ lbuf[nbytes] = 0; -+ DbgPrint("%s", lbuf); -+ -+ for (i = 0; lbuf[i] && lbuf[i] != '\n'; i++) ; -+ -+ if ('\n' == lbuf[i]) { -+ lbuf[i] = '\0'; -+ } -+ -+ if (!strcmp("on", lbuf)) { -+ DbgPrintBufferOffset = 
DbgPrintBufferReadOffset = 0; -+ DbgPrintOn = 1; -+ } else if (!strcmp("off", lbuf)) { -+ DbgPrintOn = 0; -+ } else if (!strcmp("reset", lbuf)) { -+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; -+ } else if (NULL != (p = strchr(lbuf, ' '))) { -+ *p++ = '\0'; -+ if (!strcmp("syslog", lbuf)) { -+ -+ if (!strcmp("on", p)) { -+ DbgSyslogOn = 1; -+ } else if (!strcmp("off", p)) { -+ DbgSyslogOn = 0; -+ } -+ } else if (!strcmp("novfsd", lbuf)) { -+ novfs_daemon_debug_cmd_send(p); -+ } else if (!strcmp("file_update_timeout", lbuf)) { -+ novfs_update_timeout = -+ simple_strtoul(p, NULL, 0); -+ } else if (!strcmp("cache", lbuf)) { -+ if (!strcmp("on", p)) { -+ novfs_page_cache = 1; -+ } else if (!strcmp("off", p)) { -+ novfs_page_cache = 0; -+ } -+ } else if (!strcmp("profile", lbuf)) { -+ if (!strcmp("on", p)) { -+ DbgProfileOn = 1; -+ } else if (!strcmp("off", p)) { -+ DbgProfileOn = 0; -+ } -+ } -+ } -+ kfree(lbuf); -+ } -+ -+ return (retval); -+} -+ -+static ssize_t User_proc_read_DbgBuffer(struct file *file, char *buf, size_t nbytes, loff_t * ppos) -+{ -+ ssize_t retval = 0; -+ size_t count; -+ -+ if (0 != (count = DbgPrintBufferOffset - DbgPrintBufferReadOffset)) { -+ -+ if (count > nbytes) { -+ count = nbytes; -+ } -+ -+ count -= -+ copy_to_user(buf, &DbgPrintBuffer[DbgPrintBufferReadOffset], -+ count); -+ -+ if (count == 0) { -+ if (retval == 0) -+ retval = -EFAULT; -+ } else { -+ DbgPrintBufferReadOffset += count; -+ if (DbgPrintBufferReadOffset >= DbgPrintBufferOffset) { -+ DbgPrintBufferOffset = -+ DbgPrintBufferReadOffset = 0; -+ } -+ retval = count; -+ } -+ } -+ -+ return retval; -+} -+ -+static int proc_read_DbgBuffer(char *page, char **start, off_t off, int count, int *eof, void *data) -+{ -+ int len; -+ -+ printk(KERN_ALERT "proc_read_DbgBuffer: off=%ld count=%d DbgPrintBufferOffset=%lu DbgPrintBufferReadOffset=%lu\n", off, count, DbgPrintBufferOffset, DbgPrintBufferReadOffset); -+ -+ len = DbgPrintBufferOffset - DbgPrintBufferReadOffset; -+ -+ if 
((int)(DbgPrintBufferOffset - DbgPrintBufferReadOffset) > count) -+ len = count; -+ -+ if (len) { -+ memcpy(page, &DbgPrintBuffer[DbgPrintBufferReadOffset], len); -+ DbgPrintBufferReadOffset += len; -+ } -+ -+ if (DbgPrintBufferReadOffset >= DbgPrintBufferOffset) -+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; -+ -+ printk(KERN_ALERT "proc_read_DbgBuffer: return %d\n", len); -+ -+ return len; -+} -+ -+#define DBG_BUFFER_SIZE (2*1024) -+ -+static int LocalPrint(char *Fmt, ...) -+{ -+ int len = 0; -+ va_list args; -+ -+ if (DbgPrintBuffer) { -+ va_start(args, Fmt); -+ len += vsnprintf(DbgPrintBuffer + DbgPrintBufferOffset, -+ DbgPrintBufferSize - DbgPrintBufferOffset, -+ Fmt, args); -+ DbgPrintBufferOffset += len; -+ } -+ -+ return (len); -+} -+ -+int ___DbgPrint(const char *site, const char *Fmt, ...) -+{ -+ char *buf; -+ int len = 0; -+ unsigned long offset; -+ va_list args; -+ -+ if ((DbgPrintBuffer && DbgPrintOn) || DbgSyslogOn) { -+ buf = kmalloc(DBG_BUFFER_SIZE, GFP_KERNEL); -+ -+ if (buf) { -+ va_start(args, Fmt); -+ len = snprintf(buf, DBG_BUFFER_SIZE, "[%d] %s ", current->pid, site); -+ len += vsnprintf(buf + len, DBG_BUFFER_SIZE - len, Fmt, -+ args); -+ if (-1 == len) { -+ len = DBG_BUFFER_SIZE - 1; -+ buf[len] = '\0'; -+ } -+ /* -+ len = sprintf(&DbgPrintBuffer[offset], "[%llu] ", ts); -+ len += vsprintf(&DbgPrintBuffer[offset+len], Fmt, args); -+ */ -+ -+ if (len) { -+ if (DbgSyslogOn) { -+ printk("<6>%s", buf); -+ } -+ -+ if (DbgPrintBuffer && DbgPrintOn) { -+ if ((DbgPrintBufferOffset + len) > -+ DbgPrintBufferSize) { -+ offset = DbgPrintBufferOffset; -+ DbgPrintBufferOffset = 0; -+ memset(&DbgPrintBuffer[offset], -+ 0, -+ DbgPrintBufferSize - -+ offset); -+ } -+ -+ mb(); -+ -+ if ((DbgPrintBufferOffset + len) < -+ DbgPrintBufferSize) { -+ DbgPrintBufferOffset += len; -+ offset = -+ DbgPrintBufferOffset - len; -+ memcpy(&DbgPrintBuffer[offset], -+ buf, len + 1); -+ } -+ } -+ } -+ kfree(buf); -+ } -+ } -+ -+ return (len); -+} -+ -+static void 
doline(unsigned char *b, unsigned char *e, unsigned char *l) -+{ -+ unsigned char c; -+ -+ *b++ = ' '; -+ -+ while (l < e) { -+ c = *l++; -+ if ((c < ' ') || (c > '~')) { -+ c = '.'; -+ } -+ *b++ = c; -+ *b = '\0'; -+ } -+} -+ -+void novfs_dump(int size, void *dumpptr) -+{ -+ unsigned char *ptr = (unsigned char *)dumpptr; -+ unsigned char *line = NULL, buf[100], *bptr = buf; -+ int i; -+ -+ if (DbgPrintBuffer || DbgSyslogOn) { -+ if (size) { -+ for (i = 0; i < size; i++) { -+ if (0 == (i % 16)) { -+ if (line) { -+ doline(bptr, ptr, line); -+ __DbgPrint("%s\n", buf); -+ bptr = buf; -+ } -+ bptr += sprintf(bptr, "0x%p: ", ptr); -+ line = ptr; -+ } -+ bptr += sprintf(bptr, "%02x ", *ptr++); -+ } -+ doline(bptr, ptr, line); -+ __DbgPrint("%s\n", buf); -+ } -+ } -+} -+ -+#define FEBRUARY 2 -+#define STARTOFTIME 1970 -+#define SECDAY 86400L -+#define SECYR (SECDAY * 365) -+#define leapyear(year) ((year) % 4 == 0) -+#define days_in_year(a) (leapyear(a) ? 366 : 365) -+#define days_in_month(a) (month_days[(a) - 1]) -+ -+static int month_days[12] = { -+ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 -+}; -+ -+/* -+ * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) -+ */ -+static void NovfsGregorianDay(struct local_rtc_time *tm) -+{ -+ int leapsToDate; -+ int lastYear; -+ int day; -+ int MonthOffset[] = -+ { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; -+ -+ lastYear = tm->tm_year - 1; -+ -+ /* -+ * Number of leap corrections to apply up to end of last year -+ */ -+ leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; -+ -+ /* -+ * This year is a leap year if it is divisible by 4 except when it is -+ * divisible by 100 unless it is divisible by 400 -+ * -+ * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be -+ */ -+ if ((tm->tm_year % 4 == 0) && -+ ((tm->tm_year % 100 != 0) || (tm->tm_year % 400 == 0)) && -+ (tm->tm_mon > 2)) { -+ /* -+ * We are past Feb. 
29 in a leap year -+ */ -+ day = 1; -+ } else { -+ day = 0; -+ } -+ -+ day += lastYear * 365 + leapsToDate + MonthOffset[tm->tm_mon - 1] + -+ tm->tm_mday; -+ -+ tm->tm_wday = day % 7; -+} -+ -+static void private_to_tm(int tim, struct local_rtc_time *tm) -+{ -+ register int i; -+ register long hms, day; -+ -+ day = tim / SECDAY; -+ hms = tim % SECDAY; -+ -+ /* Hours, minutes, seconds are easy */ -+ tm->tm_hour = hms / 3600; -+ tm->tm_min = (hms % 3600) / 60; -+ tm->tm_sec = (hms % 3600) % 60; -+ -+ /* Number of years in days */ -+ for (i = STARTOFTIME; day >= days_in_year(i); i++) -+ day -= days_in_year(i); -+ tm->tm_year = i; -+ -+ /* Number of months in days left */ -+ if (leapyear(tm->tm_year)) -+ days_in_month(FEBRUARY) = 29; -+ for (i = 1; day >= days_in_month(i); i++) -+ day -= days_in_month(i); -+ days_in_month(FEBRUARY) = 28; -+ tm->tm_mon = i; -+ -+ /* Days are what is left over (+1) from all that. */ -+ tm->tm_mday = day + 1; -+ -+ /* -+ * Determine the day of week -+ */ -+ NovfsGregorianDay(tm); -+} -+ -+char *ctime_r(time_t * clock, char *buf) -+{ -+ struct local_rtc_time tm; -+ static char *DAYOFWEEK[] = -+ { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; -+ static char *MONTHOFYEAR[] = -+ { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", -+"Oct", "Nov", "Dec" }; -+ -+ private_to_tm(*clock, &tm); -+ -+ sprintf(buf, "%s %s %d %d:%02d:%02d %d", DAYOFWEEK[tm.tm_wday], -+ MONTHOFYEAR[tm.tm_mon - 1], tm.tm_mday, tm.tm_hour, tm.tm_min, -+ tm.tm_sec, tm.tm_year); -+ return (buf); -+} -+ -+static void dump(struct dentry *parent, void *pf) -+{ -+ void (*pfunc) (char *Fmt, ...) 
= pf; -+ struct l { -+ struct l *next; -+ struct dentry *dentry; -+ } *l, *n, *start; -+ struct list_head *p; -+ struct dentry *d; -+ char *buf, *path, *sd; -+ char inode_number[16]; -+ -+ buf = (char *)kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ -+ if (NULL == buf) { -+ return; -+ } -+ -+ if (parent) { -+ pfunc("starting 0x%p %.*s\n", parent, parent->d_name.len, -+ parent->d_name.name); -+ if (parent->d_subdirs.next == &parent->d_subdirs) { -+ pfunc("No children...\n"); -+ } else { -+ start = kmalloc(sizeof(*start), GFP_KERNEL); -+ if (start) { -+ start->next = NULL; -+ start->dentry = parent; -+ l = start; -+ while (l) { -+ p = l->dentry->d_subdirs.next; -+ while (p != &l->dentry->d_subdirs) { -+ d = list_entry(p, struct dentry, -+ d_u.d_child); -+ p = p->next; -+ -+ if (d->d_subdirs.next != -+ &d->d_subdirs) { -+ n = kmalloc(sizeof -+ (*n), -+ GFP_KERNEL); -+ if (n) { -+ n->next = -+ l->next; -+ l->next = n; -+ n->dentry = d; -+ } -+ } else { -+ path = novfs_scope_dget_path(d, buf, PATH_LENGTH_BUFFER, 1); -+ if (path) { -+ pfunc -+ ("1-0x%p %s\n" -+ " d_name: %.*s\n" -+ " d_parent: 0x%p\n" -+ " d_count: %d\n" -+ " d_flags: 0x%x\n" -+ " d_subdirs: 0x%p\n" -+ " d_inode: 0x%p\n", -+ d, path, -+ d->d_name. -+ len, -+ d->d_name. -+ name, -+ d-> -+ d_parent, -+ atomic_read -+ (&d-> -+ d_count), -+ d->d_flags, -+ d-> -+ d_subdirs. 
-+ next, -+ d-> -+ d_inode); -+ } -+ } -+ } -+ l = l->next; -+ } -+ l = start; -+ while (l) { -+ d = l->dentry; -+ path = -+ novfs_scope_dget_path(d, buf, -+ PATH_LENGTH_BUFFER, -+ 1); -+ if (path) { -+ sd = " (None)"; -+ if (&d->d_subdirs != -+ d->d_subdirs.next) { -+ sd = ""; -+ } -+ inode_number[0] = '\0'; -+ if (d->d_inode) { -+ sprintf(inode_number, -+ " (%lu)", -+ d->d_inode-> -+ i_ino); -+ } -+ pfunc("0x%p %s\n" -+ " d_parent: 0x%p\n" -+ " d_count: %d\n" -+ " d_flags: 0x%x\n" -+ " d_subdirs: 0x%p%s\n" -+ " d_inode: 0x%p%s\n", -+ d, path, d->d_parent, -+ atomic_read(&d->d_count), -+ d->d_flags, -+ d->d_subdirs.next, sd, -+ d->d_inode, inode_number); -+ } -+ -+ n = l; -+ l = l->next; -+ kfree(n); -+ } -+ } -+ } -+ } -+ -+ kfree(buf); -+ -+} -+ -+static ssize_t common_read(char *buf, size_t len, loff_t * off) -+{ -+ ssize_t retval = 0; -+ size_t count; -+ unsigned long offset = *off; -+ -+ if (0 != (count = DbgPrintBufferOffset - offset)) { -+ if (count > len) { -+ count = len; -+ } -+ -+ count -= copy_to_user(buf, &DbgPrintBuffer[offset], count); -+ -+ if (count == 0) { -+ retval = -EFAULT; -+ } else { -+ *off += (loff_t) count; -+ retval = count; -+ } -+ } -+ return retval; -+ -+} -+ -+static ssize_t novfs_profile_read_inode(struct file * file, char *buf, size_t len, -+ loff_t * off) -+{ -+ ssize_t retval = 0; -+ unsigned long offset = *off; -+ static char save_DbgPrintOn; -+ -+ if (offset == 0) { -+ down(&LocalPrint_lock); -+ save_DbgPrintOn = DbgPrintOn; -+ DbgPrintOn = 0; -+ -+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; -+ novfs_dump_inode(LocalPrint); -+ } -+ -+ -+ retval = common_read(buf, len, off); -+ -+ if (0 == retval) { -+ DbgPrintOn = save_DbgPrintOn; -+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; -+ -+ up(&LocalPrint_lock); -+ } -+ -+ return retval; -+ -+} -+ -+static ssize_t novfs_profile_dentry_read(struct file * file, char *buf, size_t len, -+ loff_t * off) -+{ -+ ssize_t retval = 0; -+ unsigned long offset = *off; -+ static 
char save_DbgPrintOn; -+ -+ if (offset == 0) { -+ down(&LocalPrint_lock); -+ save_DbgPrintOn = DbgPrintOn; -+ DbgPrintOn = 0; -+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; -+ dump(novfs_root, LocalPrint); -+ } -+ -+ retval = common_read(buf, len, off); -+ -+ if (0 == retval) { -+ DbgPrintBufferOffset = DbgPrintBufferReadOffset = 0; -+ DbgPrintOn = save_DbgPrintOn; -+ -+ up(&LocalPrint_lock); -+ } -+ -+ return retval; -+ -+} -+ -+uint64_t get_nanosecond_time() -+{ -+ struct timespec ts; -+ uint64_t retVal; -+ -+ ts = current_kernel_time(); -+ -+ retVal = (uint64_t) NSEC_PER_SEC; -+ retVal *= (uint64_t) ts.tv_sec; -+ retVal += (uint64_t) ts.tv_nsec; -+ -+ return (retVal); -+} -+ -+void novfs_profile_init() -+{ -+ if (novfs_procfs_dir) -+ dbg_dir = novfs_procfs_dir; -+ else -+ dbg_dir = proc_mkdir(MODULE_NAME, NULL); -+ -+ if (dbg_dir) { -+ dbg_file = create_proc_read_entry("Debug", -+ 0600, -+ dbg_dir, -+ proc_read_DbgBuffer, NULL); -+ if (dbg_file) { -+ dbg_file->size = DBGBUFFERSIZE; -+ memcpy(&Dbg_proc_file_operations, dbg_file->proc_fops, -+ sizeof(struct file_operations)); -+ Dbg_proc_file_operations.read = -+ User_proc_read_DbgBuffer; -+ Dbg_proc_file_operations.write = -+ User_proc_write_DbgBuffer; -+ dbg_file->proc_fops = &Dbg_proc_file_operations; -+ } else { -+ remove_proc_entry(MODULE_NAME, NULL); -+ vfree(DbgPrintBuffer); -+ DbgPrintBuffer = NULL; -+ } -+ } -+ -+ if (DbgPrintBuffer) { -+ if (dbg_dir) { -+ inode_file = create_proc_entry("inode", 0600, dbg_dir); -+ if (inode_file) { -+ inode_file->size = 0; -+ memcpy(&inode_proc_file_ops, -+ inode_file->proc_fops, -+ sizeof(struct file_operations)); -+ inode_proc_file_ops.owner = THIS_MODULE; -+ inode_proc_file_ops.read = -+ novfs_profile_read_inode; -+ inode_file->proc_fops = &inode_proc_file_ops; -+ } -+ -+ dentry_file = create_proc_entry("dentry", -+ 0600, dbg_dir); -+ if (dentry_file) { -+ dentry_file->size = 0; -+ memcpy(&dentry_proc_file_ops, -+ dentry_file->proc_fops, -+ sizeof(struct 
file_operations)); -+ dentry_proc_file_ops.owner = THIS_MODULE; -+ dentry_proc_file_ops.read = novfs_profile_dentry_read; -+ dentry_file->proc_fops = &dentry_proc_file_ops; -+ } -+ -+ } else { -+ vfree(DbgPrintBuffer); -+ DbgPrintBuffer = NULL; -+ } -+ } -+} -+ -+void novfs_profile_exit(void) -+{ -+ if (dbg_file) -+ DbgPrint("Calling remove_proc_entry(Debug, NULL)\n"), -+ remove_proc_entry("Debug", dbg_dir); -+ if (inode_file) -+ DbgPrint("Calling remove_proc_entry(inode, NULL)\n"), -+ remove_proc_entry("inode", dbg_dir); -+ if (dentry_file) -+ DbgPrint("Calling remove_proc_entry(dentry, NULL)\n"), -+ remove_proc_entry("dentry", dbg_dir); -+ -+ if (dbg_dir && (dbg_dir != novfs_procfs_dir)) { -+ DbgPrint("Calling remove_proc_entry(%s, NULL)\n", MODULE_NAME); -+ remove_proc_entry(MODULE_NAME, NULL); -+ } -+} -+ -+ ---- /dev/null -+++ b/fs/novfs/scope.c -@@ -0,0 +1,659 @@ -+/* -+ * Novell NCP Redirector for Linux -+ * Author: James Turner -+ * -+ * This file contains functions used to scope users. -+ * -+ * Copyright (C) 2005 Novell, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "vfs.h" -+ -+#define SHUTDOWN_INTERVAL 5 -+#define CLEANUP_INTERVAL 10 -+#define MAX_USERNAME_LENGTH 32 -+ -+ -+static struct list_head Scope_List; -+static struct semaphore Scope_Lock; -+static struct semaphore Scope_Thread_Delay; -+static int Scope_Thread_Terminate = 0; -+static struct timer_list Scope_Timer; -+static unsigned int Scope_Hash_Val = 1; -+ -+static struct novfs_scope_list *Scope_Search4Scope(struct novfs_schandle Id, -+ int Session, int Locked) -+{ -+ struct novfs_scope_list *scope, *rscope = NULL; -+ struct novfs_schandle cur_scope; -+ struct list_head *sl; -+ int offset; -+ -+ DbgPrint("Scope_Search4Scope: 0x%p:%p 0x%x 0x%x\n", Id.hTypeId, Id.hId, -+ Session, Locked); -+ -+ if (Session) -+ offset = offsetof(struct novfs_scope_list, SessionId); -+ else -+ offset = offsetof(struct novfs_scope_list, ScopeId); -+ -+ if (!Locked) { -+ down(&Scope_Lock); -+ } -+ -+ sl = Scope_List.next; -+ DbgPrint("Scope_Search4Scope: 0x%p\n", sl); -+ while (sl != &Scope_List) { -+ scope = list_entry(sl, struct novfs_scope_list, ScopeList); -+ -+ cur_scope = *(struct novfs_schandle *) ((char *)scope + offset); -+ if (SC_EQUAL(Id, cur_scope)) { -+ rscope = scope; -+ break; -+ } -+ -+ sl = sl->next; -+ } -+ -+ if (!Locked) { -+ up(&Scope_Lock); -+ } -+ -+ DbgPrint("Scope_Search4Scope: return 0x%p\n", rscope); -+ return (rscope); -+} -+ -+static struct novfs_scope_list *Scope_Find_Scope(int Create) -+{ -+ struct novfs_scope_list *scope = NULL, *pscope = NULL; -+ struct task_struct *task; -+ struct novfs_schandle scopeId; -+ int addscope = 0; -+ -+ task = current; -+ -+ DbgPrint("Scope_Find_Scope: %d %d %d %d\n", current_uid(), -+ current_euid(), current_suid(), current_fsuid()); -+ -+ //scopeId = task->euid; -+ UID_TO_SCHANDLE(scopeId, current_euid()); -+ -+ scope = Scope_Search4Scope(scopeId, 0, 
0); -+ -+ if (!scope && Create) { -+ scope = kmalloc(sizeof(*pscope), GFP_KERNEL); -+ if (scope) { -+ scope->ScopeId = scopeId; -+ SC_INITIALIZE(scope->SessionId); -+ scope->ScopePid = task->pid; -+ scope->ScopeTask = task; -+ scope->ScopeHash = 0; -+ scope->ScopeUid = current_euid(); -+ scope->ScopeUserName[0] = '\0'; -+ -+ if (!novfs_daemon_create_sessionId(&scope->SessionId)) { -+ DbgPrint("Scope_Find_Scope2: %d %d %d %d\n", -+ current_uid(), current_euid(), -+ current_suid(), current_fsuid()); -+ memset(scope->ScopeUserName, 0, -+ sizeof(scope->ScopeUserName)); -+ scope->ScopeUserNameLength = 0; -+ novfs_daemon_getpwuid(current_euid(), -+ sizeof(scope->ScopeUserName), -+ scope->ScopeUserName); -+ scope->ScopeUserNameLength = -+ strlen(scope->ScopeUserName); -+ addscope = 1; -+ } -+ -+ scope->ScopeHash = Scope_Hash_Val++; -+ DbgPrint("Scope_Find_Scope: Adding 0x%p\n" -+ " ScopeId: 0x%p:%p\n" -+ " SessionId: 0x%p:%p\n" -+ " ScopePid: %d\n" -+ " ScopeTask: 0x%p\n" -+ " ScopeHash: %u\n" -+ " ScopeUid: %u\n" -+ " ScopeUserNameLength: %u\n" -+ " ScopeUserName: %s\n", -+ scope, -+ scope->ScopeId.hTypeId, scope->ScopeId.hId, -+ scope->SessionId.hTypeId, scope->SessionId.hId, -+ scope->ScopePid, -+ scope->ScopeTask, -+ scope->ScopeHash, -+ scope->ScopeUid, -+ scope->ScopeUserNameLength, -+ scope->ScopeUserName); -+ -+ if (SC_PRESENT(scope->SessionId)) { -+ down(&Scope_Lock); -+ pscope = -+ Scope_Search4Scope(scopeId, 0, 1); -+ -+ if (!pscope) { -+ list_add(&scope->ScopeList, -+ &Scope_List); -+ } -+ up(&Scope_Lock); -+ -+ if (pscope) { -+ printk -+ ("<6>Scope_Find_Scope scope not added because it was already there...\n"); -+ novfs_daemon_destroy_sessionId(scope-> -+ SessionId); -+ kfree(scope); -+ scope = pscope; -+ addscope = 0; -+ } -+ } else { -+ kfree(scope); -+ scope = NULL; -+ } -+ } -+ -+ if (addscope) { -+ novfs_add_to_root(scope->ScopeUserName); -+ } -+ } -+ -+ return (scope); -+} -+ -+static int Scope_Validate_Scope(struct novfs_scope_list *Scope) -+{ -+ 
struct novfs_scope_list *s; -+ struct list_head *sl; -+ int retVal = 0; -+ -+ DbgPrint("Scope_Validate_Scope: 0x%p\n", Scope); -+ -+ down(&Scope_Lock); -+ -+ sl = Scope_List.next; -+ while (sl != &Scope_List) { -+ s = list_entry(sl, struct novfs_scope_list, ScopeList); -+ -+ if (s == Scope) { -+ retVal = 1; -+ break; -+ } -+ -+ sl = sl->next; -+ } -+ -+ up(&Scope_Lock); -+ -+ return (retVal); -+} -+ -+uid_t novfs_scope_get_uid(struct novfs_scope_list *scope) -+{ -+ uid_t uid = 0; -+ if (!scope) -+ scope = Scope_Find_Scope(1); -+ -+ if (scope && Scope_Validate_Scope(scope)) -+ uid = scope->ScopeUid; -+ return uid; -+} -+ -+char *novfs_scope_get_username(void) -+{ -+ char *name = NULL; -+ struct novfs_scope_list *Scope; -+ -+ Scope = Scope_Find_Scope(1); -+ -+ if (Scope && Scope_Validate_Scope(Scope)) -+ name = Scope->ScopeUserName; -+ -+ return name; -+} -+ -+struct novfs_schandle novfs_scope_get_sessionId(struct novfs_scope_list -+ *Scope) -+{ -+ struct novfs_schandle sessionId; -+ DbgPrint("Scope_Get_SessionId: 0x%p\n", Scope); -+ SC_INITIALIZE(sessionId); -+ if (!Scope) -+ Scope = Scope_Find_Scope(1); -+ -+ if (Scope && Scope_Validate_Scope(Scope)) -+ sessionId = Scope->SessionId; -+ DbgPrint("Scope_Get_SessionId: return 0x%p:%p\n", sessionId.hTypeId, -+ sessionId.hId); -+ return (sessionId); -+} -+ -+struct novfs_scope_list *novfs_get_scope_from_name(struct qstr * Name) -+{ -+ struct novfs_scope_list *scope, *rscope = NULL; -+ struct list_head *sl; -+ -+ DbgPrint("Scope_Get_ScopefromName: %.*s\n", Name->len, Name->name); -+ -+ down(&Scope_Lock); -+ -+ sl = Scope_List.next; -+ while (sl != &Scope_List) { -+ scope = list_entry(sl, struct novfs_scope_list, ScopeList); -+ -+ if ((Name->len == scope->ScopeUserNameLength) && -+ (0 == strncmp(scope->ScopeUserName, Name->name, Name->len))) -+ { -+ rscope = scope; -+ break; -+ } -+ -+ sl = sl->next; -+ } -+ -+ up(&Scope_Lock); -+ -+ return (rscope); -+} -+ -+int novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * 
Free, -+ uint64_t * TotalEnties, uint64_t * FreeEnties) -+{ -+ struct novfs_scope_list *scope; -+ int retVal = 0; -+ -+ scope = Scope_Find_Scope(1); -+ -+ if (scope) { -+ if (TotalSize) -+ scope->ScopeUSize = *TotalSize; -+ if (Free) -+ scope->ScopeUFree = *Free; -+ if (TotalEnties) -+ scope->ScopeUTEnties = *TotalEnties; -+ if (FreeEnties) -+ scope->ScopeUAEnties = *FreeEnties; -+ } -+ -+ return (retVal); -+} -+ -+int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, -+ uint64_t * TotalEnties, uint64_t * FreeEnties) -+{ -+ struct novfs_scope_list *scope; -+ int retVal = 0; -+ -+ uint64_t td, fd, te, fe; -+ -+ scope = Scope_Find_Scope(1); -+ -+ td = fd = te = fe = 0; -+ if (scope) { -+ -+ retVal = -+ novfs_daemon_get_userspace(scope->SessionId, &td, &fd, &te, &fe); -+ -+ scope->ScopeUSize = td; -+ scope->ScopeUFree = fd; -+ scope->ScopeUTEnties = te; -+ scope->ScopeUAEnties = fe; -+ } -+ -+ if (TotalSize) -+ *TotalSize = td; -+ if (Free) -+ *Free = fd; -+ if (TotalEnties) -+ *TotalEnties = te; -+ if (FreeEnties) -+ *FreeEnties = fe; -+ -+ return (retVal); -+} -+ -+struct novfs_scope_list *novfs_get_scope(struct dentry * Dentry) -+{ -+ struct novfs_scope_list *scope = NULL; -+ char *buf, *path, *cp; -+ struct qstr name; -+ -+ buf = (char *)kmalloc(PATH_LENGTH_BUFFER, GFP_KERNEL); -+ if (buf) { -+ path = novfs_scope_dget_path(Dentry, buf, PATH_LENGTH_BUFFER, 0); -+ if (path) { -+ DbgPrint("Scope_Get_ScopefromPath: %s\n", path); -+ -+ if (*path == '/') -+ path++; -+ -+ cp = path; -+ if (*cp) { -+ while (*cp && (*cp != '/')) -+ cp++; -+ -+ *cp = '\0'; -+ name.hash = 0; -+ name.len = (int)(cp - path); -+ name.name = path; -+ scope = novfs_get_scope_from_name(&name); -+ } -+ } -+ kfree(buf); -+ } -+ -+ return (scope); -+} -+ -+static char *add_to_list(char *Name, char *List, char *EndOfList) -+{ -+ while (*Name && (List < EndOfList)) { -+ *List++ = *Name++; -+ } -+ -+ if (List < EndOfList) { -+ *List++ = '\0'; -+ } -+ return (List); -+} -+ -+char 
*novfs_get_scopeusers(void) -+{ -+ struct novfs_scope_list *scope; -+ struct list_head *sl; -+ int asize = 8 * MAX_USERNAME_LENGTH; -+ char *list, *cp, *ep; -+ -+ DbgPrint("Scope_Get_ScopeUsers\n"); -+ -+ do { /* Copy list until done or out of memory */ -+ list = kmalloc(asize, GFP_KERNEL); -+ -+ DbgPrint("Scope_Get_ScopeUsers list=0x%p\n", list); -+ if (list) { -+ cp = list; -+ ep = cp + asize; -+ -+ /* -+ * Add the tree and server entries -+ */ -+ cp = add_to_list(TREE_DIRECTORY_NAME, cp, ep); -+ cp = add_to_list(SERVER_DIRECTORY_NAME, cp, ep); -+ -+ down(&Scope_Lock); -+ -+ sl = Scope_List.next; -+ while ((sl != &Scope_List) && (cp < ep)) { -+ scope = list_entry(sl, struct novfs_scope_list, ScopeList); -+ -+ DbgPrint("Scope_Get_ScopeUsers found 0x%p %s\n", -+ scope, scope->ScopeUserName); -+ -+ cp = add_to_list(scope->ScopeUserName, cp, ep); -+ -+ sl = sl->next; -+ } -+ -+ up(&Scope_Lock); -+ -+ if (cp < ep) { -+ *cp++ = '\0'; -+ asize = 0; -+ } else { /* Allocation was to small, up size */ -+ -+ asize *= 4; -+ kfree(list); -+ list = NULL; -+ } -+ } else { /* if allocation fails return an empty list */ -+ -+ break; -+ } -+ } while (!list); /* List was to small try again */ -+ -+ return (list); -+} -+ -+void *novfs_scope_lookup(void) -+{ -+ return Scope_Find_Scope(1); -+} -+ -+static void Scope_Timer_Function(unsigned long context) -+{ -+ up(&Scope_Thread_Delay); -+} -+ -+static int Scope_Cleanup_Thread(void *Args) -+{ -+ struct novfs_scope_list *scope, *rscope; -+ struct list_head *sl, cleanup; -+ struct task_struct *task; -+ -+ DbgPrint("Scope_Cleanup_Thread: %d\n", current->pid); -+ -+ /* -+ * Setup and start que timer -+ */ -+ init_timer(&Scope_Timer); -+ -+ while (0 == Scope_Thread_Terminate) { -+ DbgPrint("Scope_Cleanup_Thread: looping\n"); -+ if (Scope_Thread_Terminate) { -+ break; -+ } -+ -+ /* -+ * Check scope list for any terminated processes -+ */ -+ down(&Scope_Lock); -+ -+ sl = Scope_List.next; -+ INIT_LIST_HEAD(&cleanup); -+ -+ while (sl != 
&Scope_List) { -+ scope = list_entry(sl, struct novfs_scope_list, ScopeList); -+ sl = sl->next; -+ -+ rscope = NULL; -+ rcu_read_lock(); -+ for_each_process(task) { -+ if ((task->cred->uid == scope->ScopeUid) -+ || (task->cred->euid == scope->ScopeUid)) { -+ rscope = scope; -+ break; -+ } -+ } -+ rcu_read_unlock(); -+ -+ if (!rscope) { -+ list_move(&scope->ScopeList, &cleanup); -+ DbgPrint("Scope_Cleanup_Thread: Scope=0x%p\n", -+ rscope); -+ } -+ } -+ -+ up(&Scope_Lock); -+ -+ sl = cleanup.next; -+ while (sl != &cleanup) { -+ scope = list_entry(sl, struct novfs_scope_list, ScopeList); -+ sl = sl->next; -+ -+ DbgPrint("Scope_Cleanup_Thread: Removing 0x%p\n" -+ " ScopeId: 0x%p:%p\n" -+ " SessionId: 0x%p:%p\n" -+ " ScopePid: %d\n" -+ " ScopeTask: 0x%p\n" -+ " ScopeHash: %u\n" -+ " ScopeUid: %u\n" -+ " ScopeUserName: %s\n", -+ scope, -+ scope->ScopeId, -+ scope->SessionId, -+ scope->ScopePid, -+ scope->ScopeTask, -+ scope->ScopeHash, -+ scope->ScopeUid, scope->ScopeUserName); -+ if (!Scope_Search4Scope(scope->SessionId, 1, 0)) { -+ novfs_remove_from_root(scope->ScopeUserName); -+ novfs_daemon_destroy_sessionId(scope->SessionId); -+ } -+ kfree(scope); -+ } -+ -+ Scope_Timer.expires = jiffies + HZ * CLEANUP_INTERVAL; -+ Scope_Timer.data = (unsigned long)0; -+ Scope_Timer.function = Scope_Timer_Function; -+ add_timer(&Scope_Timer); -+ DbgPrint("Scope_Cleanup_Thread: sleeping\n"); -+ -+ if (down_interruptible(&Scope_Thread_Delay)) { -+ break; -+ } -+ del_timer(&Scope_Timer); -+ } -+ Scope_Thread_Terminate = 0; -+ -+ printk(KERN_INFO "Scope_Cleanup_Thread: Exit\n"); -+ DbgPrint("Scope_Cleanup_Thread: Exit\n"); -+ return (0); -+} -+ -+void novfs_scope_cleanup(void) -+{ -+ struct novfs_scope_list *scope; -+ struct list_head *sl; -+ -+ DbgPrint("Scope_Cleanup:\n"); -+ -+ /* -+ * Check scope list for any terminated processes -+ */ -+ down(&Scope_Lock); -+ -+ sl = Scope_List.next; -+ -+ while (sl != &Scope_List) { -+ scope = list_entry(sl, struct novfs_scope_list, ScopeList); -+ 
sl = sl->next; -+ -+ list_del(&scope->ScopeList); -+ -+ DbgPrint("Scope_Cleanup: Removing 0x%p\n" -+ " ScopeId: 0x%p:%p\n" -+ " SessionId: 0x%p:%p\n" -+ " ScopePid: %d\n" -+ " ScopeTask: 0x%p\n" -+ " ScopeHash: %u\n" -+ " ScopeUid: %u\n" -+ " ScopeUserName: %s\n", -+ scope, -+ scope->ScopeId, -+ scope->SessionId, -+ scope->ScopePid, -+ scope->ScopeTask, -+ scope->ScopeHash, -+ scope->ScopeUid, scope->ScopeUserName); -+ if (!Scope_Search4Scope(scope->SessionId, 1, 1)) { -+ novfs_remove_from_root(scope->ScopeUserName); -+ novfs_daemon_destroy_sessionId(scope->SessionId); -+ } -+ kfree(scope); -+ } -+ -+ up(&Scope_Lock); -+ -+} -+ -+/* -+ * Walks the dentry chain building a path. -+ */ -+char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, unsigned int Buflen, -+ int Flags) -+{ -+ char *retval = &Buf[Buflen]; -+ struct dentry *p = Dentry; -+ int len; -+ -+ *(--retval) = '\0'; -+ Buflen--; -+ -+ do { -+ if (Buflen > p->d_name.len) { -+ retval -= p->d_name.len; -+ Buflen -= p->d_name.len; -+ memcpy(retval, p->d_name.name, p->d_name.len); -+ *(--retval) = '/'; -+ Buflen--; -+ p = p->d_parent; -+ } else { -+ retval = NULL; -+ break; -+ } -+ } while (!IS_ROOT(p)); -+ -+ if (IS_ROOT(Dentry)) { -+ retval++; -+ } -+ -+ if (Flags) { -+ len = strlen(p->d_sb->s_type->name); -+ if (Buflen - len > 0) { -+ retval -= len; -+ Buflen -= len; -+ memcpy(retval, p->d_sb->s_type->name, len); -+ *(--retval) = '/'; -+ Buflen--; -+ } -+ } -+ -+ return (retval); -+} -+ -+void novfs_scope_init(void) -+{ -+ INIT_LIST_HEAD(&Scope_List); -+ init_MUTEX(&Scope_Lock); -+ init_MUTEX_LOCKED(&Scope_Thread_Delay); -+ kthread_run(Scope_Cleanup_Thread, NULL, "novfs_ST"); -+} -+ -+void novfs_scope_exit(void) -+{ -+ unsigned long expires = jiffies + HZ * SHUTDOWN_INTERVAL; -+ -+ printk(KERN_INFO "Scope_Uninit: Start\n"); -+ -+ Scope_Thread_Terminate = 1; -+ -+ up(&Scope_Thread_Delay); -+ -+ mb(); -+ while (Scope_Thread_Terminate && (jiffies < expires)) -+ yield(); -+ /* down(&Scope_Thread_Delay); 
*/ -+ printk(KERN_INFO "Scope_Uninit: Exit\n"); -+ -+} -+ -+ ---- /dev/null -+++ b/fs/novfs/vfs.h -@@ -0,0 +1,454 @@ -+/* -+ * Novell NCP Redirector for Linux -+ * Author: James Turner -+ * -+ * Include file for novfs. -+ * -+ * Copyright (C) 2005 Novell, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ */ -+#ifndef __NOVFS_H -+#define __NOVFS_H -+ -+#ifndef __STDC_VERSION__ -+#define __STDC_VERSION__ 0L -+#endif -+ -+#include -+#include -+ -+#include "nwcapi.h" -+ -+ -+#ifndef XTIER_SCHANDLE -+struct novfs_schandle { -+ void * hTypeId; -+ void * hId; -+ -+}; -+ -+#include "commands.h" -+ -+#define SC_PRESENT(X) ((X.hTypeId != NULL) || (X.hId != NULL)) ? 1 : 0 -+#define SC_EQUAL(X, Y) ((X.hTypeId == Y.hTypeId) && (X.hId == Y.hId)) ? 1 : 0 -+#define SC_INITIALIZE(X) {X.hTypeId = X.hId = NULL;} -+ -+#define UID_TO_SCHANDLE(hSC, uid) \ -+ { \ -+ hSC.hTypeId = NULL; \ -+ hSC.hId = (void *)(unsigned long)(uid); \ -+ } -+ -+#define XTIER_SCHANDLE -+#endif -+ -+ -+/*===[ Manifest constants ]===============================================*/ -+#define NOVFS_MAGIC 0x4e574653 -+#define MODULE_NAME "novfs" -+ -+#define TREE_DIRECTORY_NAME ".Trees" -+#define SERVER_DIRECTORY_NAME ".Servers" -+ -+#define PATH_LENGTH_BUFFER PATH_MAX -+#define NW_MAX_PATH_LENGTH 255 -+ -+#define XA_BUFFER (8 * 1024) -+ -+#define IOC_LOGIN 0x4a540000 -+#define IOC_LOGOUT 0x4a540001 -+#define IOC_XPLAT 0x4a540002 -+#define IOC_SESSION 0x4a540003 -+#define IOC_DEBUGPRINT 0x4a540004 -+ -+/* -+ * NetWare file attributes -+ */ -+ -+#define NW_ATTRIBUTE_NORMAL 0x00 -+#define NW_ATTRIBUTE_READ_ONLY 0x01 -+#define NW_ATTRIBUTE_HIDDEN 0x02 -+#define NW_ATTRIBUTE_SYSTEM 0x04 -+#define NW_ATTRIBUTE_EXECUTE_ONLY 0x08 -+#define NW_ATTRIBUTE_DIRECTORY 0x10 -+#define NW_ATTRIBUTE_ARCHIVE 0x20 
-+#define NW_ATTRIBUTE_EXECUTE 0x40 -+#define NW_ATTRIBUTE_SHAREABLE 0x80 -+ -+/* -+ * Define READ/WRITE flag for DATA_LIST -+ */ -+#define DLREAD 0 -+#define DLWRITE 1 -+ -+/* -+ * Define list type -+ */ -+#define USER_LIST 1 -+#define SERVER_LIST 2 -+#define VOLUME_LIST 3 -+ -+/* -+ * Define flags used in for inodes -+ */ -+#define USER_INODE 1 -+#define UPDATE_INODE 2 -+ -+/* -+ * Define flags for directory cache flags -+ */ -+#define ENTRY_VALID 0x00000001 -+ -+#ifdef INTENT_MAGIC -+#define NDOPENFLAGS intent.it_flags -+#else -+#define NDOPENFLAGS intent.open.flags -+#endif -+ -+/* -+ * daemon_command_t flags values -+ */ -+#define INTERRUPTIBLE 1 -+ -+#ifndef NOVFS_VFS_MAJOR -+#define NOVFS_VFS_MAJOR 0 -+#endif -+ -+#ifndef NOVFS_VFS_MINOR -+#define NOVFS_VFS_MINOR 0 -+#endif -+ -+#ifndef NOVFS_VFS_SUB -+#define NOVFS_VFS_SUB 0 -+#endif -+ -+#ifndef NOVFS_VFS_RELEASE -+#define NOVFS_VFS_RELEASE 0 -+#endif -+ -+#define VALUE_TO_STR( value ) #value -+#define DEFINE_TO_STR(value) VALUE_TO_STR(value) -+ -+#define NOVFS_VERSION_STRING \ -+ DEFINE_TO_STR(NOVFS_VFS_MAJOR)"." \ -+ DEFINE_TO_STR(NOVFS_VFS_MINOR)"." 
\ -+ DEFINE_TO_STR(NOVFS_VFS_SUB)"-" \ -+ DEFINE_TO_STR(NOVFS_VFS_RELEASE) \ -+ "\0" -+ -+/*===[ Type definitions ]=================================================*/ -+struct novfs_entry_info { -+ int type; -+ umode_t mode; -+ uid_t uid; -+ gid_t gid; -+ loff_t size; -+ struct timespec atime; -+ struct timespec mtime; -+ struct timespec ctime; -+ int namelength; -+ unsigned char name[1]; -+}; -+ -+struct novfs_string { -+ int length; -+ unsigned char *data; -+}; -+ -+struct novfs_login { -+ struct novfs_string Server; -+ struct novfs_string UserName; -+ struct novfs_string Password; -+}; -+ -+struct novfs_logout { -+ struct novfs_string Server; -+}; -+ -+struct novfs_dir_cache { -+ struct list_head list; -+ int flags; -+ u64 jiffies; -+ ino_t ino; -+ loff_t size; -+ umode_t mode; -+ struct timespec atime; -+ struct timespec mtime; -+ struct timespec ctime; -+ unsigned long hash; -+ int nameLen; -+ char name[1]; -+}; -+ -+struct novfs_data_list { -+ void *page; -+ void *offset; -+ int len; -+ int rwflag; -+}; -+ -+ -+extern char *ctime_r(time_t * clock, char *buf); -+ -+/* -+ * Converts a HANDLE to a u32 type. -+ */ -+static inline u32 HandletoUint32(void * h) -+{ -+ return (u32) ((unsigned long) h); -+} -+ -+/* -+ * Converts a u32 to a HANDLE type. 
-+ */ -+static inline void *Uint32toHandle(u32 ui32) -+{ -+ return ((void *) (unsigned long) ui32); -+} -+ -+/* Global variables */ -+ -+extern struct dentry *novfs_root; -+extern struct proc_dir_entry *novfs_procfs_dir; -+extern unsigned long novfs_update_timeout; -+extern int novfs_page_cache; -+extern char *novfs_current_mnt; -+extern int novfs_max_iosize; -+ -+ -+/* Global functions */ -+extern int novfs_remove_from_root(char *); -+extern void novfs_dump_inode(void *pf); -+ -+extern void novfs_dump(int size, void *dumpptr); -+ -+extern int Queue_Daemon_Command(void *request, unsigned long reqlen, void *data, -+ int dlen, void **reply, unsigned long * replen, -+ int interruptible); -+extern int novfs_do_login(struct ncl_string * Server, struct ncl_string* Username, struct ncl_string * Password, void **lgnId, struct novfs_schandle *Session); -+ -+extern int novfs_proc_init(void); -+extern void novfs_proc_exit(void); -+ -+/* -+ * daemon.c functions -+ */ -+extern void novfs_daemon_queue_init(void); -+extern void novfs_daemon_queue_exit(void); -+extern int novfs_daemon_logout(struct qstr *Server, struct novfs_schandle *Session); -+extern int novfs_daemon_set_mnt_point(char *Path); -+extern int novfs_daemon_create_sessionId(struct novfs_schandle * SessionId); -+extern int novfs_daemon_destroy_sessionId(struct novfs_schandle SessionId); -+extern int novfs_daemon_getpwuid(uid_t uid, int unamelen, char *uname); -+extern int novfs_daemon_get_userspace(struct novfs_schandle SessionId, -+ uint64_t * TotalSize, uint64_t * TotalFree, -+ uint64_t * TotalDirectoryEnties, -+ uint64_t * FreeDirectoryEnties); -+extern int novfs_daemon_debug_cmd_send(char *Command); -+extern ssize_t novfs_daemon_recv_reply(struct file *file, -+ const char *buf, size_t nbytes, loff_t * ppos); -+extern ssize_t novfs_daemon_cmd_send(struct file *file, char *buf, -+ size_t len, loff_t * off); -+extern int novfs_daemon_ioctl(struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long 
arg); -+extern int novfs_daemon_lib_close(struct inode *inode, struct file *file); -+extern int novfs_daemon_lib_ioctl(struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg); -+extern int novfs_daemon_lib_open(struct inode *inode, struct file *file); -+extern ssize_t novfs_daemon_lib_read(struct file *file, char *buf, -+ size_t len, loff_t * off); -+extern ssize_t novfs_daemon_lib_write(struct file *file, const char *buf, -+ size_t len, loff_t * off); -+extern loff_t novfs_daemon_lib_llseek(struct file *file, loff_t offset, -+ int origin); -+extern int novfs_daemon_open_control(struct inode *Inode, struct file *File); -+extern int novfs_daemon_close_control(struct inode *Inode, struct file *File); -+extern int novfs_daemon_getversion(char *Buf, int Length); -+ -+ -+/* -+ * file.c functions -+ */ -+extern int novfs_verify_file(struct qstr *Path, struct novfs_schandle SessionId); -+extern int novfs_get_alltrees(struct dentry *parent); -+extern int novfs_get_servers(unsigned char **ServerList, -+ struct novfs_schandle SessionId); -+extern int novfs_get_vols(struct qstr *Server, -+ unsigned char **VolumeList, struct novfs_schandle SessionId); -+extern int novfs_get_file_info(unsigned char *Path, -+ struct novfs_entry_info *Info, struct novfs_schandle SessionId); -+extern int novfs_getx_file_info(char *Path, const char *Name, -+ char *buffer, ssize_t buffer_size, ssize_t *dataLen, -+ struct novfs_schandle SessionId); -+extern int novfs_listx_file_info(char *Path, char *buffer, -+ ssize_t buffer_size, ssize_t *dataLen, -+ struct novfs_schandle SessionId); -+extern int novfs_setx_file_info(char *Path, const char *Name, const void *Value, -+ unsigned long valueLen, -+ unsigned long *bytesWritten, int flags, -+ struct novfs_schandle SessionId); -+ -+extern int novfs_get_dir_listex(unsigned char *Path, void **EnumHandle, -+ int *Count, struct novfs_entry_info **Info, -+ struct novfs_schandle SessionId); -+extern int novfs_open_file(unsigned char 
*Path, int Flags, -+ struct novfs_entry_info * Info, void **Handle, -+ struct novfs_schandle SessionId); -+extern int novfs_create(unsigned char *Path, int DirectoryFlag, -+ struct novfs_schandle SessionId); -+extern int novfs_close_file(void * Handle, struct novfs_schandle SessionId); -+extern int novfs_read_file(void * Handle, unsigned char *Buffer, -+ size_t * Bytes, loff_t * Offset, -+ struct novfs_schandle SessionId); -+extern int novfs_read_pages(void * Handle, struct novfs_data_list *DList, -+ int DList_Cnt, size_t * Bytes, loff_t * Offset, -+ struct novfs_schandle SessionId); -+extern int novfs_write_file(void * Handle, unsigned char *Buffer, -+ size_t * Bytes, loff_t * Offset, -+ struct novfs_schandle SessionId); -+extern int novfs_write_page(void * Handle, struct page *Page, -+ struct novfs_schandle SessionId); -+extern int novfs_write_pages(void * Handle, struct novfs_data_list *DList, -+ int DList_Cnt, size_t Bytes, loff_t Offset, -+ struct novfs_schandle SessionId); -+extern int novfs_delete(unsigned char *Path, int DirectoryFlag, -+ struct novfs_schandle SessionId); -+extern int novfs_trunc(unsigned char *Path, int PathLen, -+ struct novfs_schandle SessionId); -+extern int novfs_trunc_ex(void * Handle, loff_t Offset, -+ struct novfs_schandle SessionId); -+extern int novfs_rename_file(int DirectoryFlag, unsigned char *OldName, -+ int OldLen, unsigned char *NewName, int NewLen, -+ struct novfs_schandle SessionId); -+extern int novfs_set_attr(unsigned char *Path, struct iattr *Attr, -+ struct novfs_schandle SessionId); -+extern int novfs_get_file_cache_flag(unsigned char * Path, -+ struct novfs_schandle SessionId); -+extern int novfs_set_file_lock(struct novfs_schandle SessionId, void * fhandle, -+ unsigned char fl_type, loff_t fl_start, -+ loff_t len); -+ -+extern struct inode *novfs_get_inode(struct super_block *sb, int mode, -+ int dev, uid_t uid, ino_t ino, struct qstr *name); -+extern int novfs_read_stream(void * ConnHandle, unsigned char * Handle, 
-+ unsigned char * Buffer, size_t * Bytes, loff_t * Offset, -+ int User, struct novfs_schandle SessionId); -+extern int novfs_write_stream(void * ConnHandle, unsigned char * Handle, -+ unsigned char * Buffer, size_t * Bytes, loff_t * Offset, -+ struct novfs_schandle SessionId); -+extern int novfs_close_stream(void * ConnHandle, unsigned char * Handle, -+ struct novfs_schandle SessionId); -+ -+extern int novfs_add_to_root(char *); -+extern int novfs_end_directory_enumerate(void *EnumHandle, -+ struct novfs_schandle SessionId); -+ -+/* -+ * scope.c functions -+ */ -+extern void novfs_scope_init(void); -+extern void novfs_scope_exit(void); -+extern void *novfs_scope_lookup(void); -+extern uid_t novfs_scope_get_uid(struct novfs_scope_list *); -+extern struct novfs_schandle novfs_scope_get_sessionId(struct -+ novfs_scope_list *); -+extern char *novfs_get_scopeusers(void); -+extern int novfs_scope_set_userspace(uint64_t * TotalSize, uint64_t * Free, -+ uint64_t * TotalEnties, uint64_t * FreeEnties); -+extern int novfs_scope_get_userspace(uint64_t * TotalSize, uint64_t * Free, -+ uint64_t * TotalEnties, uint64_t * FreeEnties); -+extern char *novfs_scope_dget_path(struct dentry *Dentry, char *Buf, -+ unsigned int Buflen, int Flags); -+extern void novfs_scope_cleanup(void); -+extern struct novfs_scope_list *novfs_get_scope_from_name(struct qstr *); -+extern struct novfs_scope_list *novfs_get_scope(struct dentry *); -+extern char *novfs_scope_get_username(void); -+ -+/* -+ * profile.c functions -+ */ -+extern u64 get_nanosecond_time(void); -+extern int ___DbgPrint(const char *site, const char *Fmt, ...); -+#define DbgPrint(fmt, args...) ___DbgPrint(__func__, ": " fmt "\n", ##args) -+#define __DbgPrint(fmt, args...) 
___DbgPrint("", fmt, ##args) -+ -+extern void novfs_profile_init(void); -+extern void novfs_profile_exit(void); -+ -+/* -+ * nwcapi.c functions -+ */ -+extern int novfs_auth_conn(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_conn_close(struct novfs_xplat *pdata, -+ void **Handle, struct novfs_schandle Session); -+extern int novfs_get_conn_info(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_set_conn_info(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_get_daemon_ver(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_get_id_info(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_license_conn(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_login_id(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_logout_id(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_open_conn_by_addr(struct novfs_xplat *pdata, -+ void **Handle, struct novfs_schandle Session); -+extern int novfs_open_conn_by_name(struct novfs_xplat *pdata, -+ void **Handle, struct novfs_schandle Session); -+extern int novfs_open_conn_by_ref(struct novfs_xplat *pdata, -+ void **Handle, struct novfs_schandle Session); -+extern int novfs_query_feature(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_raw_send(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_scan_conn_info(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_sys_conn_close(struct novfs_xplat *pdata, -+ unsigned long *Handle, struct novfs_schandle Session); -+extern int novfs_unauthenticate(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_unlicense_conn(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_change_auth_key(struct novfs_xplat *pdata, -+ 
struct novfs_schandle Session); -+extern int novfs_enum_ids(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_get_default_ctx(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_get_preferred_DS_tree(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_get_tree_monitored_conn(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_set_default_ctx(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_set_preferred_DS_tree(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_set_pri_conn(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_get_pri_conn(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_set_map_drive(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_unmap_drive(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_enum_drives(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_get_bcast_msg(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_set_key_value(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+extern int novfs_verify_key_value(struct novfs_xplat *pdata, -+ struct novfs_schandle Session); -+ -+ -+#endif /* __NOVFS_H */ -+ diff --git a/patches.suse/novfs-fix-debug-message.patch b/patches.suse/novfs-fix-debug-message.patch deleted file mode 100644 index 33f46cf..0000000 --- a/patches.suse/novfs-fix-debug-message.patch +++ /dev/null @@ -1,22 +0,0 @@ -From: Sankar P -Subject: novfs: fix debug message -Patch-mainline: no - -Signed-off-by: Sankar P -Acked-by: Jiri Benc - ---- - fs/novfs/daemon.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/fs/novfs/daemon.c -+++ b/fs/novfs/daemon.c -@@ -1590,7 +1590,7 @@ int novfs_daemon_lib_ioctl(struct inode - break; - - case NWC_SET_CONN_INFO: 
-- DbgIocCall("NwGetConnInfo"); -+ DbgIocCall("NwSetConnInfo"); - retCode = - novfs_set_conn_info(&data, dh->session); - break; diff --git a/patches.suse/novfs-fix-ioctl-usage b/patches.suse/novfs-fix-ioctl-usage deleted file mode 100644 index dffcb01..0000000 --- a/patches.suse/novfs-fix-ioctl-usage +++ /dev/null @@ -1,202 +0,0 @@ -From: Jeff Mahoney -Subject: novfs: Fix ioctl usage -Patch-mainline: Whenever novfs is merged - - Upstream commit b19dd42faf413b4705d4adb38521e82d73fa4249 removed support - for locked ioctls. This patch pushes the BKL into the novfs ioctl calls, - switches to ->unlocked_ioctl, and removes ioctls that were empty. - -Signed-off-by: Jeff Mahoney ---- - fs/novfs/daemon.c | 22 ++++++++++++++++++---- - fs/novfs/inode.c | 23 +---------------------- - fs/novfs/proc.c | 4 ++-- - fs/novfs/vfs.h | 4 ++-- - 4 files changed, 23 insertions(+), 30 deletions(-) - ---- a/fs/novfs/daemon.c -+++ b/fs/novfs/daemon.c -@@ -1022,11 +1022,14 @@ int novfs_daemon_debug_cmd_send(char *Co - return (retCode); - } - --int novfs_daemon_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) -+long novfs_daemon_ioctl(struct file *file, unsigned int cmd, unsigned long arg) - { - int retCode = -ENOSYS; - unsigned long cpylen; - struct novfs_schandle session_id; -+ -+ lock_kernel(); /* needed? 
*/ -+ - session_id = novfs_scope_get_sessionId(NULL); - - switch (cmd) { -@@ -1046,8 +1049,10 @@ int novfs_daemon_ioctl(struct inode *ino - char *buf; - io.length = 0; - cpylen = copy_from_user(&io, (char *)arg, sizeof(io)); -- if (io.length <= 0 || io.length > 1024) -+ if (io.length <= 0 || io.length > 1024) { -+ unlock_kernel(); - return -EINVAL; -+ } - if (io.length) { - buf = kmalloc(io.length + 1, GFP_KERNEL); - if (buf) { -@@ -1081,6 +1086,9 @@ int novfs_daemon_ioctl(struct inode *ino - } - - } -+ -+ unlock_kernel(); -+ - return (retCode); - } - -@@ -1337,13 +1345,15 @@ loff_t novfs_daemon_lib_llseek(struct fi - - #define DbgIocCall(str) __DbgPrint("[VFS XPLAT] Call " str "\n") - --int novfs_daemon_lib_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) -+long novfs_daemon_lib_ioctl(struct file *file, unsigned int cmd, unsigned long arg) - { - int retCode = -ENOSYS; - struct daemon_handle *dh; - void *handle = NULL; - unsigned long cpylen; - -+ lock_kernel(); /* needed? 
*/ -+ - dh = file->private_data; - - DbgPrint("file=0x%p 0x%x 0x%p dh=0x%p", file, cmd, arg, dh); -@@ -1368,8 +1378,10 @@ int novfs_daemon_lib_ioctl(struct inode - char *buf; - io.length = 0; - cpylen = copy_from_user(&io, (void *)arg, sizeof(io)); -- if (io.length <= 0 || io.length > 1024) -+ if (io.length <= 0 || io.length > 1024) { -+ unlock_kernel(); - return -EINVAL; -+ } - if (io.length) { - buf = kmalloc(io.length + 1, GFP_KERNEL); - if (buf) { -@@ -1596,6 +1608,8 @@ int novfs_daemon_lib_ioctl(struct inode - } - } - -+ unlock_kernel(); -+ - return (retCode); - } - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -101,7 +101,6 @@ ssize_t novfs_a_direct_IO(int rw, struct - ssize_t novfs_f_read(struct file *, char *, size_t, loff_t *); - ssize_t novfs_f_write(struct file *, const char *, size_t, loff_t *); - int novfs_f_readdir(struct file *, void *, filldir_t); --int novfs_f_ioctl(struct inode *, struct file *, unsigned int, unsigned long); - int novfs_f_mmap(struct file *file, struct vm_area_struct *vma); - int novfs_f_open(struct inode *, struct file *); - int novfs_f_flush(struct file *, fl_owner_t); -@@ -151,8 +150,6 @@ ssize_t novfs_control_Read(struct file * - - ssize_t novfs_control_write(struct file *file, const char *buf, size_t nbytes, loff_t * ppos); - --int novfs_control_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); -- - int __init init_novfs(void); - void __exit exit_novfs(void); - -@@ -197,7 +194,6 @@ static struct file_operations novfs_file - .read = novfs_f_read, - .write = novfs_f_write, - .readdir = novfs_f_readdir, -- .ioctl = novfs_f_ioctl, - .mmap = novfs_f_mmap, - .open = novfs_f_open, - .flush = novfs_f_flush, -@@ -254,7 +250,7 @@ static struct inode_operations novfs_fil - - static struct super_operations novfs_ops = { - .statfs = novfs_statfs, -- .clear_inode = novfs_clear_inode, -+ .evict_inode = novfs_clear_inode, - .drop_inode = generic_delete_inode, - .show_options = novfs_show_options, - -@@ 
-264,7 +260,6 @@ static struct super_operations novfs_ops - static struct file_operations novfs_Control_operations = { - .read = novfs_Control_read, - .write = novfs_Control_write, -- .ioctl = novfs_Control_ioctl, - }; - */ - -@@ -1277,13 +1272,6 @@ int novfs_f_readdir(struct file *file, v - return -EISDIR; - } - --int novfs_f_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) --{ -- DbgPrint("file=0x%p cmd=0x%x arg=0x%p", file, cmd, arg); -- -- return -ENOSYS; --} -- - int novfs_f_mmap(struct file *file, struct vm_area_struct *vma) - { - int retCode = -EINVAL; -@@ -3471,15 +3459,6 @@ ssize_t novfs_Control_write(struct file - - return (retval); - } -- --int novfs_Control_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) --{ -- int retval = 0; -- -- DbgPrint("kernel_locked 0x%x", kernel_locked()); -- -- return (retval); --} - - static struct file_system_type novfs_fs_type = { - .name = "novfs", ---- a/fs/novfs/proc.c -+++ b/fs/novfs/proc.c -@@ -77,7 +77,7 @@ int novfs_proc_init(void) - novfs_daemon_proc_fops.release = novfs_daemon_close_control; - novfs_daemon_proc_fops.read = novfs_daemon_cmd_send; - novfs_daemon_proc_fops.write = novfs_daemon_recv_reply; -- novfs_daemon_proc_fops.ioctl = novfs_daemon_ioctl; -+ novfs_daemon_proc_fops.unlocked_ioctl = novfs_daemon_ioctl; - - Novfs_Control->proc_fops = &novfs_daemon_proc_fops; - } else { -@@ -99,7 +99,7 @@ int novfs_proc_init(void) - novfs_lib_proc_fops.read = novfs_daemon_lib_read; - novfs_lib_proc_fops.write = novfs_daemon_lib_write; - novfs_lib_proc_fops.llseek = novfs_daemon_lib_llseek; -- novfs_lib_proc_fops.ioctl = novfs_daemon_lib_ioctl; -+ novfs_lib_proc_fops.unlocked_ioctl = novfs_daemon_lib_ioctl; - Novfs_Library->proc_fops = &novfs_lib_proc_fops; - } else { - remove_proc_entry("Control", novfs_procfs_dir); ---- a/fs/novfs/vfs.h -+++ b/fs/novfs/vfs.h -@@ -246,9 +246,9 @@ extern int novfs_daemon_get_userspace(st - extern int 
novfs_daemon_debug_cmd_send(char *Command); - extern ssize_t novfs_daemon_recv_reply(struct file *file, const char *buf, size_t nbytes, loff_t * ppos); - extern ssize_t novfs_daemon_cmd_send(struct file *file, char *buf, size_t len, loff_t * off); --extern int novfs_daemon_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); -+extern long novfs_daemon_ioctl(struct file *file, unsigned int cmd, unsigned long arg); - extern int novfs_daemon_lib_close(struct inode *inode, struct file *file); --extern int novfs_daemon_lib_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); -+extern long novfs_daemon_lib_ioctl(struct file *file, unsigned int cmd, unsigned long arg); - extern int novfs_daemon_lib_open(struct inode *inode, struct file *file); - extern ssize_t novfs_daemon_lib_read(struct file *file, char *buf, size_t len, loff_t * off); - extern ssize_t novfs_daemon_lib_write(struct file *file, const char *buf, size_t len, loff_t * off); diff --git a/patches.suse/novfs-use-evict_inode b/patches.suse/novfs-use-evict_inode deleted file mode 100644 index d64d723..0000000 --- a/patches.suse/novfs-use-evict_inode +++ /dev/null @@ -1,47 +0,0 @@ -From: Jeff Mahoney -Subject: novfs: use evict_inode -Patch-mainline: When novfs is merged - - Upstream commit b57922d97fd6f79b6dbe6db0c4fd30d219fa08c1 removed - support for ->clear_inode in favor of ->evict_inode. - - This patch implements support for ->evict_inode. 
- -Signed-off-by: Jeff Mahoney ---- - fs/novfs/inode.c | 9 ++++++--- - 1 file changed, 6 insertions(+), 3 deletions(-) - ---- a/fs/novfs/inode.c -+++ b/fs/novfs/inode.c -@@ -138,7 +138,7 @@ void update_inode(struct inode *Inode, s - void novfs_read_inode(struct inode *inode); - void novfs_write_inode(struct inode *inode); - int novfs_notify_change(struct dentry *dentry, struct iattr *attr); --void novfs_clear_inode(struct inode *inode); -+void novfs_evict_inode(struct inode *inode); - int novfs_show_options(struct seq_file *s, struct vfsmount *m); - - int novfs_statfs(struct dentry *de, struct kstatfs *buf); -@@ -250,7 +250,7 @@ static struct inode_operations novfs_fil - - static struct super_operations novfs_ops = { - .statfs = novfs_statfs, -- .evict_inode = novfs_clear_inode, -+ .evict_inode = novfs_evict_inode, - .drop_inode = generic_delete_inode, - .show_options = novfs_show_options, - -@@ -3143,8 +3143,11 @@ int novfs_notify_change(struct dentry *d - return (0); - } - --void novfs_clear_inode(struct inode *inode) -+void novfs_evict_inode(struct inode *inode) - { -+ truncate_inode_pages(&inode->i_data, 0); -+ end_writeback(inode); -+ - InodeCount--; - - if (inode->i_private) { diff --git a/patches.suse/osync-error b/patches.suse/osync-error deleted file mode 100644 index 6863eef..0000000 --- a/patches.suse/osync-error +++ /dev/null @@ -1,49 +0,0 @@ -From: mason@suse.de -Subject: make sure O_SYNC writes properly return -EIO -References: bnc#58622 - -Make sure to honor the error status of synchronous writeback during -O_SYNC writes - -Acked-by: Jeff Mahoney - ---- - mm/filemap.c | 17 +++++++++++++++-- - 1 file changed, 15 insertions(+), 2 deletions(-) - ---- a/mm/filemap.c -+++ b/mm/filemap.c -@@ -2325,7 +2325,7 @@ generic_file_buffered_write(struct kiocb - - if (likely(status >= 0)) { - written += status; -- *ppos = pos + status; -+ pos += status; - - /* - * For now, when the user asks for O_SYNC, we'll actually give -@@ -2343,10 +2343,23 @@ 
generic_file_buffered_write(struct kiocb - * to buffered writes (block instantiation inside i_size). So we sync - * the file data here, to try to honour O_DIRECT expectations. - */ -- if (unlikely(file->f_flags & O_DIRECT) && written) -+ if (unlikely(file->f_flags & O_DIRECT) && status >= 0 && written) - status = filemap_write_and_wait_range(mapping, - pos, pos + written - 1); - -+ /* -+ * We must let know userspace if something hasn't been written -+ * correctly. If we got an I/O error it means we got an hardware -+ * failure, anything can be happening to the on-disk data, -+ * letting know userspace that a bit of data might have been -+ * written correctly on disk is a very low priority, compared -+ * to letting know userspace that some data has _not_ been -+ * written at all. -+ */ -+ if (unlikely(status == -EIO)) -+ return status; -+ *ppos = pos; -+ - return written ? written : status; - } - EXPORT_SYMBOL(generic_file_buffered_write); diff --git a/patches.suse/panic-on-io-nmi-SLE11-user-space-api.patch b/patches.suse/panic-on-io-nmi-SLE11-user-space-api.patch deleted file mode 100644 index 140370c..0000000 --- a/patches.suse/panic-on-io-nmi-SLE11-user-space-api.patch +++ /dev/null @@ -1,47 +0,0 @@ -From: Kurt Garloff -Subject: API fix: [PATCH] X86: sysctl to allow panic on IOCK NMI error -References: bnc#427979 -Patch-mainline: never - -Part of patches.suse/panic-on-io-nmi.diff from SLE11 to keep stable user -space API. The rest is upstream as commit 5211a242. 
- -Signed-off-by: Jiri Benc - ---- - include/linux/sysctl.h | 1 + - kernel/sysctl.c | 2 +- - kernel/sysctl_check.c | 1 + - 3 files changed, 3 insertions(+), 1 deletion(-) - ---- a/include/linux/sysctl.h -+++ b/include/linux/sysctl.h -@@ -162,6 +162,7 @@ enum - KERN_MAX_LOCK_DEPTH=74, - KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */ - KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ -+ KERN_PANIC_ON_IO_NMI=79, /* int: whether we will panic on an io NMI */ - }; - - ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -808,7 +808,7 @@ static struct ctl_table kern_table[] = { - .proc_handler = &proc_dointvec, - }, - { -- .ctl_name = CTL_UNNUMBERED, -+ .ctl_name = KERN_PANIC_ON_IO_NMI, - .procname = "panic_on_io_nmi", - .data = &panic_on_io_nmi, - .maxlen = sizeof(int), ---- a/kernel/sysctl_check.c -+++ b/kernel/sysctl_check.c -@@ -104,6 +104,7 @@ static const struct trans_ctl_table tran - { KERN_MAX_LOCK_DEPTH, "max_lock_depth" }, - { KERN_NMI_WATCHDOG, "nmi_watchdog" }, - { KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" }, -+ { KERN_PANIC_ON_IO_NMI, "panic_on_io_nmi" }, - { KERN_SETUID_DUMPABLE, "suid_dumpable" }, - {} - }; diff --git a/patches.suse/ppc-no-LDFLAGS_MODULE.patch b/patches.suse/ppc-no-LDFLAGS_MODULE.patch deleted file mode 100644 index e14c583..0000000 --- a/patches.suse/ppc-no-LDFLAGS_MODULE.patch +++ /dev/null @@ -1,32 +0,0 @@ -Subject: arch/powerpc/lib/crtsavres.o is not available when linking external modules -From: olh@suse.de -Patch-mainline: never - -Maybe it helps. 
- ---- - arch/powerpc/Makefile | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - ---- a/arch/powerpc/Makefile -+++ b/arch/powerpc/Makefile -@@ -85,15 +85,17 @@ ifeq ($(GCC_BROKEN_VEC),y) - KBUILD_CFLAGS += $(call cc-option,-mcpu=970) - else - KBUILD_CFLAGS += $(call cc-option,-mcpu=power4) -+# GCC_BROKEN_VEC - endif - else - KBUILD_CFLAGS += $(call cc-option,-mcpu=power4) -+# CONFIG_ALTIVEC - endif - else - KBUILD_CFLAGS += $(call cc-option,-mtune=power4) -+# CONFIG_POWER4_ONLY - endif --else --LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o -+# CONFIG_PPC64 - endif - - ifeq ($(CONFIG_TUNE_CELL),y) diff --git a/patches.suse/ppc-powerbook-usb-fn-key-default.patch b/patches.suse/ppc-powerbook-usb-fn-key-default.patch deleted file mode 100644 index f6c9901..0000000 --- a/patches.suse/ppc-powerbook-usb-fn-key-default.patch +++ /dev/null @@ -1,32 +0,0 @@ -Subject: Default value of usbhid.pb_fnmode module parameter -From: olh@suse.de -References: 220266 -Patch-mainline: not yet - - The kernel default value for usbhid.pb_fnmode is 1, which means that pressing - the Fn keys (F1..F10) without the fn key triggers the special functions - decrease/increase brightness, mute, decrease/increase volume, etc., which is - the default under MacOS. - - At least under 10.2 Beta2, only the volume related special functions work at - all. In addition, Ctrl-Alt-Fx is used to switch between consoles. with - pb_fnmode==1, the fn key needs to be pressed in addition. - - Therefore, pb_fnmode==2 (F1..F10 by default trigger Fn rather than the special - functions) makes more sense under Linux. 
- - - drivers/hid/hid-apple.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/hid/hid-apple.c -+++ b/drivers/hid/hid-apple.c -@@ -36,7 +36,7 @@ - - #define APPLE_FLAG_FKEY 0x01 - --static unsigned int fnmode = 1; -+static unsigned int fnmode = 2; - module_param(fnmode, uint, 0644); - MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, " - "[1] = fkeyslast, 2 = fkeysfirst)"); diff --git a/patches.suse/radeon-monitor-jsxx-quirk.patch b/patches.suse/radeon-monitor-jsxx-quirk.patch deleted file mode 100644 index a809eed..0000000 --- a/patches.suse/radeon-monitor-jsxx-quirk.patch +++ /dev/null @@ -1,65 +0,0 @@ -Subject: [PATCH] Add quirk for the graphics adapter in some JSxx -From: Tony Breeds -References: 461002 - LTC50817 -Patch-mainline: not yet - -These devices are set to 640x480 by firmware, switch them to -800x600@60. - -Signed-off-by: Tony Breeds -Signed-off-by: Olaf Hering ---- - drivers/video/aty/radeon_monitor.c | 35 +++++++++++++++++++++++++++++++++++ - 1 file changed, 35 insertions(+) - ---- a/drivers/video/aty/radeon_monitor.c -+++ b/drivers/video/aty/radeon_monitor.c -@@ -730,6 +730,25 @@ static void radeon_videomode_to_var(stru - var->vmode = mode->vmode; - } - -+#ifdef CONFIG_PPC_PSERIES -+static int is_powerblade(const char *model) -+{ -+ struct device_node *root; -+ const char* cp; -+ int len, l, rc = 0; -+ -+ root = of_find_node_by_path("/"); -+ if (root && model) { -+ l = strlen(model); -+ cp = of_get_property(root, "model", &len); -+ if (cp) -+ rc = memcmp(model, cp, min(len, l)) == 0; -+ of_node_put(root); -+ } -+ return rc; -+} -+#endif -+ - /* - * Build the modedb for head 1 (head 2 will come later), check panel infos - * from either BIOS or EDID, and pick up the default mode -@@ -865,6 +884,22 @@ void __devinit radeon_check_modes(struct - has_default_mode = 1; - } - -+#ifdef CONFIG_PPC_PSERIES -+ if (!has_default_mode && ( -+ is_powerblade("IBM,8842") || /* JS20 */ -+ is_powerblade("IBM,8844") || /* 
JS21 */ -+ is_powerblade("IBM,7998") || /* JS12/JS21/JS22 */ -+ is_powerblade("IBM,0792") || /* QS21 */ -+ is_powerblade("IBM,0793") /* QS22 */ -+ )) { -+ printk("Falling back to 800x600 on JSxx hardware\n"); -+ if (fb_find_mode(&info->var, info, "800x600@60", -+ info->monspecs.modedb, -+ info->monspecs.modedb_len, NULL, 8) != 0) -+ has_default_mode = 1; -+ } -+#endif -+ - /* - * Still no mode, let's pick up a default from the db - */ diff --git a/patches.suse/raw_device_max_minors_param.diff b/patches.suse/raw_device_max_minors_param.diff deleted file mode 100644 index 4d2cb1e..0000000 --- a/patches.suse/raw_device_max_minors_param.diff +++ /dev/null @@ -1,112 +0,0 @@ -From: Jan Kara -Subject: Allow setting of number of raw devices as a module parameter -References: FATE 302178 -Patch-mainline: never - -Allow setting of maximal number of raw devices as a module parameter. This requires -changing of static array into a vmalloced one (the array is going to be too large -for kmalloc). - -Signed-off-by: Jan Kara - ---- - drivers/char/Kconfig | 2 +- - drivers/char/raw.c | 33 +++++++++++++++++++++++++++------ - 2 files changed, 28 insertions(+), 7 deletions(-) - ---- a/drivers/char/Kconfig -+++ b/drivers/char/Kconfig -@@ -1059,7 +1059,7 @@ config RAW_DRIVER - with the O_DIRECT flag. 
- - config MAX_RAW_DEVS -- int "Maximum number of RAW devices to support (1-8192)" -+ int "Maximum number of RAW devices to support (1-65536)" - depends on RAW_DRIVER - default "256" - help ---- a/drivers/char/raw.c -+++ b/drivers/char/raw.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - - #include - -@@ -30,10 +31,15 @@ struct raw_device_data { - }; - - static struct class *raw_class; --static struct raw_device_data raw_devices[MAX_RAW_MINORS]; -+static struct raw_device_data *raw_devices; - static DEFINE_MUTEX(raw_mutex); - static const struct file_operations raw_ctl_fops; /* forward declaration */ - -+static int max_raw_minors = MAX_RAW_MINORS; -+ -+module_param(max_raw_minors, int, 0); -+MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)"); -+ - /* - * Open/close code for raw IO. - * -@@ -131,7 +137,7 @@ static int bind_set(int number, u64 majo - struct raw_device_data *rawdev; - int err = 0; - -- if (number <= 0 || number >= MAX_RAW_MINORS) -+ if (number <= 0 || number >= max_raw_minors) - return -EINVAL; - - if (MAJOR(dev) != major || MINOR(dev) != minor) -@@ -318,12 +324,26 @@ static int __init raw_init(void) - dev_t dev = MKDEV(RAW_MAJOR, 0); - int ret; - -- ret = register_chrdev_region(dev, MAX_RAW_MINORS, "raw"); -+ if (max_raw_minors < 1 || max_raw_minors > 65536) { -+ printk(KERN_WARNING "raw: invalid max_raw_minors (must be" -+ " between 1 and 65536), using %d\n", MAX_RAW_MINORS); -+ max_raw_minors = MAX_RAW_MINORS; -+ } -+ -+ raw_devices = vmalloc(sizeof(struct raw_device_data) * max_raw_minors); -+ if (!raw_devices) { -+ printk(KERN_ERR "Not enough memory for raw device structures\n"); -+ ret = -ENOMEM; -+ goto error; -+ } -+ memset(raw_devices, 0, sizeof(struct raw_device_data) * max_raw_minors); -+ -+ ret = register_chrdev_region(dev, max_raw_minors, "raw"); - if (ret) - goto error; - - cdev_init(&raw_cdev, &raw_fops); -- ret = cdev_add(&raw_cdev, dev, MAX_RAW_MINORS); -+ ret = cdev_add(&raw_cdev, dev, 
max_raw_minors); - if (ret) { - kobject_put(&raw_cdev.kobj); - goto error_region; -@@ -342,8 +362,9 @@ static int __init raw_init(void) - return 0; - - error_region: -- unregister_chrdev_region(dev, MAX_RAW_MINORS); -+ unregister_chrdev_region(dev, max_raw_minors); - error: -+ vfree(raw_devices); - return ret; - } - -@@ -352,7 +373,7 @@ static void __exit raw_exit(void) - device_destroy(raw_class, MKDEV(RAW_MAJOR, 0)); - class_destroy(raw_class); - cdev_del(&raw_cdev); -- unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS); -+ unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors); - } - - module_init(raw_init); diff --git a/patches.suse/readahead-request-tunables.patch b/patches.suse/readahead-request-tunables.patch deleted file mode 100644 index 949bf8e..0000000 --- a/patches.suse/readahead-request-tunables.patch +++ /dev/null @@ -1,44 +0,0 @@ -From: Jan Kara -Subject: Update readahead and max_sectors tunables -References: bnc#548529 -Patch-mainline: no - -Increase read_ahead_kb and max_sectors_kb to values from SLES10 SP3 to get -back sequential IO performance if we are not compiling a -desktop kernel -flavor. 
- -Signed-off-by: Jan Kara - ---- - include/linux/blkdev.h | 4 ++++ - include/linux/mm.h | 4 ++++ - 2 files changed, 8 insertions(+) - ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -947,7 +947,11 @@ extern int blk_verify_command(unsigned c - enum blk_default_limits { - BLK_MAX_SEGMENTS = 128, - BLK_SAFE_MAX_SECTORS = 255, -+#ifndef CONFIG_KERNEL_DESKTOP -+ BLK_DEF_MAX_SECTORS = 2048, -+#else - BLK_DEF_MAX_SECTORS = 1024, -+#endif - BLK_MAX_SEGMENT_SIZE = 65536, - BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, - }; ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -1307,7 +1307,11 @@ int write_one_page(struct page *page, in - void task_dirty_inc(struct task_struct *tsk); - - /* readahead.c */ -+#ifndef CONFIG_KERNEL_DESKTOP -+#define VM_MAX_READAHEAD 512 /* kbytes */ -+#else - #define VM_MAX_READAHEAD 128 /* kbytes */ -+#endif - #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ - - int force_page_cache_readahead(struct address_space *mapping, struct file *filp, diff --git a/patches.suse/reiser4-dependencies b/patches.suse/reiser4-dependencies deleted file mode 100644 index f92ab67..0000000 --- a/patches.suse/reiser4-dependencies +++ /dev/null @@ -1,225 +0,0 @@ -From: Reiser4 Development -Subject: In-kernel dependencies for reiser4 -Patch-mainline: Probably never - - This patch contains the in-kernel dependencies needed by reiser4. - -Acked-by: Jeff Mahoney ---- - fs/fs-writeback.c | 48 +++++++++++++++++++++++++++++++++++++++------- - fs/inode.c | 1 - include/linux/fs.h | 14 ++++++++++++- - include/linux/mm.h | 1 - include/linux/writeback.h | 3 ++ - mm/filemap.c | 2 + - mm/page-writeback.c | 26 ++++++++++++++++++++++++ - 7 files changed, 87 insertions(+), 8 deletions(-) - ---- a/fs/fs-writeback.c -+++ b/fs/fs-writeback.c -@@ -461,8 +461,10 @@ static bool pin_sb_for_writeback(struct - * Return 1, if the caller writeback routine should be - * interrupted. Otherwise return 0. 
- */ --static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, -- struct writeback_control *wbc, bool only_this_sb) -+int generic_writeback_sb_inodes(struct super_block *sb, -+ struct bdi_writeback *wb, -+ struct writeback_control *wbc, -+ bool only_this_sb) - { - while (!list_empty(&wb->b_io)) { - long pages_skipped; -@@ -548,7 +550,10 @@ void writeback_inodes_wb(struct bdi_writ - requeue_io(inode); - continue; - } -- ret = writeback_sb_inodes(sb, wb, wbc, false); -+ if (sb->s_op->writeback_inodes) -+ ret = sb->s_op->writeback_inodes(sb, wb, wbc, false); -+ else -+ ret = generic_writeback_sb_inodes(sb, wb, wbc, false); - drop_super(sb); - - if (ret) -@@ -557,18 +562,21 @@ void writeback_inodes_wb(struct bdi_writ - spin_unlock(&inode_lock); - /* Leave any unwritten inodes on b_io */ - } -+EXPORT_SYMBOL(writeback_inodes_wb); - --static void __writeback_inodes_sb(struct super_block *sb, -+void __writeback_inodes_sb(struct super_block *sb, - struct bdi_writeback *wb, struct writeback_control *wbc) - { -- WARN_ON(!rwsem_is_locked(&sb->s_umount)); -- - spin_lock(&inode_lock); - if (!wbc->for_kupdate || list_empty(&wb->b_io)) - queue_io(wb, wbc->older_than_this); -- writeback_sb_inodes(sb, wb, wbc, true); -+ if (sb->s_op->writeback_inodes) -+ sb->s_op->writeback_inodes(sb, wb, wbc, true); -+ else -+ generic_writeback_sb_inodes(sb, wb, wbc, true); - spin_unlock(&inode_lock); - } -+EXPORT_SYMBOL(__writeback_inodes_sb); - - /* - * The maximum number of pages to writeout in a single bdi flush/kupdate -@@ -688,6 +696,32 @@ static long wb_writeback(struct bdi_writ - - return wrote; - } -+EXPORT_SYMBOL(generic_writeback_sb_inodes); -+ -+/* -+ * This function is for file systems which have their -+ * own means of periodical write-out of old data. -+ * NOTE: inode_lock should be hold. -+ * -+ * Skip a portion of b_io inodes which belong to @sb -+ * and go sequentially in reverse order. 
-+ */ -+void writeback_skip_sb_inodes(struct super_block *sb, -+ struct bdi_writeback *wb) -+{ -+ while (1) { -+ struct inode *inode; -+ -+ if (list_empty(&wb->b_io)) -+ break; -+ inode = list_entry(wb->b_io.prev, struct inode, i_wb_list); -+ if (sb != inode->i_sb) -+ break; -+ redirty_tail(inode); -+ } -+} -+EXPORT_SYMBOL(writeback_skip_sb_inodes); -+ - - /* - * Return the next wb_writeback_work struct that hasn't been processed yet. ---- a/fs/inode.c -+++ b/fs/inode.c -@@ -82,6 +82,7 @@ static struct hlist_head *inode_hashtabl - * the i_state of an inode while it is in use.. - */ - DEFINE_SPINLOCK(inode_lock); -+EXPORT_SYMBOL_GPL(inode_lock); - - /* - * iprune_sem provides exclusion between the kswapd or try_to_free_pages ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -522,6 +522,7 @@ enum positive_aop_returns { - struct page; - struct address_space; - struct writeback_control; -+struct bdi_writeback; - - struct iov_iter { - const struct iovec *iov; -@@ -1605,7 +1606,12 @@ struct super_operations { - int (*statfs) (struct dentry *, struct kstatfs *); - int (*remount_fs) (struct super_block *, int *, char *); - void (*umount_begin) (struct super_block *); -- -+ int (*writeback_inodes)(struct super_block *sb, -+ struct bdi_writeback *wb, -+ struct writeback_control *wbc, -+ bool only_this_sb); -+ void (*sync_inodes) (struct super_block *sb, -+ struct writeback_control *wbc); - int (*show_options)(struct seq_file *, struct vfsmount *); - int (*show_stats)(struct seq_file *, struct vfsmount *); - #ifdef CONFIG_QUOTA -@@ -2133,6 +2139,12 @@ extern int invalidate_inode_pages2(struc - extern int invalidate_inode_pages2_range(struct address_space *mapping, - pgoff_t start, pgoff_t end); - extern int write_inode_now(struct inode *, int); -+extern void writeback_skip_sb_inodes(struct super_block *sb, -+ struct bdi_writeback *wb); -+extern int generic_writeback_sb_inodes(struct super_block *sb, -+ struct bdi_writeback *wb, -+ struct writeback_control *wbc, -+ bool 
only_this_sb); - extern int filemap_fdatawrite(struct address_space *); - extern int filemap_flush(struct address_space *); - extern int filemap_fdatawait(struct address_space *); ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -875,6 +875,7 @@ void account_page_dirtied(struct page *p - void account_page_writeback(struct page *page); - int set_page_dirty(struct page *page); - int set_page_dirty_lock(struct page *page); -+int set_page_dirty_notag(struct page *page); - int clear_page_dirty_for_io(struct page *page); - - /* Is the vma a continuation of the stack vma above it? */ ---- a/include/linux/writeback.h -+++ b/include/linux/writeback.h -@@ -64,6 +64,9 @@ int writeback_inodes_sb_nr_if_idle(struc - void sync_inodes_sb(struct super_block *); - void writeback_inodes_wb(struct bdi_writeback *wb, - struct writeback_control *wbc); -+void __writeback_inodes_sb(struct super_block *sb, -+ struct bdi_writeback *wb, -+ struct writeback_control *wbc); - long wb_do_writeback(struct bdi_writeback *wb, int force_wait); - void wakeup_flusher_threads(long nr_pages); - ---- a/mm/filemap.c -+++ b/mm/filemap.c -@@ -139,6 +139,7 @@ void __remove_from_page_cache(struct pag - dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); - } - } -+EXPORT_SYMBOL(__remove_from_page_cache); - - void remove_from_page_cache(struct page *page) - { -@@ -967,6 +968,7 @@ static void shrink_readahead_size_eio(st - { - ra->ra_pages /= 4; - } -+EXPORT_SYMBOL(find_get_pages); - - /** - * do_generic_file_read - generic file read routine ---- a/mm/page-writeback.c -+++ b/mm/page-writeback.c -@@ -1180,6 +1180,32 @@ int __set_page_dirty_nobuffers(struct pa - EXPORT_SYMBOL(__set_page_dirty_nobuffers); - - /* -+ * set_page_dirty_notag() -- similar to __set_page_dirty_nobuffers() -+ * except it doesn't tag the page dirty in the page-cache radix tree. 
-+ * This means that the address space using this cannot use the regular -+ * filemap ->writepages() helpers and must provide its own means of -+ * tracking and finding non-tagged dirty pages. -+ * -+ * NOTE: furthermore, this version also doesn't handle truncate races. -+ */ -+int set_page_dirty_notag(struct page *page) -+{ -+ struct address_space *mapping = page->mapping; -+ -+ if (!TestSetPageDirty(page)) { -+ unsigned long flags; -+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); -+ local_irq_save(flags); -+ account_page_dirtied(page, mapping); -+ local_irq_restore(flags); -+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); -+ return 1; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(set_page_dirty_notag); -+ -+/* - * When a writepage implementation decides that it doesn't want to write this - * page for some reason, it should redirty the locked page via - * redirty_page_for_writepage() and it should then unlock the page and return 0 diff --git a/patches.suse/reiserfs-barrier-default b/patches.suse/reiserfs-barrier-default deleted file mode 100644 index 9a11cca..0000000 --- a/patches.suse/reiserfs-barrier-default +++ /dev/null @@ -1,56 +0,0 @@ -From: Jeff Mahoney -Subject: Make reiserfs default to barrier=flush -Patch-mainline: not yet - -Change the default reiserfs mount option to barrier=flush - -Signed-off-by: Jeff Mahoney ---- - - fs/reiserfs/Kconfig | 22 ++++++++++++++++++++++ - fs/reiserfs/super.c | 3 +++ - 2 files changed, 25 insertions(+) - ---- a/fs/reiserfs/Kconfig -+++ b/fs/reiserfs/Kconfig -@@ -50,6 +50,28 @@ config REISERFS_PROC_INFO - Almost everyone but ReiserFS developers and people fine-tuning - reiserfs or tracing problems should say N. - -+config REISERFS_DEFAULTS_TO_BARRIERS_ENABLED -+ bool "Default to 'barrier=flush' in reiserfs" -+ depends on REISERFS_FS -+ help -+ Modern disk drives support write caches that can speed up writeback. 
-+ Some devices, in order to improve their performance statistics, -+ report that the write has been completed even when it has only -+ been committed to volatile cache memory. This can result in -+ severe corruption in the event of power loss. -+ -+ The -o barrier option enables the file system to direct the block -+ layer to issue a barrier, which ensures that the cache has been -+ flushed before proceeding. This can produce some slowdown in -+ certain environments, but allows higher end storage arrays with -+ battery-backed caches to report completes writes sooner than -+ would be otherwise possible. -+ -+ Without this option, disk write caches should be disabled if -+ you value data integrity over writeback performance. -+ -+ If unsure, say N. -+ - config REISERFS_FS_XATTR - bool "ReiserFS extended attributes" - depends on REISERFS_FS ---- a/fs/reiserfs/super.c -+++ b/fs/reiserfs/super.c -@@ -1637,6 +1637,9 @@ static int reiserfs_fill_super(struct su - /* Set default values for options: non-aggressive tails, RO on errors */ - REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); - REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO); -+#ifdef CONFIG_REISERFS_DEFAULTS_TO_BARRIERS_ENABLED -+ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH); -+#endif - /* no preallocation minimum, be smart in - reiserfs_file_write instead */ - REISERFS_SB(s)->s_alloc_options.preallocmin = 0; diff --git a/patches.suse/richacl-fix b/patches.suse/richacl-fix deleted file mode 100644 index 74ecc54..0000000 --- a/patches.suse/richacl-fix +++ /dev/null @@ -1,235 +0,0 @@ -From: Jeff Mahoney -Subject: richacl: Adopt 2.6.38 API -Patch-mainline: dependent on local patches - - This patch addresses changes in the inode_operations->permission prototype. 
- -Signed-off-by: Jeff Mahoney ---- - fs/ext4/richacl.c | 13 +++++++------ - fs/ext4/richacl.h | 4 ++-- - fs/richacl_base.c | 2 +- - fs/richacl_inode.c | 34 +++++++++++++++++++--------------- - include/linux/richacl.h | 16 ++++++++++------ - 5 files changed, 39 insertions(+), 30 deletions(-) - ---- a/fs/ext4/richacl.c -+++ b/fs/ext4/richacl.c -@@ -120,7 +120,8 @@ ext4_set_richacl(handle_t *handle, struc - } - - int --ext4_richacl_permission(struct inode *inode, unsigned int mask) -+ext4_richacl_permission(struct inode *inode, unsigned int mask, -+ unsigned int flags) - { - struct richacl *acl; - int retval; -@@ -132,20 +133,20 @@ ext4_richacl_permission(struct inode *in - if (acl && IS_ERR(acl)) - retval = PTR_ERR(acl); - else { -- retval = richacl_inode_permission(inode, acl, mask); -+ retval = richacl_inode_permission(inode, acl, mask, flags); - richacl_put(acl); - } - - return retval; - } - --int ext4_permission(struct inode *inode, int mask) -+int ext4_permission(struct inode *inode, int mask, unsigned int flags) - { - if (IS_RICHACL(inode)) - return ext4_richacl_permission(inode, -- richacl_want_to_mask(mask)); -+ richacl_want_to_mask(mask), flags); - else -- return generic_permission(inode, mask, ext4_check_acl); -+ return generic_permission(inode, mask, flags, ext4_check_acl); - } - - int ext4_may_create(struct inode *dir, int isdir) -@@ -260,7 +261,7 @@ ext4_xattr_set_richacl(struct dentry *de - if (strcmp(name, "") != 0) - return -EINVAL; - if (current_fsuid() != inode->i_uid && -- ext4_richacl_permission(inode, ACE4_WRITE_ACL) && -+ ext4_richacl_permission(inode, ACE4_WRITE_ACL, 0) && - !capable(CAP_FOWNER)) - return -EPERM; - if (value) { ---- a/fs/ext4/richacl.h -+++ b/fs/ext4/richacl.h -@@ -24,8 +24,8 @@ - /* Value for i_richacl if RICHACL has not been cached */ - # define EXT4_RICHACL_NOT_CACHED ((void *)-1) - --extern int ext4_permission(struct inode *, int); --extern int ext4_richacl_permission(struct inode *, unsigned int); -+extern int 
ext4_permission(struct inode *, int, unsigned int); -+extern int ext4_richacl_permission(struct inode *, unsigned int, unsigned int); - extern int ext4_may_create(struct inode *, int); - extern int ext4_may_delete(struct inode *, struct inode *, int); - extern int ext4_init_richacl(handle_t *, struct inode *, struct inode *); ---- a/fs/richacl_base.c -+++ b/fs/richacl_base.c -@@ -381,7 +381,7 @@ EXPORT_SYMBOL_GPL(richacl_chmod); - */ - int - richacl_permission(struct inode *inode, const struct richacl *acl, -- unsigned int mask) -+ unsigned int mask, unsigned int flags) - { - const struct richace *ace; - unsigned int file_mask, requested = mask, denied = 0; ---- a/fs/richacl_inode.c -+++ b/fs/richacl_inode.c -@@ -23,14 +23,15 @@ - */ - int - richacl_may_create(struct inode *dir, int isdir, -- int (*richacl_permission)(struct inode *, unsigned int)) -+ int (*richacl_permission)(struct inode *, unsigned int, -+ unsigned int)) - { - if (IS_RICHACL(dir)) - return richacl_permission(dir, - ACE4_EXECUTE | (isdir ? 
-- ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE)); -+ ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE), 0); - else -- return generic_permission(dir, MAY_WRITE | MAY_EXEC, -+ return generic_permission(dir, MAY_WRITE | MAY_EXEC, 0, - dir->i_op->check_acl); - } - EXPORT_SYMBOL(richacl_may_create); -@@ -52,23 +53,25 @@ check_sticky(struct inode *dir, struct i - */ - int - richacl_may_delete(struct inode *dir, struct inode *inode, int replace, -- int (*richacl_permission)(struct inode *, unsigned int)) -+ int (*richacl_permission)(struct inode *, unsigned int, -+ unsigned int)) - { - int error; - - if (IS_RICHACL(inode)) { - error = richacl_permission(dir, -- ACE4_EXECUTE | ACE4_DELETE_CHILD); -+ ACE4_EXECUTE | ACE4_DELETE_CHILD, 0); - if (!error && check_sticky(dir, inode)) - error = -EPERM; -- if (error && !richacl_permission(inode, ACE4_DELETE)) -+ if (error && !richacl_permission(inode, ACE4_DELETE, 0)) - error = 0; - if (!error && replace) - error = richacl_permission(dir, - ACE4_EXECUTE | (S_ISDIR(inode->i_mode) ? 
-- ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE)); -+ ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE), -+ 0); - } else { -- error = generic_permission(dir, MAY_WRITE | MAY_EXEC, -+ error = generic_permission(dir, MAY_WRITE | MAY_EXEC, 0, - dir->i_op->check_acl); - if (!error && check_sticky(dir, inode)) - error = -EPERM; -@@ -89,10 +92,10 @@ EXPORT_SYMBOL(richacl_may_delete); - */ - int - richacl_inode_permission(struct inode *inode, const struct richacl *acl, -- unsigned int mask) -+ unsigned int mask, unsigned int flags) - { - if (acl) { -- if (!richacl_permission(inode, acl, mask)) -+ if (!richacl_permission(inode, acl, mask, flags)) - return 0; - } else { - int mode = inode->i_mode; -@@ -140,7 +143,8 @@ EXPORT_SYMBOL_GPL(richacl_inode_permissi - */ - int - richacl_inode_change_ok(struct inode *inode, struct iattr *attr, -- int (*richacl_permission)(struct inode *, unsigned int)) -+ int (*richacl_permission)(struct inode *, unsigned int, -+ unsigned int)) - { - unsigned int ia_valid = attr->ia_valid; - -@@ -153,7 +157,7 @@ richacl_inode_change_ok(struct inode *in - (current_fsuid() != inode->i_uid || - attr->ia_uid != inode->i_uid) && - (current_fsuid() != attr->ia_uid || -- richacl_permission(inode, ACE4_WRITE_OWNER)) && -+ richacl_permission(inode, ACE4_WRITE_OWNER, 0)) && - !capable(CAP_CHOWN)) - goto error; - -@@ -163,7 +167,7 @@ richacl_inode_change_ok(struct inode *in - if ((current_fsuid() != inode->i_uid || - (!in_group && attr->ia_gid != inode->i_gid)) && - (!in_group || -- richacl_permission(inode, ACE4_WRITE_OWNER)) && -+ richacl_permission(inode, ACE4_WRITE_OWNER, 0)) && - !capable(CAP_CHOWN)) - goto error; - } -@@ -171,7 +175,7 @@ richacl_inode_change_ok(struct inode *in - /* Make sure a caller can chmod. */ - if (ia_valid & ATTR_MODE) { - if (current_fsuid() != inode->i_uid && -- richacl_permission(inode, ACE4_WRITE_ACL) && -+ richacl_permission(inode, ACE4_WRITE_ACL, 0) && - !capable(CAP_FOWNER)) - goto error; - /* Also check the setgid bit! 
*/ -@@ -183,7 +187,7 @@ richacl_inode_change_ok(struct inode *in - /* Check for setting the inode time. */ - if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) { - if (current_fsuid() != inode->i_uid && -- richacl_permission(inode, ACE4_WRITE_ATTRIBUTES) && -+ richacl_permission(inode, ACE4_WRITE_ATTRIBUTES, 0) && - !capable(CAP_FOWNER)) - goto error; - } ---- a/include/linux/richacl.h -+++ b/include/linux/richacl.h -@@ -294,7 +294,7 @@ extern unsigned int richacl_want_to_mask - extern void richacl_compute_max_masks(struct richacl *); - extern struct richacl *richacl_chmod(struct richacl *, mode_t); - extern int richacl_permission(struct inode *, const struct richacl *, -- unsigned int); -+ unsigned int, unsigned int); - extern struct richacl *richacl_inherit(const struct richacl *, struct inode *); - extern int richacl_equiv_mode(const struct richacl *, mode_t *); - -@@ -302,18 +302,22 @@ extern int richacl_equiv_mode(const stru - - #ifdef CONFIG_FS_RICHACL - extern int richacl_may_create(struct inode *, int, -- int (*)(struct inode *, unsigned int)); -+ int (*)(struct inode *, unsigned int, -+ unsigned int)); - extern int richacl_may_delete(struct inode *, struct inode *, int, -- int (*)(struct inode *, unsigned int)); -+ int (*)(struct inode *, unsigned int, -+ unsigned int)); - extern int richacl_inode_permission(struct inode *, const struct richacl *, -- unsigned int); -+ unsigned int, unsigned int); - extern int richacl_inode_change_ok(struct inode *, struct iattr *, -- int (*)(struct inode *, unsigned int)); -+ int (*)(struct inode *, unsigned int, -+ unsigned int)); - #else - static inline int - richacl_inode_change_ok(struct inode *inode, struct iattr *attr, - int (*richacl_permission)(struct inode *inode, -- unsigned int mask)) -+ unsigned int mask, -+ unsigned int flags)) - { - return -EPERM; - } diff --git a/patches.suse/s390-Kerntypes.diff b/patches.suse/s390-Kerntypes.diff deleted file mode 100644 index ed9356a..0000000 --- 
a/patches.suse/s390-Kerntypes.diff +++ /dev/null @@ -1,387 +0,0 @@ -From: Michael Holzheu -Subject: S390: Generate Kerntypes file -Patch-mainline: Not yet - -s390 core changes: - - Remove rule to generate kernel listing. - - Add code to generate kerntypes for use with the lkcd utils. - -Signed-off-by: Michael Holzheu -Signed-off-by: Martin Schwidefsky -Signed-off-by: Michal Marek ---- - - arch/s390/Makefile | 4 - arch/s390/boot/Makefile | 19 ++ - arch/s390/boot/kerntypes.c | 311 +++++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 328 insertions(+), 6 deletions(-) - ---- a/arch/s390/Makefile -+++ b/arch/s390/Makefile -@@ -106,12 +106,12 @@ drivers-$(CONFIG_OPROFILE) += arch/s390/ - - boot := arch/s390/boot - --all: image bzImage -+all: image bzImage kerntypes.o - - install: vmlinux - $(Q)$(MAKE) $(build)=$(boot) $@ - --image bzImage: vmlinux -+image bzImage kerntypes.o: vmlinux - $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ - - zfcpdump: ---- a/arch/s390/boot/Makefile -+++ b/arch/s390/boot/Makefile -@@ -2,15 +2,26 @@ - # Makefile for the linux s390-specific parts of the memory manager. - # - --COMPILE_VERSION := __linux_compile_version_id__`hostname | \ -- tr -c '[0-9A-Za-z]' '_'`__`date | \ -- tr -c '[0-9A-Za-z]' '_'`_t -+COMPILE_VERSION := __linux_compile_version_id__$(shell hostname | \ -+ tr -c '[0-9A-Za-z]' '_')__$(shell date | \ -+ tr -c '[0-9A-Za-z]' '_')_t - -+ -+chk-option = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \ -+ > /dev/null 2>&1; then echo "$(1)"; fi ;) -+ -+# Remove possible '-g' from CFLAGS_KERNEL, since we want to use stabs -+# debug format. -+override CFLAGS_KERNEL := $(shell echo $(CFLAGS_KERNEL) | sed 's/-g//') - EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. 
-+# Assume we don't need the flag if the compiler doesn't know about it -+EXTRA_CFLAGS += $(call chk-option,-fno-eliminate-unused-debug-types) -+ - - targets := image - targets += bzImage - subdir- := compressed -+targets += kerntypes.o - - $(obj)/image: vmlinux FORCE - $(call if_changed,objcopy) -@@ -23,4 +34,4 @@ $(obj)/compressed/vmlinux: FORCE - - install: $(CONFIGURE) $(obj)/image - sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \ -- System.map Kerntypes "$(INSTALL_PATH)" -+ System.map "$(INSTALL_PATH)" ---- /dev/null -+++ b/arch/s390/boot/kerntypes.c -@@ -0,0 +1,311 @@ -+/* -+ * kerntypes.c -+ * -+ * Dummy module that includes headers for all kernel types of interest. -+ * The kernel type information is used by the lcrash utility when -+ * analyzing system crash dumps or the live system. Using the type -+ * information for the running system, rather than kernel header files, -+ * makes for a more flexible and robust analysis tool. -+ * -+ * This source code is released under the GNU GPL. 
-+ */ -+ -+/* generate version for this file */ -+typedef char *COMPILE_VERSION; -+ -+/* General linux types */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#ifdef CONFIG_SLUB -+ #include -+#endif -+#ifdef CONFIG_SLAB -+ #include -+#endif -+#ifdef CONFIG_SLQB -+ #include -+#endif -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include 
-+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * s390 specific includes -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* channel subsystem driver */ -+#include "drivers/s390/cio/cio.h" -+#include "drivers/s390/cio/chsc.h" -+#include "drivers/s390/cio/css.h" -+#include "drivers/s390/cio/device.h" -+#include "drivers/s390/cio/chsc_sch.h" -+ -+/* dasd device driver */ -+#include "drivers/s390/block/dasd_int.h" -+#include "drivers/s390/block/dasd_diag.h" -+#include "drivers/s390/block/dasd_eckd.h" -+#include "drivers/s390/block/dasd_fba.h" -+ -+/* networking drivers */ -+#include "include/net/iucv/iucv.h" -+#include "drivers/s390/net/fsm.h" -+#include "drivers/s390/net/ctcm_main.h" -+#include "drivers/s390/net/ctcm_fsms.h" -+#include "drivers/s390/net/lcs.h" -+#include "drivers/s390/net/qeth_core.h" -+#include "drivers/s390/net/qeth_core_mpc.h" -+#include "drivers/s390/net/qeth_l3.h" -+ -+/* zfcp device driver */ -+#include "drivers/s390/scsi/zfcp_def.h" -+#include "drivers/s390/scsi/zfcp_fsf.h" -+ -+/* crypto device driver */ -+#include "drivers/s390/crypto/ap_bus.h" -+#include "drivers/s390/crypto/zcrypt_api.h" -+#include "drivers/s390/crypto/zcrypt_cca_key.h" -+#include "drivers/s390/crypto/zcrypt_pcica.h" -+#include "drivers/s390/crypto/zcrypt_pcicc.h" -+#include "drivers/s390/crypto/zcrypt_pcixcc.h" -+#include "drivers/s390/crypto/zcrypt_cex2a.h" -+ -+/* sclp device driver */ -+#include "drivers/s390/char/sclp.h" -+#include "drivers/s390/char/sclp_rw.h" -+#include "drivers/s390/char/sclp_tty.h" -+ -+/* vmur device driver */ -+#include "drivers/s390/char/vmur.h" -+ -+/* qdio device driver */ -+#include "drivers/s390/cio/qdio.h" -+#include 
"drivers/s390/cio/qdio_thinint.c" -+ -+ -+/* KVM */ -+#include "include/linux/kvm.h" -+#include "include/linux/kvm_host.h" -+#include "include/linux/kvm_para.h" -+ -+/* Virtio */ -+#include "include/linux/virtio.h" -+#include "include/linux/virtio_config.h" -+#include "include/linux/virtio_ring.h" -+#include "include/linux/virtio_9p.h" -+#include "include/linux/virtio_console.h" -+#include "include/linux/virtio_rng.h" -+#include "include/linux/virtio_balloon.h" -+#include "include/linux/virtio_net.h" -+#include "include/linux/virtio_blk.h" -+ -+/* -+ * include sched.c for types: -+ * - struct prio_array -+ * - struct runqueue -+ */ -+#include "kernel/sched.c" -+/* -+ * include slab.c for struct kmem_cache -+ */ -+#ifdef CONFIG_SLUB -+ #include "mm/slub.c" -+#endif -+#ifdef CONFIG_SLAB -+ #include "mm/slab.c" -+#endif -+#ifdef CONFIG_SLQB -+ #include "mm/slqb.c" -+#endif -+ -+/* include driver core private structures */ -+#include "drivers/base/base.h" diff --git a/patches.suse/s390-System.map.diff b/patches.suse/s390-System.map.diff deleted file mode 100644 index 93b9546..0000000 --- a/patches.suse/s390-System.map.diff +++ /dev/null @@ -1,30 +0,0 @@ -From: Bernhard Walle -Subject: [PATCH] Strip L2^B symbols -Patch-mainline: never -References: bnc #456682 - -This patches strips all L2^B symbols that happen on s390 only from System.map. -We don't need that symbols as this are local labels. It confuses (older) -versions of crash and just makes System.map larger. - -The proper fix needs to be in binutils. However, since the binutils maintainer -at SUSE is not cooperative I workarounded this in the kernel. The proper -binutils patch is already mainline [1]. - - -Signed-off-by: Bernhard Walle - -[1] http://article.gmane.org/gmane.comp.gnu.binutils.cvs/12731 ---- - scripts/mksysmap | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/scripts/mksysmap -+++ b/scripts/mksysmap -@@ -41,5 +41,5 @@ - # so we just ignore them to let readprofile continue to work. 
- # (At least sparc64 has __crc_ in the middle). - --$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)' > $2 -+$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\(L2\)' > $2 - diff --git a/patches.suse/sched-revert-latency-defaults b/patches.suse/sched-revert-latency-defaults deleted file mode 100644 index c0f2ebe..0000000 --- a/patches.suse/sched-revert-latency-defaults +++ /dev/null @@ -1,95 +0,0 @@ -From: Suresh Jayaraman -Subject: Revert sched latency defaults -References: bnc#557307 -Patch-mainline: Never - -The upstream commit 172e082a91 re-tuned the sched latency defaults to better -suit desktop workloads. This hurt server workloads. So revert the latency -defaults to values similar to SLE11 GM to avoid several performance -regressions. - -Also, turn FAIR_SLEEPERS off and NORMALIZED_SLEEPER on. The above scheduler -tunables seem to be most effective with FAIR_SLEEPERS off and -NORMALIZED_SLEEPER on. - -The sysbench, dbench and Specjjb results showed much better performance with -these changes. - -The interbench results didn't show any user visible impact and I expect -desktop workloads won't be affected much. Iam not aware of/heard of any impact -of this tuning that is affecting any specific workload. 
- -Signed-off-by: Suresh Jayaraman ---- - kernel/sched_fair.c | 12 ++++++------ - kernel/sched_features.h | 4 ++-- - 2 files changed, 8 insertions(+), 8 deletions(-) - -Index: linux-2.6.32-master/kernel/sched_fair.c -=================================================================== ---- linux-2.6.32-master.orig/kernel/sched_fair.c -+++ linux-2.6.32-master/kernel/sched_fair.c -@@ -24,7 +24,7 @@ - - /* - * Targeted preemption latency for CPU-bound tasks: -- * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds) -+ * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds) - * - * NOTE: this latency value is not the same as the concept of - * 'timeslice length' - timeslices in CFS are of variable length -@@ -34,13 +34,13 @@ - * (to see the precise effective timeslice length of your workload, - * run vmstat and monitor the context-switches (cs) field) - */ --unsigned int sysctl_sched_latency = 5000000ULL; -+unsigned int sysctl_sched_latency = 20000000ULL; - - /* - * Minimal preemption granularity for CPU-bound tasks: -- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) -+ * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds) - */ --unsigned int sysctl_sched_min_granularity = 1000000ULL; -+unsigned int sysctl_sched_min_granularity = 4000000ULL; - - /* - * is kept at sysctl_sched_latency / sysctl_sched_min_granularity -@@ -63,13 +63,13 @@ unsigned int __read_mostly sysctl_sched_ - - /* - * SCHED_OTHER wake-up granularity. -- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) -+ * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds) - * - * This option delays the preemption effects of decoupled workloads - * and reduces their over-scheduling. Synchronous workloads will still - * have immediate wakeup/sleep latencies. 
- */ --unsigned int sysctl_sched_wakeup_granularity = 1000000UL; -+unsigned int sysctl_sched_wakeup_granularity = 5000000UL; - - const_debug unsigned int sysctl_sched_migration_cost = 500000UL; - -Index: linux-2.6.32-master/kernel/sched_features.h -=================================================================== ---- linux-2.6.32-master.orig/kernel/sched_features.h -+++ linux-2.6.32-master/kernel/sched_features.h -@@ -3,7 +3,7 @@ - * considers the task to be running during that period. This gives it - * a service deficit on wakeup, allowing it to run sooner. - */ --SCHED_FEAT(FAIR_SLEEPERS, 1) -+SCHED_FEAT(FAIR_SLEEPERS, 0) - - /* - * Only give sleepers 50% of their service deficit. This allows -@@ -17,7 +17,7 @@ SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1) - * longer period, and lighter task an effective shorter period they - * are considered running. - */ --SCHED_FEAT(NORMALIZED_SLEEPER, 0) -+SCHED_FEAT(NORMALIZED_SLEEPER, 1) - - /* - * Place new tasks ahead so that they do not starve already running diff --git a/patches.suse/scsi-error-test-unit-ready-timeout b/patches.suse/scsi-error-test-unit-ready-timeout deleted file mode 100644 index ed09317..0000000 --- a/patches.suse/scsi-error-test-unit-ready-timeout +++ /dev/null @@ -1,35 +0,0 @@ -From: garloff@suse.de -Subject: Introduce own timeout for TEST_UNIT_READY -Reference: SUSE41689 -Patch-mainline: not yet - -In error recovery, a SCSI device may need more than the 10s SENSE_TIMEOUT -to respond to TEST_UNIT_READY, as reported in novell bugzilla #56689. -The patch introduces an own timeout for TEST_UNIT_READY which is set -to 30s and used. - -Signed-off-by: Kurt Garloff - ---- - drivers/scsi/scsi_error.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/scsi/scsi_error.c -+++ b/drivers/scsi/scsi_error.c -@@ -45,6 +45,7 @@ - #include - - #define SENSE_TIMEOUT (10*HZ) -+#define TEST_UNIT_READY_TIMEOUT (30*HZ) - - /* - * These should *probably* be handled by the host itself. 
-@@ -1028,7 +1029,7 @@ static int scsi_eh_tur(struct scsi_cmnd - int retry_cnt = 1, rtn; - - retry_tur: -- rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); -+ rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, TEST_UNIT_READY_TIMEOUT, 0); - - SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", - __func__, scmd, rtn)); diff --git a/patches.suse/scsi-netlink-ml b/patches.suse/scsi-netlink-ml deleted file mode 100644 index e1d42a7..0000000 --- a/patches.suse/scsi-netlink-ml +++ /dev/null @@ -1,215 +0,0 @@ -Subject: Netlink interface for SCSI sense codes -From: Hannes Reinecke -Date: Fri Nov 21 10:08:01 2008 +0100: -Git: 97746dc5543ef9113c927022dc54ccd26915563d -Patch-mainline: not yet - -Inform the userspace about SCSI sense codes; some of them -carry vital information where userspace should react to. - -Signed-off-by: Hannes Reinecke - ---- - drivers/scsi/scsi_error.c | 79 +++++++++++++++++++++++++++++++++++++++++ - include/scsi/scsi_netlink.h | 6 ++- - include/scsi/scsi_netlink_ml.h | 64 +++++++++++++++++++++++++++++++++ - 3 files changed, 147 insertions(+), 2 deletions(-) - ---- a/drivers/scsi/scsi_error.c -+++ b/drivers/scsi/scsi_error.c -@@ -25,6 +25,8 @@ - #include - #include - #include -+#include -+#include - - #include - #include -@@ -34,6 +36,7 @@ - #include - #include - #include -+#include - - #include "scsi_priv.h" - #include "scsi_logging.h" -@@ -218,6 +221,80 @@ static inline void scsi_eh_prt_fail_stat - } - #endif - -+#ifdef CONFIG_SCSI_NETLINK -+/** -+ * scsi_post_sense_event - called to post a 'Sense Code' event -+ * -+ * @sdev: SCSI device the sense code occured on -+ * @sshdr: SCSI sense code -+ * -+ * Returns: -+ * 0 on succesful return -+ * otherwise, failing error code -+ * -+ */ -+static void scsi_post_sense_event(struct scsi_device *sdev, -+ struct scsi_sense_hdr *sshdr) -+{ -+ struct sk_buff *skb; -+ struct nlmsghdr *nlh; -+ struct scsi_nl_sense_msg *msg; -+ u32 len, skblen; -+ int err; -+ -+ if (!scsi_nl_sock) { -+ err = 
-ENOENT; -+ goto send_fail; -+ } -+ -+ len = SCSI_NL_MSGALIGN(sizeof(*msg)); -+ skblen = NLMSG_SPACE(len); -+ -+ skb = alloc_skb(skblen, GFP_ATOMIC); -+ if (!skb) { -+ err = -ENOBUFS; -+ goto send_fail; -+ } -+ -+ nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, -+ skblen - sizeof(*nlh), 0); -+ if (!nlh) { -+ err = -ENOBUFS; -+ goto send_fail_skb; -+ } -+ msg = NLMSG_DATA(nlh); -+ -+ INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT_ML, -+ ML_NL_SCSI_SENSE, len); -+ msg->host_no = sdev->host->host_no; -+ msg->channel = sdev->channel; -+ msg->id = sdev->id; -+ msg->lun = sdev->lun; -+ msg->sense = (sshdr->response_code << 24) | (sshdr->sense_key << 16) | -+ (sshdr->asc << 8) | sshdr->ascq; -+ -+ err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_ML_EVENTS, -+ GFP_KERNEL); -+ if (err && (err != -ESRCH)) -+ /* nlmsg_multicast already kfree_skb'd */ -+ goto send_fail; -+ -+ return; -+ -+send_fail_skb: -+ kfree_skb(skb); -+send_fail: -+ sdev_printk(KERN_WARNING, sdev, -+ "Dropped SCSI Msg %02x/%02x/%02x/%02x: err %d\n", -+ sshdr->response_code, sshdr->sense_key, -+ sshdr->asc, sshdr->ascq, err); -+ return; -+} -+#else -+static inline void scsi_post_sense_event(struct scsi_device *sdev, -+ struct scsi_sense_hdr *sshdr) {} -+#endif -+ - /** - * scsi_check_sense - Examine scsi cmd sense - * @scmd: Cmd to have sense checked. 
-@@ -240,6 +317,8 @@ static int scsi_check_sense(struct scsi_ - if (scsi_sense_is_deferred(&sshdr)) - return NEEDS_RETRY; - -+ scsi_post_sense_event(sdev, &sshdr); -+ - if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh && - sdev->scsi_dh_data->scsi_dh->check_sense) { - int rc; ---- a/include/scsi/scsi_netlink.h -+++ b/include/scsi/scsi_netlink.h -@@ -35,7 +35,8 @@ - /* SCSI Transport Broadcast Groups */ - /* leaving groups 0 and 1 unassigned */ - #define SCSI_NL_GRP_FC_EVENTS (1<<2) /* Group 2 */ --#define SCSI_NL_GRP_CNT 3 -+#define SCSI_NL_GRP_ML_EVENTS (1<<3) /* Group 3 */ -+#define SCSI_NL_GRP_CNT 4 - - - /* SCSI_TRANSPORT_MSG event message header */ -@@ -56,7 +57,8 @@ struct scsi_nl_hdr { - /* scsi_nl_hdr->transport value */ - #define SCSI_NL_TRANSPORT 0 - #define SCSI_NL_TRANSPORT_FC 1 --#define SCSI_NL_MAX_TRANSPORTS 2 -+#define SCSI_NL_TRANSPORT_ML 2 -+#define SCSI_NL_MAX_TRANSPORTS 3 - - /* Transport-based scsi_nl_hdr->msgtype values are defined in each transport */ - ---- /dev/null -+++ b/include/scsi/scsi_netlink_ml.h -@@ -0,0 +1,64 @@ -+/* -+ * SCSI Midlayer Netlink Interface -+ * -+ * Copyright (C) 2008 Hannes Reinecke, SuSE Linux Products GmbH -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+#ifndef SCSI_NETLINK_ML_H -+#define SCSI_NETLINK_ML_H -+ -+#include -+ -+/* -+ * This file intended to be included by both kernel and user space -+ */ -+ -+/* -+ * FC Transport Message Types -+ */ -+ /* kernel -> user */ -+#define ML_NL_SCSI_SENSE 0x0100 -+ /* user -> kernel */ -+/* none */ -+ -+ -+/* -+ * Message Structures : -+ */ -+ -+/* macro to round up message lengths to 8byte boundary */ -+#define SCSI_NL_MSGALIGN(len) (((len) + 7) & ~7) -+ -+ -+/* -+ * SCSI Midlayer SCSI Sense messages : -+ * SCSI_NL_SCSI_SENSE -+ * -+ */ -+struct scsi_nl_sense_msg { -+ struct scsi_nl_hdr snlh; /* must be 1st element ! */ -+ uint64_t seconds; -+ u64 id; -+ u64 lun; -+ u16 host_no; -+ u16 channel; -+ u32 sense; -+} __attribute__((aligned(sizeof(uint64_t)))); -+ -+ -+#endif /* SCSI_NETLINK_ML_H */ -+ diff --git a/patches.suse/setuid-dumpable-wrongdir b/patches.suse/setuid-dumpable-wrongdir deleted file mode 100644 index cf919a4..0000000 --- a/patches.suse/setuid-dumpable-wrongdir +++ /dev/null @@ -1,48 +0,0 @@ -From: Kurt Garloff -Subject: suid-dumpable ended up in wrong sysctl dir -Patch-mainline: never - -Diffing in sysctl.c is tricky, using more context is recommended. -suid_dumpable ended up in fs/ instead of kernel/ and the reason -is likely a patch with too little context. - -NOTE: This has been in the wrong dir fs/ since it was introduced by -Alan Cox into mainline on 2005-06-23. However, SUSE shipped it -in the correct directory kernel/ in SLES9. - -By now, it's just something that we are going to have to drag along for -a long time until SLES 11/12/13 time frame... 
- -Signed-off-by: Kurt Garloff - ---- - kernel/sysctl.c | 7 +++++++ - kernel/sysctl_binary.c | 1 + - 2 files changed, 8 insertions(+) - ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -817,6 +817,13 @@ static struct ctl_table kern_table[] = { - .proc_handler = proc_dointvec, - }, - #endif -+ { -+ .procname = "suid_dumpable", -+ .data = &suid_dumpable, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .proc_handler = proc_dointvec, -+ }, - #if defined(CONFIG_S390) && defined(CONFIG_SMP) - { - .procname = "spin_retry", ---- a/kernel/sysctl_binary.c -+++ b/kernel/sysctl_binary.c -@@ -138,6 +138,7 @@ static const struct bin_table bin_kern_t - { CTL_INT, KERN_COMPAT_LOG, "compat-log" }, - { CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" }, - { CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" }, -+ { CTL_INT, KERN_SETUID_DUMPABLE, "suid_dumpable" }, - {} - }; - diff --git a/patches.suse/shmall-bigger b/patches.suse/shmall-bigger deleted file mode 100644 index 6364e07..0000000 --- a/patches.suse/shmall-bigger +++ /dev/null @@ -1,50 +0,0 @@ -From: Chris Mason -Subject: increase defaults for shmmall, shmmax, msgmax and msgmnb -References: 146656 -Patch-mainline: not yet - -The defaults are too small for most users. 
- -Acked-by: Jeff Mahoney - ---- - include/linux/msg.h | 4 ++-- - include/linux/sem.h | 2 +- - include/linux/shm.h | 2 +- - 3 files changed, 4 insertions(+), 4 deletions(-) - ---- a/include/linux/msg.h -+++ b/include/linux/msg.h -@@ -60,8 +60,8 @@ struct msginfo { - #define MSG_MEM_SCALE 32 - - #define MSGMNI 16 /* <= IPCMNI */ /* max # of msg queue identifiers */ --#define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */ --#define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */ -+#define MSGMAX 65536 /* <= INT_MAX */ /* max size of message (bytes) */ -+#define MSGMNB 65536 /* <= INT_MAX */ /* default max size of a message queue */ - - /* unused */ - #define MSGPOOL (MSGMNI * MSGMNB / 1024) /* size in kbytes of message pool */ ---- a/include/linux/sem.h -+++ b/include/linux/sem.h -@@ -63,7 +63,7 @@ struct seminfo { - int semaem; - }; - --#define SEMMNI 128 /* <= IPCMNI max # of semaphore identifiers */ -+#define SEMMNI 1024 /* <= IPCMNI max # of semaphore identifiers */ - #define SEMMSL 250 /* <= 8 000 max num of semaphores per id */ - #define SEMMNS (SEMMNI*SEMMSL) /* <= INT_MAX max # of semaphores in system */ - #define SEMOPM 32 /* <= 1 000 max num of ops per semop call */ ---- a/include/linux/shm.h -+++ b/include/linux/shm.h -@@ -14,7 +14,7 @@ - * be increased by sysctl - */ - --#define SHMMAX 0x2000000 /* max shared seg size (bytes) */ -+#define SHMMAX ULONG_MAX /* max shared seg size (bytes) */ - #define SHMMIN 1 /* min shared seg size (bytes) */ - #define SHMMNI 4096 /* max num of segs system wide */ - #ifdef __KERNEL__ diff --git a/patches.suse/slab-handle-memoryless-nodes-v2a.patch b/patches.suse/slab-handle-memoryless-nodes-v2a.patch deleted file mode 100644 index 7b363d6..0000000 --- a/patches.suse/slab-handle-memoryless-nodes-v2a.patch +++ /dev/null @@ -1,308 +0,0 @@ -From: Lee Schermerhorn -Subject: slab - handle memoryless nodes V2a -References: bnc#436025, bnc#570492 -Patch-mainline: not yet - -The slab cache, 
since [apparently] 2.6.21, does not handle memoryless -nodes well. Specifically, the "fast path"-- ____cache_alloc()--will -never succeed, but will be called twice: once speculatively [expected -to succeed] and once in the fallback path. This adds significant -overhead to all kmem cache allocations, incurring a significant -regression relative to earlier kernels [from before slab.c was -reorganized]. - -This patch addresses the regression by modifying slab.c to treat the -first fallback node in a memoryless node's general zonelist as the "slab -local node" -- i.e., the local node for the purpose of slab allocations. -This is, in fact, the node from which all "local" allocations for cpus -attached to a memoryless node will be satisfied. - -The new function numa_slab_nid(gfp_t) replaces all calls to -numa_node_id() in slab.c. numa_slab_id() will simply return -numa_node_id() for nodes with memory, but will return the first -node in the local node's zonelist selected by the gfp flags. - -Effects of the patch: - -We first noticed the effects of the slab reorganization running the -AIM benchmark on a distro based on 2.6.27. The effect is even more -pronounced in the hackbench results. The platform in an HP rx8640 -numa platform, configured with "0% Cell Local Memory". In this -configuration, all memory appears in a "pseudo-node"--an artifact -of the firmware--and is interleaved across all the physical nodes' -memory on a cacheline granularity. All cpus are presented as -attached to memoryless nodes. - -Here are the results of running hackbench at various load levels -with and without the patch on the same platform configured for -0% CLM and "100% CLM". 
- -Command: hackbench N process 100, for N = 10..100 by 10 - - - 100% CLM 0% CLM -Tasks no with no with - patch patch %diff patch patch %diff - 400 0.246 0.281 14.23% 2.962 0.410 -86.16% - 800 0.418 0.421 0.72% 6.224 0.793 -87.26% - 1200 0.548 0.532 -2.92% 9.058 1.090 -87.97% - 1600 0.654 0.716 9.48% 12.473 1.562 -87.48% - 2000 0.871 0.859 -1.38% 15.484 1.889 -87.80% - 2400 0.994 1.043 4.93% 18.689 2.309 -87.65% - 2800 1.196 1.195 -0.08% 22.069 2.606 -88.19% - 3200 1.322 1.344 1.66% 25.642 2.988 -88.35% - 3600 1.474 1.519 3.05% 28.003 3.418 -87.79% - 4000 1.682 1.750 4.04% 30.887 3.858 -87.51% - -In the 100% CLM case, the regression does not appear, because -all nodes have local memory. Note that the patch has >10% -overhead on the first run, but then varies widely from run -to run [more below]. For the 0%CLM configuration, the patch -reduced the run time by 86-88%. - - -The following runs extend the number of hackbench tasks using: - - hackbench N process 100, for N = 100 to 400 by 20 - -We didn't run the 0%CLM/no-patch runs as they were taking too -long for our liking. We wanted to see how the patched kernel -performed as we extended the range. - - 100% CLM 0% CLM -Tasks no with no with - patch patch %diff patch patch %diff - 4800 1.879 2.117 12.67% not-run 4.458 - 5600 2.100 2.352 12.00% not-run 5.207 - 6400 2.532 2.447 -3.36% not-run 5.669 - 8000 2.799 2.792 -0.25% not-run 6.651 - 8000 3.244 3.030 -6.60% not-run 7.366 - 8800 3.282 3.550 8.17% not-run 8.169 - 9600 3.595 3.738 3.98% not-run 8.582 -10400 3.811 4.045 6.14% not-run 9.705 -11200 4.090 4.162 1.76% not-run 9.760 -12000 4.408 4.419 0.25% not-run 10.141 -12800 4.665 4.787 2.62% not-run 11.628 -13600 5.095 5.069 -0.51% not-run 11.735 -14400 5.347 5.464 2.19% not-run 12.621 -15200 5.620 5.831 3.75% not-run 13.452 -16000 5.870 6.161 4.96% not-run 14.069 - -The 0% CLM configuration with the patch performs worse than -the 100% CLM configuration. 
In the 0% CLM case we had 64 -ia64 cores beating on a single zone in the interleaved -memory-only pseudo-node. In the 100% CLM case, we have 16 -cores allocating memory locally to each of 4 nodes, -demonstating the difference between [pseudo-]SMP and NUMA -behavior. - -Note, again, that the first run[s] have higher % difference -between the patched and unpatched kernels for the 100% CLM -config, and then vary quite a bit run to run. To get a feel -for the average overhead, we ran 40 runs at the 16000 task -load point with more interations to increase the runtime -per run: - - hackbench 400 process 200 - -These were run on the 100% CLM configuration, as this best represents -most NUMA platforms: - - No patch with Patch %diff -Average of 40: 9.796 9.857 0.623 - - -Signed-off-by: Lee Schermerhorn -Acked-by: Nick Piggin - - mm/slab.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++++++++--------- - 1 file changed, 76 insertions(+), 12 deletions(-) - ---- a/mm/slab.c -+++ b/mm/slab.c -@@ -281,7 +281,7 @@ struct kmem_list3 { - struct array_cache **alien; /* on other nodes */ - unsigned long next_reap; /* updated without locking */ - int free_touched; /* updated without locking */ --}; -+} __attribute__((aligned(sizeof(long)))); - - /* - * Need this for bootstrapping a per node allocator. 
-@@ -944,6 +944,11 @@ static int transfer_objects(struct array - #define drain_alien_cache(cachep, alien) do { } while (0) - #define reap_alien(cachep, l3) do { } while (0) - -+static inline int numa_slab_nid(struct kmem_cache *cachep, gfp_t flags) -+{ -+ return 0; -+} -+ - static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) - { - return (struct array_cache **)BAD_ALIEN_MAGIC; -@@ -975,6 +980,64 @@ static inline void *____cache_alloc_node - static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); - static void *alternate_node_alloc(struct kmem_cache *, gfp_t); - -+/* -+ * slow path for numa_slab_nid(), below -+ */ -+static noinline int __numa_slab_nid(struct kmem_cache *cachep, -+ int node, gfp_t flags) -+{ -+ struct zonelist *zonelist; -+ struct zone *zone; -+ enum zone_type highest_zoneidx = gfp_zone(flags); -+ -+ if (likely(node_state(node, N_NORMAL_MEMORY))) -+ return node; -+ -+ /* -+ * memoryless node: consult its zonelist. -+ * Cache the fallback node, if cache pointer provided. -+ */ -+ zonelist = &NODE_DATA(node)->node_zonelists[0]; -+ (void)first_zones_zonelist(zonelist, highest_zoneidx, -+ NULL, -+ &zone); -+ if (cachep) -+ cachep->nodelists[node] = -+ (struct kmem_list3 *)((unsigned long)zone->node << 1 | 1); -+ return zone->node; -+} -+ -+/* -+ * "Local" node for slab is first node in zonelist with memory. -+ * For nodes with memory this will be the actual local node. -+ * -+ * Use nodelist[numa_node_id()] to cache the fallback node for -+ * memoryless nodes. We'll be loading that member soon anyway, -+ * or already have, when called for cache refill, ... Use low -+ * bit of "pointer" as flag for "memoryless_node", indicating -+ * that the fallback nodes is stored here [<<1]. 
-+ */ -+#define memoryless_node(L3L) ((L3L) & 1) -+static inline int numa_slab_nid(struct kmem_cache *cachep, gfp_t flags) -+{ -+ int node = numa_mem_id(); -+ -+ if (likely(cachep)){ -+ unsigned long l3l = (unsigned long)cachep->nodelists[node]; -+ -+ if (likely(l3l)) { -+ if (unlikely(memoryless_node(l3l))) -+ node = (int)(l3l >> 1); -+ return node; -+ } -+ } -+ -+ /* -+ * !cachep || !l3l - the slow path -+ */ -+ return __numa_slab_nid(cachep, node, flags); -+} -+ - static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) - { - struct array_cache **ac_ptr; -@@ -1074,7 +1137,7 @@ static inline int cache_free_alien(struc - struct array_cache *alien = NULL; - int node; - -- node = numa_mem_id(); -+ node = numa_slab_nid(cachep, GFP_KERNEL); - - /* - * Make sure we are not freeing a object from another node to the array -@@ -1503,7 +1566,7 @@ void __init kmem_cache_init(void) - * 6) Resize the head arrays of the kmalloc caches to their final sizes. - */ - -- node = numa_mem_id(); -+ node = numa_slab_nid(NULL, GFP_KERNEL); - - /* 1) create the cache_cache */ - INIT_LIST_HEAD(&cache_chain); -@@ -2147,7 +2210,7 @@ static int __init_refok setup_cpu_cache( - } - } - } -- cachep->nodelists[numa_mem_id()]->next_reap = -+ cachep->nodelists[numa_slab_nid(cachep, GFP_KERNEL)]->next_reap = - jiffies + REAPTIMEOUT_LIST3 + - ((unsigned long)cachep) % REAPTIMEOUT_LIST3; - -@@ -2479,7 +2542,7 @@ static void check_spinlock_acquired(stru - { - #ifdef CONFIG_SMP - check_irq_off(); -- assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock); -+ assert_spin_locked(&cachep->nodelists[numa_slab_nid(cachep, GFP_KERNEL)]->list_lock); - #endif - } - -@@ -2506,7 +2569,7 @@ static void do_drain(void *arg) - { - struct kmem_cache *cachep = arg; - struct array_cache *ac; -- int node = numa_mem_id(); -+ int node = numa_slab_nid(cachep, GFP_KERNEL); - - check_irq_off(); - ac = cpu_cache_get(cachep); -@@ -3043,7 +3106,7 @@ static void *cache_alloc_refill(struct k - - 
retry: - check_irq_off(); -- node = numa_mem_id(); -+ node = numa_slab_nid(cachep, flags); - if (unlikely(must_refill)) - goto force_grow; - ac = cpu_cache_get(cachep); -@@ -3253,7 +3316,7 @@ static void *alternate_node_alloc(struct - - if (in_interrupt() || (flags & __GFP_THISNODE)) - return NULL; -- nid_alloc = nid_here = numa_mem_id(); -+ nid_alloc = nid_here = numa_slab_nid(cachep, flags); - get_mems_allowed(); - if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) - nid_alloc = cpuset_slab_spread_node(); -@@ -3432,7 +3495,7 @@ __cache_alloc_node(struct kmem_cache *ca - { - unsigned long save_flags; - void *ptr; -- int slab_node = numa_mem_id(); -+ int slab_node = numa_slab_nid(cachep, flags); - - flags &= gfp_allowed_mask; - -@@ -3498,7 +3561,8 @@ __do_cache_alloc(struct kmem_cache *cach - * ____cache_alloc_node() knows how to locate memory on other nodes - */ - if (!objp) -- objp = ____cache_alloc_node(cache, flags, numa_mem_id()); -+ objp = ____cache_alloc_node(cache, flags, -+ numa_slab_nid(cache, flags)); - - out: - return objp; -@@ -3595,7 +3659,7 @@ static void cache_flusharray(struct kmem - { - int batchcount; - struct kmem_list3 *l3; -- int node = numa_mem_id(); -+ int node = numa_slab_nid(cachep, GFP_KERNEL); - - batchcount = ac->batchcount; - #if DEBUG -@@ -4234,7 +4298,7 @@ static void cache_reap(struct work_struc - { - struct kmem_cache *searchp; - struct kmem_list3 *l3; -- int node = numa_mem_id(); -+ int node = numa_slab_nid(NULL, GFP_KERNEL); - struct delayed_work *work = to_delayed_work(w); - - if (!mutex_trylock(&cache_chain_mutex)) diff --git a/patches.suse/stack-unwind b/patches.suse/stack-unwind deleted file mode 100644 index a91a3ea..0000000 --- a/patches.suse/stack-unwind +++ /dev/null @@ -1,2193 +0,0 @@ -Subject: DWARF2 EH-frame based stack unwinding -From: jbeulich@novell.com -Patch-mainline: no - -This includes reverting f1883f86dea84fe47a71a39fc1afccc005915ed8. 
- -Update Jan 17 2009 jeffm: -- Something in 2.6.29-rc1 tweaked the frame pointer code somehow, so I fixed - that up. -Update Jul 02 2010 jbeulich: -- fix after upstream commit 9e565292270a2d55524be38835104c564ac8f795 - ---- - Makefile | 5 - arch/x86/Kconfig | 2 - arch/x86/Makefile | 2 - arch/x86/include/asm/dwarf2.h | 3 - arch/x86/include/asm/stacktrace.h | 4 - arch/x86/include/asm/system.h | 10 - arch/x86/include/asm/unwind.h | 163 ++++ - arch/x86/kernel/dumpstack.c | 89 ++ - arch/x86/kernel/dumpstack_32.c | 5 - arch/x86/kernel/dumpstack_64.c | 8 - arch/x86/kernel/entry_32.S | 35 + - arch/x86/kernel/entry_64.S | 34 - arch/x86/kernel/vmlinux.lds.S | 2 - include/asm-generic/vmlinux.lds.h | 22 - include/linux/module.h | 3 - include/linux/unwind.h | 135 +++ - init/main.c | 3 - kernel/Makefile | 1 - kernel/module.c | 32 - kernel/unwind.c | 1303 ++++++++++++++++++++++++++++++++++++++ - lib/Kconfig.debug | 18 - 21 files changed, 1874 insertions(+), 5 deletions(-) - ---- a/Makefile -+++ b/Makefile -@@ -589,6 +589,11 @@ KBUILD_CFLAGS += -fomit-frame-pointer - endif - endif - -+ifdef CONFIG_UNWIND_INFO -+KBUILD_CFLAGS += -fasynchronous-unwind-tables -+LDFLAGS_vmlinux += --eh-frame-hdr -+endif -+ - ifdef CONFIG_DEBUG_INFO - KBUILD_CFLAGS += -g - KBUILD_AFLAGS += -gdwarf-2 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -496,7 +496,7 @@ config X86_32_IRIS - config SCHED_OMIT_FRAME_POINTER - def_bool y - prompt "Single-depth WCHAN output" -- depends on X86 -+ depends on X86 && !STACK_UNWIND - ---help--- - Calculate simpler /proc//wchan values. 
If this option - is disabled then wchan values will recurse back to the ---- a/arch/x86/Makefile -+++ b/arch/x86/Makefile -@@ -110,7 +110,9 @@ KBUILD_CFLAGS += -pipe - # Workaround for a gcc prelease that unfortunately was shipped in a suse release - KBUILD_CFLAGS += -Wno-sign-compare - # -+ifneq ($(CONFIG_UNWIND_INFO),y) - KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -+endif - # prevent gcc from generating any FP code by mistake - KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) - ---- a/arch/x86/include/asm/dwarf2.h -+++ b/arch/x86/include/asm/dwarf2.h -@@ -34,7 +34,8 @@ - #define CFI_SIGNAL_FRAME - #endif - --#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__) -+#if !defined(CONFIG_UNWIND_INFO) && defined(CONFIG_AS_CFI_SECTIONS) \ -+ && defined(__ASSEMBLY__) - /* - * Emit CFI data in .debug_frame sections, not .eh_frame sections. - * The latter we currently just discard since we don't do DWARF ---- a/arch/x86/include/asm/stacktrace.h -+++ b/arch/x86/include/asm/stacktrace.h -@@ -92,6 +92,10 @@ extern void - show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, - unsigned long *sp, char *log_lvl); - -+int try_stack_unwind(struct task_struct *task, struct pt_regs *regs, -+ unsigned long **stack, unsigned long *bp, -+ const struct stacktrace_ops *ops, void *data); -+ - extern unsigned int code_bytes; - - /* The form of the top of the frame on the stack */ ---- a/arch/x86/include/asm/system.h -+++ b/arch/x86/include/asm/system.h -@@ -123,12 +123,22 @@ do { \ - #define __switch_canary_iparam - #endif /* CC_STACKPROTECTOR */ - -+/* The stack unwind code needs this but it pollutes traces otherwise */ -+#ifdef CONFIG_UNWIND_INFO -+#define THREAD_RETURN_SYM \ -+ ".globl thread_return\n" \ -+ "thread_return:\n\t" -+#else -+#define THREAD_RETURN_SYM -+#endif -+ - /* Save restore flags to clear handle leaking NT */ - #define switch_to(prev, next, last) \ - asm volatile(SAVE_CONTEXT \ - "movq 
%%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ - "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ - "call __switch_to\n\t" \ -+ THREAD_RETURN_SYM \ - "movq "__percpu_arg([current_task])",%%rsi\n\t" \ - __switch_canary \ - "movq %P[thread_info](%%rsi),%%r8\n\t" \ ---- /dev/null -+++ b/arch/x86/include/asm/unwind.h -@@ -0,0 +1,163 @@ -+#ifndef _ASM_X86_UNWIND_H -+#define _ASM_X86_UNWIND_H -+ -+/* -+ * Copyright (C) 2002-2009 Novell, Inc. -+ * Jan Beulich -+ * This code is released under version 2 of the GNU GPL. -+ */ -+ -+#ifdef CONFIG_STACK_UNWIND -+ -+#include -+#include -+#include -+ -+struct unwind_frame_info -+{ -+ struct pt_regs regs; -+ struct task_struct *task; -+ unsigned call_frame:1; -+}; -+ -+#define UNW_PC(frame) (frame)->regs.ip -+#define UNW_SP(frame) (frame)->regs.sp -+#ifdef CONFIG_FRAME_POINTER -+#define UNW_FP(frame) (frame)->regs.bp -+#define FRAME_LINK_OFFSET 0 -+#define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.sp0) -+#define TSK_STACK_TOP(tsk) ((tsk)->thread.sp0) -+#else -+#define UNW_FP(frame) ((void)(frame), 0UL) -+#endif -+/* On x86-64, might need to account for the special exception and interrupt -+ handling stacks here, since normally -+ EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER, -+ but the construct is needed only for getting across the stack switch to -+ the interrupt stack - thus considering the IRQ stack itself is unnecessary, -+ and the overhead of comparing against all exception handling stacks seems -+ not desirable. 
*/ -+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) -+ -+#ifdef CONFIG_X86_64 -+ -+#include -+ -+#define FRAME_RETADDR_OFFSET 8 -+ -+#define UNW_REGISTER_INFO \ -+ PTREGS_INFO(ax), \ -+ PTREGS_INFO(dx), \ -+ PTREGS_INFO(cx), \ -+ PTREGS_INFO(bx), \ -+ PTREGS_INFO(si), \ -+ PTREGS_INFO(di), \ -+ PTREGS_INFO(bp), \ -+ PTREGS_INFO(sp), \ -+ PTREGS_INFO(r8), \ -+ PTREGS_INFO(r9), \ -+ PTREGS_INFO(r10), \ -+ PTREGS_INFO(r11), \ -+ PTREGS_INFO(r12), \ -+ PTREGS_INFO(r13), \ -+ PTREGS_INFO(r14), \ -+ PTREGS_INFO(r15), \ -+ PTREGS_INFO(ip) -+ -+#else /* X86_32 */ -+ -+#include -+ -+#define FRAME_RETADDR_OFFSET 4 -+ -+#define UNW_REGISTER_INFO \ -+ PTREGS_INFO(ax), \ -+ PTREGS_INFO(cx), \ -+ PTREGS_INFO(dx), \ -+ PTREGS_INFO(bx), \ -+ PTREGS_INFO(sp), \ -+ PTREGS_INFO(bp), \ -+ PTREGS_INFO(si), \ -+ PTREGS_INFO(di), \ -+ PTREGS_INFO(ip) -+ -+#endif -+ -+#define UNW_DEFAULT_RA(raItem, dataAlign) \ -+ ((raItem).where == Memory && \ -+ !((raItem).value * (dataAlign) + sizeof(void *))) -+ -+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, -+ /*const*/ struct pt_regs *regs) -+{ -+#ifdef CONFIG_X86_64 -+ info->regs = *regs; -+#else -+ if (user_mode_vm(regs)) -+ info->regs = *regs; -+ else { -+ memcpy(&info->regs, regs, offsetof(struct pt_regs, sp)); -+ info->regs.sp = (unsigned long)®s->sp; -+ info->regs.ss = __KERNEL_DS; -+ } -+#endif -+} -+ -+static inline void arch_unw_init_blocked(struct unwind_frame_info *info) -+{ -+#ifdef CONFIG_X86_64 -+ extern const char thread_return[]; -+ -+ memset(&info->regs, 0, sizeof(info->regs)); -+ info->regs.ip = (unsigned long)thread_return; -+ info->regs.cs = __KERNEL_CS; -+ probe_kernel_address(info->task->thread.sp, info->regs.bp); -+ info->regs.sp = info->task->thread.sp; -+ info->regs.ss = __KERNEL_DS; -+#else -+ memset(&info->regs, 0, sizeof(info->regs)); -+ info->regs.ip = info->task->thread.ip; -+ info->regs.cs = __KERNEL_CS; -+ probe_kernel_address(info->task->thread.sp, info->regs.bp); -+ 
info->regs.sp = info->task->thread.sp; -+ info->regs.ss = __KERNEL_DS; -+ info->regs.ds = __USER_DS; -+ info->regs.es = __USER_DS; -+#endif -+} -+ -+extern asmlinkage int -+arch_unwind_init_running(struct unwind_frame_info *, -+ unwind_callback_fn, -+ const struct stacktrace_ops *, void *data); -+ -+static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info) -+{ -+#ifdef CONFIG_X86_64 -+ return user_mode(&info->regs) -+ || (long)info->regs.ip >= 0 -+ || (info->regs.ip >= VSYSCALL_START && info->regs.ip < VSYSCALL_END) -+ || (long)info->regs.sp >= 0; -+#else -+ return user_mode_vm(&info->regs) -+ || info->regs.ip < PAGE_OFFSET -+ || (info->regs.ip >= __fix_to_virt(FIX_VDSO) -+ && info->regs.ip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE) -+ || info->regs.sp < PAGE_OFFSET; -+#endif -+} -+ -+#else -+ -+#define UNW_PC(frame) ((void)(frame), 0UL) -+#define UNW_SP(frame) ((void)(frame), 0UL) -+#define UNW_FP(frame) ((void)(frame), 0UL) -+ -+static inline int arch_unw_user_mode(const void *info) -+{ -+ return 0; -+} -+ -+#endif -+ -+#endif /* _ASM_X86_UNWIND_H */ ---- a/arch/x86/kernel/dumpstack.c -+++ b/arch/x86/kernel/dumpstack.c -@@ -17,12 +17,18 @@ - #include - - #include -+#include - - - int panic_on_unrecovered_nmi; - int panic_on_io_nmi; - unsigned int code_bytes = 64; - int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; -+#ifdef CONFIG_STACK_UNWIND -+static int call_trace = 1; -+#else -+#define call_trace (-1) -+#endif - static int die_counter; - - void printk_address(unsigned long address, int reliable) -@@ -62,6 +68,71 @@ print_ftrace_graph_addr(unsigned long ad - { } - #endif - -+int asmlinkage dump_trace_unwind(struct unwind_frame_info *info, -+ const struct stacktrace_ops *ops, void *data) -+{ -+ int n = 0; -+#ifdef CONFIG_UNWIND_INFO -+ unsigned long sp = UNW_SP(info); -+ -+ if (arch_unw_user_mode(info)) -+ return -1; -+ while (unwind(info) == 0 && UNW_PC(info)) { -+ n++; -+ ops->address(data, UNW_PC(info), 1); -+ if (arch_unw_user_mode(info)) 
-+ break; -+ if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1)) -+ && sp > UNW_SP(info)) -+ break; -+ sp = UNW_SP(info); -+ } -+#endif -+ return n; -+} -+ -+int try_stack_unwind(struct task_struct *task, struct pt_regs *regs, -+ unsigned long **stack, unsigned long *bp, -+ const struct stacktrace_ops *ops, void *data) -+{ -+#ifdef CONFIG_UNWIND_INFO -+ int unw_ret = 0; -+ struct unwind_frame_info info; -+ if (call_trace < 0) -+ return 0; -+ -+ if (regs) { -+ if (unwind_init_frame_info(&info, task, regs) == 0) -+ unw_ret = dump_trace_unwind(&info, ops, data); -+ } else if (task == current) -+ unw_ret = unwind_init_running(&info, dump_trace_unwind, ops, data); -+ else { -+ if (unwind_init_blocked(&info, task) == 0) -+ unw_ret = dump_trace_unwind(&info, ops, data); -+ } -+ if (unw_ret > 0) { -+ if (call_trace == 1 && !arch_unw_user_mode(&info)) { -+ ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n", -+ UNW_PC(&info)); -+ if ((long)UNW_SP(&info) < 0) { -+ ops->warning(data, "Leftover inexact backtrace:\n"); -+ *stack = (unsigned long *)UNW_SP(&info); -+ if (!stack) { -+ *bp = UNW_FP(&info); -+ return -1; -+ } -+ } else -+ ops->warning(data, "Full inexact backtrace again:\n"); -+ } else if (call_trace >= 1) { -+ return -1; -+ } else -+ ops->warning(data, "Full inexact backtrace again:\n"); -+ } else -+ ops->warning(data, "Inexact backtrace:\n"); -+#endif -+ return 0; -+} -+ - /* - * x86-64 can have up to three kernel stacks: - * process stack -@@ -373,3 +444,21 @@ static int __init code_bytes_setup(char - return 1; - } - __setup("code_bytes=", code_bytes_setup); -+ -+#ifdef CONFIG_STACK_UNWIND -+static int __init call_trace_setup(char *s) -+{ -+ if (!s) -+ return -EINVAL; -+ if (strcmp(s, "old") == 0) -+ call_trace = -1; -+ else if (strcmp(s, "both") == 0) -+ call_trace = 0; -+ else if (strcmp(s, "newfallback") == 0) -+ call_trace = 1; -+ else if (strcmp(s, "new") == 0) -+ call_trace = 2; -+ return 0; -+} -+early_param("call_trace", 
call_trace_setup); -+#endif ---- a/arch/x86/kernel/dumpstack_32.c -+++ b/arch/x86/kernel/dumpstack_32.c -@@ -27,6 +27,10 @@ void dump_trace(struct task_struct *task - if (!task) - task = current; - -+ bp = stack_frame(task, regs); -+ if (try_stack_unwind(task, regs, &stack, &bp, ops, data)) -+ return; -+ - if (!stack) { - unsigned long dummy; - -@@ -35,7 +39,6 @@ void dump_trace(struct task_struct *task - stack = (unsigned long *)task->thread.sp; - } - -- bp = stack_frame(task, regs); - for (;;) { - struct thread_info *context; - ---- a/arch/x86/kernel/dumpstack_64.c -+++ b/arch/x86/kernel/dumpstack_64.c -@@ -14,6 +14,7 @@ - #include - #include - -+#include - #include - - -@@ -155,13 +156,18 @@ void dump_trace(struct task_struct *task - if (!task) - task = current; - -+ bp = stack_frame(task, regs); -+ if (try_stack_unwind(task, regs, &stack, &bp, ops, data)) { -+ put_cpu(); -+ return; -+ } -+ - if (!stack) { - stack = &dummy; - if (task && task != current) - stack = (unsigned long *)task->thread.sp; - } - -- bp = stack_frame(task, regs); - /* - * Print function call entries in all stacks, starting at the - * current stack address. 
If the stacks consist of nested ---- a/arch/x86/kernel/entry_32.S -+++ b/arch/x86/kernel/entry_32.S -@@ -1002,6 +1002,41 @@ END(spurious_interrupt_bug) - */ - .popsection - -+#ifdef CONFIG_STACK_UNWIND -+ENTRY(arch_unwind_init_running) -+ CFI_STARTPROC -+ movl 4(%esp), %edx -+ movl (%esp), %ecx -+ leal 4(%esp), %eax -+ movl %ebx, PT_EBX(%edx) -+ xorl %ebx, %ebx -+ movl %ebx, PT_ECX(%edx) -+ movl %ebx, PT_EDX(%edx) -+ movl %esi, PT_ESI(%edx) -+ movl %edi, PT_EDI(%edx) -+ movl %ebp, PT_EBP(%edx) -+ movl %ebx, PT_EAX(%edx) -+ movl $__USER_DS, PT_DS(%edx) -+ movl $__USER_DS, PT_ES(%edx) -+ movl $__KERNEL_PERCPU, PT_FS(%edx) -+ movl $__KERNEL_STACK_CANARY, PT_GS(%edx) -+ movl %eax, PT_OLDESP(%edx) -+ movl 16(%esp), %eax -+ movl %ebx, PT_ORIG_EAX(%edx) -+ movl %ecx, PT_EIP(%edx) -+ movl 12(%esp), %ecx -+ movl $__KERNEL_CS, PT_CS(%edx) -+ movl %eax, 12(%esp) -+ movl 8(%esp), %eax -+ movl %ecx, 8(%esp) -+ movl %ebx, PT_EFLAGS(%edx) -+ movl PT_EBX(%edx), %ebx -+ movl $__KERNEL_DS, PT_OLDSS(%edx) -+ jmpl *%eax -+ CFI_ENDPROC -+ENDPROC(arch_unwind_init_running) -+#endif -+ - ENTRY(kernel_thread_helper) - pushl $0 # fake return address for unwinder - CFI_STARTPROC ---- a/arch/x86/kernel/entry_64.S -+++ b/arch/x86/kernel/entry_64.S -@@ -1212,6 +1212,40 @@ ENTRY(call_softirq) - CFI_ENDPROC - END(call_softirq) - -+#ifdef CONFIG_STACK_UNWIND -+ENTRY(arch_unwind_init_running) -+ CFI_STARTPROC -+ movq %r15, R15(%rdi) -+ movq %r14, R14(%rdi) -+ xchgq %rsi, %rdx -+ movq %r13, R13(%rdi) -+ movq %r12, R12(%rdi) -+ xorl %eax, %eax -+ movq %rbp, RBP(%rdi) -+ movq %rbx, RBX(%rdi) -+ movq (%rsp), %r9 -+ xchgq %rdx, %rcx -+ movq %rax, R11(%rdi) -+ movq %rax, R10(%rdi) -+ movq %rax, R9(%rdi) -+ movq %rax, R8(%rdi) -+ movq %rax, RAX(%rdi) -+ movq %rax, RCX(%rdi) -+ movq %rax, RDX(%rdi) -+ movq %rax, RSI(%rdi) -+ movq %rax, RDI(%rdi) -+ movq %rax, ORIG_RAX(%rdi) -+ movq %r9, RIP(%rdi) -+ leaq 8(%rsp), %r9 -+ movq $__KERNEL_CS, CS(%rdi) -+ movq %rax, EFLAGS(%rdi) -+ movq %r9, RSP(%rdi) -+ movq 
$__KERNEL_DS, SS(%rdi) -+ jmpq *%rcx -+ CFI_ENDPROC -+END(arch_unwind_init_running) -+#endif -+ - #ifdef CONFIG_XEN - zeroentry xen_hypervisor_callback xen_do_hypervisor_callback - ---- a/arch/x86/kernel/vmlinux.lds.S -+++ b/arch/x86/kernel/vmlinux.lds.S -@@ -358,7 +358,9 @@ SECTIONS - - /* Sections to be discarded */ - DISCARDS -+#ifndef CONFIG_UNWIND_INFO - /DISCARD/ : { *(.eh_frame) } -+#endif - } - - ---- a/include/asm-generic/vmlinux.lds.h -+++ b/include/asm-generic/vmlinux.lds.h -@@ -359,6 +359,8 @@ - MEM_KEEP(exit.rodata) \ - } \ - \ -+ EH_FRAME \ -+ \ - /* Built-in module parameters. */ \ - __param : AT(ADDR(__param) - LOAD_OFFSET) { \ - VMLINUX_SYMBOL(__start___param) = .; \ -@@ -798,3 +800,23 @@ - BSS(bss_align) \ - . = ALIGN(stop_align); \ - VMLINUX_SYMBOL(__bss_stop) = .; -+ -+#ifdef CONFIG_STACK_UNWIND -+#define EH_FRAME \ -+ /* Unwind data binary search table */ \ -+ . = ALIGN(8); \ -+ .eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) { \ -+ VMLINUX_SYMBOL(__start_unwind_hdr) = .; \ -+ *(.eh_frame_hdr) \ -+ VMLINUX_SYMBOL(__end_unwind_hdr) = .; \ -+ } \ -+ /* Unwind data */ \ -+ . = ALIGN(8); \ -+ .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { \ -+ VMLINUX_SYMBOL(__start_unwind) = .; \ -+ *(.eh_frame) \ -+ VMLINUX_SYMBOL(__end_unwind) = .; \ -+ } -+#else -+#define EH_FRAME -+#endif ---- a/include/linux/module.h -+++ b/include/linux/module.h -@@ -338,6 +338,9 @@ struct module - /* Size of RO sections of the module (text+rodata) */ - unsigned int init_ro_size, core_ro_size; - -+ /* The handle returned from unwind_add_table. */ -+ void *unwind_info; -+ - /* Arch-specific module values */ - struct mod_arch_specific arch; - ---- /dev/null -+++ b/include/linux/unwind.h -@@ -0,0 +1,135 @@ -+#ifndef _LINUX_UNWIND_H -+#define _LINUX_UNWIND_H -+ -+/* -+ * Copyright (C) 2002-2009 Novell, Inc. -+ * Jan Beulich -+ * This code is released under version 2 of the GNU GPL. -+ * -+ * A simple API for unwinding kernel stacks. 
This is used for -+ * debugging and error reporting purposes. The kernel doesn't need -+ * full-blown stack unwinding with all the bells and whistles, so there -+ * is not much point in implementing the full Dwarf2 unwind API. -+ */ -+ -+#include -+ -+struct module; -+struct stacktrace_ops; -+struct unwind_frame_info; -+ -+typedef asmlinkage int (*unwind_callback_fn)(struct unwind_frame_info *, -+ const struct stacktrace_ops *, -+ void *); -+ -+#ifdef CONFIG_STACK_UNWIND -+ -+#include -+#include -+ -+#ifndef ARCH_UNWIND_SECTION_NAME -+#define ARCH_UNWIND_SECTION_NAME ".eh_frame" -+#endif -+ -+/* -+ * Initialize unwind support. -+ */ -+extern void unwind_init(void); -+extern void unwind_setup(void); -+ -+#ifdef CONFIG_MODULES -+ -+extern void *unwind_add_table(struct module *, -+ const void *table_start, -+ unsigned long table_size); -+ -+extern void unwind_remove_table(void *handle, int init_only); -+ -+#endif -+ -+extern int unwind_init_frame_info(struct unwind_frame_info *, -+ struct task_struct *, -+ /*const*/ struct pt_regs *); -+ -+/* -+ * Prepare to unwind a blocked task. -+ */ -+extern int unwind_init_blocked(struct unwind_frame_info *, -+ struct task_struct *); -+ -+/* -+ * Prepare to unwind the currently running thread. -+ */ -+extern int unwind_init_running(struct unwind_frame_info *, -+ unwind_callback_fn, -+ const struct stacktrace_ops *, -+ void *data); -+ -+/* -+ * Unwind to previous to frame. Returns 0 if successful, negative -+ * number in case of an error. -+ */ -+extern int unwind(struct unwind_frame_info *); -+ -+/* -+ * Unwind until the return pointer is in user-land (or until an error -+ * occurs). Returns 0 if successful, negative number in case of -+ * error. 
-+ */ -+extern int unwind_to_user(struct unwind_frame_info *); -+ -+#else /* CONFIG_STACK_UNWIND */ -+ -+struct unwind_frame_info {}; -+ -+static inline void unwind_init(void) {} -+static inline void unwind_setup(void) {} -+ -+#ifdef CONFIG_MODULES -+ -+static inline void *unwind_add_table(struct module *mod, -+ const void *table_start, -+ unsigned long table_size) -+{ -+ return NULL; -+} -+ -+#endif -+ -+static inline void unwind_remove_table(void *handle, int init_only) -+{ -+} -+ -+static inline int unwind_init_frame_info(struct unwind_frame_info *info, -+ struct task_struct *tsk, -+ const struct pt_regs *regs) -+{ -+ return -ENOSYS; -+} -+ -+static inline int unwind_init_blocked(struct unwind_frame_info *info, -+ struct task_struct *tsk) -+{ -+ return -ENOSYS; -+} -+ -+static inline int unwind_init_running(struct unwind_frame_info *info, -+ unwind_callback_fn cb, -+ const struct stacktrace_ops *ops, -+ void *data) -+{ -+ return -ENOSYS; -+} -+ -+static inline int unwind(struct unwind_frame_info *info) -+{ -+ return -ENOSYS; -+} -+ -+static inline int unwind_to_user(struct unwind_frame_info *info) -+{ -+ return -ENOSYS; -+} -+ -+#endif /* CONFIG_STACK_UNWIND */ -+#endif /* _LINUX_UNWIND_H */ ---- a/init/main.c -+++ b/init/main.c -@@ -48,6 +48,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -552,6 +553,7 @@ asmlinkage void __init start_kernel(void - * Need to run as early as possible, to initialize the - * lockdep hash: - */ -+ unwind_init(); - lockdep_init(); - debug_objects_early_init(); - -@@ -576,6 +578,7 @@ asmlinkage void __init start_kernel(void - setup_arch(&command_line); - mm_init_owner(&init_mm, &init_task); - setup_command_line(command_line); -+ unwind_setup(); - setup_nr_cpu_ids(); - setup_per_cpu_areas(); - smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ ---- a/kernel/Makefile -+++ b/kernel/Makefile -@@ -53,6 +53,7 @@ obj-$(CONFIG_PROVE_LOCKING) += spinlock. 
- obj-$(CONFIG_UID16) += uid16.o - obj-$(CONFIG_MODULES) += module.o - obj-$(CONFIG_KALLSYMS) += kallsyms.o -+obj-$(CONFIG_STACK_UNWIND) += unwind.o - obj-$(CONFIG_PM) += power/ - obj-$(CONFIG_FREEZER) += power/ - obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o ---- a/kernel/module.c -+++ b/kernel/module.c -@@ -44,6 +44,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -158,7 +159,7 @@ struct load_info { - struct _ddebug *debug; - unsigned int num_debug; - struct { -- unsigned int sym, str, mod, vers, info, pcpu; -+ unsigned int sym, str, mod, vers, info, pcpu, unwind; - } index; - }; - -@@ -532,6 +533,27 @@ bool is_module_percpu_address(unsigned l - - #endif /* CONFIG_SMP */ - -+static unsigned int find_unwind(struct load_info *info) -+{ -+ int section = 0; -+#ifdef ARCH_UNWIND_SECTION_NAME -+ section = find_sec(info, ARCH_UNWIND_SECTION_NAME); -+ if (section) -+ info->sechdrs[section].sh_flags |= SHF_ALLOC; -+#endif -+ return section; -+} -+ -+static void add_unwind_table(struct module *mod, struct load_info *info) -+{ -+ int index = info->index.unwind; -+ -+ /* Size of section 0 is 0, so this is ok if there is no unwind info. */ -+ mod->unwind_info = unwind_add_table(mod, -+ (void *)info->sechdrs[index].sh_addr, -+ info->sechdrs[index].sh_size); -+} -+ - #define MODINFO_ATTR(field) \ - static void setup_modinfo_##field(struct module *mod, const char *s) \ - { \ -@@ -1759,6 +1781,8 @@ static void free_module(struct module *m - /* Remove dynamic debug info */ - ddebug_remove_module(mod->name); - -+ unwind_remove_table(mod->unwind_info, 0); -+ - /* Arch-specific cleanup. */ - module_arch_cleanup(mod); - -@@ -2464,6 +2488,8 @@ static struct module *setup_load_info(st - - info->index.pcpu = find_pcpusec(info); - -+ info->index.unwind = find_unwind(info); -+ - /* Check module struct version now, before we try to use module. 
*/ - if (!check_modstruct_version(info->sechdrs, info->index.vers, mod)) - return ERR_PTR(-ENOEXEC); -@@ -2885,6 +2911,9 @@ static struct module *load_module(void _ - if (err < 0) - goto unlink; - -+ /* Initialize unwind table */ -+ add_unwind_table(mod, &info); -+ - /* Get rid of temporary copy and strmap. */ - kfree(info.strmap); - free_copy(&info); -@@ -2999,6 +3028,7 @@ SYSCALL_DEFINE3(init_module, void __user - /* Drop initial reference. */ - module_put(mod); - trim_init_extable(mod); -+ unwind_remove_table(mod->unwind_info, 1); - #ifdef CONFIG_KALLSYMS - mod->num_symtab = mod->core_num_syms; - mod->symtab = mod->core_symtab; ---- /dev/null -+++ b/kernel/unwind.c -@@ -0,0 +1,1305 @@ -+/* -+ * Copyright (C) 2002-2006 Novell, Inc. -+ * Jan Beulich -+ * This code is released under version 2 of the GNU GPL. -+ * -+ * A simple API for unwinding kernel stacks. This is used for -+ * debugging and error reporting purposes. The kernel doesn't need -+ * full-blown stack unwinding with all the bells and whistles, so there -+ * is not much point in implementing the full Dwarf2 unwind API. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern const char __start_unwind[], __end_unwind[]; -+extern const u8 __start_unwind_hdr[], __end_unwind_hdr[]; -+ -+#define MAX_STACK_DEPTH 8 -+ -+#define EXTRA_INFO(f) { \ -+ BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \ -+ % FIELD_SIZEOF(struct unwind_frame_info, f)) \ -+ + offsetof(struct unwind_frame_info, f) \ -+ / FIELD_SIZEOF(struct unwind_frame_info, f), \ -+ FIELD_SIZEOF(struct unwind_frame_info, f) \ -+ } -+#define PTREGS_INFO(f) EXTRA_INFO(regs.f) -+ -+static const struct { -+ unsigned offs:BITS_PER_LONG / 2; -+ unsigned width:BITS_PER_LONG / 2; -+} reg_info[] = { -+ UNW_REGISTER_INFO -+}; -+ -+#undef PTREGS_INFO -+#undef EXTRA_INFO -+ -+#ifndef REG_INVALID -+#define REG_INVALID(r) (reg_info[r].width == 0) -+#endif -+ -+#define DW_CFA_nop 0x00 -+#define DW_CFA_set_loc 0x01 -+#define DW_CFA_advance_loc1 0x02 -+#define DW_CFA_advance_loc2 0x03 -+#define DW_CFA_advance_loc4 0x04 -+#define DW_CFA_offset_extended 0x05 -+#define DW_CFA_restore_extended 0x06 -+#define DW_CFA_undefined 0x07 -+#define DW_CFA_same_value 0x08 -+#define DW_CFA_register 0x09 -+#define DW_CFA_remember_state 0x0a -+#define DW_CFA_restore_state 0x0b -+#define DW_CFA_def_cfa 0x0c -+#define DW_CFA_def_cfa_register 0x0d -+#define DW_CFA_def_cfa_offset 0x0e -+#define DW_CFA_def_cfa_expression 0x0f -+#define DW_CFA_expression 0x10 -+#define DW_CFA_offset_extended_sf 0x11 -+#define DW_CFA_def_cfa_sf 0x12 -+#define DW_CFA_def_cfa_offset_sf 0x13 -+#define DW_CFA_val_offset 0x14 -+#define DW_CFA_val_offset_sf 0x15 -+#define DW_CFA_val_expression 0x16 -+#define DW_CFA_lo_user 0x1c -+#define DW_CFA_GNU_window_save 0x2d -+#define DW_CFA_GNU_args_size 0x2e -+#define DW_CFA_GNU_negative_offset_extended 0x2f -+#define DW_CFA_hi_user 0x3f -+ -+#define DW_EH_PE_FORM 0x07 -+#define DW_EH_PE_native 0x00 -+#define DW_EH_PE_leb128 0x01 -+#define DW_EH_PE_data2 0x02 -+#define 
DW_EH_PE_data4 0x03 -+#define DW_EH_PE_data8 0x04 -+#define DW_EH_PE_signed 0x08 -+#define DW_EH_PE_ADJUST 0x70 -+#define DW_EH_PE_abs 0x00 -+#define DW_EH_PE_pcrel 0x10 -+#define DW_EH_PE_textrel 0x20 -+#define DW_EH_PE_datarel 0x30 -+#define DW_EH_PE_funcrel 0x40 -+#define DW_EH_PE_aligned 0x50 -+#define DW_EH_PE_indirect 0x80 -+#define DW_EH_PE_omit 0xff -+ -+typedef unsigned long uleb128_t; -+typedef signed long sleb128_t; -+#define sleb128abs __builtin_labs -+ -+static struct unwind_table { -+ struct { -+ unsigned long pc; -+ unsigned long range; -+ } core, init; -+ const void *address; -+ unsigned long size; -+ const unsigned char *header; -+ unsigned long hdrsz; -+ struct unwind_table *link; -+ const char *name; -+} root_table; -+ -+struct unwind_item { -+ enum item_location { -+ Nowhere, -+ Memory, -+ Register, -+ Value -+ } where; -+ uleb128_t value; -+}; -+ -+struct unwind_state { -+ uleb128_t loc, org; -+ const u8 *cieStart, *cieEnd; -+ uleb128_t codeAlign; -+ sleb128_t dataAlign; -+ struct cfa { -+ uleb128_t reg, offs; -+ } cfa; -+ struct unwind_item regs[ARRAY_SIZE(reg_info)]; -+ unsigned stackDepth:8; -+ unsigned version:8; -+ const u8 *label; -+ const u8 *stack[MAX_STACK_DEPTH]; -+}; -+ -+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 }; -+ -+static unsigned unwind_debug; -+static int __init unwind_debug_setup(char *s) -+{ -+ unwind_debug = simple_strtoul(s, NULL, 0); -+ return 1; -+} -+__setup("unwind_debug=", unwind_debug_setup); -+#define dprintk(lvl, fmt, args...) 
\ -+ ((void)(lvl > unwind_debug \ -+ || printk(KERN_DEBUG "unwind: " fmt "\n", ##args))) -+ -+static struct unwind_table *find_table(unsigned long pc) -+{ -+ struct unwind_table *table; -+ -+ for (table = &root_table; table; table = table->link) -+ if ((pc >= table->core.pc -+ && pc < table->core.pc + table->core.range) -+ || (pc >= table->init.pc -+ && pc < table->init.pc + table->init.range)) -+ break; -+ -+ return table; -+} -+ -+static unsigned long read_pointer(const u8 **pLoc, -+ const void *end, -+ signed ptrType, -+ unsigned long text_base, -+ unsigned long data_base); -+ -+static void init_unwind_table(struct unwind_table *table, -+ const char *name, -+ const void *core_start, -+ unsigned long core_size, -+ const void *init_start, -+ unsigned long init_size, -+ const void *table_start, -+ unsigned long table_size, -+ const u8 *header_start, -+ unsigned long header_size) -+{ -+ const u8 *ptr = header_start + 4; -+ const u8 *end = header_start + header_size; -+ -+ table->core.pc = (unsigned long)core_start; -+ table->core.range = core_size; -+ table->init.pc = (unsigned long)init_start; -+ table->init.range = init_size; -+ table->address = table_start; -+ table->size = table_size; -+ /* See if the linker provided table looks valid. 
*/ -+ if (header_size <= 4 -+ || header_start[0] != 1 -+ || (void *)read_pointer(&ptr, end, header_start[1], 0, 0) -+ != table_start -+ || !read_pointer(&ptr, end, header_start[2], 0, 0) -+ || !read_pointer(&ptr, end, header_start[3], 0, -+ (unsigned long)header_start) -+ || !read_pointer(&ptr, end, header_start[3], 0, -+ (unsigned long)header_start)) -+ header_start = NULL; -+ table->hdrsz = header_size; -+ smp_wmb(); -+ table->header = header_start; -+ table->link = NULL; -+ table->name = name; -+} -+ -+void __init unwind_init(void) -+{ -+ init_unwind_table(&root_table, "kernel", -+ _text, _end - _text, -+ NULL, 0, -+ __start_unwind, __end_unwind - __start_unwind, -+ __start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr); -+} -+ -+static const u32 bad_cie, not_fde; -+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *); -+static signed fde_pointer_type(const u32 *cie); -+ -+struct eh_frame_hdr_table_entry { -+ unsigned long start, fde; -+}; -+ -+static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2) -+{ -+ const struct eh_frame_hdr_table_entry *e1 = p1; -+ const struct eh_frame_hdr_table_entry *e2 = p2; -+ -+ return (e1->start > e2->start) - (e1->start < e2->start); -+} -+ -+static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size) -+{ -+ struct eh_frame_hdr_table_entry *e1 = p1; -+ struct eh_frame_hdr_table_entry *e2 = p2; -+ unsigned long v; -+ -+ v = e1->start; -+ e1->start = e2->start; -+ e2->start = v; -+ v = e1->fde; -+ e1->fde = e2->fde; -+ e2->fde = v; -+} -+ -+static void __init setup_unwind_table(struct unwind_table *table, -+ void *(*alloc)(unsigned long)) -+{ -+ const u8 *ptr; -+ unsigned long tableSize = table->size, hdrSize; -+ unsigned n; -+ const u32 *fde; -+ struct { -+ u8 version; -+ u8 eh_frame_ptr_enc; -+ u8 fde_count_enc; -+ u8 table_enc; -+ unsigned long eh_frame_ptr; -+ unsigned int fde_count; -+ struct eh_frame_hdr_table_entry table[]; -+ } __attribute__((__packed__)) *header; -+ 
-+ if (table->header) -+ return; -+ -+ if (table->hdrsz) -+ printk(KERN_WARNING ".eh_frame_hdr for '%s' present but unusable\n", -+ table->name); -+ -+ if (tableSize & (sizeof(*fde) - 1)) -+ return; -+ -+ for (fde = table->address, n = 0; -+ tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; -+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { -+ const u32 *cie = cie_for_fde(fde, table); -+ signed ptrType; -+ -+ if (cie == ¬_fde) -+ continue; -+ if (cie == NULL -+ || cie == &bad_cie -+ || (ptrType = fde_pointer_type(cie)) < 0) -+ return; -+ ptr = (const u8 *)(fde + 2); -+ if (!read_pointer(&ptr, -+ (const u8 *)(fde + 1) + *fde, -+ ptrType, 0, 0)) -+ return; -+ ++n; -+ } -+ -+ if (tableSize || !n) -+ return; -+ -+ hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) -+ + 2 * n * sizeof(unsigned long); -+ dprintk(2, "Binary lookup table size for %s: %lu bytes", table->name, hdrSize); -+ header = alloc(hdrSize); -+ if (!header) -+ return; -+ header->version = 1; -+ header->eh_frame_ptr_enc = DW_EH_PE_abs|DW_EH_PE_native; -+ header->fde_count_enc = DW_EH_PE_abs|DW_EH_PE_data4; -+ header->table_enc = DW_EH_PE_abs|DW_EH_PE_native; -+ put_unaligned((unsigned long)table->address, &header->eh_frame_ptr); -+ BUILD_BUG_ON(offsetof(typeof(*header), fde_count) -+ % __alignof(typeof(header->fde_count))); -+ header->fde_count = n; -+ -+ BUILD_BUG_ON(offsetof(typeof(*header), table) -+ % __alignof(typeof(*header->table))); -+ for (fde = table->address, tableSize = table->size, n = 0; -+ tableSize; -+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { -+ const u32 *cie = fde + 1 - fde[1] / sizeof(*fde); -+ -+ if (!fde[1]) -+ continue; /* this is a CIE */ -+ ptr = (const u8 *)(fde + 2); -+ header->table[n].start = read_pointer(&ptr, -+ (const u8 *)(fde + 1) + *fde, -+ fde_pointer_type(cie), 0, 0); -+ header->table[n].fde = (unsigned long)fde; -+ ++n; -+ } -+ WARN_ON(n != header->fde_count); -+ -+ sort(header->table, -+ n, -+ 
sizeof(*header->table), -+ cmp_eh_frame_hdr_table_entries, -+ swap_eh_frame_hdr_table_entries); -+ -+ table->hdrsz = hdrSize; -+ smp_wmb(); -+ table->header = (const void *)header; -+} -+ -+static void *__init balloc(unsigned long sz) -+{ -+ return __alloc_bootmem_nopanic(sz, -+ sizeof(unsigned int), -+ __pa(MAX_DMA_ADDRESS)); -+} -+ -+void __init unwind_setup(void) -+{ -+ setup_unwind_table(&root_table, balloc); -+} -+ -+#ifdef CONFIG_MODULES -+ -+static struct unwind_table *last_table; -+ -+/* Must be called with module_mutex held. */ -+void *unwind_add_table(struct module *module, -+ const void *table_start, -+ unsigned long table_size) -+{ -+ struct unwind_table *table; -+ -+ if (table_size <= 0) -+ return NULL; -+ -+ table = kmalloc(sizeof(*table), GFP_KERNEL); -+ if (!table) -+ return NULL; -+ -+ init_unwind_table(table, module->name, -+ module->module_core, module->core_size, -+ module->module_init, module->init_size, -+ table_start, table_size, -+ NULL, 0); -+ -+ if (last_table) -+ last_table->link = table; -+ else -+ root_table.link = table; -+ last_table = table; -+ -+ return table; -+} -+ -+struct unlink_table_info -+{ -+ struct unwind_table *table; -+ int init_only; -+}; -+ -+static int unlink_table(void *arg) -+{ -+ struct unlink_table_info *info = arg; -+ struct unwind_table *table = info->table, *prev; -+ -+ for (prev = &root_table; prev->link && prev->link != table; prev = prev->link) -+ ; -+ -+ if (prev->link) { -+ if (info->init_only) { -+ table->init.pc = 0; -+ table->init.range = 0; -+ info->table = NULL; -+ } else { -+ prev->link = table->link; -+ if (!prev->link) -+ last_table = prev; -+ } -+ } else -+ info->table = NULL; -+ -+ return 0; -+} -+ -+/* Must be called with module_mutex held. 
*/ -+void unwind_remove_table(void *handle, int init_only) -+{ -+ struct unwind_table *table = handle; -+ struct unlink_table_info info; -+ -+ if (!table || table == &root_table) -+ return; -+ -+ if (init_only && table == last_table) { -+ table->init.pc = 0; -+ table->init.range = 0; -+ return; -+ } -+ -+ info.table = table; -+ info.init_only = init_only; -+ stop_machine(unlink_table, &info, NULL); -+ -+ if (info.table) -+ kfree(table); -+} -+ -+#endif /* CONFIG_MODULES */ -+ -+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end) -+{ -+ const u8 *cur = *pcur; -+ uleb128_t value; -+ unsigned shift; -+ -+ for (shift = 0, value = 0; cur < end; shift += 7) { -+ if (shift + 7 > 8 * sizeof(value) -+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { -+ cur = end + 1; -+ break; -+ } -+ value |= (uleb128_t)(*cur & 0x7f) << shift; -+ if (!(*cur++ & 0x80)) -+ break; -+ } -+ *pcur = cur; -+ -+ return value; -+} -+ -+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end) -+{ -+ const u8 *cur = *pcur; -+ sleb128_t value; -+ unsigned shift; -+ -+ for (shift = 0, value = 0; cur < end; shift += 7) { -+ if (shift + 7 > 8 * sizeof(value) -+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { -+ cur = end + 1; -+ break; -+ } -+ value |= (sleb128_t)(*cur & 0x7f) << shift; -+ if (!(*cur & 0x80)) { -+ value |= -(*cur++ & 0x40) << shift; -+ break; -+ } -+ } -+ *pcur = cur; -+ -+ return value; -+} -+ -+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table) -+{ -+ const u32 *cie; -+ -+ if (!*fde || (*fde & (sizeof(*fde) - 1))) -+ return &bad_cie; -+ if (!fde[1]) -+ return ¬_fde; /* this is a CIE */ -+ if ((fde[1] & (sizeof(*fde) - 1)) -+ || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) -+ return NULL; /* this is not a valid FDE */ -+ cie = fde + 1 - fde[1] / sizeof(*fde); -+ if (*cie <= sizeof(*cie) + 4 -+ || *cie >= fde[1] - sizeof(*fde) -+ || (*cie & (sizeof(*cie) - 1)) -+ || cie[1]) -+ return NULL; /* this is 
not a (valid) CIE */ -+ return cie; -+} -+ -+static unsigned long read_pointer(const u8 **pLoc, -+ const void *end, -+ signed ptrType, -+ unsigned long text_base, -+ unsigned long data_base) -+{ -+ unsigned long value = 0; -+ union { -+ const u8 *p8; -+ const u16 *p16u; -+ const s16 *p16s; -+ const u32 *p32u; -+ const s32 *p32s; -+ const unsigned long *pul; -+ } ptr; -+ -+ if (ptrType < 0 || ptrType == DW_EH_PE_omit) { -+ dprintk(1, "Invalid pointer encoding %02X (%p,%p).", ptrType, *pLoc, end); -+ return 0; -+ } -+ ptr.p8 = *pLoc; -+ switch (ptrType & DW_EH_PE_FORM) { -+ case DW_EH_PE_data2: -+ if (end < (const void *)(ptr.p16u + 1)) { -+ dprintk(1, "Data16 overrun (%p,%p).", ptr.p8, end); -+ return 0; -+ } -+ if (ptrType & DW_EH_PE_signed) -+ value = get_unaligned(ptr.p16s++); -+ else -+ value = get_unaligned(ptr.p16u++); -+ break; -+ case DW_EH_PE_data4: -+#ifdef CONFIG_64BIT -+ if (end < (const void *)(ptr.p32u + 1)) { -+ dprintk(1, "Data32 overrun (%p,%p).", ptr.p8, end); -+ return 0; -+ } -+ if (ptrType & DW_EH_PE_signed) -+ value = get_unaligned(ptr.p32s++); -+ else -+ value = get_unaligned(ptr.p32u++); -+ break; -+ case DW_EH_PE_data8: -+ BUILD_BUG_ON(sizeof(u64) != sizeof(value)); -+#else -+ BUILD_BUG_ON(sizeof(u32) != sizeof(value)); -+#endif -+ case DW_EH_PE_native: -+ if (end < (const void *)(ptr.pul + 1)) { -+ dprintk(1, "DataUL overrun (%p,%p).", ptr.p8, end); -+ return 0; -+ } -+ value = get_unaligned(ptr.pul++); -+ break; -+ case DW_EH_PE_leb128: -+ BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value)); -+ value = ptrType & DW_EH_PE_signed -+ ? 
get_sleb128(&ptr.p8, end) -+ : get_uleb128(&ptr.p8, end); -+ if ((const void *)ptr.p8 > end) { -+ dprintk(1, "DataLEB overrun (%p,%p).", ptr.p8, end); -+ return 0; -+ } -+ break; -+ default: -+ dprintk(2, "Cannot decode pointer type %02X (%p,%p).", -+ ptrType, ptr.p8, end); -+ return 0; -+ } -+ switch (ptrType & DW_EH_PE_ADJUST) { -+ case DW_EH_PE_abs: -+ break; -+ case DW_EH_PE_pcrel: -+ value += (unsigned long)*pLoc; -+ break; -+ case DW_EH_PE_textrel: -+ if (likely(text_base)) { -+ value += text_base; -+ break; -+ } -+ dprintk(2, "Text-relative encoding %02X (%p,%p), but zero text base.", -+ ptrType, *pLoc, end); -+ return 0; -+ case DW_EH_PE_datarel: -+ if (likely(data_base)) { -+ value += data_base; -+ break; -+ } -+ dprintk(2, "Data-relative encoding %02X (%p,%p), but zero data base.", -+ ptrType, *pLoc, end); -+ return 0; -+ default: -+ dprintk(2, "Cannot adjust pointer type %02X (%p,%p).", -+ ptrType, *pLoc, end); -+ return 0; -+ } -+ if ((ptrType & DW_EH_PE_indirect) -+ && probe_kernel_address(value, value)) { -+ dprintk(1, "Cannot read indirect value %lx (%p,%p).", -+ value, *pLoc, end); -+ return 0; -+ } -+ *pLoc = ptr.p8; -+ -+ return value; -+} -+ -+static signed fde_pointer_type(const u32 *cie) -+{ -+ const u8 *ptr = (const u8 *)(cie + 2); -+ unsigned version = *ptr; -+ -+ if (version != 1) -+ return -1; /* unsupported */ -+ if (*++ptr) { -+ const char *aug; -+ const u8 *end = (const u8 *)(cie + 1) + *cie; -+ uleb128_t len; -+ -+ /* check if augmentation size is first (and thus present) */ -+ if (*ptr != 'z') -+ return -1; -+ /* check if augmentation string is nul-terminated */ -+ if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL) -+ return -1; -+ ++ptr; /* skip terminator */ -+ get_uleb128(&ptr, end); /* skip code alignment */ -+ get_sleb128(&ptr, end); /* skip data alignment */ -+ /* skip return address column */ -+ version <= 1 ? 
(void)++ptr : (void)get_uleb128(&ptr, end); -+ len = get_uleb128(&ptr, end); /* augmentation length */ -+ if (ptr + len < ptr || ptr + len > end) -+ return -1; -+ end = ptr + len; -+ while (*++aug) { -+ if (ptr >= end) -+ return -1; -+ switch (*aug) { -+ case 'L': -+ ++ptr; -+ break; -+ case 'P': { -+ signed ptrType = *ptr++; -+ -+ if (!read_pointer(&ptr, end, ptrType, 0, 0) -+ || ptr > end) -+ return -1; -+ } -+ break; -+ case 'R': -+ return *ptr; -+ default: -+ return -1; -+ } -+ } -+ } -+ return DW_EH_PE_native|DW_EH_PE_abs; -+} -+ -+static int advance_loc(unsigned long delta, struct unwind_state *state) -+{ -+ state->loc += delta * state->codeAlign; -+ -+ return delta > 0; -+} -+ -+static void set_rule(uleb128_t reg, -+ enum item_location where, -+ uleb128_t value, -+ struct unwind_state *state) -+{ -+ if (reg < ARRAY_SIZE(state->regs)) { -+ state->regs[reg].where = where; -+ state->regs[reg].value = value; -+ } -+} -+ -+static int processCFI(const u8 *start, -+ const u8 *end, -+ unsigned long targetLoc, -+ signed ptrType, -+ struct unwind_state *state) -+{ -+ union { -+ const u8 *p8; -+ const u16 *p16; -+ const u32 *p32; -+ } ptr; -+ int result = 1; -+ -+ if (start != state->cieStart) { -+ state->loc = state->org; -+ result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state); -+ if (targetLoc == 0 && state->label == NULL) -+ return result; -+ } -+ for (ptr.p8 = start; result && ptr.p8 < end; ) { -+ switch (*ptr.p8 >> 6) { -+ uleb128_t value; -+ -+ case 0: -+ switch (*ptr.p8++) { -+ case DW_CFA_nop: -+ break; -+ case DW_CFA_set_loc: -+ state->loc = read_pointer(&ptr.p8, end, ptrType, 0, 0); -+ if (state->loc == 0) -+ result = 0; -+ break; -+ case DW_CFA_advance_loc1: -+ result = ptr.p8 < end && advance_loc(*ptr.p8++, state); -+ break; -+ case DW_CFA_advance_loc2: -+ result = ptr.p8 <= end + 2 -+ && advance_loc(*ptr.p16++, state); -+ break; -+ case DW_CFA_advance_loc4: -+ result = ptr.p8 <= end + 4 -+ && advance_loc(*ptr.p32++, state); -+ break; -+ 
case DW_CFA_offset_extended: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, Memory, get_uleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_val_offset: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, Value, get_uleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_offset_extended_sf: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, Memory, get_sleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_val_offset_sf: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, Value, get_sleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_restore_extended: -+ case DW_CFA_undefined: -+ case DW_CFA_same_value: -+ set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state); -+ break; -+ case DW_CFA_register: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, -+ Register, -+ get_uleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_remember_state: -+ if (ptr.p8 == state->label) { -+ state->label = NULL; -+ return 1; -+ } -+ if (state->stackDepth >= MAX_STACK_DEPTH) { -+ dprintk(1, "State stack overflow (%p,%p).", ptr.p8, end); -+ return 0; -+ } -+ state->stack[state->stackDepth++] = ptr.p8; -+ break; -+ case DW_CFA_restore_state: -+ if (state->stackDepth) { -+ const uleb128_t loc = state->loc; -+ const u8 *label = state->label; -+ -+ state->label = state->stack[state->stackDepth - 1]; -+ memcpy(&state->cfa, &badCFA, sizeof(state->cfa)); -+ memset(state->regs, 0, sizeof(state->regs)); -+ state->stackDepth = 0; -+ result = processCFI(start, end, 0, ptrType, state); -+ state->loc = loc; -+ state->label = label; -+ } else { -+ dprintk(1, "State stack underflow (%p,%p).", ptr.p8, end); -+ return 0; -+ } -+ break; -+ case DW_CFA_def_cfa: -+ state->cfa.reg = get_uleb128(&ptr.p8, end); -+ /*nobreak*/ -+ case DW_CFA_def_cfa_offset: -+ state->cfa.offs = get_uleb128(&ptr.p8, end); -+ break; -+ case DW_CFA_def_cfa_sf: -+ state->cfa.reg = get_uleb128(&ptr.p8, end); -+ /*nobreak*/ -+ case DW_CFA_def_cfa_offset_sf: -+ state->cfa.offs = 
get_sleb128(&ptr.p8, end) -+ * state->dataAlign; -+ break; -+ case DW_CFA_def_cfa_register: -+ state->cfa.reg = get_uleb128(&ptr.p8, end); -+ break; -+ /*todo case DW_CFA_def_cfa_expression: */ -+ /*todo case DW_CFA_expression: */ -+ /*todo case DW_CFA_val_expression: */ -+ case DW_CFA_GNU_args_size: -+ get_uleb128(&ptr.p8, end); -+ break; -+ case DW_CFA_GNU_negative_offset_extended: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, -+ Memory, -+ (uleb128_t)0 - get_uleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_GNU_window_save: -+ default: -+ dprintk(1, "Unrecognized CFI op %02X (%p,%p).", ptr.p8[-1], ptr.p8 - 1, end); -+ result = 0; -+ break; -+ } -+ break; -+ case 1: -+ result = advance_loc(*ptr.p8++ & 0x3f, state); -+ break; -+ case 2: -+ value = *ptr.p8++ & 0x3f; -+ set_rule(value, Memory, get_uleb128(&ptr.p8, end), state); -+ break; -+ case 3: -+ set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state); -+ break; -+ } -+ if (ptr.p8 > end) { -+ dprintk(1, "Data overrun (%p,%p).", ptr.p8, end); -+ result = 0; -+ } -+ if (result && targetLoc != 0 && targetLoc < state->loc) -+ return 1; -+ } -+ -+ if (result && ptr.p8 < end) -+ dprintk(1, "Data underrun (%p,%p).", ptr.p8, end); -+ -+ return result -+ && ptr.p8 == end -+ && (targetLoc == 0 -+ || (/*todo While in theory this should apply, gcc in practice omits -+ everything past the function prolog, and hence the location -+ never reaches the end of the function. -+ targetLoc < state->loc &&*/ state->label == NULL)); -+} -+ -+/* Unwind to previous to frame. Returns 0 if successful, negative -+ * number in case of an error. 
*/ -+int unwind(struct unwind_frame_info *frame) -+{ -+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs]) -+ const u32 *fde = NULL, *cie = NULL; -+ const u8 *ptr = NULL, *end = NULL; -+ unsigned long pc = UNW_PC(frame) - frame->call_frame, sp; -+ unsigned long startLoc = 0, endLoc = 0, cfa; -+ unsigned i; -+ signed ptrType = -1; -+ uleb128_t retAddrReg = 0; -+ const struct unwind_table *table; -+ struct unwind_state state; -+ -+ if (UNW_PC(frame) == 0) -+ return -EINVAL; -+ if ((table = find_table(pc)) != NULL -+ && !(table->size & (sizeof(*fde) - 1))) { -+ const u8 *hdr = table->header; -+ unsigned long tableSize; -+ -+ smp_rmb(); -+ if (hdr && hdr[0] == 1) { -+ switch (hdr[3] & DW_EH_PE_FORM) { -+ case DW_EH_PE_native: tableSize = sizeof(unsigned long); break; -+ case DW_EH_PE_data2: tableSize = 2; break; -+ case DW_EH_PE_data4: tableSize = 4; break; -+ case DW_EH_PE_data8: tableSize = 8; break; -+ default: tableSize = 0; break; -+ } -+ ptr = hdr + 4; -+ end = hdr + table->hdrsz; -+ if (tableSize -+ && read_pointer(&ptr, end, hdr[1], 0, 0) -+ == (unsigned long)table->address -+ && (i = read_pointer(&ptr, end, hdr[2], 0, 0)) > 0 -+ && i == (end - ptr) / (2 * tableSize) -+ && !((end - ptr) % (2 * tableSize))) { -+ do { -+ const u8 *cur = ptr + (i / 2) * (2 * tableSize); -+ -+ startLoc = read_pointer(&cur, -+ cur + tableSize, -+ hdr[3], 0, -+ (unsigned long)hdr); -+ if (pc < startLoc) -+ i /= 2; -+ else { -+ ptr = cur - tableSize; -+ i = (i + 1) / 2; -+ } -+ } while (startLoc && i > 1); -+ if (i == 1 -+ && (startLoc = read_pointer(&ptr, -+ ptr + tableSize, -+ hdr[3], 0, -+ (unsigned long)hdr)) != 0 -+ && pc >= startLoc) -+ fde = (void *)read_pointer(&ptr, -+ ptr + tableSize, -+ hdr[3], 0, -+ (unsigned long)hdr); -+ } -+ } -+ if (hdr && !fde) -+ dprintk(3, "Binary lookup for %lx failed.", pc); -+ -+ if (fde != NULL) { -+ cie = cie_for_fde(fde, table); -+ ptr = (const u8 *)(fde + 2); -+ if (cie != NULL -+ && cie != &bad_cie -+ && cie != ¬_fde -+ && (ptrType = 
fde_pointer_type(cie)) >= 0 -+ && read_pointer(&ptr, -+ (const u8 *)(fde + 1) + *fde, -+ ptrType, 0, 0) == startLoc) { -+ if (!(ptrType & DW_EH_PE_indirect)) -+ ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed; -+ endLoc = startLoc -+ + read_pointer(&ptr, -+ (const u8 *)(fde + 1) + *fde, -+ ptrType, 0, 0); -+ if (pc >= endLoc) -+ fde = NULL; -+ } else -+ fde = NULL; -+ if (!fde) -+ dprintk(1, "Binary lookup result for %lx discarded.", pc); -+ } -+ if (fde == NULL) { -+ for (fde = table->address, tableSize = table->size; -+ cie = NULL, tableSize > sizeof(*fde) -+ && tableSize - sizeof(*fde) >= *fde; -+ tableSize -= sizeof(*fde) + *fde, -+ fde += 1 + *fde / sizeof(*fde)) { -+ cie = cie_for_fde(fde, table); -+ if (cie == &bad_cie) { -+ cie = NULL; -+ break; -+ } -+ if (cie == NULL -+ || cie == ¬_fde -+ || (ptrType = fde_pointer_type(cie)) < 0) -+ continue; -+ ptr = (const u8 *)(fde + 2); -+ startLoc = read_pointer(&ptr, -+ (const u8 *)(fde + 1) + *fde, -+ ptrType, 0, 0); -+ if (!startLoc) -+ continue; -+ if (!(ptrType & DW_EH_PE_indirect)) -+ ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed; -+ endLoc = startLoc -+ + read_pointer(&ptr, -+ (const u8 *)(fde + 1) + *fde, -+ ptrType, 0, 0); -+ if (pc >= startLoc && pc < endLoc) -+ break; -+ } -+ if (!fde) -+ dprintk(3, "Linear lookup for %lx failed.", pc); -+ } -+ } -+ if (cie != NULL) { -+ memset(&state, 0, sizeof(state)); -+ state.cieEnd = ptr; /* keep here temporarily */ -+ ptr = (const u8 *)(cie + 2); -+ end = (const u8 *)(cie + 1) + *cie; -+ frame->call_frame = 1; -+ if ((state.version = *ptr) != 1) -+ cie = NULL; /* unsupported version */ -+ else if (*++ptr) { -+ /* check if augmentation size is first (and thus present) */ -+ if (*ptr == 'z') { -+ while (++ptr < end && *ptr) { -+ switch (*ptr) { -+ /* check for ignorable (or already handled) -+ * nul-terminated augmentation string */ -+ case 'L': -+ case 'P': -+ case 'R': -+ continue; -+ case 'S': -+ frame->call_frame = 0; -+ continue; -+ default: -+ break; -+ } -+ break; -+ } -+ } 
-+ if (ptr >= end || *ptr) -+ cie = NULL; -+ } -+ if (!cie) -+ dprintk(1, "CIE unusable (%p,%p).", ptr, end); -+ ++ptr; -+ } -+ if (cie != NULL) { -+ /* get code aligment factor */ -+ state.codeAlign = get_uleb128(&ptr, end); -+ /* get data aligment factor */ -+ state.dataAlign = get_sleb128(&ptr, end); -+ if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end) -+ cie = NULL; -+ else if (UNW_PC(frame) % state.codeAlign -+ || UNW_SP(frame) % sleb128abs(state.dataAlign)) { -+ dprintk(1, "Input pointer(s) misaligned (%lx,%lx).", -+ UNW_PC(frame), UNW_SP(frame)); -+ return -EPERM; -+ } else { -+ retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end); -+ /* skip augmentation */ -+ if (((const char *)(cie + 2))[1] == 'z') { -+ uleb128_t augSize = get_uleb128(&ptr, end); -+ -+ ptr += augSize; -+ } -+ if (ptr > end -+ || retAddrReg >= ARRAY_SIZE(reg_info) -+ || REG_INVALID(retAddrReg) -+ || reg_info[retAddrReg].width != sizeof(unsigned long)) -+ cie = NULL; -+ } -+ if (!cie) -+ dprintk(1, "CIE validation failed (%p,%p).", ptr, end); -+ } -+ if (cie != NULL) { -+ state.cieStart = ptr; -+ ptr = state.cieEnd; -+ state.cieEnd = end; -+ end = (const u8 *)(fde + 1) + *fde; -+ /* skip augmentation */ -+ if (((const char *)(cie + 2))[1] == 'z') { -+ uleb128_t augSize = get_uleb128(&ptr, end); -+ -+ if ((ptr += augSize) > end) -+ fde = NULL; -+ } -+ if (!fde) -+ dprintk(1, "FDE validation failed (%p,%p).", ptr, end); -+ } -+ if (cie == NULL || fde == NULL) { -+#ifdef CONFIG_FRAME_POINTER -+ unsigned long top = TSK_STACK_TOP(frame->task); -+ unsigned long bottom = STACK_BOTTOM(frame->task); -+ unsigned long fp = UNW_FP(frame); -+ unsigned long sp = UNW_SP(frame); -+ unsigned long link; -+ -+ if ((sp | fp) & (sizeof(unsigned long) - 1)) -+ return -EPERM; -+ -+# if FRAME_RETADDR_OFFSET < 0 -+ if (!(sp < top && fp <= sp && bottom < fp)) -+# else -+ if (!(sp > top && fp >= sp && bottom > fp)) -+# endif -+ return -ENXIO; -+ -+ if (probe_kernel_address(fp + 
FRAME_LINK_OFFSET, link)) -+ return -ENXIO; -+ -+# if FRAME_RETADDR_OFFSET < 0 -+ if (!(link > bottom && link < fp)) -+# else -+ if (!(link < bottom && link > fp)) -+# endif -+ return -ENXIO; -+ -+ if (link & (sizeof(link) - 1)) -+ return -ENXIO; -+ -+ fp += FRAME_RETADDR_OFFSET; -+ if (probe_kernel_address(fp, UNW_PC(frame))) -+ return -ENXIO; -+ -+ /* Ok, we can use it */ -+# if FRAME_RETADDR_OFFSET < 0 -+ UNW_SP(frame) = fp - sizeof(UNW_PC(frame)); -+# else -+ UNW_SP(frame) = fp + sizeof(UNW_PC(frame)); -+# endif -+ UNW_FP(frame) = link; -+ return 0; -+#else -+ return -ENXIO; -+#endif -+ } -+ state.org = startLoc; -+ memcpy(&state.cfa, &badCFA, sizeof(state.cfa)); -+ /* process instructions */ -+ if (!processCFI(ptr, end, pc, ptrType, &state) -+ || state.loc > endLoc -+ || state.regs[retAddrReg].where == Nowhere -+ || state.cfa.reg >= ARRAY_SIZE(reg_info) -+ || reg_info[state.cfa.reg].width != sizeof(unsigned long) -+ || FRAME_REG(state.cfa.reg, unsigned long) % sizeof(unsigned long) -+ || state.cfa.offs % sizeof(unsigned long)) { -+ dprintk(1, "Unusable unwind info (%p,%p).", ptr, end); -+ return -EIO; -+ } -+ /* update frame */ -+#ifndef CONFIG_AS_CFI_SIGNAL_FRAME -+ if (frame->call_frame -+ && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign)) -+ frame->call_frame = 0; -+#endif -+ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs; -+ startLoc = min((unsigned long)UNW_SP(frame), cfa); -+ endLoc = max((unsigned long)UNW_SP(frame), cfa); -+ if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) { -+ startLoc = min(STACK_LIMIT(cfa), cfa); -+ endLoc = max(STACK_LIMIT(cfa), cfa); -+ } -+#ifndef CONFIG_64BIT -+# define CASES CASE(8); CASE(16); CASE(32) -+#else -+# define CASES CASE(8); CASE(16); CASE(32); CASE(64) -+#endif -+ pc = UNW_PC(frame); -+ sp = UNW_SP(frame); -+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { -+ if (REG_INVALID(i)) { -+ if (state.regs[i].where == Nowhere) -+ continue; -+ dprintk(1, "Cannot restore register %u (%d).", -+ i, 
state.regs[i].where); -+ return -EIO; -+ } -+ switch (state.regs[i].where) { -+ default: -+ break; -+ case Register: -+ if (state.regs[i].value >= ARRAY_SIZE(reg_info) -+ || REG_INVALID(state.regs[i].value) -+ || reg_info[i].width > reg_info[state.regs[i].value].width) { -+ dprintk(1, "Cannot restore register %u from register %lu.", -+ i, state.regs[i].value); -+ return -EIO; -+ } -+ switch (reg_info[state.regs[i].value].width) { -+#define CASE(n) \ -+ case sizeof(u##n): \ -+ state.regs[i].value = FRAME_REG(state.regs[i].value, \ -+ const u##n); \ -+ break -+ CASES; -+#undef CASE -+ default: -+ dprintk(1, "Unsupported register size %u (%lu).", -+ reg_info[state.regs[i].value].width, -+ state.regs[i].value); -+ return -EIO; -+ } -+ break; -+ } -+ } -+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { -+ if (REG_INVALID(i)) -+ continue; -+ switch (state.regs[i].where) { -+ case Nowhere: -+ if (reg_info[i].width != sizeof(UNW_SP(frame)) -+ || &FRAME_REG(i, __typeof__(UNW_SP(frame))) -+ != &UNW_SP(frame)) -+ continue; -+ UNW_SP(frame) = cfa; -+ break; -+ case Register: -+ switch (reg_info[i].width) { -+#define CASE(n) case sizeof(u##n): \ -+ FRAME_REG(i, u##n) = state.regs[i].value; \ -+ break -+ CASES; -+#undef CASE -+ default: -+ dprintk(1, "Unsupported register size %u (%u).", -+ reg_info[i].width, i); -+ return -EIO; -+ } -+ break; -+ case Value: -+ if (reg_info[i].width != sizeof(unsigned long)) { -+ dprintk(1, "Unsupported value size %u (%u).", -+ reg_info[i].width, i); -+ return -EIO; -+ } -+ FRAME_REG(i, unsigned long) = cfa + state.regs[i].value -+ * state.dataAlign; -+ break; -+ case Memory: { -+ unsigned long addr = cfa + state.regs[i].value -+ * state.dataAlign; -+ -+ if ((state.regs[i].value * state.dataAlign) -+ % sizeof(unsigned long) -+ || addr < startLoc -+ || addr + sizeof(unsigned long) < addr -+ || addr + sizeof(unsigned long) > endLoc) { -+ dprintk(1, "Bad memory location %lx (%lx).", -+ addr, state.regs[i].value); -+ return -EIO; -+ } -+ switch 
(reg_info[i].width) { -+#define CASE(n) case sizeof(u##n): \ -+ if (probe_kernel_address(addr, \ -+ FRAME_REG(i, u##n))) \ -+ return -EFAULT; \ -+ break -+ CASES; -+#undef CASE -+ default: -+ dprintk(1, "Unsupported memory size %u (%u).", -+ reg_info[i].width, i); -+ return -EIO; -+ } -+ } -+ break; -+ } -+ } -+ -+ if (UNW_PC(frame) % state.codeAlign -+ || UNW_SP(frame) % sleb128abs(state.dataAlign)) { -+ dprintk(1, "Output pointer(s) misaligned (%lx,%lx).", -+ UNW_PC(frame), UNW_SP(frame)); -+ return -EIO; -+ } -+ if (pc == UNW_PC(frame) && sp == UNW_SP(frame)) { -+ dprintk(1, "No progress (%lx,%lx).", pc, sp); -+ return -EIO; -+ } -+ -+ return 0; -+#undef CASES -+#undef FRAME_REG -+} -+EXPORT_SYMBOL_GPL(unwind); -+ -+int unwind_init_frame_info(struct unwind_frame_info *info, -+ struct task_struct *tsk, -+ /*const*/ struct pt_regs *regs) -+{ -+ info->task = tsk; -+ info->call_frame = 0; -+ arch_unw_init_frame_info(info, regs); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(unwind_init_frame_info); -+ -+/* -+ * Prepare to unwind a blocked task. -+ */ -+int unwind_init_blocked(struct unwind_frame_info *info, -+ struct task_struct *tsk) -+{ -+ info->task = tsk; -+ info->call_frame = 0; -+ arch_unw_init_blocked(info); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(unwind_init_blocked); -+ -+/* -+ * Prepare to unwind the currently running thread. -+ */ -+int unwind_init_running(struct unwind_frame_info *info, -+ asmlinkage unwind_callback_fn callback, -+ const struct stacktrace_ops *ops, void *data) -+{ -+ info->task = current; -+ info->call_frame = 0; -+ -+ return arch_unwind_init_running(info, callback, ops, data); -+} -+EXPORT_SYMBOL_GPL(unwind_init_running); -+ -+/* -+ * Unwind until the return pointer is in user-land (or until an error -+ * occurs). Returns 0 if successful, negative number in case of -+ * error. 
-+ */ -+int unwind_to_user(struct unwind_frame_info *info) -+{ -+ while (!arch_unw_user_mode(info)) { -+ int err = unwind(info); -+ -+ if (err < 0) -+ return err; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(unwind_to_user); ---- a/lib/Kconfig.debug -+++ b/lib/Kconfig.debug -@@ -828,6 +828,24 @@ config FRAME_POINTER - larger and slower, but it gives very useful debugging information - in case of kernel bugs. (precise oopses/stacktraces/warnings) - -+config UNWIND_INFO -+ bool "Compile the kernel with frame unwind information" -+ depends on !IA64 && !PARISC && !ARM -+ depends on !MODULES || !(MIPS || PPC || SUPERH || V850) -+ help -+ If you say Y here the resulting kernel image will be slightly larger -+ but not slower, and it will give very useful debugging information. -+ If you don't debug the kernel, you can say N, but we may not be able -+ to solve problems without frame unwind information or frame pointers. -+ -+config STACK_UNWIND -+ bool "Stack unwind support" -+ depends on UNWIND_INFO -+ depends on X86 -+ help -+ This enables more precise stack traces, omitting all unrelated -+ occurrences of pointers into kernel code from the dump. -+ - config BOOT_PRINTK_DELAY - bool "Delay each boot printk message by N milliseconds" - depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY diff --git a/patches.suse/supported-flag b/patches.suse/supported-flag deleted file mode 100644 index 9a922e8..0000000 --- a/patches.suse/supported-flag +++ /dev/null @@ -1,442 +0,0 @@ -From: Andreas Gruenbacher -Subject: Novell/external support flag in modules -Patch-mainline: Never, SLES feature - -Upon module load, check if a module is supported, and set the -N (TAINT_NO_SUPPORT) or X (TAINT_EXTERNAL_SUPPORT) tail flags -for unsupported or externally suported modules. 
- -Changes: -* Feb 21 2008 - jeffm -- 2.6.25 claimed -S and bumped the flags up a bit, modpost now uses -N - -Signed-off-by: Andreas Gruenbacher - ---- - - Documentation/kernel-parameters.txt | 5 ++ - Documentation/sysctl/kernel.txt | 12 ++++++ - Makefile | 5 ++ - include/linux/kernel.h | 9 ++++ - include/linux/module.h | 1 - kernel/ksysfs.c | 23 ++++++++++++ - kernel/module.c | 65 ++++++++++++++++++++++++++++++++++++ - kernel/panic.c | 9 ++++ - kernel/sysctl.c | 9 ++++ - scripts/Makefile.modpost | 4 +- - scripts/mod/modpost.c | 65 +++++++++++++++++++++++++++++++++++- - 11 files changed, 205 insertions(+), 2 deletions(-) - ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -2504,6 +2504,11 @@ and is between 256 and 4096 characters. - [X86] - Set unknown_nmi_panic=1 early on boot. - -+ unsupported Allow loading of unsupported kernel modules: -+ 0 = only allow supported modules, -+ 1 = warn when loading unsupported modules, -+ 2 = don't warn. -+ - usbcore.autosuspend= - [USB] The autosuspend time delay (in seconds) used - for newly-detected USB devices (default 2). This ---- a/Documentation/sysctl/kernel.txt -+++ b/Documentation/sysctl/kernel.txt -@@ -491,6 +491,18 @@ can be ORed together: - instead of using the one provided by the hardware. - 512 - A kernel warning has occurred. - 1024 - A module from drivers/staging was loaded. -+ 0x40000000 - An unsupported kernel module was loaded. -+ 0x80000000 - An kernel module with external support was loaded. -+ -+============================================================== -+ -+unsupported: -+ -+Allow to load unsupported kernel modules: -+ -+ 0 - refuse to load unsupported modules, -+ 1 - warn when loading unsupported modules, -+ 2 - don't warn. 
- - ============================================================== - ---- a/Makefile -+++ b/Makefile -@@ -362,6 +362,11 @@ KBUILD_AFLAGS_MODULE := -DMODULE - KBUILD_CFLAGS_MODULE := -DMODULE - KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds - -+# Warn about unsupported modules in kernels built inside Autobuild -+ifneq ($(wildcard /.buildenv),) -+CFLAGS += -DUNSUPPORTED_MODULES=2 -+endif -+ - # Read KERNELRELEASE from include/config/kernel.release (if it exists) - KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) - KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) ---- a/include/linux/kernel.h -+++ b/include/linux/kernel.h -@@ -222,8 +222,10 @@ extern int panic_timeout; - extern int panic_on_oops; - extern int panic_on_unrecovered_nmi; - extern int panic_on_io_nmi; -+extern int unsupported; - extern const char *print_tainted(void); - extern void add_taint(unsigned flag); -+extern void add_nonfatal_taint(unsigned flag); - extern int test_taint(unsigned flag); - extern unsigned long get_taint(void); - extern int root_mountflags; -@@ -251,6 +253,13 @@ extern enum system_states { - #define TAINT_CRAP 10 - #define TAINT_FIRMWARE_WORKAROUND 11 - -+/* -+ * Take the upper bits to hopefully allow them -+ * to stay the same for more than one release. 
-+ */ -+#define TAINT_NO_SUPPORT 30 -+#define TAINT_EXTERNAL_SUPPORT 31 -+ - extern const char hex_asc[]; - #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] - #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] ---- a/include/linux/module.h -+++ b/include/linux/module.h -@@ -410,6 +410,7 @@ struct module *__module_address(unsigned - bool is_module_address(unsigned long addr); - bool is_module_percpu_address(unsigned long addr); - bool is_module_text_address(unsigned long addr); -+const char *supported_printable(int taint); - - static inline int within_module_core(unsigned long addr, struct module *mod) - { ---- a/kernel/ksysfs.c -+++ b/kernel/ksysfs.c -@@ -157,6 +157,28 @@ static struct bin_attribute notes_attr = - struct kobject *kernel_kobj; - EXPORT_SYMBOL_GPL(kernel_kobj); - -+const char *supported_printable(int taint) -+{ -+ int mask = TAINT_PROPRIETARY_MODULE|TAINT_NO_SUPPORT; -+ if ((taint & mask) == mask) -+ return "No, Proprietary and Unsupported modules are loaded"; -+ else if (taint & TAINT_PROPRIETARY_MODULE) -+ return "No, Proprietary modules are loaded"; -+ else if (taint & TAINT_NO_SUPPORT) -+ return "No, Unsupported modules are loaded"; -+ else if (taint & TAINT_EXTERNAL_SUPPORT) -+ return "Yes, External"; -+ else -+ return "Yes"; -+} -+ -+static ssize_t supported_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "%s\n", supported_printable(get_taint())); -+} -+KERNEL_ATTR_RO(supported); -+ - static struct attribute * kernel_attrs[] = { - #if defined(CONFIG_HOTPLUG) - &uevent_seqnum_attr.attr, -@@ -171,6 +193,7 @@ static struct attribute * kernel_attrs[] - &kexec_crash_size_attr.attr, - &vmcoreinfo_attr.attr, - #endif -+ &supported_attr.attr, - NULL - }; - ---- a/kernel/module.c -+++ b/kernel/module.c -@@ -73,6 +73,20 @@ - /* If this is set, the section belongs in the init part of the module */ - #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) - -+/* Allow unsupported modules switch. 
*/ -+#ifdef UNSUPPORTED_MODULES -+int unsupported = UNSUPPORTED_MODULES; -+#else -+int unsupported = 2; /* don't warn when loading unsupported modules. */ -+#endif -+ -+static int __init unsupported_setup(char *str) -+{ -+ get_option(&str, &unsupported); -+ return 1; -+} -+__setup("unsupported=", unsupported_setup); -+ - /* - * Mutex protects: - * 1) List of modules (also safely readable with preempt_disable), -@@ -931,10 +945,36 @@ static struct module_attribute initstate - .show = show_initstate, - }; - -+static void setup_modinfo_supported(struct module *mod, const char *s) -+{ -+ if (!s) { -+ mod->taints |= (1 << TAINT_NO_SUPPORT); -+ return; -+ } -+ -+ if (strcmp(s, "external") == 0) -+ mod->taints |= (1 << TAINT_EXTERNAL_SUPPORT); -+ else if (strcmp(s, "yes")) -+ mod->taints |= (1 << TAINT_NO_SUPPORT); -+} -+ -+static ssize_t show_modinfo_supported(struct module_attribute *mattr, -+ struct module *mod, char *buffer) -+{ -+ return sprintf(buffer, "%s\n", supported_printable(mod->taints)); -+} -+ -+static struct module_attribute modinfo_supported = { -+ .attr = { .name = "supported", .mode = 0444 }, -+ .show = show_modinfo_supported, -+ .setup = setup_modinfo_supported, -+}; -+ - static struct module_attribute *modinfo_attrs[] = { - &modinfo_version, - &modinfo_srcversion, - &initstate, -+ &modinfo_supported, - #ifdef CONFIG_MODULE_UNLOAD - &refcnt, - #endif -@@ -1476,6 +1516,26 @@ static int mod_sysfs_setup(struct module - add_sect_attrs(mod, info); - add_notes_attrs(mod, info); - -+ /* We don't use add_taint() here because it also disables lockdep. */ -+ if (mod->taints & (1 << TAINT_EXTERNAL_SUPPORT)) -+ add_nonfatal_taint(TAINT_EXTERNAL_SUPPORT); -+ else if (mod->taints == (1 << TAINT_NO_SUPPORT)) { -+ if (unsupported == 0) { -+ printk(KERN_WARNING "%s: module not supported by " -+ "Novell, refusing to load. 
To override, echo " -+ "1 > /proc/sys/kernel/unsupported\n", mod->name); -+ err = -ENOEXEC; -+ goto free_hdr; -+ } -+ add_nonfatal_taint(TAINT_NO_SUPPORT); -+ if (unsupported == 1) { -+ printk(KERN_WARNING "%s: module is not supported by " -+ "Novell. Novell Technical Services may decline " -+ "your support request if it involves a kernel " -+ "fault.\n", mod->name); -+ } -+ } -+ - kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); - return 0; - -@@ -3005,6 +3065,10 @@ static char *module_flags(struct module - buf[bx++] = 'F'; - if (mod->taints & (1 << TAINT_CRAP)) - buf[bx++] = 'C'; -+ if (mod->taints & (1 << TAINT_NO_SUPPORT)) -+ buf[bx++] = 'N'; -+ if (mod->taints & (1 << TAINT_EXTERNAL_SUPPORT)) -+ buf[bx++] = 'X'; - /* - * TAINT_FORCED_RMMOD: could be added. - * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't -@@ -3217,6 +3281,7 @@ void print_modules(void) - if (last_unloaded_module[0]) - printk(" [last unloaded: %s]", last_unloaded_module); - printk("\n"); -+ printk("Supported: %s\n", supported_printable(get_taint())); - } - - #ifdef CONFIG_MODVERSIONS ---- a/kernel/panic.c -+++ b/kernel/panic.c -@@ -174,6 +174,8 @@ static const struct tnt tnts[] = { - { TAINT_WARN, 'W', ' ' }, - { TAINT_CRAP, 'C', ' ' }, - { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' }, -+ { TAINT_NO_SUPPORT, 'N', ' ' }, -+ { TAINT_EXTERNAL_SUPPORT, 'X', ' ' }, - }; - - /** -@@ -191,6 +193,8 @@ static const struct tnt tnts[] = { - * 'W' - Taint on warning. - * 'C' - modules from drivers/staging are loaded. - * 'I' - Working around severe firmware bug. -+ * 'N' - Unsuported modules loaded. -+ * 'X' - Modules with external support loaded. - * - * The string is overwritten by the next call to print_tainted(). 
- */ -@@ -226,6 +230,11 @@ unsigned long get_taint(void) - return tainted_mask; - } - -+void add_nonfatal_taint(unsigned flag) -+{ -+ set_bit(flag, &tainted_mask); -+} -+ - void add_taint(unsigned flag) - { - /* ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -664,6 +664,15 @@ static struct ctl_table kern_table[] = { - .extra1 = &pid_max_min, - .extra2 = &pid_max_max, - }, -+#ifdef CONFIG_MODULES -+ { -+ .procname = "unsupported", -+ .data = &unsupported, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif - { - .procname = "panic_on_oops", - .data = &panic_on_oops, ---- a/scripts/Makefile.modpost -+++ b/scripts/Makefile.modpost -@@ -81,7 +81,9 @@ modpost = scripts/mod/modpost - $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \ - $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \ - $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \ -- $(if $(cross_build),-c) -+ $(if $(cross_build),-c) \ -+ -N $(firstword $(wildcard $(dir $(MODVERDIR))/Module.supported \ -+ $(objtree)/Module.supported /dev/null)) - - quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules - cmd_modpost = $(modpost) -s ---- a/scripts/mod/modpost.c -+++ b/scripts/mod/modpost.c -@@ -1639,6 +1639,48 @@ static void check_sec_ref(struct module - } - } - -+void *supported_file; -+unsigned long supported_size; -+ -+static const char *supported(struct module *mod) -+{ -+ unsigned long pos = 0; -+ char *line; -+ -+ /* In a first shot, do a simple linear scan. 
*/ -+ while ((line = get_next_line(&pos, supported_file, -+ supported_size))) { -+ const char *basename, *how = "yes"; -+ char *l = line; -+ -+ /* optional type-of-support flag */ -+ for (l = line; *l != '\0'; l++) { -+ if (*l == ' ' || *l == '\t') { -+ *l = '\0'; -+ how = l + 1; -+ break; -+ } -+ } -+ -+ /* skip directory components */ -+ if ((l = strrchr(line, '/'))) -+ line = l + 1; -+ /* strip .ko extension */ -+ l = line + strlen(line); -+ if (l - line > 3 && !strcmp(l-3, ".ko")) -+ *(l-3) = '\0'; -+ -+ /* skip directory components */ -+ if ((basename = strrchr(mod->name, '/'))) -+ basename++; -+ else -+ basename = mod->name; -+ if (!strcmp(basename, line)) -+ return how; -+ } -+ return NULL; -+} -+ - static void read_symbols(char *modname) - { - const char *symname; -@@ -1826,6 +1868,13 @@ static void add_staging_flag(struct buff - buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n"); - } - -+static void add_supported_flag(struct buffer *b, struct module *mod) -+{ -+ const char *how = supported(mod); -+ if (how) -+ buf_printf(b, "\nMODULE_INFO(supported, \"%s\");\n", how); -+} -+ - /** - * Record CRCs for unresolved symbols - **/ -@@ -1966,6 +2015,13 @@ static void write_if_changed(struct buff - fclose(file); - } - -+static void read_supported(const char *fname) -+{ -+ supported_file = grab_file(fname, &supported_size); -+ if (!supported_file) -+ ; /* ignore error */ -+} -+ - /* parse Module.symvers file. 
line format: - * 0x12345678symbolmodule[[export]something] - **/ -@@ -2059,12 +2115,13 @@ int main(int argc, char **argv) - struct buffer buf = { }; - char *kernel_read = NULL, *module_read = NULL; - char *dump_write = NULL; -+ const char *supported = NULL; - int opt; - int err; - struct ext_sym_list *extsym_iter; - struct ext_sym_list *extsym_start = NULL; - -- while ((opt = getopt(argc, argv, "i:I:e:cmsSo:awM:K:")) != -1) { -+ while ((opt = getopt(argc, argv, "i:I:e:cmsSo:awM:K:N:")) != -1) { - switch (opt) { - case 'i': - kernel_read = optarg; -@@ -2102,11 +2159,16 @@ int main(int argc, char **argv) - case 'w': - warn_unresolved = 1; - break; -+ case 'N': -+ supported = optarg; -+ break; - default: - exit(1); - } - } - -+ if (supported) -+ read_supported(supported); - if (kernel_read) - read_dump(kernel_read, 1); - if (module_read) -@@ -2139,6 +2201,7 @@ int main(int argc, char **argv) - - add_header(&buf, mod); - add_staging_flag(&buf, mod->name); -+ add_supported_flag(&buf, mod); - err |= add_versions(&buf, mod); - add_depends(&buf, mod, modules); - add_moddevtable(&buf, mod); diff --git a/patches.suse/supported-flag-enterprise b/patches.suse/supported-flag-enterprise deleted file mode 100644 index 6e32c32..0000000 --- a/patches.suse/supported-flag-enterprise +++ /dev/null @@ -1,245 +0,0 @@ -From: Jeff Mahoney -Subject: Make the supported flag configurable at build time -References: bnc#528097 -Patch-mainline: Never, SLES feature - - In the enterprise kernels, it makes sense to have the supportability - facility. For openSUSE, it's unnecessary, cumbersome, and just plain - wrong. The support commitments for the two releases are totally - different and it doesn't make any sense to pretend that they are. - - This patch adds a CONFIG_ENTERPRISE_SUPPORT option, which enables the support - reporting facility. When it is disabled, the reporting and checking are too. 
- -Signed-off-by: Jeff Mahoney ---- - Documentation/kernel-parameters.txt | 3 +++ - include/linux/kernel.h | 2 ++ - init/Kconfig | 18 ++++++++++++++++++ - kernel/ksysfs.c | 4 ++++ - kernel/module.c | 19 ++++++++++++++++++- - kernel/panic.c | 2 ++ - kernel/sysctl.c | 2 +- - scripts/Makefile.modpost | 5 +++-- - 8 files changed, 51 insertions(+), 4 deletions(-) - ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -2509,6 +2509,9 @@ and is between 256 and 4096 characters. - 1 = warn when loading unsupported modules, - 2 = don't warn. - -+ CONFIG_ENTERPRISE_SUPPORT must be enabled for this -+ to have any effect. -+ - usbcore.autosuspend= - [USB] The autosuspend time delay (in seconds) used - for newly-detected USB devices (default 2). This ---- a/include/linux/kernel.h -+++ b/include/linux/kernel.h -@@ -253,12 +253,14 @@ extern enum system_states { - #define TAINT_CRAP 10 - #define TAINT_FIRMWARE_WORKAROUND 11 - -+#ifdef CONFIG_ENTERPRISE_SUPPORT - /* - * Take the upper bits to hopefully allow them - * to stay the same for more than one release. - */ - #define TAINT_NO_SUPPORT 30 - #define TAINT_EXTERNAL_SUPPORT 31 -+#endif - - extern const char hex_asc[]; - #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -1,6 +1,23 @@ - config SUSE_KERNEL - def_bool y - -+config ENTERPRISE_SUPPORT -+ bool "Enable enterprise support facility" -+ depends on SUSE_KERNEL -+ help -+ This feature enables the handling of the "supported" module flag. -+ This flag can be used to report unsupported module loads or even -+ refuse them entirely. It is useful when ensuring that the kernel -+ remains in a state that Novell Technical Services, or its -+ technical partners, is prepared to support. -+ -+ Modules in the list of supported modules will be marked supported -+ on build. The default enforcement mode is to report, but not -+ deny, loading of unsupported modules. 
-+ -+ If you aren't building a kernel for an enterprise distribution, -+ say n. -+ - config SPLIT_PACKAGE - bool "Split the kernel package into multiple RPMs" - depends on SUSE_KERNEL && MODULES ---- a/kernel/ksysfs.c -+++ b/kernel/ksysfs.c -@@ -157,6 +157,7 @@ static struct bin_attribute notes_attr = - struct kobject *kernel_kobj; - EXPORT_SYMBOL_GPL(kernel_kobj); - -+#ifdef CONFIG_ENTERPRISE_SUPPORT - const char *supported_printable(int taint) - { - int mask = TAINT_PROPRIETARY_MODULE|TAINT_NO_SUPPORT; -@@ -178,6 +179,7 @@ static ssize_t supported_show(struct kob - return sprintf(buf, "%s\n", supported_printable(get_taint())); - } - KERNEL_ATTR_RO(supported); -+#endif - - static struct attribute * kernel_attrs[] = { - #if defined(CONFIG_HOTPLUG) -@@ -193,7 +195,9 @@ static struct attribute * kernel_attrs[] - &kexec_crash_size_attr.attr, - &vmcoreinfo_attr.attr, - #endif -+#ifdef CONFIG_ENTERPRISE_SUPPORT - &supported_attr.attr, -+#endif - NULL - }; - ---- a/kernel/module.c -+++ b/kernel/module.c -@@ -73,6 +73,7 @@ - /* If this is set, the section belongs in the init part of the module */ - #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) - -+#ifdef CONFIG_ENTERPRISE_SUPPORT - /* Allow unsupported modules switch. 
*/ - #ifdef UNSUPPORTED_MODULES - int unsupported = UNSUPPORTED_MODULES; -@@ -86,6 +87,7 @@ static int __init unsupported_setup(char - return 1; - } - __setup("unsupported=", unsupported_setup); -+#endif - - /* - * Mutex protects: -@@ -945,6 +947,7 @@ static struct module_attribute initstate - .show = show_initstate, - }; - -+#ifdef CONFIG_ENTERPRISE_SUPPORT - static void setup_modinfo_supported(struct module *mod, const char *s) - { - if (!s) { -@@ -969,12 +972,15 @@ static struct module_attribute modinfo_s - .show = show_modinfo_supported, - .setup = setup_modinfo_supported, - }; -+#endif - - static struct module_attribute *modinfo_attrs[] = { - &modinfo_version, - &modinfo_srcversion, - &initstate, -+#ifdef CONFIG_ENTERPRISE_SUPPORT - &modinfo_supported, -+#endif - #ifdef CONFIG_MODULE_UNLOAD - &refcnt, - #endif -@@ -1516,6 +1522,7 @@ static int mod_sysfs_setup(struct module - add_sect_attrs(mod, info); - add_notes_attrs(mod, info); - -+#ifdef CONFIG_ENTERPRISE_SUPPORT - /* We don't use add_taint() here because it also disables lockdep. */ - if (mod->taints & (1 << TAINT_EXTERNAL_SUPPORT)) - add_nonfatal_taint(TAINT_EXTERNAL_SUPPORT); -@@ -1525,7 +1532,7 @@ static int mod_sysfs_setup(struct module - "Novell, refusing to load. 
To override, echo " - "1 > /proc/sys/kernel/unsupported\n", mod->name); - err = -ENOEXEC; -- goto free_hdr; -+ goto out_remove_attrs; - } - add_nonfatal_taint(TAINT_NO_SUPPORT); - if (unsupported == 1) { -@@ -1535,10 +1542,16 @@ static int mod_sysfs_setup(struct module - "fault.\n", mod->name); - } - } -+#endif - - kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); - return 0; - -+out_remove_attrs: -+ remove_notes_attrs(mod); -+ remove_sect_attrs(mod); -+ del_usage_links(mod); -+ module_remove_modinfo_attrs(mod); - out_unreg_param: - module_param_sysfs_remove(mod); - out_unreg_holders: -@@ -3065,10 +3078,12 @@ static char *module_flags(struct module - buf[bx++] = 'F'; - if (mod->taints & (1 << TAINT_CRAP)) - buf[bx++] = 'C'; -+#ifdef CONFIG_ENTERPRISE_SUPPORT - if (mod->taints & (1 << TAINT_NO_SUPPORT)) - buf[bx++] = 'N'; - if (mod->taints & (1 << TAINT_EXTERNAL_SUPPORT)) - buf[bx++] = 'X'; -+#endif - /* - * TAINT_FORCED_RMMOD: could be added. - * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't -@@ -3281,7 +3296,9 @@ void print_modules(void) - if (last_unloaded_module[0]) - printk(" [last unloaded: %s]", last_unloaded_module); - printk("\n"); -+#ifdef CONFIG_ENTERPRISE_SUPPORT - printk("Supported: %s\n", supported_printable(get_taint())); -+#endif - } - - #ifdef CONFIG_MODVERSIONS ---- a/kernel/panic.c -+++ b/kernel/panic.c -@@ -174,8 +174,10 @@ static const struct tnt tnts[] = { - { TAINT_WARN, 'W', ' ' }, - { TAINT_CRAP, 'C', ' ' }, - { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' }, -+#ifdef CONFIG_ENTERPRISE_SUPPORT - { TAINT_NO_SUPPORT, 'N', ' ' }, - { TAINT_EXTERNAL_SUPPORT, 'X', ' ' }, -+#endif - }; - - /** ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -664,7 +664,7 @@ static struct ctl_table kern_table[] = { - .extra1 = &pid_max_min, - .extra2 = &pid_max_max, - }, --#ifdef CONFIG_MODULES -+#if defined(CONFIG_MODULES) && defined(CONFIG_ENTERPRISE_SUPPORT) - { - .procname = "unsupported", - .data = &unsupported, ---- a/scripts/Makefile.modpost -+++ 
b/scripts/Makefile.modpost -@@ -82,8 +82,9 @@ modpost = scripts/mod/modpost - $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \ - $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \ - $(if $(cross_build),-c) \ -- -N $(firstword $(wildcard $(dir $(MODVERDIR))/Module.supported \ -- $(objtree)/Module.supported /dev/null)) -+ $(if $(CONFIG_ENTERPRISE_SUPPORT), \ -+ -N $(firstword $(wildcard $(dir $(MODVERDIR))/Module.supported \ -+ $(objtree)/Module.supported /dev/null))) - - quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules - cmd_modpost = $(modpost) -s diff --git a/patches.suse/suse-ppc64-branding b/patches.suse/suse-ppc64-branding deleted file mode 100644 index 13b5409..0000000 --- a/patches.suse/suse-ppc64-branding +++ /dev/null @@ -1,21 +0,0 @@ -From: -Subject: display the product in the frontpanel LCD -Patch-mainline: never - -display the product in the frontpanel LCD -also the uname -r output instead of uname -v. - - arch/powerpc/platforms/pseries/setup.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/powerpc/platforms/pseries/setup.c -+++ b/arch/powerpc/platforms/pseries/setup.c -@@ -318,7 +318,7 @@ static void __init pSeries_setup_arch(vo - static int __init pSeries_init_panel(void) - { - /* Manually leave the kernel version on the panel. */ -- ppc_md.progress("Linux ppc64\n", 0); -+ ppc_md.progress("SUSE Linux\n", 0); - ppc_md.progress(init_utsname()->version, 0); - - return 0; diff --git a/patches.suse/unmap_vmas-lat b/patches.suse/unmap_vmas-lat deleted file mode 100644 index 9c32811..0000000 --- a/patches.suse/unmap_vmas-lat +++ /dev/null @@ -1,33 +0,0 @@ -From: andrea@suse.de -Subject: low-latency stuff -Patch-mainline: not yet - - -My point is that preempt and no-preempt should do the same thing there, -otherwise when you benchmark -preempt, you'll get better latency, -but not because of the preempt feature, but just because of unrelated -latency improvements that have nothing to do with preempt. 
- - ---- - mm/memory.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - ---- a/mm/memory.c -+++ b/mm/memory.c -@@ -1060,11 +1060,11 @@ static unsigned long unmap_page_range(st - return addr; - } - --#ifdef CONFIG_PREEMPT --# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE) -+#ifdef CONFIG_SMP -+/* zap one pte page at a time */ -+#define ZAP_BLOCK_SIZE (FREE_PTE_NR * PAGE_SIZE) - #else --/* No preempt: go for improved straight-line efficiency */ --# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE) -+#define ZAP_BLOCK_SIZE (253 * PAGE_SIZE) - #endif - - /** diff --git a/patches.suse/uvcvideo-ignore-hue-control-for-5986-0241.patch b/patches.suse/uvcvideo-ignore-hue-control-for-5986-0241.patch deleted file mode 100644 index 653631f..0000000 --- a/patches.suse/uvcvideo-ignore-hue-control-for-5986-0241.patch +++ /dev/null @@ -1,57 +0,0 @@ -From: Brandon Philips -Subject: uvcvideo: ignore hue control for 5986:0241 -References: bnc#499152 -Patch-mainline: Never? I will submit upstream but there is probably a better fix - -Querying the hue control on Bison 5986:0241 causes the chipset to -lockup. So, create a quirk that will avoid offering V4L2_CID_HUE to user -space. 
- -Signed-off-by: Brandon Philips - ---- - drivers/media/video/uvc/uvc_ctrl.c | 4 ++++ - drivers/media/video/uvc/uvc_driver.c | 8 ++++++++ - drivers/media/video/uvc/uvcvideo.h | 1 + - 3 files changed, 13 insertions(+) - ---- a/drivers/media/video/uvc/uvc_ctrl.c -+++ b/drivers/media/video/uvc/uvc_ctrl.c -@@ -862,6 +862,10 @@ int uvc_query_v4l2_ctrl(struct uvc_video - if (ret < 0) - return -ERESTARTSYS; - -+ if ((chain->dev->quirks & UVC_QUIRK_HUE_EPIPE) && -+ (v4l2_ctrl->id == V4L2_CID_HUE)) -+ return -EINVAL; -+ - ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping); - if (ctrl == NULL) { - ret = -EINVAL; ---- a/drivers/media/video/uvc/uvc_driver.c -+++ b/drivers/media/video/uvc/uvc_driver.c -@@ -2239,6 +2239,14 @@ static struct usb_device_id uvc_ids[] = - .bInterfaceProtocol = 0, - .driver_info = UVC_QUIRK_PROBE_MINMAX - | UVC_QUIRK_IGNORE_SELECTOR_UNIT }, -+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE -+ | USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x5986, -+ .idProduct = 0x0241, -+ .bInterfaceClass = USB_CLASS_VIDEO, -+ .bInterfaceSubClass = 1, -+ .bInterfaceProtocol = 0, -+ .driver_info = UVC_QUIRK_HUE_EPIPE }, - /* Generic USB Video Class */ - { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) }, - {} ---- a/drivers/media/video/uvc/uvcvideo.h -+++ b/drivers/media/video/uvc/uvcvideo.h -@@ -182,6 +182,7 @@ struct uvc_xu_control { - #define UVC_QUIRK_FIX_BANDWIDTH 0x00000080 - #define UVC_QUIRK_PROBE_DEF 0x00000100 - #define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200 -+#define UVC_QUIRK_HUE_EPIPE 0x00000400 - - /* Format flags */ - #define UVC_FMT_FLAG_COMPRESSED 0x00000001 diff --git a/patches.suse/wireless-no-aes-select b/patches.suse/wireless-no-aes-select deleted file mode 100644 index b9570e8..0000000 --- a/patches.suse/wireless-no-aes-select +++ /dev/null @@ -1,33 +0,0 @@ -Subject: Don't force select AES in wireless -From: ak@suse.de -Patch-mainline: Not yet - -x86 kernels use always the assembler optimized versions of AES and TWOFISH. 
-But the wireless stack would force enable the generic C aes anyways. -Remove that. The optimized versions provide the cipher as well. - ---- - drivers/net/wireless/Kconfig | 1 - - net/mac80211/Kconfig | 1 - - 2 files changed, 2 deletions(-) - ---- a/drivers/net/wireless/Kconfig -+++ b/drivers/net/wireless/Kconfig -@@ -121,7 +121,6 @@ config AIRO_CS - select WEXT_SPY - select WEXT_PRIV - select CRYPTO -- select CRYPTO_AES - ---help--- - This is the standard Linux driver to support Cisco/Aironet PCMCIA - 802.11 wireless cards. This driver is the same as the Aironet ---- a/net/mac80211/Kconfig -+++ b/net/mac80211/Kconfig -@@ -4,7 +4,6 @@ config MAC80211 - select CRYPTO - select CRYPTO_ECB - select CRYPTO_ARC4 -- select CRYPTO_AES - select CRC32 - select AVERAGE - ---help--- diff --git a/patches.suse/x86-mark_rodata_rw.patch b/patches.suse/x86-mark_rodata_rw.patch deleted file mode 100644 index 60308ea..0000000 --- a/patches.suse/x86-mark_rodata_rw.patch +++ /dev/null @@ -1,184 +0,0 @@ -From: Nick Piggin -Subject: Add mark_rodata_rw() to un-protect read-only kernel code pages -References: bnc#439348 -Patch-mainline: probably never - -CONFIG_RODATA presents a problem for antivirus vendors who do not have a -clean user-space interface for getting virus scanning triggered, and -currently resort to patching the kernel code instead (presumably the -ystem call table). With CONFIG_RODATA enabled, the kernel rejects such -write accesses. - -Add a new mark_rodata_rw() function to un-protect the read-only kernel code -pages for now, and export mark_rodata_ro() and mark_rodata_rw() to modules. - -This is not meant as a permanent workaround, and will be removed again in the -next release! 
- -Acked-by: Andres Gruenbacher - ---- - arch/x86/include/asm/cacheflush.h | 3 +++ - arch/x86/mm/init_32.c | 14 ++++++++++++++ - arch/x86/mm/init_64.c | 31 +++++++++++++++++++++++++------ - arch/x86/mm/pageattr.c | 31 +++++++++++++++++++++++++++++-- - 4 files changed, 71 insertions(+), 8 deletions(-) - ---- a/arch/x86/include/asm/cacheflush.h -+++ b/arch/x86/include/asm/cacheflush.h -@@ -101,6 +101,7 @@ int set_memory_x(unsigned long addr, int - int set_memory_nx(unsigned long addr, int numpages); - int set_memory_ro(unsigned long addr, int numpages); - int set_memory_rw(unsigned long addr, int numpages); -+int set_memory_rw_force(unsigned long addr, int numpages); - int set_memory_np(unsigned long addr, int numpages); - int set_memory_4k(unsigned long addr, int numpages); - -@@ -138,12 +139,14 @@ int set_pages_x(struct page *page, int n - int set_pages_nx(struct page *page, int numpages); - int set_pages_ro(struct page *page, int numpages); - int set_pages_rw(struct page *page, int numpages); -+int set_pages_rw_force(struct page *page, int numpages); - - - void clflush_cache_range(void *addr, unsigned int size); - - #ifdef CONFIG_DEBUG_RODATA - void mark_rodata_ro(void); -+void mark_rodata_rw(void); - extern const int rodata_test_data; - extern int kernel_set_to_readonly; - void set_kernel_text_rw(void); ---- a/arch/x86/mm/init_32.c -+++ b/arch/x86/mm/init_32.c -@@ -967,5 +967,19 @@ void mark_rodata_ro(void) - #endif - mark_nxdata_nx(); - } -+EXPORT_SYMBOL_GPL(mark_rodata_ro); -+ -+void mark_rodata_rw(void) -+{ -+ unsigned long start = PFN_ALIGN(_text); -+ unsigned long size = PFN_ALIGN(_etext) - start; -+ -+ start += size; -+ size = (unsigned long)__end_rodata - start; -+ set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT); -+ printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n", -+ size >> 10); -+} -+EXPORT_SYMBOL_GPL(mark_rodata_rw); - #endif - ---- a/arch/x86/mm/init_64.c -+++ b/arch/x86/mm/init_64.c -@@ -782,6 +782,7 @@ void 
set_kernel_text_ro(void) - set_memory_ro(start, (end - start) >> PAGE_SHIFT); - } - -+static int initmem_freed __read_mostly = 0; - void mark_rodata_ro(void) - { - unsigned long start = PFN_ALIGN(_text); -@@ -814,15 +815,33 @@ void mark_rodata_ro(void) - set_memory_ro(start, (end-start) >> PAGE_SHIFT); - #endif - -- free_init_pages("unused kernel memory", -- (unsigned long) page_address(virt_to_page(text_end)), -- (unsigned long) -+ if (!initmem_freed) { -+ initmem_freed = 1; -+ free_init_pages("unused kernel memory", -+ (unsigned long) -+ page_address(virt_to_page(text_end)), -+ (unsigned long) - page_address(virt_to_page(rodata_start))); -- free_init_pages("unused kernel memory", -- (unsigned long) page_address(virt_to_page(rodata_end)), -- (unsigned long) page_address(virt_to_page(data_start))); -+ free_init_pages("unused kernel memory", -+ (unsigned long) -+ page_address(virt_to_page(rodata_end)), -+ (unsigned long) -+ page_address(virt_to_page(data_start))); -+ } - } -+EXPORT_SYMBOL_GPL(mark_rodata_ro); - -+void mark_rodata_rw(void) -+{ -+ unsigned long rodata_start = -+ ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK; -+ unsigned long end = (unsigned long) &__end_rodata_hpage_align; -+ -+ printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", -+ (end - rodata_start) >> 10); -+ set_memory_rw_force(rodata_start, (end - rodata_start) >> PAGE_SHIFT); -+} -+EXPORT_SYMBOL_GPL(mark_rodata_rw); - #endif - - int kern_addr_valid(unsigned long addr) ---- a/arch/x86/mm/pageattr.c -+++ b/arch/x86/mm/pageattr.c -@@ -246,6 +246,8 @@ static void cpa_flush_array(unsigned lon - } - } - -+static int static_protections_allow_rodata __read_mostly; -+ - /* - * Certain areas of memory on x86 require very specific protection flags, - * for example the BIOS area or kernel text. Callers don't always get this -@@ -279,8 +281,11 @@ static inline pgprot_t static_protection - * catches all aliases. 
- */ - if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, -- __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) -- pgprot_val(forbidden) |= _PAGE_RW; -+ __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) { -+ if (!static_protections_allow_rodata) -+ pgprot_val(forbidden) |= _PAGE_RW; -+ } -+ - - #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) - /* -@@ -1155,6 +1160,21 @@ int set_memory_rw(unsigned long addr, in - } - EXPORT_SYMBOL_GPL(set_memory_rw); - -+/* hack: bypass kernel rodata section static_protections check. */ -+int set_memory_rw_force(unsigned long addr, int numpages) -+{ -+ static DEFINE_MUTEX(lock); -+ int ret; -+ -+ mutex_lock(&lock); -+ static_protections_allow_rodata = 1; -+ ret = change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0); -+ static_protections_allow_rodata = 0; -+ mutex_unlock(&lock); -+ -+ return ret; -+} -+ - int set_memory_np(unsigned long addr, int numpages) - { - return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0); -@@ -1288,6 +1308,13 @@ int set_pages_rw(struct page *page, int - return set_memory_rw(addr, numpages); - } - -+int set_pages_rw_force(struct page *page, int numpages) -+{ -+ unsigned long addr = (unsigned long)page_address(page); -+ -+ return set_memory_rw_force(addr, numpages); -+} -+ - #ifdef CONFIG_DEBUG_PAGEALLOC - - static int __set_pages_p(struct page *page, int numpages) diff --git a/patches.trace/utrace-core b/patches.trace/utrace-core deleted file mode 100644 index be85833..0000000 --- a/patches.trace/utrace-core +++ /dev/null @@ -1,4101 +0,0 @@ -From: Roland McGrath -Date: Tue 27 Oct 2009 -Subject: utrace core -References: FATE#304321 -Patch-mainline: no - -This adds the utrace facility, a new modular interface in the kernel for -implementing user thread tracing and debugging. This fits on top of the -tracehook_* layer, so the new code is well-isolated. - -The new interface is in and the DocBook utrace book -describes it. 
It allows for multiple separate tracing engines to work in -parallel without interfering with each other. Higher-level tracing -facilities can be implemented as loadable kernel modules using this layer. - -The new facility is made optional under CONFIG_UTRACE. -When this is not enabled, no new code is added. -It can only be enabled on machines that have all the -prerequisites and select CONFIG_HAVE_ARCH_TRACEHOOK. - -In this initial version, utrace and ptrace do not play together at all. -If ptrace is attached to a thread, the attach calls in the utrace kernel -API return -EBUSY. If utrace is attached to a thread, the PTRACE_ATTACH -or PTRACE_TRACEME request will return EBUSY to userland. The old ptrace -code is otherwise unchanged and nothing using ptrace should be affected -by this patch as long as utrace is not used at the same time. In the -future we can clean up the ptrace implementation and rework it to use -the utrace API. - -Signed-off-by: Roland McGrath -Signed-off-by: Tony Jones ---- - Documentation/DocBook/Makefile | 2 - Documentation/DocBook/utrace.tmpl | 590 +++++++++ - fs/proc/array.c | 3 - include/linux/init_task.h | 1 - include/linux/sched.h | 6 - include/linux/tracehook.h | 66 + - include/linux/utrace.h | 702 +++++++++++ - include/linux/utrace_struct.h | 59 - init/Kconfig | 9 - kernel/Makefile | 1 - kernel/ptrace.c | 14 - kernel/utrace.c | 2340 ++++++++++++++++++++++++++++++++++++++ - 12 files changed, 3791 insertions(+), 2 deletions(-) - ---- a/Documentation/DocBook/Makefile -+++ b/Documentation/DocBook/Makefile -@@ -9,7 +9,7 @@ - DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \ - kernel-hacking.xml kernel-locking.xml deviceiobook.xml \ - writing_usb_driver.xml networking.xml \ -- kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \ -+ kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml utrace.xml \ - gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \ - genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ - 80211.xml 
debugobjects.xml sh.xml regulator.xml \ ---- /dev/null -+++ b/Documentation/DocBook/utrace.tmpl -@@ -0,0 +1,590 @@ -+ -+ -+ -+ -+ -+ The utrace User Debugging Infrastructure -+ -+ -+ -+ -+ utrace concepts -+ -+ Introduction -+ -+ -+ utrace is infrastructure code for tracing -+ and controlling user threads. This is the foundation for writing -+ tracing engines, which can be loadable kernel modules. -+ -+ -+ -+ The basic actors in utrace are the thread -+ and the tracing engine. A tracing engine is some body of code that -+ calls into the <linux/utrace.h> -+ interfaces, represented by a struct -+ utrace_engine_ops. (Usually it's a kernel module, -+ though the legacy ptrace support is a tracing -+ engine that is not in a kernel module.) The interface operates on -+ individual threads (struct task_struct). -+ If an engine wants to treat several threads as a group, that is up -+ to its higher-level code. -+ -+ -+ -+ Tracing begins by attaching an engine to a thread, using -+ utrace_attach_task or -+ utrace_attach_pid. If successful, it returns a -+ pointer that is the handle used in all other calls. -+ -+ -+ -+ -+ Events and Callbacks -+ -+ -+ An attached engine does nothing by default. An engine makes something -+ happen by requesting callbacks via utrace_set_events -+ and poking the thread with utrace_control. -+ The synchronization issues related to these two calls -+ are discussed further below in . -+ -+ -+ -+ Events are specified using the macro -+ UTRACE_EVENT(type). -+ Each event type is associated with a callback in struct -+ utrace_engine_ops. A tracing engine can leave unused -+ callbacks NULL. The only callbacks required -+ are those used by the event flags it sets. -+ -+ -+ -+ Many engines can be attached to each thread. When a thread has an -+ event, each engine gets a callback if it has set the event flag for -+ that event type. For most events, engines are called in the order they -+ attached. 
Engines that attach after the event has occurred do not get -+ callbacks for that event. This includes any new engines just attached -+ by an existing engine's callback function. Once the sequence of -+ callbacks for that one event has completed, such new engines are then -+ eligible in the next sequence that starts when there is another event. -+ -+ -+ -+ Event reporting callbacks have details particular to the event type, -+ but are all called in similar environments and have the same -+ constraints. Callbacks are made from safe points, where no locks -+ are held, no special resources are pinned (usually), and the -+ user-mode state of the thread is accessible. So, callback code has -+ a pretty free hand. But to be a good citizen, callback code should -+ never block for long periods. It is fine to block in -+ kmalloc and the like, but never wait for i/o or -+ for user mode to do something. If you need the thread to wait, use -+ UTRACE_STOP and return from the callback -+ quickly. When your i/o finishes or whatever, you can use -+ utrace_control to resume the thread. -+ -+ -+ -+ The UTRACE_EVENT(SYSCALL_ENTRY) event is a special -+ case. While other events happen in the kernel when it will return to -+ user mode soon, this event happens when entering the kernel before it -+ will proceed with the work requested from user mode. Because of this -+ difference, the report_syscall_entry callback is -+ special in two ways. For this event, engines are called in reverse of -+ the normal order (this includes the report_quiesce -+ call that precedes a report_syscall_entry call). -+ This preserves the semantics that the last engine to attach is called -+ "closest to user mode"--the engine that is first to see a thread's user -+ state when it enters the kernel is also the last to see that state when -+ the thread returns to user mode. 
For the same reason, if these -+ callbacks use UTRACE_STOP (see the next section), -+ the thread stops immediately after callbacks rather than only when it's -+ ready to return to user mode; when allowed to resume, it will actually -+ attempt the system call indicated by the register values at that time. -+ -+ -+ -+ -+ Stopping Safely -+ -+ Writing well-behaved callbacks -+ -+ -+ Well-behaved callbacks are important to maintain two essential -+ properties of the interface. The first of these is that unrelated -+ tracing engines should not interfere with each other. If your engine's -+ event callback does not return quickly, then another engine won't get -+ the event notification in a timely manner. The second important -+ property is that tracing should be as noninvasive as possible to the -+ normal operation of the system overall and of the traced thread in -+ particular. That is, attached tracing engines should not perturb a -+ thread's behavior, except to the extent that changing its user-visible -+ state is explicitly what you want to do. (Obviously some perturbation -+ is unavoidable, primarily timing changes, ranging from small delays due -+ to the overhead of tracing, to arbitrary pauses in user code execution -+ when a user stops a thread with a debugger for examination.) Even when -+ you explicitly want the perturbation of making the traced thread block, -+ just blocking directly in your callback has more unwanted effects. For -+ example, the CLONE event callbacks are called when -+ the new child thread has been created but not yet started running; the -+ child can never be scheduled until the CLONE -+ tracing callbacks return. (This allows engines tracing the parent to -+ attach to the child.) If a CLONE event callback -+ blocks the parent thread, it also prevents the child thread from -+ running (even to process a SIGKILL). 
If what you -+ want is to make both the parent and child block, then use -+ utrace_attach_task on the child and then use -+ UTRACE_STOP on both threads. A more crucial -+ problem with blocking in callbacks is that it can prevent -+ SIGKILL from working. A thread that is blocking -+ due to UTRACE_STOP will still wake up and die -+ immediately when sent a SIGKILL, as all threads -+ should. Relying on the utrace -+ infrastructure rather than on private synchronization calls in event -+ callbacks is an important way to help keep tracing robustly -+ noninvasive. -+ -+ -+ -+ -+ Using <constant>UTRACE_STOP</constant> -+ -+ -+ To control another thread and access its state, it must be stopped -+ with UTRACE_STOP. This means that it is -+ stopped and won't start running again while we access it. When a -+ thread is not already stopped, utrace_control -+ returns -EINPROGRESS and an engine must wait -+ for an event callback when the thread is ready to stop. The thread -+ may be running on another CPU or may be blocked. When it is ready -+ to be examined, it will make callbacks to engines that set the -+ UTRACE_EVENT(QUIESCE) event bit. To wake up an -+ interruptible wait, use UTRACE_INTERRUPT. -+ -+ -+ -+ As long as some engine has used UTRACE_STOP and -+ not called utrace_control to resume the thread, -+ then the thread will remain stopped. SIGKILL -+ will wake it up, but it will not run user code. When the stop is -+ cleared with utrace_control or a callback -+ return value, the thread starts running again. -+ (See also .) -+ -+ -+ -+ -+ -+ -+ Tear-down Races -+ -+ Primacy of <constant>SIGKILL</constant> -+ -+ Ordinarily synchronization issues for tracing engines are kept fairly -+ straightforward by using UTRACE_STOP. You ask a -+ thread to stop, and then once it makes the -+ report_quiesce callback it cannot do anything else -+ that would result in another callback, until you let it with a -+ utrace_control call. 
This simple arrangement -+ avoids complex and error-prone code in each one of a tracing engine's -+ event callbacks to keep them serialized with the engine's other -+ operations done on that thread from another thread of control. -+ However, giving tracing engines complete power to keep a traced thread -+ stuck in place runs afoul of a more important kind of simplicity that -+ the kernel overall guarantees: nothing can prevent or delay -+ SIGKILL from making a thread die and release its -+ resources. To preserve this important property of -+ SIGKILL, it as a special case can break -+ UTRACE_STOP like nothing else normally can. This -+ includes both explicit SIGKILL signals and the -+ implicit SIGKILL sent to each other thread in the -+ same thread group by a thread doing an exec, or processing a fatal -+ signal, or making an exit_group system call. A -+ tracing engine can prevent a thread from beginning the exit or exec or -+ dying by signal (other than SIGKILL) if it is -+ attached to that thread, but once the operation begins, no tracing -+ engine can prevent or delay all other threads in the same thread group -+ dying. -+ -+ -+ -+ Final callbacks -+ -+ The report_reap callback is always the final event -+ in the life cycle of a traced thread. Tracing engines can use this as -+ the trigger to clean up their own data structures. The -+ report_death callback is always the penultimate -+ event a tracing engine might see; it's seen unless the thread was -+ already in the midst of dying when the engine attached. Many tracing -+ engines will have no interest in when a parent reaps a dead process, -+ and nothing they want to do with a zombie thread once it dies; for -+ them, the report_death callback is the natural -+ place to clean up data structures and detach. 
To facilitate writing -+ such engines robustly, given the asynchrony of -+ SIGKILL, and without error-prone manual -+ implementation of synchronization schemes, the -+ utrace infrastructure provides some special -+ guarantees about the report_death and -+ report_reap callbacks. It still takes some care -+ to be sure your tracing engine is robust to tear-down races, but these -+ rules make it reasonably straightforward and concise to handle a lot of -+ corner cases correctly. -+ -+ -+ -+ Engine and task pointers -+ -+ The first sort of guarantee concerns the core data structures -+ themselves. struct utrace_engine is -+ a reference-counted data structure. While you hold a reference, an -+ engine pointer will always stay valid so that you can safely pass it to -+ any utrace call. Each call to -+ utrace_attach_task or -+ utrace_attach_pid returns an engine pointer with a -+ reference belonging to the caller. You own that reference until you -+ drop it using utrace_engine_put. There is an -+ implicit reference on the engine while it is attached. So if you drop -+ your only reference, and then use -+ utrace_attach_task without -+ UTRACE_ATTACH_CREATE to look up that same engine, -+ you will get the same pointer with a new reference to replace the one -+ you dropped, just like calling utrace_engine_get. -+ When an engine has been detached, either explicitly with -+ UTRACE_DETACH or implicitly after -+ report_reap, then any references you hold are all -+ that keep the old engine pointer alive. -+ -+ -+ -+ There is nothing a kernel module can do to keep a struct -+ task_struct alive outside of -+ rcu_read_lock. When the task dies and is reaped -+ by its parent (or itself), that structure can be freed so that any -+ dangling pointers you have stored become invalid. -+ utrace will not prevent this, but it can -+ help you detect it safely. By definition, a task that has been reaped -+ has had all its engines detached. 
All -+ utrace calls can be safely called on a -+ detached engine if the caller holds a reference on that engine pointer, -+ even if the task pointer passed in the call is invalid. All calls -+ return -ESRCH for a detached engine, which tells -+ you that the task pointer you passed could be invalid now. Since -+ utrace_control and -+ utrace_set_events do not block, you can call those -+ inside a rcu_read_lock section and be sure after -+ they don't return -ESRCH that the task pointer is -+ still valid until rcu_read_unlock. The -+ infrastructure never holds task references of its own. Though neither -+ rcu_read_lock nor any other lock is held while -+ making a callback, it's always guaranteed that the struct -+ task_struct and the struct -+ utrace_engine passed as arguments remain valid -+ until the callback function returns. -+ -+ -+ -+ The common means for safely holding task pointers that is available to -+ kernel modules is to use struct pid, which -+ permits put_pid from kernel modules. When using -+ that, the calls utrace_attach_pid, -+ utrace_control_pid, -+ utrace_set_events_pid, and -+ utrace_barrier_pid are available. -+ -+ -+ -+ -+ -+ Serialization of <constant>DEATH</constant> and <constant>REAP</constant> -+ -+ -+ The second guarantee is the serialization of -+ DEATH and REAP event -+ callbacks for a given thread. The actual reaping by the parent -+ (release_task call) can occur simultaneously -+ while the thread is still doing the final steps of dying, including -+ the report_death callback. If a tracing engine -+ has requested both DEATH and -+ REAP event reports, it's guaranteed that the -+ report_reap callback will not be made until -+ after the report_death callback has returned. -+ If the report_death callback itself detaches -+ from the thread, then the report_reap callback -+ will never be made. Thus it is safe for a -+ report_death callback to clean up data -+ structures and detach. 
-+ -+ -+ -+ Interlock with final callbacks -+ -+ The final sort of guarantee is that a tracing engine will know for sure -+ whether or not the report_death and/or -+ report_reap callbacks will be made for a certain -+ thread. These tear-down races are disambiguated by the error return -+ values of utrace_set_events and -+ utrace_control. Normally -+ utrace_control called with -+ UTRACE_DETACH returns zero, and this means that no -+ more callbacks will be made. If the thread is in the midst of dying, -+ it returns -EALREADY to indicate that the -+ report_death callback may already be in progress; -+ when you get this error, you know that any cleanup your -+ report_death callback does is about to happen or -+ has just happened--note that if the report_death -+ callback does not detach, the engine remains attached until the thread -+ gets reaped. If the thread is in the midst of being reaped, -+ utrace_control returns -ESRCH -+ to indicate that the report_reap callback may -+ already be in progress; this means the engine is implicitly detached -+ when the callback completes. This makes it possible for a tracing -+ engine that has decided asynchronously to detach from a thread to -+ safely clean up its data structures, knowing that no -+ report_death or report_reap -+ callback will try to do the same. utrace_detach -+ returns -ESRCH when the struct -+ utrace_engine has already been detached, but is -+ still a valid pointer because of its reference count. A tracing engine -+ can use this to safely synchronize its own independent multiple threads -+ of control with each other and with its event callbacks that detach. -+ -+ -+ -+ In the same vein, utrace_set_events normally -+ returns zero; if the target thread was stopped before the call, then -+ after a successful call, no event callbacks not requested in the new -+ flags will be made. 
It fails with -EALREADY if -+ you try to clear UTRACE_EVENT(DEATH) when the -+ report_death callback may already have begun, if -+ you try to clear UTRACE_EVENT(REAP) when the -+ report_reap callback may already have begun, or if -+ you try to newly set UTRACE_EVENT(DEATH) or -+ UTRACE_EVENT(QUIESCE) when the target is already -+ dead or dying. Like utrace_control, it returns -+ -ESRCH when the thread has already been detached -+ (including forcible detach on reaping). This lets the tracing engine -+ know for sure which event callbacks it will or won't see after -+ utrace_set_events has returned. By checking for -+ errors, it can know whether to clean up its data structures immediately -+ or to let its callbacks do the work. -+ -+ -+ -+ Using <function>utrace_barrier</function> -+ -+ When a thread is safely stopped, calling -+ utrace_control with UTRACE_DETACH -+ or calling utrace_set_events to disable some events -+ ensures synchronously that your engine won't get any more of the callbacks -+ that have been disabled (none at all when detaching). But these can also -+ be used while the thread is not stopped, when it might be simultaneously -+ making a callback to your engine. For this situation, these calls return -+ -EINPROGRESS when it's possible a callback is in -+ progress. If you are not prepared to have your old callbacks still run, -+ then you can synchronize to be sure all the old callbacks are finished, -+ using utrace_barrier. This is necessary if the -+ kernel module containing your callback code is going to be unloaded. -+ -+ -+ After using UTRACE_DETACH once, further calls to -+ utrace_control with the same engine pointer will -+ return -ESRCH. In contrast, after getting -+ -EINPROGRESS from -+ utrace_set_events, you can call -+ utrace_set_events again later and if it returns zero -+ then know the old callbacks have finished. 
-+ -+ -+ Unlike all other calls, utrace_barrier (and -+ utrace_barrier_pid) will accept any engine pointer you -+ hold a reference on, even if UTRACE_DETACH has already -+ been used. After any utrace_control or -+ utrace_set_events call (these do not block), you can -+ call utrace_barrier to block until callbacks have -+ finished. This returns -ESRCH only if the engine is -+ completely detached (finished all callbacks). Otherwise it waits -+ until the thread is definitely not in the midst of a callback to this -+ engine and then returns zero, but can return -+ -ERESTARTSYS if its wait is interrupted. -+ -+ -+ -+ -+ -+ -+ -+utrace core API -+ -+ -+ The utrace API is declared in <linux/utrace.h>. -+ -+ -+!Iinclude/linux/utrace.h -+!Ekernel/utrace.c -+ -+ -+ -+Machine State -+ -+ -+ The task_current_syscall function can be used on any -+ valid struct task_struct at any time, and does -+ not even require that utrace_attach_task was used at all. -+ -+ -+ -+ The other ways to access the registers and other machine-dependent state of -+ a task can only be used on a task that is at a known safe point. The safe -+ points are all the places where utrace_set_events can -+ request callbacks (except for the DEATH and -+ REAP events). So at any event callback, it is safe to -+ examine current. -+ -+ -+ -+ One task can examine another only after a callback in the target task that -+ returns UTRACE_STOP so that task will not return to user -+ mode after the safe point. This guarantees that the task will not resume -+ until the same engine uses utrace_control, unless the -+ task dies suddenly. To examine safely, one must use a pair of calls to -+ utrace_prepare_examine and -+ utrace_finish_examine surrounding the calls to -+ struct user_regset functions or direct examination -+ of task data structures. utrace_prepare_examine returns -+ an error if the task is not properly stopped and not dead. 
After a -+ successful examination, the paired utrace_finish_examine -+ call returns an error if the task ever woke up during the examination. If -+ so, any data gathered may be scrambled and should be discarded. This means -+ there was a spurious wake-up (which should not happen), or a sudden death. -+ -+ -+<structname>struct user_regset</structname> -+ -+ -+ The struct user_regset API -+ is declared in <linux/regset.h>. -+ -+ -+!Finclude/linux/regset.h -+ -+ -+ -+ -+ <filename>System Call Information</filename> -+ -+ -+ This function is declared in <linux/ptrace.h>. -+ -+ -+!Elib/syscall.c -+ -+ -+ -+<filename>System Call Tracing</filename> -+ -+ -+ The arch API for system call information is declared in -+ <asm/syscall.h>. -+ Each of these calls can be used only at system call entry tracing, -+ or can be used only at system call exit and the subsequent safe points -+ before returning to user mode. -+ At system call entry tracing means either during a -+ report_syscall_entry callback, -+ or any time after that callback has returned UTRACE_STOP. -+ -+ -+!Finclude/asm-generic/syscall.h -+ -+ -+ -+ -+ -+Kernel Internals -+ -+ -+ This chapter covers the interface to the tracing infrastructure -+ from the core of the kernel and the architecture-specific code. -+ This is for maintainers of the kernel and arch code, and not relevant -+ to using the tracing facilities described in preceding chapters. -+ -+ -+Core Calls In -+ -+ -+ These calls are declared in <linux/tracehook.h>. -+ The core kernel calls these functions at various important places. -+ -+ -+!Finclude/linux/tracehook.h -+ -+ -+ -+Architecture Calls Out -+ -+ -+ An arch that has done all these things sets -+ CONFIG_HAVE_ARCH_TRACEHOOK. -+ This is required to enable the utrace code. -+ -+ -+<filename><asm/ptrace.h></filename> -+ -+ -+ An arch defines these in <asm/ptrace.h> -+ if it supports hardware single-step or block-step features. 
-+ -+ -+!Finclude/linux/ptrace.h arch_has_single_step arch_has_block_step -+!Finclude/linux/ptrace.h user_enable_single_step user_enable_block_step -+!Finclude/linux/ptrace.h user_disable_single_step -+ -+ -+ -+ -+ <filename><asm/syscall.h></filename> -+ -+ -+ An arch provides <asm/syscall.h> that -+ defines these as inlines, or declares them as exported functions. -+ These interfaces are described in . -+ -+ -+ -+ -+ -+ <filename><linux/tracehook.h></filename> -+ -+ -+ An arch must define TIF_NOTIFY_RESUME -+ and TIF_SYSCALL_TRACE -+ in its <asm/thread_info.h>. -+ The arch code must call the following functions, all declared -+ in <linux/tracehook.h> and -+ described in : -+ -+ -+ -+ tracehook_notify_resume -+ -+ -+ tracehook_report_syscall_entry -+ -+ -+ tracehook_report_syscall_exit -+ -+ -+ tracehook_signal_handler -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ ---- a/fs/proc/array.c -+++ b/fs/proc/array.c -@@ -81,6 +81,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -192,6 +193,8 @@ static inline void task_state(struct seq - cred->uid, cred->euid, cred->suid, cred->fsuid, - cred->gid, cred->egid, cred->sgid, cred->fsgid); - -+ task_utrace_proc_status(m, p); -+ - task_lock(p); - if (p->files) - fdt = files_fdtable(p->files); ---- a/include/linux/init_task.h -+++ b/include/linux/init_task.h -@@ -165,6 +165,7 @@ extern struct cred init_cred; - }, \ - .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ - .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ -+ INIT_UTRACE(tsk) \ - INIT_IDS \ - INIT_PERF_EVENTS(tsk) \ - INIT_TRACE_IRQFLAGS \ ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -61,6 +61,7 @@ struct sched_param { - #include - #include - #include -+#include - - #include - #include -@@ -1339,6 +1340,11 @@ struct task_struct { - #endif - seccomp_t seccomp; - -+#ifdef CONFIG_UTRACE -+ struct utrace utrace; -+ unsigned long utrace_flags; -+#endif -+ - /* Thread group tracking */ - u32 parent_exec_id; - u32 self_exec_id; ---- 
a/include/linux/tracehook.h -+++ b/include/linux/tracehook.h -@@ -49,6 +49,7 @@ - #include - #include - #include -+#include - struct linux_binprm; - - /** -@@ -63,6 +64,8 @@ struct linux_binprm; - */ - static inline int tracehook_expect_breakpoints(struct task_struct *task) - { -+ if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_CORE))) -+ return 1; - return (task_ptrace(task) & PT_PTRACED) != 0; - } - -@@ -111,6 +114,9 @@ static inline void ptrace_report_syscall - static inline __must_check int tracehook_report_syscall_entry( - struct pt_regs *regs) - { -+ if ((task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_ENTRY)) && -+ utrace_report_syscall_entry(regs)) -+ return 1; - ptrace_report_syscall(regs); - return 0; - } -@@ -141,6 +147,8 @@ static inline void tracehook_report_sysc - return; - } - -+ if (task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_EXIT)) -+ utrace_report_syscall_exit(regs); - ptrace_report_syscall(regs); - } - -@@ -201,6 +209,8 @@ static inline void tracehook_report_exec - struct linux_binprm *bprm, - struct pt_regs *regs) - { -+ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXEC))) -+ utrace_report_exec(fmt, bprm, regs); - if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) && - unlikely(task_ptrace(current) & PT_PTRACED)) - send_sig(SIGTRAP, current, 0); -@@ -218,6 +228,8 @@ static inline void tracehook_report_exec - */ - static inline void tracehook_report_exit(long *exit_code) - { -+ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXIT))) -+ utrace_report_exit(exit_code); - ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code); - } - -@@ -261,6 +273,7 @@ static inline int tracehook_prepare_clon - static inline void tracehook_finish_clone(struct task_struct *child, - unsigned long clone_flags, int trace) - { -+ utrace_init_task(child); - ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace); - } - -@@ -285,6 +298,8 @@ static inline void tracehook_report_clon - unsigned long clone_flags, - pid_t pid, 
struct task_struct *child) - { -+ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE))) -+ utrace_report_clone(clone_flags, child); - if (unlikely(task_ptrace(child))) { - /* - * It doesn't matter who attached/attaching to this -@@ -317,6 +332,9 @@ static inline void tracehook_report_clon - pid_t pid, - struct task_struct *child) - { -+ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE)) && -+ (clone_flags & CLONE_VFORK)) -+ utrace_finish_vfork(current); - if (unlikely(trace)) - ptrace_event(0, trace, pid); - } -@@ -351,6 +369,10 @@ static inline void tracehook_report_vfor - */ - static inline void tracehook_prepare_release_task(struct task_struct *task) - { -+ /* see utrace_add_engine() about this barrier */ -+ smp_mb(); -+ if (task_utrace_flags(task)) -+ utrace_release_task(task); - } - - /** -@@ -365,6 +387,7 @@ static inline void tracehook_prepare_rel - static inline void tracehook_finish_release_task(struct task_struct *task) - { - ptrace_release_task(task); -+ BUG_ON(task->exit_state != EXIT_DEAD); - } - - /** -@@ -386,6 +409,8 @@ static inline void tracehook_signal_hand - const struct k_sigaction *ka, - struct pt_regs *regs, int stepping) - { -+ if (task_utrace_flags(current)) -+ utrace_signal_handler(current, stepping); - if (stepping) - ptrace_notify(SIGTRAP); - } -@@ -403,6 +428,8 @@ static inline void tracehook_signal_hand - static inline int tracehook_consider_ignored_signal(struct task_struct *task, - int sig) - { -+ if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_IGN))) -+ return 1; - return (task_ptrace(task) & PT_PTRACED) != 0; - } - -@@ -422,6 +449,9 @@ static inline int tracehook_consider_ign - static inline int tracehook_consider_fatal_signal(struct task_struct *task, - int sig) - { -+ if (unlikely(task_utrace_flags(task) & (UTRACE_EVENT(SIGNAL_TERM) | -+ UTRACE_EVENT(SIGNAL_CORE)))) -+ return 1; - return (task_ptrace(task) & PT_PTRACED) != 0; - } - -@@ -436,6 +466,8 @@ static inline int tracehook_consider_fat - */ 
- static inline int tracehook_force_sigpending(void) - { -+ if (unlikely(task_utrace_flags(current))) -+ return utrace_interrupt_pending(); - return 0; - } - -@@ -465,6 +497,8 @@ static inline int tracehook_get_signal(s - siginfo_t *info, - struct k_sigaction *return_ka) - { -+ if (unlikely(task_utrace_flags(task))) -+ return utrace_get_signal(task, regs, info, return_ka); - return 0; - } - -@@ -492,6 +526,8 @@ static inline int tracehook_get_signal(s - */ - static inline int tracehook_notify_jctl(int notify, int why) - { -+ if (task_utrace_flags(current) & UTRACE_EVENT(JCTL)) -+ utrace_report_jctl(notify, why); - return notify ?: (current->ptrace & PT_PTRACED) ? why : 0; - } - -@@ -502,6 +538,8 @@ static inline int tracehook_notify_jctl( - */ - static inline void tracehook_finish_jctl(void) - { -+ if (task_utrace_flags(current)) -+ utrace_finish_jctl(); - } - - #define DEATH_REAP -1 -@@ -524,6 +562,8 @@ static inline void tracehook_finish_jctl - static inline int tracehook_notify_death(struct task_struct *task, - void **death_cookie, int group_dead) - { -+ *death_cookie = task_utrace_struct(task); -+ - if (task_detached(task)) - return task->ptrace ? SIGCHLD : DEATH_REAP; - -@@ -560,6 +600,20 @@ static inline void tracehook_report_deat - int signal, void *death_cookie, - int group_dead) - { -+ /* -+ * This barrier ensures that our caller's setting of -+ * @task->exit_state precedes checking @task->utrace_flags here. -+ * If utrace_set_events() was just called to enable -+ * UTRACE_EVENT(DEATH), then we are obliged to call -+ * utrace_report_death() and not miss it. utrace_set_events() -+ * uses tasklist_lock to synchronize enabling the bit with the -+ * actual change to @task->exit_state, but we need this barrier -+ * to be sure we see a flags change made just before our caller -+ * took the tasklist_lock. 
-+ */ -+ smp_mb(); -+ if (task_utrace_flags(task) & _UTRACE_DEATH_EVENTS) -+ utrace_report_death(task, death_cookie, group_dead, signal); - } - - #ifdef TIF_NOTIFY_RESUME -@@ -589,10 +643,20 @@ static inline void set_notify_resume(str - * asynchronously, this will be called again before we return to - * user mode. - * -- * Called without locks. -+ * Called without locks. However, on some machines this may be -+ * called with interrupts disabled. - */ - static inline void tracehook_notify_resume(struct pt_regs *regs) - { -+ struct task_struct *task = current; -+ /* -+ * This pairs with the barrier implicit in set_notify_resume(). -+ * It ensures that we read the nonzero utrace_flags set before -+ * set_notify_resume() was called by utrace setup. -+ */ -+ smp_rmb(); -+ if (task_utrace_flags(task)) -+ utrace_resume(task, regs); - } - #endif /* TIF_NOTIFY_RESUME */ - ---- /dev/null -+++ b/include/linux/utrace.h -@@ -0,0 +1,702 @@ -+/* -+ * utrace infrastructure interface for debugging user processes -+ * -+ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. -+ * -+ * This copyrighted material is made available to anyone wishing to use, -+ * modify, copy, or redistribute it subject to the terms and conditions -+ * of the GNU General Public License v.2. -+ * -+ * Red Hat Author: Roland McGrath. -+ * -+ * This interface allows for notification of interesting events in a -+ * thread. It also mediates access to thread state such as registers. -+ * Multiple unrelated users can be associated with a single thread. -+ * We call each of these a tracing engine. -+ * -+ * A tracing engine starts by calling utrace_attach_task() or -+ * utrace_attach_pid() on the chosen thread, passing in a set of hooks -+ * (&struct utrace_engine_ops), and some associated data. This produces a -+ * &struct utrace_engine, which is the handle used for all other -+ * operations. An attached engine has its ops vector, its data, and an -+ * event mask controlled by utrace_set_events(). 
-+ * -+ * For each event bit that is set, that engine will get the -+ * appropriate ops->report_*() callback when the event occurs. The -+ * &struct utrace_engine_ops need not provide callbacks for an event -+ * unless the engine sets one of the associated event bits. -+ */ -+ -+#ifndef _LINUX_UTRACE_H -+#define _LINUX_UTRACE_H 1 -+ -+#include -+#include -+#include -+#include -+ -+struct linux_binprm; -+struct pt_regs; -+struct utrace; -+struct user_regset; -+struct user_regset_view; -+ -+/* -+ * Event bits passed to utrace_set_events(). -+ * These appear in &struct task_struct.@utrace_flags -+ * and &struct utrace_engine.@flags. -+ */ -+enum utrace_events { -+ _UTRACE_EVENT_QUIESCE, /* Thread is available for examination. */ -+ _UTRACE_EVENT_REAP, /* Zombie reaped, no more tracing possible. */ -+ _UTRACE_EVENT_CLONE, /* Successful clone/fork/vfork just done. */ -+ _UTRACE_EVENT_EXEC, /* Successful execve just completed. */ -+ _UTRACE_EVENT_EXIT, /* Thread exit in progress. */ -+ _UTRACE_EVENT_DEATH, /* Thread has died. */ -+ _UTRACE_EVENT_SYSCALL_ENTRY, /* User entered kernel for system call. */ -+ _UTRACE_EVENT_SYSCALL_EXIT, /* Returning to user after system call. */ -+ _UTRACE_EVENT_SIGNAL, /* Signal delivery will run a user handler. */ -+ _UTRACE_EVENT_SIGNAL_IGN, /* No-op signal to be delivered. */ -+ _UTRACE_EVENT_SIGNAL_STOP, /* Signal delivery will suspend. */ -+ _UTRACE_EVENT_SIGNAL_TERM, /* Signal delivery will terminate. */ -+ _UTRACE_EVENT_SIGNAL_CORE, /* Signal delivery will dump core. */ -+ _UTRACE_EVENT_JCTL, /* Job control stop or continue completed. */ -+ _UTRACE_NEVENTS -+}; -+#define UTRACE_EVENT(type) (1UL << _UTRACE_EVENT_##type) -+ -+/* -+ * All the kinds of signal events. -+ * These all use the @report_signal() callback. 
-+ */ -+#define UTRACE_EVENT_SIGNAL_ALL (UTRACE_EVENT(SIGNAL) \ -+ | UTRACE_EVENT(SIGNAL_IGN) \ -+ | UTRACE_EVENT(SIGNAL_STOP) \ -+ | UTRACE_EVENT(SIGNAL_TERM) \ -+ | UTRACE_EVENT(SIGNAL_CORE)) -+/* -+ * Both kinds of syscall events; these call the @report_syscall_entry() -+ * and @report_syscall_exit() callbacks, respectively. -+ */ -+#define UTRACE_EVENT_SYSCALL \ -+ (UTRACE_EVENT(SYSCALL_ENTRY) | UTRACE_EVENT(SYSCALL_EXIT)) -+ -+/* -+ * The event reports triggered synchronously by task death. -+ */ -+#define _UTRACE_DEATH_EVENTS (UTRACE_EVENT(DEATH) | UTRACE_EVENT(QUIESCE)) -+ -+/* -+ * Hooks in call these entry points to the -+ * utrace dispatch. They are weak references here only so -+ * tracehook.h doesn't need to #ifndef CONFIG_UTRACE them to -+ * avoid external references in case of unoptimized compilation. -+ */ -+bool utrace_interrupt_pending(void) -+ __attribute__((weak)); -+void utrace_resume(struct task_struct *, struct pt_regs *) -+ __attribute__((weak)); -+int utrace_get_signal(struct task_struct *, struct pt_regs *, -+ siginfo_t *, struct k_sigaction *) -+ __attribute__((weak)); -+void utrace_report_clone(unsigned long, struct task_struct *) -+ __attribute__((weak)); -+void utrace_finish_vfork(struct task_struct *) -+ __attribute__((weak)); -+void utrace_report_exit(long *exit_code) -+ __attribute__((weak)); -+void utrace_report_death(struct task_struct *, struct utrace *, bool, int) -+ __attribute__((weak)); -+void utrace_report_jctl(int notify, int type) -+ __attribute__((weak)); -+void utrace_finish_jctl(void) -+ __attribute__((weak)); -+void utrace_report_exec(struct linux_binfmt *, struct linux_binprm *, -+ struct pt_regs *regs) -+ __attribute__((weak)); -+bool utrace_report_syscall_entry(struct pt_regs *) -+ __attribute__((weak)); -+void utrace_report_syscall_exit(struct pt_regs *) -+ __attribute__((weak)); -+void utrace_signal_handler(struct task_struct *, int) -+ __attribute__((weak)); -+ -+#ifndef CONFIG_UTRACE -+ -+/* -+ * uses these 
accessors to avoid #ifdef CONFIG_UTRACE. -+ */ -+static inline unsigned long task_utrace_flags(struct task_struct *task) -+{ -+ return 0; -+} -+static inline struct utrace *task_utrace_struct(struct task_struct *task) -+{ -+ return NULL; -+} -+static inline void utrace_init_task(struct task_struct *child) -+{ -+} -+static inline void utrace_release_task(struct task_struct *task) -+{ -+} -+ -+static inline void task_utrace_proc_status(struct seq_file *m, -+ struct task_struct *p) -+{ -+} -+ -+#else /* CONFIG_UTRACE */ -+ -+static inline unsigned long task_utrace_flags(struct task_struct *task) -+{ -+ return task->utrace_flags; -+} -+ -+static inline struct utrace *task_utrace_struct(struct task_struct *task) -+{ -+ return &task->utrace; -+} -+ -+static inline void utrace_init_task(struct task_struct *task) -+{ -+ task->utrace_flags = 0; -+ memset(&task->utrace, 0, sizeof(task->utrace)); -+ INIT_LIST_HEAD(&task->utrace.attached); -+ INIT_LIST_HEAD(&task->utrace.attaching); -+ spin_lock_init(&task->utrace.lock); -+} -+ -+void utrace_release_task(struct task_struct *); -+void task_utrace_proc_status(struct seq_file *m, struct task_struct *p); -+ -+ -+/* -+ * Version number of the API defined in this file. This will change -+ * whenever a tracing engine's code would need some updates to keep -+ * working. We maintain this here for the benefit of tracing engine code -+ * that is developed concurrently with utrace API improvements before they -+ * are merged into the kernel, making LINUX_VERSION_CODE checks unwieldy. -+ */ -+#define UTRACE_API_VERSION 20090416 -+ -+/** -+ * enum utrace_resume_action - engine's choice of action for a traced task -+ * @UTRACE_STOP: Stay quiescent after callbacks. -+ * @UTRACE_INTERRUPT: Make @report_signal() callback soon. -+ * @UTRACE_REPORT: Make some callback soon. -+ * @UTRACE_SINGLESTEP: Resume in user mode for one instruction. -+ * @UTRACE_BLOCKSTEP: Resume in user mode until next branch. 
-+ * @UTRACE_RESUME: Resume normally in user mode. -+ * @UTRACE_DETACH: Detach my engine (implies %UTRACE_RESUME). -+ * -+ * See utrace_control() for detailed descriptions of each action. This is -+ * encoded in the @action argument and the return value for every callback -+ * with a &u32 return value. -+ * -+ * The order of these is important. When there is more than one engine, -+ * each supplies its choice and the smallest value prevails. -+ */ -+enum utrace_resume_action { -+ UTRACE_STOP, -+ UTRACE_INTERRUPT, -+ UTRACE_REPORT, -+ UTRACE_SINGLESTEP, -+ UTRACE_BLOCKSTEP, -+ UTRACE_RESUME, -+ UTRACE_DETACH -+}; -+#define UTRACE_RESUME_MASK 0x0f -+ -+/** -+ * utrace_resume_action - &enum utrace_resume_action from callback action -+ * @action: &u32 callback @action argument or return value -+ * -+ * This extracts the &enum utrace_resume_action from @action, -+ * which is the @action argument to a &struct utrace_engine_ops -+ * callback or the return value from one. -+ */ -+static inline enum utrace_resume_action utrace_resume_action(u32 action) -+{ -+ return action & UTRACE_RESUME_MASK; -+} -+ -+/** -+ * enum utrace_signal_action - disposition of signal -+ * @UTRACE_SIGNAL_DELIVER: Deliver according to sigaction. -+ * @UTRACE_SIGNAL_IGN: Ignore the signal. -+ * @UTRACE_SIGNAL_TERM: Terminate the process. -+ * @UTRACE_SIGNAL_CORE: Terminate with core dump. -+ * @UTRACE_SIGNAL_STOP: Deliver as absolute stop. -+ * @UTRACE_SIGNAL_TSTP: Deliver as job control stop. -+ * @UTRACE_SIGNAL_REPORT: Reporting before pending signals. -+ * @UTRACE_SIGNAL_HANDLER: Reporting after signal handler setup. -+ * -+ * This is encoded in the @action argument and the return value for -+ * a @report_signal() callback. It says what will happen to the -+ * signal described by the &siginfo_t parameter to the callback. -+ * -+ * The %UTRACE_SIGNAL_REPORT value is used in an @action argument when -+ * a tracing report is being made before dequeuing any pending signal. 
-+ * If this is immediately after a signal handler has been set up, then -+ * %UTRACE_SIGNAL_HANDLER is used instead. A @report_signal callback -+ * that uses %UTRACE_SIGNAL_DELIVER|%UTRACE_SINGLESTEP will ensure -+ * it sees a %UTRACE_SIGNAL_HANDLER report. -+ */ -+enum utrace_signal_action { -+ UTRACE_SIGNAL_DELIVER = 0x00, -+ UTRACE_SIGNAL_IGN = 0x10, -+ UTRACE_SIGNAL_TERM = 0x20, -+ UTRACE_SIGNAL_CORE = 0x30, -+ UTRACE_SIGNAL_STOP = 0x40, -+ UTRACE_SIGNAL_TSTP = 0x50, -+ UTRACE_SIGNAL_REPORT = 0x60, -+ UTRACE_SIGNAL_HANDLER = 0x70 -+}; -+#define UTRACE_SIGNAL_MASK 0xf0 -+#define UTRACE_SIGNAL_HOLD 0x100 /* Flag, push signal back on queue. */ -+ -+/** -+ * utrace_signal_action - &enum utrace_signal_action from callback action -+ * @action: @report_signal callback @action argument or return value -+ * -+ * This extracts the &enum utrace_signal_action from @action, which -+ * is the @action argument to a @report_signal callback or the -+ * return value from one. -+ */ -+static inline enum utrace_signal_action utrace_signal_action(u32 action) -+{ -+ return action & UTRACE_SIGNAL_MASK; -+} -+ -+/** -+ * enum utrace_syscall_action - disposition of system call attempt -+ * @UTRACE_SYSCALL_RUN: Run the system call. -+ * @UTRACE_SYSCALL_ABORT: Don't run the system call. -+ * -+ * This is encoded in the @action argument and the return value for -+ * a @report_syscall_entry callback. -+ */ -+enum utrace_syscall_action { -+ UTRACE_SYSCALL_RUN = 0x00, -+ UTRACE_SYSCALL_ABORT = 0x10 -+}; -+#define UTRACE_SYSCALL_MASK 0xf0 -+ -+/** -+ * utrace_syscall_action - &enum utrace_syscall_action from callback action -+ * @action: @report_syscall_entry callback @action or return value -+ * -+ * This extracts the &enum utrace_syscall_action from @action, which -+ * is the @action argument to a @report_syscall_entry callback or the -+ * return value from one. 
-+ */ -+static inline enum utrace_syscall_action utrace_syscall_action(u32 action) -+{ -+ return action & UTRACE_SYSCALL_MASK; -+} -+ -+/* -+ * Flags for utrace_attach_task() and utrace_attach_pid(). -+ */ -+#define UTRACE_ATTACH_CREATE 0x0010 /* Attach a new engine. */ -+#define UTRACE_ATTACH_EXCLUSIVE 0x0020 /* Refuse if existing match. */ -+#define UTRACE_ATTACH_MATCH_OPS 0x0001 /* Match engines on ops. */ -+#define UTRACE_ATTACH_MATCH_DATA 0x0002 /* Match engines on data. */ -+#define UTRACE_ATTACH_MATCH_MASK 0x000f -+ -+/** -+ * struct utrace_engine - per-engine structure -+ * @ops: &struct utrace_engine_ops pointer passed to utrace_attach_task() -+ * @data: engine-private &void * passed to utrace_attach_task() -+ * @flags: event mask set by utrace_set_events() plus internal flag bits -+ * -+ * The task itself never has to worry about engines detaching while -+ * it's doing event callbacks. These structures are removed from the -+ * task's active list only when it's stopped, or by the task itself. -+ * -+ * utrace_engine_get() and utrace_engine_put() maintain a reference count. -+ * When it drops to zero, the structure is freed. One reference is held -+ * implicitly while the engine is attached to its task. -+ */ -+struct utrace_engine { -+/* private: */ -+ struct kref kref; -+ void (*release)(void *); -+ struct list_head entry; -+ -+/* public: */ -+ const struct utrace_engine_ops *ops; -+ void *data; -+ -+ unsigned long flags; -+}; -+ -+/** -+ * utrace_engine_get - acquire a reference on a &struct utrace_engine -+ * @engine: &struct utrace_engine pointer -+ * -+ * You must hold a reference on @engine, and you get another. 
-+ */ -+static inline void utrace_engine_get(struct utrace_engine *engine) -+{ -+ kref_get(&engine->kref); -+} -+ -+void __utrace_engine_release(struct kref *); -+ -+/** -+ * utrace_engine_put - release a reference on a &struct utrace_engine -+ * @engine: &struct utrace_engine pointer -+ * -+ * You must hold a reference on @engine, and you lose that reference. -+ * If it was the last one, @engine becomes an invalid pointer. -+ */ -+static inline void utrace_engine_put(struct utrace_engine *engine) -+{ -+ kref_put(&engine->kref, __utrace_engine_release); -+} -+ -+/** -+ * struct utrace_engine_ops - tracing engine callbacks -+ * -+ * Each @report_*() callback corresponds to an %UTRACE_EVENT(*) bit. -+ * utrace_set_events() calls on @engine choose which callbacks will be made -+ * to @engine from @task. -+ * -+ * Most callbacks take an @action argument, giving the resume action -+ * chosen by other tracing engines. All callbacks take an @engine -+ * argument, and a @task argument, which is always equal to @current. -+ * For some calls, @action also includes bits specific to that event -+ * and utrace_resume_action() is used to extract the resume action. -+ * This shows what would happen if @engine wasn't there, or will if -+ * the callback's return value uses %UTRACE_RESUME. This always -+ * starts as %UTRACE_RESUME when no other tracing is being done on -+ * this task. -+ * -+ * All return values contain &enum utrace_resume_action bits. For -+ * some calls, other bits specific to that kind of event are added to -+ * the resume action bits with OR. These are the same bits used in -+ * the @action argument. The resume action returned by a callback -+ * does not override previous engines' choices, it only says what -+ * @engine wants done. What @task actually does is the action that's -+ * most constrained among the choices made by all attached engines. -+ * See utrace_control() for more information on the actions. 
-+ * -+ * When %UTRACE_STOP is used in @report_syscall_entry, then @task -+ * stops before attempting the system call. In other cases, the -+ * resume action does not take effect until @task is ready to check -+ * for signals and return to user mode. If there are more callbacks -+ * to be made, the last round of calls determines the final action. -+ * A @report_quiesce callback with @event zero, or a @report_signal -+ * callback, will always be the last one made before @task resumes. -+ * Only %UTRACE_STOP is "sticky"--if @engine returned %UTRACE_STOP -+ * then @task stays stopped unless @engine returns different from a -+ * following callback. -+ * -+ * The report_death() and report_reap() callbacks do not take @action -+ * arguments, and only %UTRACE_DETACH is meaningful in the return value -+ * from a report_death() callback. None of the resume actions applies -+ * to a dead thread. -+ * -+ * All @report_*() hooks are called with no locks held, in a generally -+ * safe environment when we will be returning to user mode soon (or just -+ * entered the kernel). It is fine to block for memory allocation and -+ * the like, but all hooks are asynchronous and must not block on -+ * external events! If you want the thread to block, use %UTRACE_STOP -+ * in your hook's return value; then later wake it up with utrace_control(). -+ * -+ * @report_quiesce: -+ * Requested by %UTRACE_EVENT(%QUIESCE). -+ * This does not indicate any event, but just that @task (the current -+ * thread) is in a safe place for examination. This call is made -+ * before each specific event callback, except for @report_reap. -+ * The @event argument gives the %UTRACE_EVENT(@which) value for -+ * the event occurring. This callback might be made for events @engine -+ * has not requested, if some other engine is tracing the event; -+ * calling utrace_set_events() call here can request the immediate -+ * callback for this occurrence of @event. 
@event is zero when there -+ * is no other event, @task is now ready to check for signals and -+ * return to user mode, and some engine has used %UTRACE_REPORT or -+ * %UTRACE_INTERRUPT to request this callback. For this case, -+ * if @report_signal is not %NULL, the @report_quiesce callback -+ * may be replaced with a @report_signal callback passing -+ * %UTRACE_SIGNAL_REPORT in its @action argument, whenever @task is -+ * entering the signal-check path anyway. -+ * -+ * @report_signal: -+ * Requested by %UTRACE_EVENT(%SIGNAL_*) or %UTRACE_EVENT(%QUIESCE). -+ * Use utrace_signal_action() and utrace_resume_action() on @action. -+ * The signal action is %UTRACE_SIGNAL_REPORT when some engine has -+ * used %UTRACE_REPORT or %UTRACE_INTERRUPT; the callback can choose -+ * to stop or to deliver an artificial signal, before pending signals. -+ * It's %UTRACE_SIGNAL_HANDLER instead when signal handler setup just -+ * finished (after a previous %UTRACE_SIGNAL_DELIVER return); this -+ * serves in lieu of any %UTRACE_SIGNAL_REPORT callback requested by -+ * %UTRACE_REPORT or %UTRACE_INTERRUPT, and is also implicitly -+ * requested by %UTRACE_SINGLESTEP or %UTRACE_BLOCKSTEP into the -+ * signal delivery. The other signal actions indicate a signal about -+ * to be delivered; the previous engine's return value sets the signal -+ * action seen by the the following engine's callback. The @info data -+ * can be changed at will, including @info->si_signo. The settings in -+ * @return_ka determines what %UTRACE_SIGNAL_DELIVER does. @orig_ka -+ * is what was in force before other tracing engines intervened, and -+ * it's %NULL when this report began as %UTRACE_SIGNAL_REPORT or -+ * %UTRACE_SIGNAL_HANDLER. 
For a report without a new signal, @info -+ * is left uninitialized and must be set completely by an engine that -+ * chooses to deliver a signal; if there was a previous @report_signal -+ * callback ending in %UTRACE_STOP and it was just resumed using -+ * %UTRACE_REPORT or %UTRACE_INTERRUPT, then @info is left unchanged -+ * from the previous callback. In this way, the original signal can -+ * be left in @info while returning %UTRACE_STOP|%UTRACE_SIGNAL_IGN -+ * and then found again when resuming @task with %UTRACE_INTERRUPT. -+ * The %UTRACE_SIGNAL_HOLD flag bit can be OR'd into the return value, -+ * and might be in @action if the previous engine returned it. This -+ * flag asks that the signal in @info be pushed back on @task's queue -+ * so that it will be seen again after whatever action is taken now. -+ * -+ * @report_clone: -+ * Requested by %UTRACE_EVENT(%CLONE). -+ * Event reported for parent, before the new task @child might run. -+ * @clone_flags gives the flags used in the clone system call, -+ * or equivalent flags for a fork() or vfork() system call. -+ * This function can use utrace_attach_task() on @child. It's guaranteed -+ * that asynchronous utrace_attach_task() calls will be ordered after -+ * any calls in @report_clone callbacks for the parent. Thus -+ * when using %UTRACE_ATTACH_EXCLUSIVE in the asynchronous calls, -+ * you can be sure that the parent's @report_clone callback has -+ * already attached to @child or chosen not to. Passing %UTRACE_STOP -+ * to utrace_control() on @child here keeps the child stopped before -+ * it ever runs in user mode, %UTRACE_REPORT or %UTRACE_INTERRUPT -+ * ensures a callback from @child before it starts in user mode. -+ * -+ * @report_jctl: -+ * Requested by %UTRACE_EVENT(%JCTL). -+ * Job control event; @type is %CLD_STOPPED or %CLD_CONTINUED, -+ * indicating whether we are stopping or resuming now. 
If @notify -+ * is nonzero, @task is the last thread to stop and so will send -+ * %SIGCHLD to its parent after this callback; @notify reflects -+ * what the parent's %SIGCHLD has in @si_code, which can sometimes -+ * be %CLD_STOPPED even when @type is %CLD_CONTINUED. -+ * -+ * @report_exec: -+ * Requested by %UTRACE_EVENT(%EXEC). -+ * An execve system call has succeeded and the new program is about to -+ * start running. The initial user register state is handy to be tweaked -+ * directly in @regs. @fmt and @bprm gives the details of this exec. -+ * -+ * @report_syscall_entry: -+ * Requested by %UTRACE_EVENT(%SYSCALL_ENTRY). -+ * Thread has entered the kernel to request a system call. -+ * The user register state is handy to be tweaked directly in @regs. -+ * The @action argument contains an &enum utrace_syscall_action, -+ * use utrace_syscall_action() to extract it. The return value -+ * overrides the last engine's action for the system call. -+ * If the final action is %UTRACE_SYSCALL_ABORT, no system call -+ * is made. The details of the system call being attempted can -+ * be fetched here with syscall_get_nr() and syscall_get_arguments(). -+ * The parameter registers can be changed with syscall_set_arguments(). -+ * -+ * @report_syscall_exit: -+ * Requested by %UTRACE_EVENT(%SYSCALL_EXIT). -+ * Thread is about to leave the kernel after a system call request. -+ * The user register state is handy to be tweaked directly in @regs. -+ * The results of the system call attempt can be examined here using -+ * syscall_get_error() and syscall_get_return_value(). It is safe -+ * here to call syscall_set_return_value() or syscall_rollback(). -+ * -+ * @report_exit: -+ * Requested by %UTRACE_EVENT(%EXIT). -+ * Thread is exiting and cannot be prevented from doing so, -+ * but all its state is still live. The @code value will be -+ * the wait result seen by the parent, and can be changed by -+ * this engine or others. 
The @orig_code value is the real -+ * status, not changed by any tracing engine. Returning %UTRACE_STOP -+ * here keeps @task stopped before it cleans up its state and dies, -+ * so it can be examined by other processes. When @task is allowed -+ * to run, it will die and get to the @report_death callback. -+ * -+ * @report_death: -+ * Requested by %UTRACE_EVENT(%DEATH). -+ * Thread is really dead now. It might be reaped by its parent at -+ * any time, or self-reap immediately. Though the actual reaping -+ * may happen in parallel, a report_reap() callback will always be -+ * ordered after a report_death() callback. -+ * -+ * @report_reap: -+ * Requested by %UTRACE_EVENT(%REAP). -+ * Called when someone reaps the dead task (parent, init, or self). -+ * This means the parent called wait, or else this was a detached -+ * thread or a process whose parent ignores SIGCHLD. -+ * No more callbacks are made after this one. -+ * The engine is always detached. -+ * There is nothing more a tracing engine can do about this thread. -+ * After this callback, the @engine pointer will become invalid. -+ * The @task pointer may become invalid if get_task_struct() hasn't -+ * been used to keep it alive. -+ * An engine should always request this callback if it stores the -+ * @engine pointer or stores any pointer in @engine->data, so it -+ * can clean up its data structures. -+ * Unlike other callbacks, this can be called from the parent's context -+ * rather than from the traced thread itself--it must not delay the -+ * parent by blocking. -+ * -+ * @release: -+ * If not %NULL, this is called after the last utrace_engine_put() -+ * call for a &struct utrace_engine, which could be implicit after -+ * a %UTRACE_DETACH return from another callback. Its argument is -+ * the engine's @data member. 
-+ */ -+struct utrace_engine_ops { -+ u32 (*report_quiesce)(enum utrace_resume_action action, -+ struct utrace_engine *engine, -+ struct task_struct *task, -+ unsigned long event); -+ u32 (*report_signal)(u32 action, -+ struct utrace_engine *engine, -+ struct task_struct *task, -+ struct pt_regs *regs, -+ siginfo_t *info, -+ const struct k_sigaction *orig_ka, -+ struct k_sigaction *return_ka); -+ u32 (*report_clone)(enum utrace_resume_action action, -+ struct utrace_engine *engine, -+ struct task_struct *parent, -+ unsigned long clone_flags, -+ struct task_struct *child); -+ u32 (*report_jctl)(enum utrace_resume_action action, -+ struct utrace_engine *engine, -+ struct task_struct *task, -+ int type, int notify); -+ u32 (*report_exec)(enum utrace_resume_action action, -+ struct utrace_engine *engine, -+ struct task_struct *task, -+ const struct linux_binfmt *fmt, -+ const struct linux_binprm *bprm, -+ struct pt_regs *regs); -+ u32 (*report_syscall_entry)(u32 action, -+ struct utrace_engine *engine, -+ struct task_struct *task, -+ struct pt_regs *regs); -+ u32 (*report_syscall_exit)(enum utrace_resume_action action, -+ struct utrace_engine *engine, -+ struct task_struct *task, -+ struct pt_regs *regs); -+ u32 (*report_exit)(enum utrace_resume_action action, -+ struct utrace_engine *engine, -+ struct task_struct *task, -+ long orig_code, long *code); -+ u32 (*report_death)(struct utrace_engine *engine, -+ struct task_struct *task, -+ bool group_dead, int signal); -+ void (*report_reap)(struct utrace_engine *engine, -+ struct task_struct *task); -+ void (*release)(void *data); -+}; -+ -+/** -+ * struct utrace_examiner - private state for using utrace_prepare_examine() -+ * -+ * The members of &struct utrace_examiner are private to the implementation. -+ * This data type holds the state from a call to utrace_prepare_examine() -+ * to be used by a call to utrace_finish_examine(). 
-+ */ -+struct utrace_examiner { -+/* private: */ -+ long state; -+ unsigned long ncsw; -+}; -+ -+/* -+ * These are the exported entry points for tracing engines to use. -+ * See kernel/utrace.c for their kerneldoc comments with interface details. -+ */ -+struct utrace_engine *utrace_attach_task(struct task_struct *, int, -+ const struct utrace_engine_ops *, -+ void *); -+struct utrace_engine *utrace_attach_pid(struct pid *, int, -+ const struct utrace_engine_ops *, -+ void *); -+int __must_check utrace_control(struct task_struct *, -+ struct utrace_engine *, -+ enum utrace_resume_action); -+int __must_check utrace_set_events(struct task_struct *, -+ struct utrace_engine *, -+ unsigned long eventmask); -+int __must_check utrace_barrier(struct task_struct *, -+ struct utrace_engine *); -+int __must_check utrace_prepare_examine(struct task_struct *, -+ struct utrace_engine *, -+ struct utrace_examiner *); -+int __must_check utrace_finish_examine(struct task_struct *, -+ struct utrace_engine *, -+ struct utrace_examiner *); -+ -+/** -+ * utrace_control_pid - control a thread being traced by a tracing engine -+ * @pid: thread to affect -+ * @engine: attached engine to affect -+ * @action: &enum utrace_resume_action for thread to do -+ * -+ * This is the same as utrace_control(), but takes a &struct pid -+ * pointer rather than a &struct task_struct pointer. The caller must -+ * hold a ref on @pid, but does not need to worry about the task -+ * staying valid. If it's been reaped so that @pid points nowhere, -+ * then this call returns -%ESRCH. -+ */ -+static inline __must_check int utrace_control_pid( -+ struct pid *pid, struct utrace_engine *engine, -+ enum utrace_resume_action action) -+{ -+ /* -+ * We don't bother with rcu_read_lock() here to protect the -+ * task_struct pointer, because utrace_control will return -+ * -ESRCH without looking at that pointer if the engine is -+ * already detached. 
A task_struct pointer can't die before -+ * all the engines are detached in release_task() first. -+ */ -+ struct task_struct *task = pid_task(pid, PIDTYPE_PID); -+ return unlikely(!task) ? -ESRCH : utrace_control(task, engine, action); -+} -+ -+/** -+ * utrace_set_events_pid - choose which event reports a tracing engine gets -+ * @pid: thread to affect -+ * @engine: attached engine to affect -+ * @eventmask: new event mask -+ * -+ * This is the same as utrace_set_events(), but takes a &struct pid -+ * pointer rather than a &struct task_struct pointer. The caller must -+ * hold a ref on @pid, but does not need to worry about the task -+ * staying valid. If it's been reaped so that @pid points nowhere, -+ * then this call returns -%ESRCH. -+ */ -+static inline __must_check int utrace_set_events_pid( -+ struct pid *pid, struct utrace_engine *engine, unsigned long eventmask) -+{ -+ struct task_struct *task = pid_task(pid, PIDTYPE_PID); -+ return unlikely(!task) ? -ESRCH : -+ utrace_set_events(task, engine, eventmask); -+} -+ -+/** -+ * utrace_barrier_pid - synchronize with simultaneous tracing callbacks -+ * @pid: thread to affect -+ * @engine: engine to affect (can be detached) -+ * -+ * This is the same as utrace_barrier(), but takes a &struct pid -+ * pointer rather than a &struct task_struct pointer. The caller must -+ * hold a ref on @pid, but does not need to worry about the task -+ * staying valid. If it's been reaped so that @pid points nowhere, -+ * then this call returns -%ESRCH. -+ */ -+static inline __must_check int utrace_barrier_pid(struct pid *pid, -+ struct utrace_engine *engine) -+{ -+ struct task_struct *task = pid_task(pid, PIDTYPE_PID); -+ return unlikely(!task) ? -ESRCH : utrace_barrier(task, engine); -+} -+ -+#endif /* CONFIG_UTRACE */ -+ -+#endif /* linux/utrace.h */ ---- /dev/null -+++ b/include/linux/utrace_struct.h -@@ -0,0 +1,59 @@ -+/* -+ * 'struct utrace' data structure for kernel/utrace.c private use. 
-+ * -+ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. -+ * -+ * This copyrighted material is made available to anyone wishing to use, -+ * modify, copy, or redistribute it subject to the terms and conditions -+ * of the GNU General Public License v.2. -+ */ -+ -+#ifndef _LINUX_UTRACE_STRUCT_H -+#define _LINUX_UTRACE_STRUCT_H 1 -+ -+#ifdef CONFIG_UTRACE -+ -+#include -+#include -+ -+/* -+ * Per-thread structure private to utrace implementation. This properly -+ * belongs in kernel/utrace.c and its use is entirely private to the code -+ * there. It is only defined in a header file so that it can be embedded -+ * in the struct task_struct layout. It is here rather than in utrace.h -+ * to avoid header nesting order issues getting too complex. -+ * -+ */ -+struct utrace { -+ struct task_struct *cloning; -+ -+ struct list_head attached, attaching; -+ spinlock_t lock; -+ -+ struct utrace_engine *reporting; -+ -+ unsigned int stopped:1; -+ unsigned int report:1; -+ unsigned int interrupt:1; -+ unsigned int signal_handler:1; -+ unsigned int vfork_stop:1; /* need utrace_stop() before vfork wait */ -+ unsigned int death:1; /* in utrace_report_death() now */ -+ unsigned int reap:1; /* release_task() has run */ -+ unsigned int pending_attach:1; /* need splice_attaching() */ -+}; -+ -+# define INIT_UTRACE(tsk) \ -+ .utrace_flags = 0, \ -+ .utrace = { \ -+ .lock = __SPIN_LOCK_UNLOCKED(tsk.utrace.lock), \ -+ .attached = LIST_HEAD_INIT(tsk.utrace.attached), \ -+ .attaching = LIST_HEAD_INIT(tsk.utrace.attaching), \ -+ }, -+ -+#else -+ -+# define INIT_UTRACE(tsk) /* Nothing. */ -+ -+#endif /* CONFIG_UTRACE */ -+ -+#endif /* linux/utrace_struct.h */ ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -1315,6 +1315,15 @@ config STOP_MACHINE - help - Need stop_machine() primitive. 
- -+menuconfig UTRACE -+ bool "Infrastructure for tracing and debugging user processes" -+ depends on EXPERIMENTAL -+ depends on HAVE_ARCH_TRACEHOOK -+ help -+ Enable the utrace process tracing interface. This is an internal -+ kernel interface exported to kernel modules, to track events in -+ user threads, extract and change user thread state. -+ - source "block/Kconfig" - - config PREEMPT_NOTIFIERS ---- a/kernel/Makefile -+++ b/kernel/Makefile -@@ -71,6 +71,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o - obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o - obj-$(CONFIG_SMP) += stop_machine.o - obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o -+obj-$(CONFIG_UTRACE) += utrace.o - obj-$(CONFIG_AUDIT) += audit.o auditfilter.o - obj-$(CONFIG_AUDITSYSCALL) += auditsc.o - obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -163,6 +164,14 @@ bool ptrace_may_access(struct task_struc - return !err; - } - -+/* -+ * For experimental use of utrace, exclude ptrace on the same task. -+ */ -+static inline bool exclude_ptrace(struct task_struct *task) -+{ -+ return unlikely(!!task_utrace_flags(task)); -+} -+ - static int ptrace_attach(struct task_struct *task) - { - int retval; -@@ -186,6 +195,8 @@ int ptrace_attach(struct task_struct *ta - - task_lock(task); - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); -+ if (!retval && exclude_ptrace(task)) -+ retval = -EBUSY; - task_unlock(task); - if (retval) - goto unlock_creds; -@@ -223,6 +234,9 @@ int ptrace_traceme(void) - { - int ret = -EPERM; - -+ if (exclude_ptrace(current)) /* XXX locking */ -+ return -EBUSY; -+ - write_lock_irq(&tasklist_lock); - /* Are we already being traced? */ - if (!current->ptrace) { ---- /dev/null -+++ b/kernel/utrace.c -@@ -0,0 +1,2340 @@ -+/* -+ * utrace infrastructure interface for debugging user processes -+ * -+ * Copyright (C) 2006-2009 Red Hat, Inc. 
All rights reserved. -+ * -+ * This copyrighted material is made available to anyone wishing to use, -+ * modify, copy, or redistribute it subject to the terms and conditions -+ * of the GNU General Public License v.2. -+ * -+ * Red Hat Author: Roland McGrath. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+/* -+ * Rules for 'struct utrace', defined in -+ * but used entirely privately in this file. -+ * -+ * The common event reporting loops are done by the task making the -+ * report without ever taking any locks. To facilitate this, the two -+ * lists @attached and @attaching work together for smooth asynchronous -+ * attaching with low overhead. Modifying either list requires @lock. -+ * The @attaching list can be modified any time while holding @lock. -+ * New engines being attached always go on this list. -+ * -+ * The @attached list is what the task itself uses for its reporting -+ * loops. When the task itself is not quiescent, it can use the -+ * @attached list without taking any lock. Nobody may modify the list -+ * when the task is not quiescent. When it is quiescent, that means -+ * that it won't run again without taking @lock itself before using -+ * the list. -+ * -+ * At each place where we know the task is quiescent (or it's current), -+ * while holding @lock, we call splice_attaching(), below. This moves -+ * the @attaching list members on to the end of the @attached list. -+ * Since this happens at the start of any reporting pass, any new -+ * engines attached asynchronously go on the stable @attached list -+ * in time to have their callbacks seen. 
-+ */ -+ -+static struct kmem_cache *utrace_engine_cachep; -+static const struct utrace_engine_ops utrace_detached_ops; /* forward decl */ -+ -+static int __init utrace_init(void) -+{ -+ utrace_engine_cachep = KMEM_CACHE(utrace_engine, SLAB_PANIC); -+ return 0; -+} -+module_init(utrace_init); -+ -+/* -+ * This is called with @utrace->lock held when the task is safely -+ * quiescent, i.e. it won't consult utrace->attached without the lock. -+ * Move any engines attached asynchronously from @utrace->attaching -+ * onto the @utrace->attached list. -+ */ -+static void splice_attaching(struct utrace *utrace) -+{ -+ list_splice_tail_init(&utrace->attaching, &utrace->attached); -+ utrace->pending_attach = 0; -+} -+ -+/* -+ * This is the exported function used by the utrace_engine_put() inline. -+ */ -+void __utrace_engine_release(struct kref *kref) -+{ -+ struct utrace_engine *engine = container_of(kref, struct utrace_engine, -+ kref); -+ BUG_ON(!list_empty(&engine->entry)); -+ if (engine->release) -+ (*engine->release)(engine->data); -+ kmem_cache_free(utrace_engine_cachep, engine); -+} -+EXPORT_SYMBOL_GPL(__utrace_engine_release); -+ -+static bool engine_matches(struct utrace_engine *engine, int flags, -+ const struct utrace_engine_ops *ops, void *data) -+{ -+ if ((flags & UTRACE_ATTACH_MATCH_OPS) && engine->ops != ops) -+ return false; -+ if ((flags & UTRACE_ATTACH_MATCH_DATA) && engine->data != data) -+ return false; -+ return engine->ops && engine->ops != &utrace_detached_ops; -+} -+ -+static struct utrace_engine *matching_engine( -+ struct utrace *utrace, int flags, -+ const struct utrace_engine_ops *ops, void *data) -+{ -+ struct utrace_engine *engine; -+ list_for_each_entry(engine, &utrace->attached, entry) -+ if (engine_matches(engine, flags, ops, data)) -+ return engine; -+ list_for_each_entry(engine, &utrace->attaching, entry) -+ if (engine_matches(engine, flags, ops, data)) -+ return engine; -+ return NULL; -+} -+ -+/* -+ * Called without locks, when we might 
be the first utrace engine to attach. -+ * If this is a newborn thread and we are not the creator, we have to wait -+ * for it. The creator gets the first chance to attach. The PF_STARTING -+ * flag is cleared after its report_clone hook has had a chance to run. -+ */ -+static inline int utrace_attach_delay(struct task_struct *target) -+{ -+ if ((target->flags & PF_STARTING) && -+ current->utrace.cloning != target) -+ do { -+ schedule_timeout_interruptible(1); -+ if (signal_pending(current)) -+ return -ERESTARTNOINTR; -+ } while (target->flags & PF_STARTING); -+ -+ return 0; -+} -+ -+/* -+ * Enqueue @engine, or maybe don't if UTRACE_ATTACH_EXCLUSIVE. -+ */ -+static int utrace_add_engine(struct task_struct *target, -+ struct utrace *utrace, -+ struct utrace_engine *engine, -+ int flags, -+ const struct utrace_engine_ops *ops, -+ void *data) -+{ -+ int ret; -+ -+ spin_lock(&utrace->lock); -+ -+ ret = -EEXIST; -+ if ((flags & UTRACE_ATTACH_EXCLUSIVE) && -+ unlikely(matching_engine(utrace, flags, ops, data))) -+ goto unlock; -+ -+ /* -+ * In case we had no engines before, make sure that -+ * utrace_flags is not zero. -+ */ -+ ret = -ESRCH; -+ if (!target->utrace_flags) { -+ target->utrace_flags = UTRACE_EVENT(REAP); -+ /* -+ * If we race with tracehook_prepare_release_task() -+ * make sure that either it sees utrace_flags != 0 -+ * or we see exit_state == EXIT_DEAD. -+ */ -+ smp_mb(); -+ if (unlikely(target->exit_state == EXIT_DEAD)) { -+ target->utrace_flags = 0; -+ goto unlock; -+ } -+ } -+ -+ /* -+ * Put the new engine on the pending ->attaching list. -+ * Make sure it gets onto the ->attached list by the next -+ * time it's examined. Setting ->pending_attach ensures -+ * that start_report() takes the lock and splices the lists -+ * before the next new reporting pass. -+ * -+ * When target == current, it would be safe just to call -+ * splice_attaching() right here. 
But if we're inside a -+ * callback, that would mean the new engine also gets -+ * notified about the event that precipitated its own -+ * creation. This is not what the user wants. -+ */ -+ list_add_tail(&engine->entry, &utrace->attaching); -+ utrace->pending_attach = 1; -+ ret = 0; -+unlock: -+ spin_unlock(&utrace->lock); -+ -+ return ret; -+} -+ -+/** -+ * utrace_attach_task - attach new engine, or look up an attached engine -+ * @target: thread to attach to -+ * @flags: flag bits combined with OR, see below -+ * @ops: callback table for new engine -+ * @data: engine private data pointer -+ * -+ * The caller must ensure that the @target thread does not get freed, -+ * i.e. hold a ref or be its parent. It is always safe to call this -+ * on @current, or on the @child pointer in a @report_clone callback. -+ * For most other cases, it's easier to use utrace_attach_pid() instead. -+ * -+ * UTRACE_ATTACH_CREATE: -+ * Create a new engine. If %UTRACE_ATTACH_CREATE is not specified, you -+ * only look up an existing engine already attached to the thread. -+ * -+ * UTRACE_ATTACH_EXCLUSIVE: -+ * Attempting to attach a second (matching) engine fails with -%EEXIST. -+ * -+ * UTRACE_ATTACH_MATCH_OPS: Only consider engines matching @ops. -+ * UTRACE_ATTACH_MATCH_DATA: Only consider engines matching @data. -+ * -+ * Calls with neither %UTRACE_ATTACH_MATCH_OPS nor %UTRACE_ATTACH_MATCH_DATA -+ * match the first among any engines attached to @target. That means that -+ * %UTRACE_ATTACH_EXCLUSIVE in such a call fails with -%EEXIST if there -+ * are any engines on @target at all. 
-+ */ -+struct utrace_engine *utrace_attach_task( -+ struct task_struct *target, int flags, -+ const struct utrace_engine_ops *ops, void *data) -+{ -+ struct utrace *utrace = task_utrace_struct(target); -+ struct utrace_engine *engine; -+ int ret; -+ -+ if (!(flags & UTRACE_ATTACH_CREATE)) { -+ spin_lock(&utrace->lock); -+ engine = matching_engine(utrace, flags, ops, data); -+ if (engine) -+ utrace_engine_get(engine); -+ spin_unlock(&utrace->lock); -+ return engine ?: ERR_PTR(-ENOENT); -+ } -+ -+ if (unlikely(!ops) || unlikely(ops == &utrace_detached_ops)) -+ return ERR_PTR(-EINVAL); -+ -+ if (unlikely(target->flags & PF_KTHREAD)) -+ /* -+ * Silly kernel, utrace is for users! -+ */ -+ return ERR_PTR(-EPERM); -+ -+ engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL); -+ if (unlikely(!engine)) -+ return ERR_PTR(-ENOMEM); -+ -+ /* -+ * Initialize the new engine structure. It starts out with two -+ * refs: one ref to return, and one ref for being attached. -+ */ -+ kref_init(&engine->kref); -+ kref_get(&engine->kref); -+ engine->flags = 0; -+ engine->ops = ops; -+ engine->data = data; -+ engine->release = ops->release; -+ -+ ret = utrace_attach_delay(target); -+ if (likely(!ret)) -+ ret = utrace_add_engine(target, utrace, engine, -+ flags, ops, data); -+ -+ if (unlikely(ret)) { -+ kmem_cache_free(utrace_engine_cachep, engine); -+ engine = ERR_PTR(ret); -+ } -+ -+ return engine; -+} -+EXPORT_SYMBOL_GPL(utrace_attach_task); -+ -+/** -+ * utrace_attach_pid - attach new engine, or look up an attached engine -+ * @pid: &struct pid pointer representing thread to attach to -+ * @flags: flag bits combined with OR, see utrace_attach_task() -+ * @ops: callback table for new engine -+ * @data: engine private data pointer -+ * -+ * This is the same as utrace_attach_task(), but takes a &struct pid -+ * pointer rather than a &struct task_struct pointer. The caller must -+ * hold a ref on @pid, but does not need to worry about the task -+ * staying valid. 
If it's been reaped so that @pid points nowhere, -+ * then this call returns -%ESRCH. -+ */ -+struct utrace_engine *utrace_attach_pid( -+ struct pid *pid, int flags, -+ const struct utrace_engine_ops *ops, void *data) -+{ -+ struct utrace_engine *engine = ERR_PTR(-ESRCH); -+ struct task_struct *task = get_pid_task(pid, PIDTYPE_PID); -+ if (task) { -+ engine = utrace_attach_task(task, flags, ops, data); -+ put_task_struct(task); -+ } -+ return engine; -+} -+EXPORT_SYMBOL_GPL(utrace_attach_pid); -+ -+/* -+ * When an engine is detached, the target thread may still see it and -+ * make callbacks until it quiesces. We install a special ops vector -+ * with these two callbacks. When the target thread quiesces, it can -+ * safely free the engine itself. For any event we will always get -+ * the report_quiesce() callback first, so we only need this one -+ * pointer to be set. The only exception is report_reap(), so we -+ * supply that callback too. -+ */ -+static u32 utrace_detached_quiesce(enum utrace_resume_action action, -+ struct utrace_engine *engine, -+ struct task_struct *task, -+ unsigned long event) -+{ -+ return UTRACE_DETACH; -+} -+ -+static void utrace_detached_reap(struct utrace_engine *engine, -+ struct task_struct *task) -+{ -+} -+ -+static const struct utrace_engine_ops utrace_detached_ops = { -+ .report_quiesce = &utrace_detached_quiesce, -+ .report_reap = &utrace_detached_reap -+}; -+ -+/* -+ * The caller has to hold a ref on the engine. If the attached flag is -+ * true (all but utrace_barrier() calls), the engine is supposed to be -+ * attached. If the attached flag is false (utrace_barrier() only), -+ * then return -ERESTARTSYS for an engine marked for detach but not yet -+ * fully detached. The task pointer can be invalid if the engine is -+ * detached. -+ * -+ * Get the utrace lock for the target task. -+ * Returns the struct if locked, or ERR_PTR(-errno). 
-+ * -+ * This has to be robust against races with: -+ * utrace_control(target, UTRACE_DETACH) calls -+ * UTRACE_DETACH after reports -+ * utrace_report_death -+ * utrace_release_task -+ */ -+static struct utrace *get_utrace_lock(struct task_struct *target, -+ struct utrace_engine *engine, -+ bool attached) -+ __acquires(utrace->lock) -+{ -+ struct utrace *utrace; -+ -+ rcu_read_lock(); -+ -+ /* -+ * If this engine was already detached, bail out before we look at -+ * the task_struct pointer at all. If it's detached after this -+ * check, then RCU is still keeping this task_struct pointer valid. -+ * -+ * The ops pointer is NULL when the engine is fully detached. -+ * It's &utrace_detached_ops when it's marked detached but still -+ * on the list. In the latter case, utrace_barrier() still works, -+ * since the target might be in the middle of an old callback. -+ */ -+ if (unlikely(!engine->ops)) { -+ rcu_read_unlock(); -+ return ERR_PTR(-ESRCH); -+ } -+ -+ if (unlikely(engine->ops == &utrace_detached_ops)) { -+ rcu_read_unlock(); -+ return attached ? ERR_PTR(-ESRCH) : ERR_PTR(-ERESTARTSYS); -+ } -+ -+ utrace = &target->utrace; -+ spin_lock(&utrace->lock); -+ if (unlikely(!engine->ops) || -+ unlikely(engine->ops == &utrace_detached_ops)) { -+ /* -+ * By the time we got the utrace lock, -+ * it had been reaped or detached already. -+ */ -+ spin_unlock(&utrace->lock); -+ utrace = ERR_PTR(-ESRCH); -+ if (!attached && engine->ops == &utrace_detached_ops) -+ utrace = ERR_PTR(-ERESTARTSYS); -+ } -+ rcu_read_unlock(); -+ -+ return utrace; -+} -+ -+/* -+ * Now that we don't hold any locks, run through any -+ * detached engines and free their references. Each -+ * engine had one implicit ref while it was attached. 
-+ */ -+static void put_detached_list(struct list_head *list) -+{ -+ struct utrace_engine *engine, *next; -+ list_for_each_entry_safe(engine, next, list, entry) { -+ list_del_init(&engine->entry); -+ utrace_engine_put(engine); -+ } -+} -+ -+/* -+ * Called with utrace->lock held and utrace->reap set. -+ * Notify and clean up all engines, then free utrace. -+ */ -+static void utrace_reap(struct task_struct *target, struct utrace *utrace) -+ __releases(utrace->lock) -+{ -+ struct utrace_engine *engine, *next; -+ -+ /* utrace_add_engine() checks ->utrace_flags != 0 */ -+ target->utrace_flags = 0; -+ splice_attaching(utrace); -+ -+ /* -+ * Since we were called with @utrace->reap set, nobody can -+ * set/clear UTRACE_EVENT(REAP) in @engine->flags or change -+ * @engine->ops, and nobody can change @utrace->attached. -+ */ -+ spin_unlock(&utrace->lock); -+ -+ list_for_each_entry_safe(engine, next, &utrace->attached, entry) { -+ if (engine->flags & UTRACE_EVENT(REAP)) -+ engine->ops->report_reap(engine, target); -+ -+ engine->ops = NULL; -+ engine->flags = 0; -+ list_del_init(&engine->entry); -+ -+ utrace_engine_put(engine); -+ } -+} -+ -+ -+/* -+ * Called by release_task. After this, target->utrace must be cleared. -+ */ -+void utrace_release_task(struct task_struct *target) -+{ -+ struct utrace *utrace; -+ -+ utrace = &target->utrace; -+ -+ spin_lock(&utrace->lock); -+ -+ utrace->reap = 1; -+ -+ /* -+ * If the target will do some final callbacks but hasn't -+ * finished them yet, we know because it clears these event -+ * bits after it's done. Instead of cleaning up here and -+ * requiring utrace_report_death() to cope with it, we delay -+ * the REAP report and the teardown until after the target -+ * finishes its death reports. -+ */ -+ -+ if (target->utrace_flags & _UTRACE_DEATH_EVENTS) -+ spin_unlock(&utrace->lock); -+ else -+ utrace_reap(target, utrace); /* Unlocks. 
*/ -+} -+ -+/* -+ * We use an extra bit in utrace_engine.flags past the event bits, -+ * to record whether the engine is keeping the target thread stopped. -+ * -+ * This bit is set in task_struct.utrace_flags whenever it is set in any -+ * engine's flags. Only utrace_reset() resets it in utrace_flags. -+ */ -+#define ENGINE_STOP (1UL << _UTRACE_NEVENTS) -+ -+static void mark_engine_wants_stop(struct task_struct *task, -+ struct utrace_engine *engine) -+{ -+ engine->flags |= ENGINE_STOP; -+ task->utrace_flags |= ENGINE_STOP; -+} -+ -+static void clear_engine_wants_stop(struct utrace_engine *engine) -+{ -+ engine->flags &= ~ENGINE_STOP; -+} -+ -+static bool engine_wants_stop(struct utrace_engine *engine) -+{ -+ return (engine->flags & ENGINE_STOP) != 0; -+} -+ -+/** -+ * utrace_set_events - choose which event reports a tracing engine gets -+ * @target: thread to affect -+ * @engine: attached engine to affect -+ * @events: new event mask -+ * -+ * This changes the set of events for which @engine wants callbacks made. -+ * -+ * This fails with -%EALREADY and does nothing if you try to clear -+ * %UTRACE_EVENT(%DEATH) when the @report_death callback may already have -+ * begun, if you try to clear %UTRACE_EVENT(%REAP) when the @report_reap -+ * callback may already have begun, or if you try to newly set -+ * %UTRACE_EVENT(%DEATH) or %UTRACE_EVENT(%QUIESCE) when @target is -+ * already dead or dying. -+ * -+ * This can fail with -%ESRCH when @target has already been detached, -+ * including forcible detach on reaping. -+ * -+ * If @target was stopped before the call, then after a successful call, -+ * no event callbacks not requested in @events will be made; if -+ * %UTRACE_EVENT(%QUIESCE) is included in @events, then a -+ * @report_quiesce callback will be made when @target resumes. 
-+ * -+ * If @target was not stopped and @events excludes some bits that were -+ * set before, this can return -%EINPROGRESS to indicate that @target -+ * may have been making some callback to @engine. When this returns -+ * zero, you can be sure that no event callbacks you've disabled in -+ * @events can be made. If @events only sets new bits that were not set -+ * before on @engine, then -%EINPROGRESS will never be returned. -+ * -+ * To synchronize after an -%EINPROGRESS return, see utrace_barrier(). -+ * -+ * When @target is @current, -%EINPROGRESS is not returned. But note -+ * that a newly-created engine will not receive any callbacks related to -+ * an event notification already in progress. This call enables @events -+ * callbacks to be made as soon as @engine becomes eligible for any -+ * callbacks, see utrace_attach_task(). -+ * -+ * These rules provide for coherent synchronization based on %UTRACE_STOP, -+ * even when %SIGKILL is breaking its normal simple rules. -+ */ -+int utrace_set_events(struct task_struct *target, -+ struct utrace_engine *engine, -+ unsigned long events) -+{ -+ struct utrace *utrace; -+ unsigned long old_flags, old_utrace_flags, set_utrace_flags; -+ int ret; -+ -+ utrace = get_utrace_lock(target, engine, true); -+ if (unlikely(IS_ERR(utrace))) -+ return PTR_ERR(utrace); -+ -+ old_utrace_flags = target->utrace_flags; -+ set_utrace_flags = events; -+ old_flags = engine->flags & ~ENGINE_STOP; -+ -+ if (target->exit_state && -+ (((events & ~old_flags) & _UTRACE_DEATH_EVENTS) || -+ (utrace->death && -+ ((old_flags & ~events) & _UTRACE_DEATH_EVENTS)) || -+ (utrace->reap && ((old_flags & ~events) & UTRACE_EVENT(REAP))))) { -+ spin_unlock(&utrace->lock); -+ return -EALREADY; -+ } -+ -+ /* -+ * When setting these flags, it's essential that we really -+ * synchronize with exit_notify(). They cannot be set after -+ * exit_notify() takes the tasklist_lock. 
By holding the read -+ * lock here while setting the flags, we ensure that the calls -+ * to tracehook_notify_death() and tracehook_report_death() will -+ * see the new flags. This ensures that utrace_release_task() -+ * knows positively that utrace_report_death() will be called or -+ * that it won't. -+ */ -+ if ((set_utrace_flags & ~old_utrace_flags) & _UTRACE_DEATH_EVENTS) { -+ read_lock(&tasklist_lock); -+ if (unlikely(target->exit_state)) { -+ read_unlock(&tasklist_lock); -+ spin_unlock(&utrace->lock); -+ return -EALREADY; -+ } -+ target->utrace_flags |= set_utrace_flags; -+ read_unlock(&tasklist_lock); -+ } -+ -+ engine->flags = events | (engine->flags & ENGINE_STOP); -+ target->utrace_flags |= set_utrace_flags; -+ -+ if ((set_utrace_flags & UTRACE_EVENT_SYSCALL) && -+ !(old_utrace_flags & UTRACE_EVENT_SYSCALL)) -+ set_tsk_thread_flag(target, TIF_SYSCALL_TRACE); -+ -+ ret = 0; -+ if ((old_flags & ~events) && -+ !utrace->stopped && target != current && !target->exit_state) { -+ /* -+ * This barrier ensures that our engine->flags changes -+ * have hit before we examine utrace->reporting, -+ * pairing with the barrier in start_callback(). If -+ * @target has not yet hit finish_callback() to clear -+ * utrace->reporting, we might be in the middle of a -+ * callback to @engine. -+ */ -+ smp_mb(); -+ if (utrace->reporting == engine) -+ ret = -EINPROGRESS; -+ } -+ -+ spin_unlock(&utrace->lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(utrace_set_events); -+ -+/* -+ * Asynchronously mark an engine as being detached. -+ * -+ * This must work while the target thread races with us doing -+ * start_callback(), defined below. It uses smp_rmb() between checking -+ * @engine->flags and using @engine->ops. Here we change @engine->ops -+ * first, then use smp_wmb() before changing @engine->flags. 
This ensures -+ * it can check the old flags before using the old ops, or check the old -+ * flags before using the new ops, or check the new flags before using the -+ * new ops, but can never check the new flags before using the old ops. -+ * Hence, utrace_detached_ops might be used with any old flags in place. -+ * It has report_quiesce() and report_reap() callbacks to handle all cases. -+ */ -+static void mark_engine_detached(struct utrace_engine *engine) -+{ -+ engine->ops = &utrace_detached_ops; -+ smp_wmb(); -+ engine->flags = UTRACE_EVENT(QUIESCE); -+} -+ -+/* -+ * Get @target to stop and return true if it is already stopped now. -+ * If we return false, it will make some event callback soonish. -+ * Called with @utrace locked. -+ */ -+static bool utrace_do_stop(struct task_struct *target, struct utrace *utrace) -+{ -+ bool stopped = false; -+ -+ if (task_is_stopped(target)) { -+ /* -+ * Stopped is considered quiescent; when it wakes up, it will -+ * go through utrace_finish_jctl() before doing anything else. -+ */ -+ spin_lock_irq(&target->sighand->siglock); -+ if (likely(task_is_stopped(target))) { -+ __set_task_state(target, TASK_TRACED); -+ utrace->stopped = stopped = true; -+ } -+ spin_unlock_irq(&target->sighand->siglock); -+ } else if (!utrace->report && !utrace->interrupt) { -+ utrace->report = 1; -+ set_notify_resume(target); -+ } -+ -+ return stopped; -+} -+ -+/* -+ * If the target is not dead it should not be in tracing -+ * stop any more. Wake it unless it's in job control stop. -+ * -+ * Called with @utrace->lock held and @utrace->stopped set. 
-+ */ -+static void utrace_wakeup(struct task_struct *target, struct utrace *utrace) -+{ -+ utrace->stopped = 0; -+ -+ /* The task must be either TASK_TRACED or killed */ -+ spin_lock_irq(&target->sighand->siglock); -+ if (target->signal->flags & SIGNAL_STOP_STOPPED || -+ target->signal->group_stop_count) -+ target->state = TASK_STOPPED; -+ else -+ wake_up_state(target, __TASK_TRACED); -+ spin_unlock_irq(&target->sighand->siglock); -+} -+ -+/* -+ * This is called when there might be some detached engines on the list or -+ * some stale bits in @task->utrace_flags. Clean them up and recompute the -+ * flags. Returns true if we're now fully detached. -+ * -+ * Called with @utrace->lock held, returns with it released. -+ * After this returns, @utrace might be freed if everything detached. -+ */ -+static bool utrace_reset(struct task_struct *task, struct utrace *utrace) -+ __releases(utrace->lock) -+{ -+ struct utrace_engine *engine, *next; -+ unsigned long flags = 0; -+ LIST_HEAD(detached); -+ -+ splice_attaching(utrace); -+ -+ /* -+ * Update the set of events of interest from the union -+ * of the interests of the remaining tracing engines. -+ * For any engine marked detached, remove it from the list. -+ * We'll collect them on the detached list. -+ */ -+ list_for_each_entry_safe(engine, next, &utrace->attached, entry) { -+ if (engine->ops == &utrace_detached_ops) { -+ engine->ops = NULL; -+ list_move(&engine->entry, &detached); -+ } else { -+ flags |= engine->flags | UTRACE_EVENT(REAP); -+ } -+ } -+ -+ if (task->exit_state) { -+ /* -+ * Once it's already dead, we never install any flags -+ * except REAP. When ->exit_state is set and events -+ * like DEATH are not set, then they never can be set. -+ * This ensures that utrace_release_task() knows -+ * positively that utrace_report_death() can never run. 
-+ */ -+ BUG_ON(utrace->death); -+ flags &= UTRACE_EVENT(REAP); -+ } else if (!(flags & UTRACE_EVENT_SYSCALL) && -+ test_tsk_thread_flag(task, TIF_SYSCALL_TRACE)) { -+ clear_tsk_thread_flag(task, TIF_SYSCALL_TRACE); -+ } -+ -+ if (!flags) -+ /* -+ * No more engines, cleared out the utrace. -+ */ -+ utrace->interrupt = utrace->report = utrace->signal_handler = 0; -+ -+ if (!(flags & ENGINE_STOP) && utrace->stopped) -+ /* -+ * No more engines want it stopped. Wake it up. -+ */ -+ utrace_wakeup(task, utrace); -+ -+ /* -+ * In theory spin_lock() doesn't imply rcu_read_lock(). -+ * Once we clear ->utrace_flags this task_struct can go away -+ * because tracehook_prepare_release_task() path does not take -+ * utrace->lock when ->utrace_flags == 0. -+ */ -+ rcu_read_lock(); -+ task->utrace_flags = flags; -+ spin_unlock(&utrace->lock); -+ rcu_read_unlock(); -+ -+ put_detached_list(&detached); -+ -+ return !flags; -+} -+ -+/* -+ * Perform %UTRACE_STOP, i.e. block in TASK_TRACED until woken up. -+ * @task == current, @utrace == current->utrace, which is not locked. -+ * Return true if we were woken up by SIGKILL even though some utrace -+ * engine may still want us to stay stopped. -+ */ -+static void utrace_stop(struct task_struct *task, struct utrace *utrace, -+ enum utrace_resume_action action) -+{ -+ /* -+ * @utrace->stopped is the flag that says we are safely -+ * inside this function. It should never be set on entry. -+ */ -+ BUG_ON(utrace->stopped); -+relock: -+ spin_lock(&utrace->lock); -+ -+ if (action == UTRACE_INTERRUPT) { -+ /* -+ * Ensure a %UTRACE_SIGNAL_REPORT reporting pass when we're -+ * resumed. The recalc_sigpending() call below will see -+ * this flag and set TIF_SIGPENDING. -+ */ -+ utrace->interrupt = 1; -+ } else if (action < UTRACE_RESUME) { -+ /* -+ * Ensure a reporting pass when we're resumed. 
-+ */ -+ utrace->report = 1; -+ set_thread_flag(TIF_NOTIFY_RESUME); -+ } -+ -+ /* -+ * If the ENGINE_STOP bit is clear in utrace_flags, that means -+ * utrace_reset() ran after we processed some UTRACE_STOP return -+ * values from callbacks to get here. If all engines have detached -+ * or resumed us, we don't stop. This check doesn't require -+ * siglock, but it should follow the interrupt/report bookkeeping -+ * steps (this can matter for UTRACE_RESUME but not UTRACE_DETACH). -+ */ -+ if (unlikely(!(task->utrace_flags & ENGINE_STOP))) { -+ utrace_reset(task, utrace); -+ if (task->utrace_flags & ENGINE_STOP) -+ goto relock; -+ return; -+ } -+ -+ /* -+ * The siglock protects us against signals. As well as SIGKILL -+ * waking us up, we must synchronize with the signal bookkeeping -+ * for stop signals and SIGCONT. -+ */ -+ spin_lock_irq(&task->sighand->siglock); -+ -+ if (unlikely(__fatal_signal_pending(task))) { -+ spin_unlock_irq(&task->sighand->siglock); -+ spin_unlock(&utrace->lock); -+ return; -+ } -+ -+ utrace->stopped = 1; -+ __set_current_state(TASK_TRACED); -+ -+ /* -+ * If there is a group stop in progress, -+ * we must participate in the bookkeeping. -+ */ -+ if (unlikely(task->signal->group_stop_count) && -+ !--task->signal->group_stop_count) -+ task->signal->flags = SIGNAL_STOP_STOPPED; -+ -+ spin_unlock_irq(&task->sighand->siglock); -+ spin_unlock(&utrace->lock); -+ -+ schedule(); -+ -+ /* -+ * While in TASK_TRACED, we were considered "frozen enough". -+ * Now that we woke up, it's crucial if we're supposed to be -+ * frozen that we freeze now before running anything substantial. -+ */ -+ try_to_freeze(); -+ -+ /* -+ * utrace_wakeup() clears @utrace->stopped before waking us up. -+ * We're officially awake if it's clear. -+ */ -+ if (unlikely(utrace->stopped)) { -+ /* -+ * If we're here with it still set, it must have been -+ * signal_wake_up() instead, waking us up for a SIGKILL. 
-+ */ -+ WARN_ON(!__fatal_signal_pending(task)); -+ spin_lock(&utrace->lock); -+ utrace->stopped = 0; -+ spin_unlock(&utrace->lock); -+ } -+ -+ /* -+ * While we were in TASK_TRACED, complete_signal() considered -+ * us "uninterested" in signal wakeups. Now make sure our -+ * TIF_SIGPENDING state is correct for normal running. -+ */ -+ spin_lock_irq(&task->sighand->siglock); -+ recalc_sigpending(); -+ spin_unlock_irq(&task->sighand->siglock); -+} -+ -+/* -+ * You can't do anything to a dead task but detach it. -+ * If release_task() has been called, you can't do that. -+ * -+ * On the exit path, DEATH and QUIESCE event bits are set only -+ * before utrace_report_death() has taken the lock. At that point, -+ * the death report will come soon, so disallow detach until it's -+ * done. This prevents us from racing with it detaching itself. -+ * -+ * Called with utrace->lock held, when @target->exit_state is nonzero. -+ */ -+static inline int utrace_control_dead(struct task_struct *target, -+ struct utrace *utrace, -+ enum utrace_resume_action action) -+{ -+ if (action != UTRACE_DETACH || unlikely(utrace->reap)) -+ return -ESRCH; -+ -+ if (unlikely(utrace->death)) -+ /* -+ * We have already started the death report. We can't -+ * prevent the report_death and report_reap callbacks, -+ * so tell the caller they will happen. -+ */ -+ return -EALREADY; -+ -+ return 0; -+} -+ -+/** -+ * utrace_control - control a thread being traced by a tracing engine -+ * @target: thread to affect -+ * @engine: attached engine to affect -+ * @action: &enum utrace_resume_action for thread to do -+ * -+ * This is how a tracing engine asks a traced thread to do something. -+ * This call is controlled by the @action argument, which has the -+ * same meaning as the &enum utrace_resume_action value returned by -+ * event reporting callbacks. -+ * -+ * If @target is already dead (@target->exit_state nonzero), -+ * all actions except %UTRACE_DETACH fail with -%ESRCH. 
-+ * -+ * The following sections describe each option for the @action argument. -+ * -+ * UTRACE_DETACH: -+ * -+ * After this, the @engine data structure is no longer accessible, -+ * and the thread might be reaped. The thread will start running -+ * again if it was stopped and no longer has any attached engines -+ * that want it stopped. -+ * -+ * If the @report_reap callback may already have begun, this fails -+ * with -%ESRCH. If the @report_death callback may already have -+ * begun, this fails with -%EALREADY. -+ * -+ * If @target is not already stopped, then a callback to this engine -+ * might be in progress or about to start on another CPU. If so, -+ * then this returns -%EINPROGRESS; the detach happens as soon as -+ * the pending callback is finished. To synchronize after an -+ * -%EINPROGRESS return, see utrace_barrier(). -+ * -+ * If @target is properly stopped before utrace_control() is called, -+ * then after successful return it's guaranteed that no more callbacks -+ * to the @engine->ops vector will be made. -+ * -+ * The only exception is %SIGKILL (and exec or group-exit by another -+ * thread in the group), which can cause asynchronous @report_death -+ * and/or @report_reap callbacks even when %UTRACE_STOP was used. -+ * (In that event, this fails with -%ESRCH or -%EALREADY, see above.) -+ * -+ * UTRACE_STOP: -+ * This asks that @target stop running. This returns 0 only if -+ * @target is already stopped, either for tracing or for job -+ * control. Then @target will remain stopped until another -+ * utrace_control() call is made on @engine; @target can be woken -+ * only by %SIGKILL (or equivalent, such as exec or termination by -+ * another thread in the same thread group). -+ * -+ * This returns -%EINPROGRESS if @target is not already stopped. -+ * Then the effect is like %UTRACE_REPORT. A @report_quiesce or -+ * @report_signal callback will be made soon. Your callback can -+ * then return %UTRACE_STOP to keep @target stopped. 
-+ * -+ * This does not interrupt system calls in progress, including ones -+ * that sleep for a long time. For that, use %UTRACE_INTERRUPT. -+ * To interrupt system calls and then keep @target stopped, your -+ * @report_signal callback can return %UTRACE_STOP. -+ * -+ * UTRACE_RESUME: -+ * -+ * Just let @target continue running normally, reversing the effect -+ * of a previous %UTRACE_STOP. If another engine is keeping @target -+ * stopped, then it remains stopped until all engines let it resume. -+ * If @target was not stopped, this has no effect. -+ * -+ * UTRACE_REPORT: -+ * -+ * This is like %UTRACE_RESUME, but also ensures that there will be -+ * a @report_quiesce or @report_signal callback made soon. If -+ * @target had been stopped, then there will be a callback before it -+ * resumes running normally. If another engine is keeping @target -+ * stopped, then there might be no callbacks until all engines let -+ * it resume. -+ * -+ * Since this is meaningless unless @report_quiesce callbacks will -+ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE). -+ * -+ * UTRACE_INTERRUPT: -+ * -+ * This is like %UTRACE_REPORT, but ensures that @target will make a -+ * @report_signal callback before it resumes or delivers signals. -+ * If @target was in a system call or about to enter one, work in -+ * progress will be interrupted as if by %SIGSTOP. If another -+ * engine is keeping @target stopped, then there might be no -+ * callbacks until all engines let it resume. -+ * -+ * This gives @engine an opportunity to introduce a forced signal -+ * disposition via its @report_signal callback. -+ * -+ * UTRACE_SINGLESTEP: -+ * -+ * It's invalid to use this unless arch_has_single_step() returned true. -+ * This is like %UTRACE_RESUME, but resumes for one user instruction -+ * only. It's invalid to use this in utrace_control() unless @target -+ * had been stopped by @engine previously. 
-+ * -+ * Note that passing %UTRACE_SINGLESTEP or %UTRACE_BLOCKSTEP to -+ * utrace_control() or returning it from an event callback alone does -+ * not necessarily ensure that stepping will be enabled. If there are -+ * more callbacks made to any engine before returning to user mode, -+ * then the resume action is chosen only by the last set of callbacks. -+ * To be sure, enable %UTRACE_EVENT(%QUIESCE) and look for the -+ * @report_quiesce callback with a zero event mask, or the -+ * @report_signal callback with %UTRACE_SIGNAL_REPORT. -+ * -+ * Since this is not robust unless @report_quiesce callbacks will -+ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE). -+ * -+ * UTRACE_BLOCKSTEP: -+ * -+ * It's invalid to use this unless arch_has_block_step() returned true. -+ * This is like %UTRACE_SINGLESTEP, but resumes for one whole basic -+ * block of user instructions. -+ * -+ * Since this is not robust unless @report_quiesce callbacks will -+ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE). -+ * -+ * %UTRACE_BLOCKSTEP devolves to %UTRACE_SINGLESTEP when another -+ * tracing engine is using %UTRACE_SINGLESTEP at the same time. -+ */ -+int utrace_control(struct task_struct *target, -+ struct utrace_engine *engine, -+ enum utrace_resume_action action) -+{ -+ struct utrace *utrace; -+ bool reset; -+ int ret; -+ -+ if (unlikely(action > UTRACE_DETACH)) -+ return -EINVAL; -+ -+ /* -+ * This is a sanity check for a programming error in the caller. -+ * Their request can only work properly in all cases by relying on -+ * a follow-up callback, but they didn't set one up! This check -+ * doesn't do locking, but it shouldn't matter. The caller has to -+ * be synchronously sure the callback is set up to be operating the -+ * interface properly. 
-+ */ -+ if (action >= UTRACE_REPORT && action < UTRACE_RESUME && -+ unlikely(!(engine->flags & UTRACE_EVENT(QUIESCE)))) -+ return -EINVAL; -+ -+ utrace = get_utrace_lock(target, engine, true); -+ if (unlikely(IS_ERR(utrace))) -+ return PTR_ERR(utrace); -+ -+ reset = utrace->stopped; -+ ret = 0; -+ -+ /* -+ * ->exit_state can change under us, this doesn't matter. -+ * We do not care about ->exit_state in fact, but we do -+ * care about ->reap and ->death. If either flag is set, -+ * we must also see ->exit_state != 0. -+ */ -+ if (unlikely(target->exit_state)) { -+ ret = utrace_control_dead(target, utrace, action); -+ if (ret) { -+ spin_unlock(&utrace->lock); -+ return ret; -+ } -+ reset = true; -+ } -+ -+ switch (action) { -+ case UTRACE_STOP: -+ mark_engine_wants_stop(target, engine); -+ if (!reset && !utrace_do_stop(target, utrace)) -+ ret = -EINPROGRESS; -+ reset = false; -+ break; -+ -+ case UTRACE_DETACH: -+ if (engine_wants_stop(engine)) -+ target->utrace_flags &= ~ENGINE_STOP; -+ mark_engine_detached(engine); -+ reset = reset || utrace_do_stop(target, utrace); -+ if (!reset) { -+ /* -+ * As in utrace_set_events(), this barrier ensures -+ * that our engine->flags changes have hit before we -+ * examine utrace->reporting, pairing with the barrier -+ * in start_callback(). If @target has not yet hit -+ * finish_callback() to clear utrace->reporting, we -+ * might be in the middle of a callback to @engine. -+ */ -+ smp_mb(); -+ if (utrace->reporting == engine) -+ ret = -EINPROGRESS; -+ } -+ break; -+ -+ case UTRACE_RESUME: -+ /* -+ * This and all other cases imply resuming if stopped. -+ * There might not be another report before it just -+ * resumes, so make sure single-step is not left set. -+ */ -+ clear_engine_wants_stop(engine); -+ if (likely(reset)) -+ user_disable_single_step(target); -+ break; -+ -+ case UTRACE_REPORT: -+ /* -+ * Make the thread call tracehook_notify_resume() soon. -+ * But don't bother if it's already been interrupted. 
-+ * In that case, utrace_get_signal() will be reporting soon. -+ */ -+ clear_engine_wants_stop(engine); -+ if (!utrace->report && !utrace->interrupt) { -+ utrace->report = 1; -+ set_notify_resume(target); -+ } -+ break; -+ -+ case UTRACE_INTERRUPT: -+ /* -+ * Make the thread call tracehook_get_signal() soon. -+ */ -+ clear_engine_wants_stop(engine); -+ if (utrace->interrupt) -+ break; -+ utrace->interrupt = 1; -+ -+ /* -+ * If it's not already stopped, interrupt it now. -+ * We need the siglock here in case it calls -+ * recalc_sigpending() and clears its own -+ * TIF_SIGPENDING. By taking the lock, we've -+ * serialized any later recalc_sigpending() after -+ * our setting of utrace->interrupt to force it on. -+ */ -+ if (reset) { -+ /* -+ * This is really just to keep the invariant -+ * that TIF_SIGPENDING is set with utrace->interrupt. -+ * When it's stopped, we know it's always going -+ * through utrace_get_signal and will recalculate. -+ */ -+ set_tsk_thread_flag(target, TIF_SIGPENDING); -+ } else { -+ struct sighand_struct *sighand; -+ unsigned long irqflags; -+ sighand = lock_task_sighand(target, &irqflags); -+ if (likely(sighand)) { -+ signal_wake_up(target, 0); -+ unlock_task_sighand(target, &irqflags); -+ } -+ } -+ break; -+ -+ case UTRACE_BLOCKSTEP: -+ /* -+ * Resume from stopped, step one block. -+ */ -+ clear_engine_wants_stop(engine); -+ if (unlikely(!arch_has_block_step())) { -+ WARN_ON(1); -+ /* Fall through to treat it as SINGLESTEP. */ -+ } else if (likely(reset)) { -+ user_enable_block_step(target); -+ break; -+ } -+ -+ case UTRACE_SINGLESTEP: -+ /* -+ * Resume from stopped, step one instruction. -+ */ -+ clear_engine_wants_stop(engine); -+ if (unlikely(!arch_has_single_step())) { -+ WARN_ON(1); -+ reset = false; -+ ret = -EOPNOTSUPP; -+ break; -+ } -+ -+ if (likely(reset)) -+ user_enable_single_step(target); -+ else -+ /* -+ * You were supposed to stop it before asking -+ * it to step. 
-+ */ -+ ret = -EAGAIN; -+ break; -+ } -+ -+ /* -+ * Let the thread resume running. If it's not stopped now, -+ * there is nothing more we need to do. -+ */ -+ if (reset) -+ utrace_reset(target, utrace); -+ else -+ spin_unlock(&utrace->lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(utrace_control); -+ -+/** -+ * utrace_barrier - synchronize with simultaneous tracing callbacks -+ * @target: thread to affect -+ * @engine: engine to affect (can be detached) -+ * -+ * This blocks while @target might be in the midst of making a callback to -+ * @engine. It can be interrupted by signals and will return -%ERESTARTSYS. -+ * A return value of zero means no callback from @target to @engine was -+ * in progress. Any effect of its return value (such as %UTRACE_STOP) has -+ * already been applied to @engine. -+ * -+ * It's not necessary to keep the @target pointer alive for this call. -+ * It's only necessary to hold a ref on @engine. This will return -+ * safely even if @target has been reaped and has no task refs. -+ * -+ * A successful return from utrace_barrier() guarantees its ordering -+ * with respect to utrace_set_events() and utrace_control() calls. If -+ * @target was not properly stopped, event callbacks just disabled might -+ * still be in progress; utrace_barrier() waits until there is no chance -+ * an unwanted callback can be in progress. -+ */ -+int utrace_barrier(struct task_struct *target, struct utrace_engine *engine) -+{ -+ struct utrace *utrace; -+ int ret = -ERESTARTSYS; -+ -+ if (unlikely(target == current)) -+ return 0; -+ -+ do { -+ utrace = get_utrace_lock(target, engine, false); -+ if (unlikely(IS_ERR(utrace))) { -+ ret = PTR_ERR(utrace); -+ if (ret != -ERESTARTSYS) -+ break; -+ } else { -+ /* -+ * All engine state changes are done while -+ * holding the lock, i.e. before we get here. -+ * Since we have the lock, we only need to -+ * worry about @target making a callback. 
-+ * When it has entered start_callback() but -+ * not yet gotten to finish_callback(), we -+ * will see utrace->reporting == @engine. -+ * When @target doesn't take the lock, it uses -+ * barriers to order setting utrace->reporting -+ * before it examines the engine state. -+ */ -+ if (utrace->reporting != engine) -+ ret = 0; -+ spin_unlock(&utrace->lock); -+ if (!ret) -+ break; -+ } -+ schedule_timeout_interruptible(1); -+ } while (!signal_pending(current)); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(utrace_barrier); -+ -+/* -+ * This is local state used for reporting loops, perhaps optimized away. -+ */ -+struct utrace_report { -+ u32 result; -+ enum utrace_resume_action action; -+ enum utrace_resume_action resume_action; -+ bool detaches; -+ bool takers; -+}; -+ -+#define INIT_REPORT(var) \ -+ struct utrace_report var = { \ -+ .action = UTRACE_RESUME, \ -+ .resume_action = UTRACE_RESUME \ -+ } -+ -+/* -+ * We are now making the report, so clear the flag saying we need one. -+ * When there is a new attach, ->pending_attach is set just so we will -+ * know to do splice_attaching() here before the callback loop. -+ */ -+static void start_report(struct utrace *utrace) -+{ -+ BUG_ON(utrace->stopped); -+ if (utrace->report || utrace->pending_attach) { -+ spin_lock(&utrace->lock); -+ splice_attaching(utrace); -+ utrace->report = 0; -+ spin_unlock(&utrace->lock); -+ } -+} -+ -+static inline void finish_report_reset(struct task_struct *task, -+ struct utrace *utrace, -+ struct utrace_report *report) -+{ -+ if (unlikely(!report->takers || report->detaches)) { -+ spin_lock(&utrace->lock); -+ if (utrace_reset(task, utrace)) -+ report->action = UTRACE_RESUME; -+ } -+} -+ -+/* -+ * Complete a normal reporting pass, pairing with a start_report() call. -+ * This handles any UTRACE_DETACH or UTRACE_REPORT or UTRACE_INTERRUPT -+ * returns from engine callbacks. If any engine's last callback used -+ * UTRACE_STOP, we do UTRACE_REPORT here to ensure we stop before user -+ * mode. 
If there were no callbacks made, it will recompute -+ * @task->utrace_flags to avoid another false-positive. -+ */ -+static void finish_report(struct utrace_report *report, -+ struct task_struct *task, struct utrace *utrace) -+{ -+ if (report->action <= UTRACE_REPORT && !utrace->interrupt && -+ (report->action == UTRACE_INTERRUPT || !utrace->report)) { -+ spin_lock(&utrace->lock); -+ if (report->action == UTRACE_INTERRUPT) { -+ utrace->interrupt = 1; -+ set_tsk_thread_flag(task, TIF_SIGPENDING); -+ } else { -+ utrace->report = 1; -+ set_tsk_thread_flag(task, TIF_NOTIFY_RESUME); -+ } -+ spin_unlock(&utrace->lock); -+ } -+ -+ finish_report_reset(task, utrace, report); -+} -+ -+static inline void finish_callback_report(struct task_struct *task, -+ struct utrace *utrace, -+ struct utrace_report *report, -+ struct utrace_engine *engine, -+ enum utrace_resume_action action) -+{ -+ /* -+ * If utrace_control() was used, treat that like UTRACE_DETACH here. -+ */ -+ if (action == UTRACE_DETACH || engine->ops == &utrace_detached_ops) { -+ engine->ops = &utrace_detached_ops; -+ report->detaches = true; -+ return; -+ } -+ -+ if (action < report->action) -+ report->action = action; -+ -+ if (action != UTRACE_STOP) { -+ if (action < report->resume_action) -+ report->resume_action = action; -+ -+ if (engine_wants_stop(engine)) { -+ spin_lock(&utrace->lock); -+ clear_engine_wants_stop(engine); -+ spin_unlock(&utrace->lock); -+ } -+ -+ return; -+ } -+ -+ if (!engine_wants_stop(engine)) { -+ spin_lock(&utrace->lock); -+ /* -+ * If utrace_control() came in and detached us -+ * before we got the lock, we must not stop now. -+ */ -+ if (unlikely(engine->ops == &utrace_detached_ops)) -+ report->detaches = true; -+ else -+ mark_engine_wants_stop(task, engine); -+ spin_unlock(&utrace->lock); -+ } -+} -+ -+/* -+ * Apply the return value of one engine callback to @report. -+ * Returns true if @engine detached and should not get any more callbacks. 
-+ */ -+static bool finish_callback(struct task_struct *task, struct utrace *utrace, -+ struct utrace_report *report, -+ struct utrace_engine *engine, -+ u32 ret) -+{ -+ report->result = ret & ~UTRACE_RESUME_MASK; -+ finish_callback_report(task, utrace, report, engine, -+ utrace_resume_action(ret)); -+ -+ /* -+ * Now that we have applied the effect of the return value, -+ * clear this so that utrace_barrier() can stop waiting. -+ * A subsequent utrace_control() can stop or resume @engine -+ * and know this was ordered after its callback's action. -+ * -+ * We don't need any barriers here because utrace_barrier() -+ * takes utrace->lock. If we touched engine->flags above, -+ * the lock guaranteed this change was before utrace_barrier() -+ * examined utrace->reporting. -+ */ -+ utrace->reporting = NULL; -+ -+ /* -+ * This is a good place to make sure tracing engines don't -+ * introduce too much latency under voluntary preemption. -+ */ -+ if (need_resched()) -+ cond_resched(); -+ -+ return engine->ops == &utrace_detached_ops; -+} -+ -+/* -+ * Start the callbacks for @engine to consider @event (a bit mask). -+ * This makes the report_quiesce() callback first. If @engine wants -+ * a specific callback for @event, we return the ops vector to use. -+ * If not, we return NULL. The return value from the ops->callback -+ * function called should be passed to finish_callback(). -+ */ -+static const struct utrace_engine_ops *start_callback( -+ struct utrace *utrace, struct utrace_report *report, -+ struct utrace_engine *engine, struct task_struct *task, -+ unsigned long event) -+{ -+ const struct utrace_engine_ops *ops; -+ unsigned long want; -+ -+ /* -+ * This barrier ensures that we've set utrace->reporting before -+ * we examine engine->flags or engine->ops. utrace_barrier() -+ * relies on this ordering to indicate that the effect of any -+ * utrace_control() and utrace_set_events() calls is in place -+ * by the time utrace->reporting can be seen to be NULL. 
-+ */ -+ utrace->reporting = engine; -+ smp_mb(); -+ -+ /* -+ * This pairs with the barrier in mark_engine_detached(). -+ * It makes sure that we never see the old ops vector with -+ * the new flags, in case the original vector had no report_quiesce. -+ */ -+ want = engine->flags; -+ smp_rmb(); -+ ops = engine->ops; -+ -+ if (want & UTRACE_EVENT(QUIESCE)) { -+ if (finish_callback(task, utrace, report, engine, -+ (*ops->report_quiesce)(report->action, -+ engine, task, -+ event))) -+ return NULL; -+ -+ /* -+ * finish_callback() reset utrace->reporting after the -+ * quiesce callback. Now we set it again (as above) -+ * before re-examining engine->flags, which could have -+ * been changed synchronously by ->report_quiesce or -+ * asynchronously by utrace_control() or utrace_set_events(). -+ */ -+ utrace->reporting = engine; -+ smp_mb(); -+ want = engine->flags; -+ } -+ -+ if (want & ENGINE_STOP) -+ report->action = UTRACE_STOP; -+ -+ if (want & event) { -+ report->takers = true; -+ return ops; -+ } -+ -+ utrace->reporting = NULL; -+ return NULL; -+} -+ -+/* -+ * Do a normal reporting pass for engines interested in @event. -+ * @callback is the name of the member in the ops vector, and remaining -+ * args are the extras it takes after the standard three args. -+ */ -+#define REPORT(task, utrace, report, event, callback, ...) \ -+ do { \ -+ start_report(utrace); \ -+ REPORT_CALLBACKS(, task, utrace, report, event, callback, \ -+ (report)->action, engine, current, \ -+ ## __VA_ARGS__); \ -+ finish_report(report, task, utrace); \ -+ } while (0) -+#define REPORT_CALLBACKS(rev, task, utrace, report, event, callback, ...) 
\ -+ do { \ -+ struct utrace_engine *engine; \ -+ const struct utrace_engine_ops *ops; \ -+ list_for_each_entry##rev(engine, &utrace->attached, entry) { \ -+ ops = start_callback(utrace, report, engine, task, \ -+ event); \ -+ if (!ops) \ -+ continue; \ -+ finish_callback(task, utrace, report, engine, \ -+ (*ops->callback)(__VA_ARGS__)); \ -+ } \ -+ } while (0) -+ -+/* -+ * Called iff UTRACE_EVENT(EXEC) flag is set. -+ */ -+void utrace_report_exec(struct linux_binfmt *fmt, struct linux_binprm *bprm, -+ struct pt_regs *regs) -+{ -+ struct task_struct *task = current; -+ struct utrace *utrace = task_utrace_struct(task); -+ INIT_REPORT(report); -+ -+ REPORT(task, utrace, &report, UTRACE_EVENT(EXEC), -+ report_exec, fmt, bprm, regs); -+} -+ -+/* -+ * Called iff UTRACE_EVENT(SYSCALL_ENTRY) flag is set. -+ * Return true to prevent the system call. -+ */ -+bool utrace_report_syscall_entry(struct pt_regs *regs) -+{ -+ struct task_struct *task = current; -+ struct utrace *utrace = task_utrace_struct(task); -+ INIT_REPORT(report); -+ -+ start_report(utrace); -+ REPORT_CALLBACKS(_reverse, task, utrace, &report, -+ UTRACE_EVENT(SYSCALL_ENTRY), report_syscall_entry, -+ report.result | report.action, engine, current, regs); -+ finish_report(&report, task, utrace); -+ -+ if (report.action == UTRACE_STOP) { -+ utrace_stop(task, utrace, report.resume_action); -+ if (fatal_signal_pending(task)) -+ /* -+ * We are continuing despite UTRACE_STOP because of a -+ * SIGKILL. Don't let the system call actually proceed. -+ */ -+ return true; -+ } -+ -+ return report.result == UTRACE_SYSCALL_ABORT; -+} -+ -+/* -+ * Called iff UTRACE_EVENT(SYSCALL_EXIT) flag is set. 
-+ */ -+void utrace_report_syscall_exit(struct pt_regs *regs) -+{ -+ struct task_struct *task = current; -+ struct utrace *utrace = task_utrace_struct(task); -+ INIT_REPORT(report); -+ -+ REPORT(task, utrace, &report, UTRACE_EVENT(SYSCALL_EXIT), -+ report_syscall_exit, regs); -+} -+ -+/* -+ * Called iff UTRACE_EVENT(CLONE) flag is set. -+ * This notification call blocks the wake_up_new_task call on the child. -+ * So we must not quiesce here. tracehook_report_clone_complete will do -+ * a quiescence check momentarily. -+ */ -+void utrace_report_clone(unsigned long clone_flags, struct task_struct *child) -+{ -+ struct task_struct *task = current; -+ struct utrace *utrace = task_utrace_struct(task); -+ INIT_REPORT(report); -+ -+ /* -+ * We don't use the REPORT() macro here, because we need -+ * to clear utrace->cloning before finish_report(). -+ * After finish_report(), utrace can be a stale pointer -+ * in cases when report.action is still UTRACE_RESUME. -+ */ -+ start_report(utrace); -+ utrace->cloning = child; -+ -+ REPORT_CALLBACKS(, task, utrace, &report, -+ UTRACE_EVENT(CLONE), report_clone, -+ report.action, engine, task, clone_flags, child); -+ -+ utrace->cloning = NULL; -+ finish_report(&report, task, utrace); -+ -+ /* -+ * For a vfork, we will go into an uninterruptible block waiting -+ * for the child. We need UTRACE_STOP to happen before this, not -+ * after. For CLONE_VFORK, utrace_finish_vfork() will be called. -+ */ -+ if (report.action == UTRACE_STOP && (clone_flags & CLONE_VFORK)) { -+ spin_lock(&utrace->lock); -+ utrace->vfork_stop = 1; -+ spin_unlock(&utrace->lock); -+ } -+} -+ -+/* -+ * We're called after utrace_report_clone() for a CLONE_VFORK. -+ * If UTRACE_STOP was left from the clone report, we stop here. -+ * After this, we'll enter the uninterruptible wait_for_completion() -+ * waiting for the child. 
-+ */ -+void utrace_finish_vfork(struct task_struct *task) -+{ -+ struct utrace *utrace = task_utrace_struct(task); -+ -+ if (utrace->vfork_stop) { -+ spin_lock(&utrace->lock); -+ utrace->vfork_stop = 0; -+ spin_unlock(&utrace->lock); -+ utrace_stop(task, utrace, UTRACE_RESUME); /* XXX */ -+ } -+} -+ -+/* -+ * Called iff UTRACE_EVENT(JCTL) flag is set. -+ * -+ * Called with siglock held. -+ */ -+void utrace_report_jctl(int notify, int what) -+{ -+ struct task_struct *task = current; -+ struct utrace *utrace = task_utrace_struct(task); -+ INIT_REPORT(report); -+ -+ spin_unlock_irq(&task->sighand->siglock); -+ -+ REPORT(task, utrace, &report, UTRACE_EVENT(JCTL), -+ report_jctl, what, notify); -+ -+ spin_lock_irq(&task->sighand->siglock); -+} -+ -+/* -+ * Called without locks. -+ */ -+void utrace_finish_jctl(void) -+{ -+ struct utrace *utrace = task_utrace_struct(current); -+ /* -+ * While in TASK_STOPPED, we can be considered safely stopped by -+ * utrace_do_stop(). Clear ->stopped if we were woken by SIGKILL. -+ */ -+ if (utrace->stopped) { -+ spin_lock(&utrace->lock); -+ utrace->stopped = false; -+ spin_unlock(&utrace->lock); -+ } -+} -+ -+/* -+ * Called iff UTRACE_EVENT(EXIT) flag is set. -+ */ -+void utrace_report_exit(long *exit_code) -+{ -+ struct task_struct *task = current; -+ struct utrace *utrace = task_utrace_struct(task); -+ INIT_REPORT(report); -+ long orig_code = *exit_code; -+ -+ REPORT(task, utrace, &report, UTRACE_EVENT(EXIT), -+ report_exit, orig_code, exit_code); -+ -+ if (report.action == UTRACE_STOP) -+ utrace_stop(task, utrace, report.resume_action); -+} -+ -+/* -+ * Called iff UTRACE_EVENT(DEATH) or UTRACE_EVENT(QUIESCE) flag is set. -+ * -+ * It is always possible that we are racing with utrace_release_task here. -+ * For this reason, utrace_release_task checks for the event bits that get -+ * us here, and delays its cleanup for us to do. 
-+ */ -+void utrace_report_death(struct task_struct *task, struct utrace *utrace, -+ bool group_dead, int signal) -+{ -+ INIT_REPORT(report); -+ -+ BUG_ON(!task->exit_state); -+ -+ /* -+ * We are presently considered "quiescent"--which is accurate -+ * inasmuch as we won't run any more user instructions ever again. -+ * But for utrace_control and utrace_set_events to be robust, they -+ * must be sure whether or not we will run any more callbacks. If -+ * a call comes in before we do, taking the lock here synchronizes -+ * us so we don't run any callbacks just disabled. Calls that come -+ * in while we're running the callbacks will see the exit.death -+ * flag and know that we are not yet fully quiescent for purposes -+ * of detach bookkeeping. -+ */ -+ spin_lock(&utrace->lock); -+ BUG_ON(utrace->death); -+ utrace->death = 1; -+ utrace->report = 0; -+ utrace->interrupt = 0; -+ splice_attaching(utrace); -+ spin_unlock(&utrace->lock); -+ -+ REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH), -+ report_death, engine, task, group_dead, signal); -+ -+ spin_lock(&utrace->lock); -+ -+ /* -+ * After we unlock (possibly inside utrace_reap for callbacks) with -+ * this flag clear, competing utrace_control/utrace_set_events calls -+ * know that we've finished our callbacks and any detach bookkeeping. -+ */ -+ utrace->death = 0; -+ -+ if (utrace->reap) -+ /* -+ * utrace_release_task() was already called in parallel. -+ * We must complete its work now. -+ */ -+ utrace_reap(task, utrace); -+ else -+ utrace_reset(task, utrace); -+} -+ -+/* -+ * Finish the last reporting pass before returning to user mode. 
-+ */ -+static void finish_resume_report(struct utrace_report *report, -+ struct task_struct *task, -+ struct utrace *utrace) -+{ -+ finish_report_reset(task, utrace, report); -+ -+ switch (report->action) { -+ case UTRACE_STOP: -+ utrace_stop(task, utrace, report->resume_action); -+ break; -+ -+ case UTRACE_INTERRUPT: -+ if (!signal_pending(task)) -+ set_tsk_thread_flag(task, TIF_SIGPENDING); -+ break; -+ -+ case UTRACE_BLOCKSTEP: -+ if (likely(arch_has_block_step())) { -+ user_enable_block_step(task); -+ break; -+ } -+ -+ /* -+ * This means some callback is to blame for failing -+ * to check arch_has_block_step() itself. Warn and -+ * then fall through to treat it as SINGLESTEP. -+ */ -+ WARN_ON(1); -+ -+ case UTRACE_SINGLESTEP: -+ if (likely(arch_has_single_step())) -+ user_enable_single_step(task); -+ else -+ /* -+ * This means some callback is to blame for failing -+ * to check arch_has_single_step() itself. Spew -+ * about it so the loser will fix his module. -+ */ -+ WARN_ON(1); -+ break; -+ -+ case UTRACE_REPORT: -+ case UTRACE_RESUME: -+ default: -+ user_disable_single_step(task); -+ break; -+ } -+} -+ -+/* -+ * This is called when TIF_NOTIFY_RESUME had been set (and is now clear). -+ * We are close to user mode, and this is the place to report or stop. -+ * When we return, we're going to user mode or into the signals code. -+ */ -+void utrace_resume(struct task_struct *task, struct pt_regs *regs) -+{ -+ struct utrace *utrace = task_utrace_struct(task); -+ INIT_REPORT(report); -+ struct utrace_engine *engine; -+ -+ /* -+ * Some machines get here with interrupts disabled. The same arch -+ * code path leads to calling into get_signal_to_deliver(), which -+ * implicitly reenables them by virtue of spin_unlock_irq. -+ */ -+ local_irq_enable(); -+ -+ /* -+ * If this flag is still set it's because there was a signal -+ * handler setup done but no report_signal following it. Clear -+ * the flag before we get to user so it doesn't confuse us later. 
-+ */ -+ if (unlikely(utrace->signal_handler)) { -+ int skip; -+ spin_lock(&utrace->lock); -+ utrace->signal_handler = 0; -+ skip = !utrace->report; -+ spin_unlock(&utrace->lock); -+ if (skip) -+ return; -+ } -+ -+ /* -+ * If UTRACE_INTERRUPT was just used, we don't bother with a report -+ * here. We will report and stop in utrace_get_signal(). In case -+ * of a race with utrace_control(), make sure we don't momentarily -+ * return to user mode because TIF_SIGPENDING was not set yet. -+ */ -+ if (unlikely(utrace->interrupt)) { -+ set_thread_flag(TIF_SIGPENDING); -+ return; -+ } -+ -+ /* -+ * Update our bookkeeping even if there are no callbacks made here. -+ */ -+ start_report(utrace); -+ -+ if (likely(task->utrace_flags & UTRACE_EVENT(QUIESCE))) { -+ /* -+ * Do a simple reporting pass, with no specific -+ * callback after report_quiesce. -+ */ -+ list_for_each_entry(engine, &utrace->attached, entry) -+ start_callback(utrace, &report, engine, task, 0); -+ } -+ -+ /* -+ * Finish the report and either stop or get ready to resume. -+ */ -+ finish_resume_report(&report, task, utrace); -+} -+ -+/* -+ * Return true if current has forced signal_pending(). -+ * -+ * This is called only when current->utrace_flags is nonzero, so we know -+ * that current->utrace must be set. It's not inlined in tracehook.h -+ * just so that struct utrace can stay opaque outside this file. -+ */ -+bool utrace_interrupt_pending(void) -+{ -+ return task_utrace_struct(current)->interrupt; -+} -+ -+/* -+ * Take the siglock and push @info back on our queue. -+ * Returns with @task->sighand->siglock held. -+ */ -+static void push_back_signal(struct task_struct *task, siginfo_t *info) -+ __acquires(task->sighand->siglock) -+{ -+ struct sigqueue *q; -+ -+ if (unlikely(!info->si_signo)) { /* Oh, a wise guy! 
*/ -+ spin_lock_irq(&task->sighand->siglock); -+ return; -+ } -+ -+ q = sigqueue_alloc(); -+ if (likely(q)) { -+ q->flags = 0; -+ copy_siginfo(&q->info, info); -+ } -+ -+ spin_lock_irq(&task->sighand->siglock); -+ -+ sigaddset(&task->pending.signal, info->si_signo); -+ if (likely(q)) -+ list_add(&q->list, &task->pending.list); -+ -+ set_tsk_thread_flag(task, TIF_SIGPENDING); -+} -+ -+/* -+ * This is the hook from the signals code, called with the siglock held. -+ * Here is the ideal place to stop. We also dequeue and intercept signals. -+ */ -+int utrace_get_signal(struct task_struct *task, struct pt_regs *regs, -+ siginfo_t *info, struct k_sigaction *return_ka) -+ __releases(task->sighand->siglock) -+ __acquires(task->sighand->siglock) -+{ -+ struct utrace *utrace; -+ struct k_sigaction *ka; -+ INIT_REPORT(report); -+ struct utrace_engine *engine; -+ const struct utrace_engine_ops *ops; -+ unsigned long event, want; -+ u32 ret; -+ int signr; -+ -+ utrace = &task->utrace; -+ if (utrace->interrupt || utrace->report || utrace->signal_handler) { -+ /* -+ * We've been asked for an explicit report before we -+ * even check for pending signals. -+ */ -+ -+ spin_unlock_irq(&task->sighand->siglock); -+ -+ spin_lock(&utrace->lock); -+ -+ splice_attaching(utrace); -+ -+ if (unlikely(!utrace->interrupt) && unlikely(!utrace->report)) -+ report.result = UTRACE_SIGNAL_IGN; -+ else if (utrace->signal_handler) -+ report.result = UTRACE_SIGNAL_HANDLER; -+ else -+ report.result = UTRACE_SIGNAL_REPORT; -+ -+ /* -+ * We are now making the report and it's on the -+ * interrupt path, so clear the flags asking for those. -+ */ -+ utrace->interrupt = utrace->report = utrace->signal_handler = 0; -+ /* -+ * Make sure signal_pending() only returns true -+ * if there are real signals pending. 
-+ */ -+ if (signal_pending(task)) { -+ spin_lock_irq(&task->sighand->siglock); -+ recalc_sigpending(); -+ spin_unlock_irq(&task->sighand->siglock); -+ } -+ -+ spin_unlock(&utrace->lock); -+ -+ if (!(task->utrace_flags & UTRACE_EVENT(QUIESCE)) || -+ unlikely(report.result == UTRACE_SIGNAL_IGN)) -+ /* -+ * We only got here to clear utrace->signal_handler. -+ */ -+ return -1; -+ -+ /* -+ * Do a reporting pass for no signal, just for EVENT(QUIESCE). -+ * The engine callbacks can fill in *info and *return_ka. -+ * We'll pass NULL for the @orig_ka argument to indicate -+ * that there was no original signal. -+ */ -+ event = 0; -+ ka = NULL; -+ memset(return_ka, 0, sizeof *return_ka); -+ } else if (!(task->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) || -+ unlikely(task->signal->group_stop_count)) { -+ /* -+ * If no engine is interested in intercepting signals or -+ * we must stop, let the caller just dequeue them normally -+ * or participate in group-stop. -+ */ -+ return 0; -+ } else { -+ /* -+ * Steal the next signal so we can let tracing engines -+ * examine it. From the signal number and sigaction, -+ * determine what normal delivery would do. If no -+ * engine perturbs it, we'll do that by returning the -+ * signal number after setting *return_ka. -+ */ -+ signr = dequeue_signal(task, &task->blocked, info); -+ if (signr == 0) -+ return signr; -+ BUG_ON(signr != info->si_signo); -+ -+ ka = &task->sighand->action[signr - 1]; -+ *return_ka = *ka; -+ -+ /* -+ * We are never allowed to interfere with SIGKILL. -+ * Just punt after filling in *return_ka for our caller. 
-+ */ -+ if (signr == SIGKILL) -+ return signr; -+ -+ if (ka->sa.sa_handler == SIG_IGN) { -+ event = UTRACE_EVENT(SIGNAL_IGN); -+ report.result = UTRACE_SIGNAL_IGN; -+ } else if (ka->sa.sa_handler != SIG_DFL) { -+ event = UTRACE_EVENT(SIGNAL); -+ report.result = UTRACE_SIGNAL_DELIVER; -+ } else if (sig_kernel_coredump(signr)) { -+ event = UTRACE_EVENT(SIGNAL_CORE); -+ report.result = UTRACE_SIGNAL_CORE; -+ } else if (sig_kernel_ignore(signr)) { -+ event = UTRACE_EVENT(SIGNAL_IGN); -+ report.result = UTRACE_SIGNAL_IGN; -+ } else if (signr == SIGSTOP) { -+ event = UTRACE_EVENT(SIGNAL_STOP); -+ report.result = UTRACE_SIGNAL_STOP; -+ } else if (sig_kernel_stop(signr)) { -+ event = UTRACE_EVENT(SIGNAL_STOP); -+ report.result = UTRACE_SIGNAL_TSTP; -+ } else { -+ event = UTRACE_EVENT(SIGNAL_TERM); -+ report.result = UTRACE_SIGNAL_TERM; -+ } -+ -+ /* -+ * Now that we know what event type this signal is, we -+ * can short-circuit if no engines care about those. -+ */ -+ if ((task->utrace_flags & (event | UTRACE_EVENT(QUIESCE))) == 0) -+ return signr; -+ -+ /* -+ * We have some interested engines, so tell them about -+ * the signal and let them change its disposition. -+ */ -+ spin_unlock_irq(&task->sighand->siglock); -+ } -+ -+ /* -+ * This reporting pass chooses what signal disposition we'll act on. -+ */ -+ list_for_each_entry(engine, &utrace->attached, entry) { -+ /* -+ * See start_callback() comment about this barrier. -+ */ -+ utrace->reporting = engine; -+ smp_mb(); -+ -+ /* -+ * This pairs with the barrier in mark_engine_detached(), -+ * see start_callback() comments. 
-+ */ -+ want = engine->flags; -+ smp_rmb(); -+ ops = engine->ops; -+ -+ if ((want & (event | UTRACE_EVENT(QUIESCE))) == 0) { -+ utrace->reporting = NULL; -+ continue; -+ } -+ -+ if (ops->report_signal) -+ ret = (*ops->report_signal)( -+ report.result | report.action, engine, task, -+ regs, info, ka, return_ka); -+ else -+ ret = (report.result | (*ops->report_quiesce)( -+ report.action, engine, task, event)); -+ -+ /* -+ * Avoid a tight loop reporting again and again if some -+ * engine is too stupid. -+ */ -+ switch (utrace_resume_action(ret)) { -+ default: -+ break; -+ case UTRACE_INTERRUPT: -+ case UTRACE_REPORT: -+ ret = (ret & ~UTRACE_RESUME_MASK) | UTRACE_RESUME; -+ break; -+ } -+ -+ finish_callback(task, utrace, &report, engine, ret); -+ } -+ -+ /* -+ * We express the chosen action to the signals code in terms -+ * of a representative signal whose default action does it. -+ * Our caller uses our return value (signr) to decide what to -+ * do, but uses info->si_signo as the signal number to report. -+ */ -+ switch (utrace_signal_action(report.result)) { -+ case UTRACE_SIGNAL_TERM: -+ signr = SIGTERM; -+ break; -+ -+ case UTRACE_SIGNAL_CORE: -+ signr = SIGQUIT; -+ break; -+ -+ case UTRACE_SIGNAL_STOP: -+ signr = SIGSTOP; -+ break; -+ -+ case UTRACE_SIGNAL_TSTP: -+ signr = SIGTSTP; -+ break; -+ -+ case UTRACE_SIGNAL_DELIVER: -+ signr = info->si_signo; -+ -+ if (return_ka->sa.sa_handler == SIG_DFL) { -+ /* -+ * We'll do signr's normal default action. -+ * For ignore, we'll fall through below. -+ * For stop/death, break locks and returns it. -+ */ -+ if (likely(signr) && !sig_kernel_ignore(signr)) -+ break; -+ } else if (return_ka->sa.sa_handler != SIG_IGN && -+ likely(signr)) { -+ /* -+ * Complete the bookkeeping after the report. -+ * The handler will run. If an engine wanted to -+ * stop or step, then make sure we do another -+ * report after signal handler setup. 
-+ */ -+ if (report.action != UTRACE_RESUME) -+ report.action = UTRACE_INTERRUPT; -+ finish_report(&report, task, utrace); -+ -+ if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) -+ push_back_signal(task, info); -+ else -+ spin_lock_irq(&task->sighand->siglock); -+ -+ /* -+ * We do the SA_ONESHOT work here since the -+ * normal path will only touch *return_ka now. -+ */ -+ if (unlikely(return_ka->sa.sa_flags & SA_ONESHOT)) { -+ return_ka->sa.sa_flags &= ~SA_ONESHOT; -+ if (likely(valid_signal(signr))) { -+ ka = &task->sighand->action[signr - 1]; -+ ka->sa.sa_handler = SIG_DFL; -+ } -+ } -+ -+ return signr; -+ } -+ -+ /* Fall through for an ignored signal. */ -+ -+ case UTRACE_SIGNAL_IGN: -+ case UTRACE_SIGNAL_REPORT: -+ default: -+ /* -+ * If the signal is being ignored, then we are on the way -+ * directly back to user mode. We can stop here, or step, -+ * as in utrace_resume(), above. After we've dealt with that, -+ * our caller will relock and come back through here. -+ */ -+ finish_resume_report(&report, task, utrace); -+ -+ if (unlikely(fatal_signal_pending(task))) { -+ /* -+ * The only reason we woke up now was because of a -+ * SIGKILL. Don't do normal dequeuing in case it -+ * might get a signal other than SIGKILL. That would -+ * perturb the death state so it might differ from -+ * what the debugger would have allowed to happen. -+ * Instead, pluck out just the SIGKILL to be sure -+ * we'll die immediately with nothing else different -+ * from the quiescent state the debugger wanted us in. 
-+ */ -+ sigset_t sigkill_only; -+ siginitsetinv(&sigkill_only, sigmask(SIGKILL)); -+ spin_lock_irq(&task->sighand->siglock); -+ signr = dequeue_signal(task, &sigkill_only, info); -+ BUG_ON(signr != SIGKILL); -+ *return_ka = task->sighand->action[SIGKILL - 1]; -+ return signr; -+ } -+ -+ if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) { -+ push_back_signal(task, info); -+ spin_unlock_irq(&task->sighand->siglock); -+ } -+ -+ return -1; -+ } -+ -+ /* -+ * Complete the bookkeeping after the report. -+ * This sets utrace->report if UTRACE_STOP was used. -+ */ -+ finish_report(&report, task, utrace); -+ -+ return_ka->sa.sa_handler = SIG_DFL; -+ -+ if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) -+ push_back_signal(task, info); -+ else -+ spin_lock_irq(&task->sighand->siglock); -+ -+ if (sig_kernel_stop(signr)) -+ task->signal->flags |= SIGNAL_STOP_DEQUEUED; -+ -+ return signr; -+} -+ -+/* -+ * This gets called after a signal handler has been set up. -+ * We set a flag so the next report knows it happened. -+ * If we're already stepping, make sure we do a report_signal. -+ * If not, make sure we get into utrace_resume() where we can -+ * clear the signal_handler flag before resuming. 
-+ */ -+void utrace_signal_handler(struct task_struct *task, int stepping) -+{ -+ struct utrace *utrace = task_utrace_struct(task); -+ -+ spin_lock(&utrace->lock); -+ -+ utrace->signal_handler = 1; -+ if (stepping) { -+ utrace->interrupt = 1; -+ set_tsk_thread_flag(task, TIF_SIGPENDING); -+ } else { -+ set_tsk_thread_flag(task, TIF_NOTIFY_RESUME); -+ } -+ -+ spin_unlock(&utrace->lock); -+} -+ -+/** -+ * utrace_prepare_examine - prepare to examine thread state -+ * @target: thread of interest, a &struct task_struct pointer -+ * @engine: engine pointer returned by utrace_attach_task() -+ * @exam: temporary state, a &struct utrace_examiner pointer -+ * -+ * This call prepares to safely examine the thread @target using -+ * &struct user_regset calls, or direct access to thread-synchronous fields. -+ * -+ * When @target is current, this call is superfluous. When @target is -+ * another thread, it must held stopped via %UTRACE_STOP by @engine. -+ * -+ * This call may block the caller until @target stays stopped, so it must -+ * be called only after the caller is sure @target is about to unschedule. -+ * This means a zero return from a utrace_control() call on @engine giving -+ * %UTRACE_STOP, or a report_quiesce() or report_signal() callback to -+ * @engine that used %UTRACE_STOP in its return value. -+ * -+ * Returns -%ESRCH if @target is dead or -%EINVAL if %UTRACE_STOP was -+ * not used. If @target has started running again despite %UTRACE_STOP -+ * (for %SIGKILL or a spurious wakeup), this call returns -%EAGAIN. -+ * -+ * When this call returns zero, it's safe to use &struct user_regset -+ * calls and task_user_regset_view() on @target and to examine some of -+ * its fields directly. When the examination is complete, a -+ * utrace_finish_examine() call must follow to check whether it was -+ * completed safely. 
-+ */ -+int utrace_prepare_examine(struct task_struct *target, -+ struct utrace_engine *engine, -+ struct utrace_examiner *exam) -+{ -+ int ret = 0; -+ -+ if (unlikely(target == current)) -+ return 0; -+ -+ rcu_read_lock(); -+ if (unlikely(!engine_wants_stop(engine))) -+ ret = -EINVAL; -+ else if (unlikely(target->exit_state)) -+ ret = -ESRCH; -+ else { -+ exam->state = target->state; -+ if (unlikely(exam->state == TASK_RUNNING)) -+ ret = -EAGAIN; -+ else -+ get_task_struct(target); -+ } -+ rcu_read_unlock(); -+ -+ if (likely(!ret)) { -+ exam->ncsw = wait_task_inactive(target, exam->state); -+ put_task_struct(target); -+ if (unlikely(!exam->ncsw)) -+ ret = -EAGAIN; -+ } -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(utrace_prepare_examine); -+ -+/** -+ * utrace_finish_examine - complete an examination of thread state -+ * @target: thread of interest, a &struct task_struct pointer -+ * @engine: engine pointer returned by utrace_attach_task() -+ * @exam: pointer passed to utrace_prepare_examine() call -+ * -+ * This call completes an examination on the thread @target begun by a -+ * paired utrace_prepare_examine() call with the same arguments that -+ * returned success (zero). -+ * -+ * When @target is current, this call is superfluous. When @target is -+ * another thread, this returns zero if @target has remained unscheduled -+ * since the paired utrace_prepare_examine() call returned zero. -+ * -+ * When this returns an error, any examination done since the paired -+ * utrace_prepare_examine() call is unreliable and the data extracted -+ * should be discarded. The error is -%EINVAL if @engine is not -+ * keeping @target stopped, or -%EAGAIN if @target woke up unexpectedly. 
-+ */ -+int utrace_finish_examine(struct task_struct *target, -+ struct utrace_engine *engine, -+ struct utrace_examiner *exam) -+{ -+ int ret = 0; -+ -+ if (unlikely(target == current)) -+ return 0; -+ -+ rcu_read_lock(); -+ if (unlikely(!engine_wants_stop(engine))) -+ ret = -EINVAL; -+ else if (unlikely(target->state != exam->state)) -+ ret = -EAGAIN; -+ else -+ get_task_struct(target); -+ rcu_read_unlock(); -+ -+ if (likely(!ret)) { -+ unsigned long ncsw = wait_task_inactive(target, exam->state); -+ if (unlikely(ncsw != exam->ncsw)) -+ ret = -EAGAIN; -+ put_task_struct(target); -+ } -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(utrace_finish_examine); -+ -+/* -+ * This is declared in linux/regset.h and defined in machine-dependent -+ * code. We put the export here to ensure no machine forgets it. -+ */ -+EXPORT_SYMBOL_GPL(task_user_regset_view); -+ -+/* -+ * Called with rcu_read_lock() held. -+ */ -+void task_utrace_proc_status(struct seq_file *m, struct task_struct *p) -+{ -+ struct utrace *utrace = &p->utrace; -+ seq_printf(m, "Utrace:\t%lx%s%s%s\n", -+ p->utrace_flags, -+ utrace->stopped ? " (stopped)" : "", -+ utrace->report ? " (report)" : "", -+ utrace->interrupt ? 
" (interrupt)" : ""); -+} diff --git a/patches.xen/add-console-use-vt b/patches.xen/add-console-use-vt deleted file mode 100644 index aeba6c6..0000000 --- a/patches.xen/add-console-use-vt +++ /dev/null @@ -1,46 +0,0 @@ -Subject: add console_use_vt -From: kraxel@suse.de -Patch-mainline: no - -$subject says all - ---- head-2011-02-08.orig/drivers/tty/tty_io.c 2011-02-08 09:51:53.000000000 +0100 -+++ head-2011-02-08/drivers/tty/tty_io.c 2011-01-31 14:30:58.000000000 +0100 -@@ -140,6 +140,8 @@ EXPORT_SYMBOL(tty_mutex); - /* Spinlock to protect the tty->tty_files list */ - DEFINE_SPINLOCK(tty_files_lock); - -+int console_use_vt = 1; -+ - static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); - static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); - ssize_t redirected_tty_write(struct file *, const char __user *, -@@ -1834,7 +1836,7 @@ retry_open: - goto got_driver; - } - #ifdef CONFIG_VT -- if (device == MKDEV(TTY_MAJOR, 0)) { -+ if (console_use_vt && device == MKDEV(TTY_MAJOR, 0)) { - extern struct tty_driver *console_driver; - driver = tty_driver_kref_get(console_driver); - index = fg_console; -@@ -3309,7 +3311,8 @@ int __init tty_init(void) - WARN_ON(device_create_file(consdev, &dev_attr_active) < 0); - - #ifdef CONFIG_VT -- vty_init(&console_fops); -+ if (console_use_vt) -+ vty_init(&console_fops); - #endif - return 0; - } ---- head-2011-02-08.orig/include/linux/console.h 2011-02-08 09:51:53.000000000 +0100 -+++ head-2011-02-08/include/linux/console.h 2011-01-31 14:30:58.000000000 +0100 -@@ -73,6 +73,7 @@ extern const struct consw dummy_con; /* - extern const struct consw vga_con; /* VGA text console */ - extern const struct consw newport_con; /* SGI Newport console */ - extern const struct consw prom_con; /* SPARC PROM console */ -+extern int console_use_vt; - - int con_is_bound(const struct consw *csw); - int register_con_driver(const struct consw *csw, int first, int last); diff --git a/patches.xen/ipv6-no-autoconf 
b/patches.xen/ipv6-no-autoconf deleted file mode 100644 index b966811..0000000 --- a/patches.xen/ipv6-no-autoconf +++ /dev/null @@ -1,35 +0,0 @@ -From: Olaf Kirch -Subject: Allow to bring up network interface w/o ipv6 autoconf -References: bnc#161888 -Patch-mainline: no - -When bringing up a xen bridge device, it will always be configured to -use a MAC address of ff:ff:ff:ff:ff:fe. This greatly confuses IPv6 DAD, -which starts logging lots and lots of useless messages to syslog. - -We really want to disable IPv6 on these interfaces, and there doesn't -seem to be a reliable way to do this without bringing the interface -up first (and triggering IPv6 autoconf). - -This patch makes autoconf (DAD and router discovery) depend on the -interface's ability to do multicast. Turning off multicast for an -interface before bringing it up will suppress autoconfiguration. - ---- head-2011-02-08.orig/net/ipv6/addrconf.c 2011-02-08 09:51:53.000000000 +0100 -+++ head-2011-02-08/net/ipv6/addrconf.c 2011-02-08 10:00:00.000000000 +0100 -@@ -2848,6 +2848,7 @@ static void addrconf_dad_start(struct in - goto out; - - if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || -+ !(dev->flags&IFF_MULTICAST) || - idev->cnf.accept_dad < 1 || - !(ifp->flags&IFA_F_TENTATIVE) || - ifp->flags & IFA_F_NODAD) { -@@ -2951,6 +2952,7 @@ static void addrconf_dad_completed(struc - ifp->idev->cnf.forwarding == 2) && - ifp->idev->cnf.rtr_solicits > 0 && - (dev->flags&IFF_LOOPBACK) == 0 && -+ (dev->flags&IFF_MULTICAST) && - (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) { - /* - * If a host as already performed a random delay diff --git a/patches.xen/linux-2.6.19-rc1-kexec-move_segment_code-i386.patch b/patches.xen/linux-2.6.19-rc1-kexec-move_segment_code-i386.patch deleted file mode 100644 index 7991481..0000000 --- a/patches.xen/linux-2.6.19-rc1-kexec-move_segment_code-i386.patch +++ /dev/null @@ -1,155 +0,0 @@ -Subject: kexec: Move asm segment handling code to the assembly file (i386) -From: 
http://xenbits.xensource.com/xen-unstable.hg (tip 13816) -Patch-mainline: n/a - -This patch moves the idt, gdt, and segment handling code from machine_kexec.c -to relocate_kernel.S. The main reason behind this move is to avoid code -duplication in the Xen hypervisor. With this patch all code required to kexec -is put on the control page. - -On top of that this patch also counts as a cleanup - I think it is much -nicer to write assembly directly in assembly files than wrap inline assembly -in C functions for no apparent reason. - -Signed-off-by: Magnus Damm -Acked-by: jbeulich@novell.com - - Applies to 2.6.19-rc1. - jb: fixed up register usage (paralleling what's needed for 2.6.30 on x86-64) - ---- head-2010-01-19.orig/arch/x86/kernel/machine_kexec_32.c 2010-01-19 13:26:10.000000000 +0100 -+++ head-2010-01-19/arch/x86/kernel/machine_kexec_32.c 2010-01-19 14:51:07.000000000 +0100 -@@ -27,48 +27,6 @@ - #include - #include - --static void set_idt(void *newidt, __u16 limit) --{ -- struct desc_ptr curidt; -- -- /* ia32 supports unaliged loads & stores */ -- curidt.size = limit; -- curidt.address = (unsigned long)newidt; -- -- load_idt(&curidt); --} -- -- --static void set_gdt(void *newgdt, __u16 limit) --{ -- struct desc_ptr curgdt; -- -- /* ia32 supports unaligned loads & stores */ -- curgdt.size = limit; -- curgdt.address = (unsigned long)newgdt; -- -- load_gdt(&curgdt); --} -- --static void load_segments(void) --{ --#define __STR(X) #X --#define STR(X) __STR(X) -- -- __asm__ __volatile__ ( -- "\tljmp $"STR(__KERNEL_CS)",$1f\n" -- "\t1:\n" -- "\tmovl $"STR(__KERNEL_DS)",%%eax\n" -- "\tmovl %%eax,%%ds\n" -- "\tmovl %%eax,%%es\n" -- "\tmovl %%eax,%%fs\n" -- "\tmovl %%eax,%%gs\n" -- "\tmovl %%eax,%%ss\n" -- : : : "eax", "memory"); --#undef STR --#undef __STR --} -- - static void machine_kexec_free_page_tables(struct kimage *image) - { - free_page((unsigned long)image->arch.pgd); -@@ -228,24 +186,6 @@ void machine_kexec(struct kimage *image) - page_list[PA_SWAP_PAGE] = 
(page_to_pfn(image->swap_page) - << PAGE_SHIFT); - -- /* -- * The segment registers are funny things, they have both a -- * visible and an invisible part. Whenever the visible part is -- * set to a specific selector, the invisible part is loaded -- * with from a table in memory. At no other time is the -- * descriptor table in memory accessed. -- * -- * I take advantage of this here by force loading the -- * segments, before I zap the gdt with an invalid value. -- */ -- load_segments(); -- /* -- * The gdt & idt are now invalid. -- * If you want to load them you must set up your own idt & gdt. -- */ -- set_gdt(phys_to_virt(0), 0); -- set_idt(phys_to_virt(0), 0); -- - /* now call it */ - image->start = relocate_kernel_ptr((unsigned long)image->head, - (unsigned long)page_list, ---- head-2010-01-19.orig/arch/x86/kernel/relocate_kernel_32.S 2009-06-10 05:05:27.000000000 +0200 -+++ head-2010-01-19/arch/x86/kernel/relocate_kernel_32.S 2010-01-19 14:51:07.000000000 +0100 -@@ -87,14 +87,32 @@ relocate_kernel: - movl PTR(PA_PGD)(%ebp), %eax - movl %eax, %cr3 - -+ /* setup idt */ -+ lidtl idt_48 - relocate_kernel(%edi) -+ -+ /* setup gdt */ -+ leal gdt - relocate_kernel(%edi), %eax -+ movl %eax, (gdt_48 - relocate_kernel) + 2(%edi) -+ lgdtl gdt_48 - relocate_kernel(%edi) -+ -+ /* setup data segment registers */ -+ mov $(gdt_ds - gdt), %eax -+ mov %eax, %ds -+ mov %eax, %es -+ mov %eax, %fs -+ mov %eax, %gs -+ mov %eax, %ss -+ - /* setup a new stack at the end of the physical control page */ - lea PAGE_SIZE(%edi), %esp - -- /* jump to identity mapped page */ -+ /* load new code segment and jump to identity mapped page */ -+ pushl $0 -+ pushl $(gdt_cs - gdt) - movl %edi, %eax - addl $(identity_mapped - relocate_kernel), %eax - pushl %eax -- ret -+ iretl - - identity_mapped: - /* store the start address on the stack */ -@@ -271,5 +289,22 @@ swap_pages: - popl %ebp - ret - -+ .align 16 -+gdt: -+ .quad 0x0000000000000000 /* NULL descriptor */ -+gdt_cs: -+ .quad 0x00cf9a000000ffff 
/* kernel 4GB code at 0x00000000 */ -+gdt_ds: -+ .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ -+gdt_end: -+ -+gdt_48: -+ .word gdt_end - gdt - 1 /* limit */ -+ .long 0 /* base - filled in by code above */ -+ -+idt_48: -+ .word 0 /* limit */ -+ .long 0 /* base */ -+ - .globl kexec_control_code_size - .set kexec_control_code_size, . - relocate_kernel diff --git a/patches.xen/linux-2.6.19-rc1-kexec-move_segment_code-x86_64.patch b/patches.xen/linux-2.6.19-rc1-kexec-move_segment_code-x86_64.patch deleted file mode 100644 index bbd3780..0000000 --- a/patches.xen/linux-2.6.19-rc1-kexec-move_segment_code-x86_64.patch +++ /dev/null @@ -1,150 +0,0 @@ -Subject: kexec: Move asm segment handling code to the assembly file (x86_64) -From: http://xenbits.xensource.com/xen-unstable.hg (tip 13816) -Patch-mainline: n/a - -This patch moves the idt, gdt, and segment handling code from machine_kexec.c -to relocate_kernel.S. The main reason behind this move is to avoid code -duplication in the Xen hypervisor. With this patch all code required to kexec -is put on the control page. - -On top of that this patch also counts as a cleanup - I think it is much -nicer to write assembly directly in assembly files than wrap inline assembly -in C functions for no apparent reason. - -Signed-off-by: Magnus Damm -Acked-by: jbeulich@novell.com - - Applies to 2.6.19-rc1. 
- jb: fixed up register usage for 2.6.30 (bnc#545206) - ---- head-2010-04-15.orig/arch/x86/kernel/machine_kexec_64.c 2010-04-15 09:37:47.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/machine_kexec_64.c 2010-04-15 09:38:56.000000000 +0200 -@@ -203,47 +203,6 @@ static int init_pgtable(struct kimage *i - return init_transition_pgtable(image, level4p); - } - --static void set_idt(void *newidt, u16 limit) --{ -- struct desc_ptr curidt; -- -- /* x86-64 supports unaliged loads & stores */ -- curidt.size = limit; -- curidt.address = (unsigned long)newidt; -- -- __asm__ __volatile__ ( -- "lidtq %0\n" -- : : "m" (curidt) -- ); --}; -- -- --static void set_gdt(void *newgdt, u16 limit) --{ -- struct desc_ptr curgdt; -- -- /* x86-64 supports unaligned loads & stores */ -- curgdt.size = limit; -- curgdt.address = (unsigned long)newgdt; -- -- __asm__ __volatile__ ( -- "lgdtq %0\n" -- : : "m" (curgdt) -- ); --}; -- --static void load_segments(void) --{ -- __asm__ __volatile__ ( -- "\tmovl %0,%%ds\n" -- "\tmovl %0,%%es\n" -- "\tmovl %0,%%ss\n" -- "\tmovl %0,%%fs\n" -- "\tmovl %0,%%gs\n" -- : : "a" (__KERNEL_DS) : "memory" -- ); --} -- - int machine_kexec_prepare(struct kimage *image) - { - unsigned long start_pgtable; -@@ -311,24 +270,6 @@ void machine_kexec(struct kimage *image) - page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) - << PAGE_SHIFT); - -- /* -- * The segment registers are funny things, they have both a -- * visible and an invisible part. Whenever the visible part is -- * set to a specific selector, the invisible part is loaded -- * with from a table in memory. At no other time is the -- * descriptor table in memory accessed. -- * -- * I take advantage of this here by force loading the -- * segments, before I zap the gdt with an invalid value. -- */ -- load_segments(); -- /* -- * The gdt & idt are now invalid. -- * If you want to load them you must set up your own idt & gdt. 
-- */ -- set_gdt(phys_to_virt(0), 0); -- set_idt(phys_to_virt(0), 0); -- - /* now call it */ - image->start = relocate_kernel((unsigned long)image->head, - (unsigned long)page_list, ---- head-2010-04-15.orig/arch/x86/kernel/relocate_kernel_64.S 2010-04-15 09:37:47.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/relocate_kernel_64.S 2010-01-19 14:51:10.000000000 +0100 -@@ -91,13 +91,30 @@ relocate_kernel: - /* Switch to the identity mapped page tables */ - movq %r9, %cr3 - -+ /* setup idt */ -+ lidtq idt_80 - relocate_kernel(%r8) -+ -+ /* setup gdt */ -+ leaq gdt - relocate_kernel(%r8), %rax -+ movq %rax, (gdt_80 - relocate_kernel) + 2(%r8) -+ lgdtq gdt_80 - relocate_kernel(%r8) -+ -+ /* setup data segment registers */ -+ xorl %eax, %eax -+ movl %eax, %ds -+ movl %eax, %es -+ movl %eax, %fs -+ movl %eax, %gs -+ movl %eax, %ss -+ - /* setup a new stack at the end of the physical control page */ - lea PAGE_SIZE(%r8), %rsp - -- /* jump to identity mapped page */ -+ /* load new code segment and jump to identity mapped page */ - addq $(identity_mapped - relocate_kernel), %r8 -+ pushq $(gdt_cs - gdt) - pushq %r8 -- ret -+ lretq - - identity_mapped: - /* store the start address on the stack */ -@@ -262,5 +279,20 @@ swap_pages: - 3: - ret - -+ .align 16 -+gdt: -+ .quad 0x0000000000000000 /* NULL descriptor */ -+gdt_cs: -+ .quad 0x00af9a000000ffff -+gdt_end: -+ -+gdt_80: -+ .word gdt_end - gdt - 1 /* limit */ -+ .quad 0 /* base - filled in by code above */ -+ -+idt_80: -+ .word 0 /* limit */ -+ .quad 0 /* base */ -+ - .globl kexec_control_code_size - .set kexec_control_code_size, . 
- relocate_kernel diff --git a/patches.xen/pci-guestdev b/patches.xen/pci-guestdev deleted file mode 100644 index 5b75dce..0000000 --- a/patches.xen/pci-guestdev +++ /dev/null @@ -1,2696 +0,0 @@ -Subject: xen/dom0: Reserve devices for guest use -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1023:85ca9742b8b9) -Patch-mainline: n/a - -jb: Added support for reassign_resources=all (bnc#574224). -jb: Used kzalloc() instead of all kmalloc()+memset() pairs. -jb: Added support for guestiomuldev=all. -jb: split /dev/xen/pci_iomul driver to be separate (so it can be a module) -Acked-by: jbeulich@novell.com - ---- head-2011-03-11.orig/Documentation/kernel-parameters.txt 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/Documentation/kernel-parameters.txt 2011-03-11 10:49:08.000000000 +0100 -@@ -815,6 +815,24 @@ bytes respectively. Such letter suffixes - gpt [EFI] Forces disk with valid GPT signature but - invalid Protective MBR to be treated as GPT. - -+ guestdev= [PCI,ACPI,XEN] -+ Format: {|}][,{|}[,...]] -+ Format of device path: [:]-.[-.[,...]][+iomul] -+ Format of sbdf: [:]:.[+iomul] -+ Specifies PCI device for guest domain. -+ If PCI-PCI bridge is specified, all PCI devices -+ behind PCI-PCI bridge are reserved. -+ +iomul means that this PCI function will share -+ IO ports with other +iomul functions under same -+ switch. NOTE: if +iomul is specfied, all the functions -+ of the device will share IO ports. -+ -+ guestiomuldev= [PCI,ACPI,XEN] -+ Format: [sbd][,][,...] -+ Format of sbdf: [:]: -+ Note: function shouldn't be specified. -+ Specifies PCI device for IO port multiplexing driver. -+ - hashdist= [KNL,NUMA] Large hashes allocated during boot - are distributed across NUMA nodes. Defaults on - for 64bit NUMA, off otherwise. -@@ -2162,6 +2180,10 @@ bytes respectively. Such letter suffixes - Run specified binary instead of /init from the ramdisk, - used for early userspace startup. See initrd. 
- -+ reassign_resources [PCI,ACPI,XEN] -+ Use guestdev= parameter to reassign device's -+ resources, or specify =all here. -+ - reboot= [BUGS=X86-32,BUGS=ARM,BUGS=IA-64] Rebooting mode - Format: [,[,...]] - See arch/*/kernel/reboot.c or arch/*/kernel/process.c ---- head-2011-03-11.orig/drivers/acpi/pci_root.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/acpi/pci_root.c 2011-01-31 14:31:27.000000000 +0100 -@@ -448,6 +448,41 @@ out: - } - EXPORT_SYMBOL(acpi_pci_osc_control_set); - -+#ifdef CONFIG_PCI_GUESTDEV -+#include -+ -+static ssize_t seg_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct list_head *entry; -+ -+ list_for_each(entry, &acpi_pci_roots) { -+ struct acpi_pci_root *root; -+ root = list_entry(entry, struct acpi_pci_root, node); -+ if (&root->device->dev == dev) -+ return sprintf(buf, "%04x\n", root->segment); -+ } -+ return 0; -+} -+static DEVICE_ATTR(seg, 0444, seg_show, NULL); -+ -+static ssize_t bbn_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct list_head *entry; -+ -+ list_for_each(entry, &acpi_pci_roots) { -+ struct acpi_pci_root *root; -+ root = list_entry(entry, struct acpi_pci_root, node); -+ if (&root->device->dev == dev) -+ return sprintf(buf, "%02x\n", -+ (unsigned int)root->secondary.start); -+ } -+ return 0; -+} -+static DEVICE_ATTR(bbn, 0444, bbn_show, NULL); -+#endif -+ - static int __devinit acpi_pci_root_add(struct acpi_device *device) - { - unsigned long long segment, bus; -@@ -599,6 +634,13 @@ static int __devinit acpi_pci_root_add(s - "ACPI _OSC request failed (code %d)\n", status); - } - -+#ifdef CONFIG_PCI_GUESTDEV -+ if (device_create_file(&device->dev, &dev_attr_seg)) -+ dev_warn(&device->dev, "could not create seg attr\n"); -+ if (device_create_file(&device->dev, &dev_attr_bbn)) -+ dev_warn(&device->dev, "could not create bbn attr\n"); -+#endif -+ - pci_acpi_add_bus_pm_notifier(device, root->bus); - if (device->wakeup.flags.run_wake) - 
device_set_run_wake(root->bus->bridge, true); -@@ -646,3 +688,31 @@ static int __init acpi_pci_root_init(voi - } - - subsys_initcall(acpi_pci_root_init); -+ -+#ifdef CONFIG_PCI_GUESTDEV -+int acpi_pci_get_root_seg_bbn(char *hid, char *uid, int *seg, int *bbn) -+{ -+ struct list_head *entry; -+ -+ list_for_each(entry, &acpi_pci_roots) { -+ struct acpi_pci_root *root; -+ -+ root = list_entry(entry, struct acpi_pci_root, node); -+ if (strcmp(acpi_device_hid(root->device), hid)) -+ continue; -+ -+ if (!root->device->pnp.unique_id) { -+ if (strlen(uid)) -+ continue; -+ } else { -+ if (strcmp(root->device->pnp.unique_id, uid)) -+ continue; -+ } -+ -+ *seg = (int)root->segment; -+ *bbn = (int)root->secondary.start; -+ return TRUE; -+ } -+ return FALSE; -+} -+#endif ---- head-2011-03-11.orig/drivers/acpi/scan.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/acpi/scan.c 2011-01-31 14:31:27.000000000 +0100 -@@ -175,6 +175,16 @@ acpi_device_hid_show(struct device *dev, - } - static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); - -+#ifdef CONFIG_PCI_GUESTDEV -+static ssize_t -+acpi_device_uid_show(struct device *dev, struct device_attribute *attr, char *buf) { -+ struct acpi_device *acpi_dev = to_acpi_device(dev); -+ -+ return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id); -+} -+static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL); -+#endif -+ - static ssize_t - acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct acpi_device *acpi_dev = to_acpi_device(dev); -@@ -217,6 +227,13 @@ static int acpi_device_setup_files(struc - goto end; - } - -+#ifdef CONFIG_PCI_GUESTDEV -+ if(dev->pnp.unique_id) { -+ result = device_create_file(&dev->dev, &dev_attr_uid); -+ if(result) -+ goto end; -+ } -+#endif - /* - * If device has _EJ0, 'eject' file is created that is used to trigger - * hot-removal function from userland. 
-@@ -280,6 +297,9 @@ static void acpi_free_ids(struct acpi_de - kfree(id->id); - kfree(id); - } -+#ifdef CONFIG_PCI_GUESTDEV -+ kfree(device->pnp.unique_id); -+#endif - } - - static void acpi_device_release(struct device *dev) -@@ -1131,6 +1151,11 @@ static void acpi_device_set_id(struct ac - for (i = 0; i < cid_list->count; i++) - acpi_add_id(device, cid_list->ids[i].string); - } -+#ifdef CONFIG_PCI_GUESTDEV -+ if (info->valid & ACPI_VALID_UID) -+ device->pnp.unique_id = kstrdup(info->unique_id.string, -+ GFP_KERNEL); -+#endif - if (info->valid & ACPI_VALID_ADR) { - device->pnp.bus_address = info->address; - device->flags.bus_address = 1; ---- head-2011-03-11.orig/drivers/pci/Kconfig 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/pci/Kconfig 2011-01-31 14:31:27.000000000 +0100 -@@ -31,6 +31,20 @@ config PCI_DEBUG - - When in doubt, say N. - -+config PCI_GUESTDEV -+ bool "PCI Device Reservation for Passthrough" -+ depends on PCI && ACPI && XEN -+ default y -+ help -+ Say Y here if you want to reserve PCI device for passthrough. -+ -+config PCI_IOMULTI -+ tristate "PCI Device IO Multiplex for Passthrough" -+ depends on PCI && ACPI && XEN -+ default y -+ help -+ Say Y here if you need io multiplexing. -+ - config PCI_STUB - tristate "PCI Stub driver" - depends on PCI ---- head-2011-03-11.orig/drivers/pci/Makefile 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/pci/Makefile 2011-01-31 14:31:28.000000000 +0100 -@@ -7,6 +7,10 @@ obj-y += access.o bus.o probe.o remove. 
- irq.o vpd.o - obj-$(CONFIG_PROC_FS) += proc.o - obj-$(CONFIG_SYSFS) += slot.o -+obj-$(CONFIG_PCI_GUESTDEV) += guestdev.o -+obj-$(CONFIG_PCI_IOMULTI) += pci-iomul.o -+iomul-$(CONFIG_PCI_IOMULTI) := iomulti.o -+obj-y += $(iomul-y) $(iomul-m) - - obj-$(CONFIG_PCI_QUIRKS) += quirks.o - ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/drivers/pci/guestdev.c 2011-01-31 14:31:28.000000000 +0100 -@@ -0,0 +1,880 @@ -+/* -+ * Copyright (c) 2008, 2009 NEC Corporation. -+ * Copyright (c) 2009 Isaku Yamahata -+ * VA Linux Systems Japan K.K. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple -+ * Place - Suite 330, Boston, MA 02111-1307 USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define HID_LEN 8 -+#define UID_LEN 8 -+#define DEV_LEN 2 -+#define FUNC_LEN 1 -+#define DEV_NUM_MAX 31 -+#define FUNC_NUM_MAX 7 -+#define INVALID_SEG (-1) -+#define INVALID_BBN (-1) -+#define GUESTDEV_STR_MAX 128 -+ -+#define GUESTDEV_FLAG_TYPE_MASK 0x3 -+#define GUESTDEV_FLAG_DEVICEPATH 0x1 -+#define GUESTDEV_FLAG_SBDF 0x2 -+ -+#define GUESTDEV_OPT_IOMUL 0x1 -+ -+struct guestdev { -+ int flags; -+ int options; -+ struct list_head root_list; -+ union { -+ struct devicepath { -+ char hid[HID_LEN + 1]; -+ char uid[UID_LEN + 1]; -+ int seg; -+ int bbn; -+ struct devicepath_node *child; -+ } devicepath; -+ struct sbdf { -+ int seg; -+ int bus; -+ int dev; -+ int func; -+ } sbdf; -+ } u; -+}; -+ -+struct devicepath_node { -+ int dev; -+ int func; -+ struct devicepath_node *child; -+}; -+ -+struct pcidev_sbdf { -+ int seg; -+ int bus; -+ struct pcidev_sbdf_node *child; -+}; -+ -+struct pcidev_sbdf_node { -+ int dev; -+ int func; -+ struct pcidev_sbdf_node *child; -+}; -+ -+static char __initdata guestdev_param[COMMAND_LINE_SIZE]; -+static LIST_HEAD(guestdev_list); -+ -+/* Get hid and uid */ -+static int __init pci_get_hid_uid(char *str, char *hid, char *uid) -+{ -+ char *sp, *ep; -+ int len; -+ -+ sp = str; -+ ep = strchr(sp, ':'); -+ if (!ep) { -+ ep = strchr(sp, '-'); -+ if (!ep) -+ goto format_err_end; -+ } -+ /* hid length */ -+ len = ep - sp; -+ if (len <= 0 || HID_LEN < len) -+ goto format_err_end; -+ -+ strlcpy(hid, sp, len); -+ -+ if (*ep == '-') { /* no uid */ -+ uid[0] = '\0'; -+ return TRUE; -+ } -+ -+ sp = ep + 1; -+ ep = strchr(sp, '-'); -+ if (!ep) -+ ep = strchr(sp, '\0'); -+ -+ /* uid length */ -+ len = ep - sp; -+ if (len <= 0 || UID_LEN < len) -+ goto format_err_end; -+ -+ strlcpy(uid, sp, len); -+ return TRUE; -+ -+format_err_end: -+ return FALSE; -+} -+ -+/* Get device and function */ -+static int __init pci_get_dev_func(char *str, int *dev, int 
*func) -+{ -+ if (sscanf(str, "%02x.%01x", dev, func) != 2) -+ goto format_err_end; -+ -+ if (*dev < 0 || DEV_NUM_MAX < *dev) -+ goto format_err_end; -+ -+ if (*func < 0 || FUNC_NUM_MAX < *func) -+ goto format_err_end; -+ -+ return TRUE; -+ -+format_err_end: -+ return FALSE; -+} -+ -+/* Check extended guestdev parameter format error */ -+static int __init pci_check_extended_guestdev_format(char *str) -+{ -+ int flg; -+ char *p; -+ -+ /* Check extended format */ -+ if (strpbrk(str, "(|)") == NULL) -+ return TRUE; -+ -+ flg = 0; -+ p = str; -+ while (*p) { -+ switch (*p) { -+ case '(': -+ /* Check nesting error */ -+ if (flg != 0) -+ goto format_err_end; -+ flg = 1; -+ /* Check position of '(' is head or -+ previos charactor of '(' is not '-'. */ -+ if (p == str || *(p - 1) != '-') -+ goto format_err_end; -+ break; -+ case ')': -+ /* Check nesting error */ -+ if (flg != 1) -+ goto format_err_end; -+ flg = 0; -+ /* Check next charactor of ')' is not '\0' */ -+ if (*(p + 1) != '\0') -+ goto format_err_end; -+ break; -+ case '|': -+ /* Check position of '|' is outside of '(' and ')' */ -+ if (flg != 1) -+ goto format_err_end; -+ break; -+ default: -+ break; -+ } -+ p++; -+ } -+ /* Check number of '(' and ')' are not equal */ -+ if (flg != 0) -+ goto format_err_end; -+ return TRUE; -+ -+format_err_end: -+ pr_err("PCI: The format of the guestdev parameter is illegal. 
[%s]\n", -+ str); -+ return FALSE; -+} -+ -+/* Make guestdev strings */ -+static void pci_make_guestdev_str(struct guestdev *gdev, -+ char *gdev_str, int buf_size) -+{ -+ struct devicepath_node *node; -+ int count; -+ -+ switch (gdev->flags & GUESTDEV_FLAG_TYPE_MASK) { -+ case GUESTDEV_FLAG_DEVICEPATH: -+ memset(gdev_str, 0, buf_size); -+ -+ if (strlen(gdev->u.devicepath.uid)) -+ count = snprintf(gdev_str, buf_size, "%s:%s", -+ gdev->u.devicepath.hid, -+ gdev->u.devicepath.uid); -+ else -+ count = snprintf(gdev_str, buf_size, "%s", -+ gdev->u.devicepath.hid); -+ if (count < 0) -+ return; -+ -+ node = gdev->u.devicepath.child; -+ while (node) { -+ gdev_str += count; -+ buf_size -= count; -+ if (buf_size <= 0) -+ return; -+ count = snprintf(gdev_str, buf_size, "-%02x.%01x", -+ node->dev, node->func); -+ if (count < 0) -+ return; -+ node = node->child; -+ } -+ break; -+ case GUESTDEV_FLAG_SBDF: -+ snprintf(gdev_str, buf_size, "%04x:%02x:%02x.%01x", -+ gdev->u.sbdf.seg, gdev->u.sbdf.bus, -+ gdev->u.sbdf.dev, gdev->u.sbdf.func); -+ break; -+ default: -+ BUG(); -+ } -+} -+ -+/* Free guestdev and nodes */ -+static void __init pci_free_guestdev(struct guestdev *gdev) -+{ -+ struct devicepath_node *node, *next; -+ -+ if (!gdev) -+ return; -+ if (gdev->flags & GUESTDEV_FLAG_DEVICEPATH) { -+ node = gdev->u.devicepath.child; -+ while (node) { -+ next = node->child; -+ kfree(node); -+ node = next; -+ } -+ } -+ list_del(&gdev->root_list); -+ kfree(gdev); -+} -+ -+/* Copy guestdev and nodes */ -+struct guestdev __init *pci_copy_guestdev(struct guestdev *gdev_src) -+{ -+ struct guestdev *gdev; -+ struct devicepath_node *node, *node_src, *node_upper; -+ -+ BUG_ON(!(gdev_src->flags & GUESTDEV_FLAG_DEVICEPATH)); -+ -+ gdev = kzalloc(sizeof(*gdev), GFP_KERNEL); -+ if (!gdev) -+ goto allocate_err_end; -+ -+ INIT_LIST_HEAD(&gdev->root_list); -+ gdev->flags = gdev_src->flags; -+ gdev->options = gdev_src->options; -+ strcpy(gdev->u.devicepath.hid, gdev_src->u.devicepath.hid); -+ 
strcpy(gdev->u.devicepath.uid, gdev_src->u.devicepath.uid); -+ gdev->u.devicepath.seg = gdev_src->u.devicepath.seg; -+ gdev->u.devicepath.bbn = gdev_src->u.devicepath.bbn; -+ -+ node_upper = NULL; -+ -+ node_src = gdev_src->u.devicepath.child; -+ while (node_src) { -+ node = kzalloc(sizeof(*node), GFP_KERNEL); -+ if (!node) -+ goto allocate_err_end; -+ node->dev = node_src->dev; -+ node->func = node_src->func; -+ if (!node_upper) -+ gdev->u.devicepath.child = node; -+ else -+ node_upper->child = node; -+ node_upper = node; -+ node_src = node_src->child; -+ } -+ -+ return gdev; -+ -+allocate_err_end: -+ if (gdev) -+ pci_free_guestdev(gdev); -+ pr_err("PCI: failed to allocate memory\n"); -+ return NULL; -+} -+ -+/* Make guestdev from path strings */ -+static int __init pci_make_devicepath_guestdev(char *path_str, int options) -+{ -+ char hid[HID_LEN + 1], uid[UID_LEN + 1]; -+ char *sp, *ep; -+ struct guestdev *gdev, *gdev_org; -+ struct devicepath_node *node, *node_tmp; -+ int dev, func, ret_val; -+ -+ ret_val = 0; -+ gdev = gdev_org = NULL; -+ sp = path_str; -+ /* Look for end of hid:uid'-' */ -+ ep = strchr(sp, '-'); -+ /* Only hid, uid. 
(No dev, func) */ -+ if (!ep) -+ goto format_err_end; -+ -+ memset(hid, 0 ,sizeof(hid)); -+ memset(uid, 0, sizeof(uid)); -+ if (!pci_get_hid_uid(sp, hid, uid)) -+ goto format_err_end; -+ -+ gdev_org = kzalloc(sizeof(*gdev_org), GFP_KERNEL); -+ if (!gdev_org) -+ goto allocate_err_end; -+ INIT_LIST_HEAD(&gdev_org->root_list); -+ gdev_org->flags = GUESTDEV_FLAG_DEVICEPATH; -+ gdev_org->options = options; -+ strcpy(gdev_org->u.devicepath.hid, hid); -+ strcpy(gdev_org->u.devicepath.uid, uid); -+ gdev_org->u.devicepath.seg = INVALID_SEG; -+ gdev_org->u.devicepath.bbn = INVALID_BBN; -+ -+ gdev = gdev_org; -+ -+ sp = ep + 1; -+ ep = sp; -+ do { -+ if (*sp == '(') { -+ sp++; -+ if (strchr(sp, '|')) { -+ gdev = pci_copy_guestdev(gdev_org); -+ if (!gdev) { -+ ret_val = -ENOMEM; -+ goto end; -+ } -+ } -+ continue; -+ } -+ if (gdev && pci_get_dev_func(sp, &dev, &func)) { -+ node = kzalloc(sizeof(*node), GFP_KERNEL); -+ if (!node) -+ goto allocate_err_end; -+ node->dev = dev; -+ node->func = func; -+ /* add node to end of guestdev */ -+ if (gdev->u.devicepath.child) { -+ node_tmp = gdev->u.devicepath.child; -+ while (node_tmp->child) { -+ node_tmp = node_tmp->child; -+ } -+ node_tmp->child = node; -+ } else -+ gdev->u.devicepath.child = node; -+ } else if (gdev) { -+ pr_err("PCI: Can't obtain dev# and #func# from %s.\n", -+ sp); -+ ret_val = -EINVAL; -+ if (gdev == gdev_org) -+ goto end; -+ pci_free_guestdev(gdev); -+ gdev = NULL; -+ } -+ -+ ep = strpbrk(sp, "-|)"); -+ if (!ep) -+ ep = strchr(sp, '\0'); -+ /* Is *ep '|' OR ')' OR '\0' ? */ -+ if (*ep != '-') { -+ if (gdev) -+ list_add_tail(&gdev->root_list, &guestdev_list); -+ if (*ep == '|') { -+ /* Between '|' and '|' ? 
*/ -+ if (strchr(ep + 1, '|')) { -+ gdev = pci_copy_guestdev(gdev_org); -+ if (!gdev) { -+ ret_val = -ENOMEM; -+ goto end; -+ } -+ } else { -+ gdev = gdev_org; -+ gdev_org = NULL; -+ } -+ } else { -+ gdev_org = NULL; -+ gdev = NULL; -+ } -+ } -+ if (*ep == ')') -+ ep++; -+ sp = ep + 1; -+ } while (*ep != '\0'); -+ -+ goto end; -+ -+format_err_end: -+ pr_err("PCI: The format of the guestdev parameter is illegal. [%s]\n", -+ path_str); -+ ret_val = -EINVAL; -+ goto end; -+ -+allocate_err_end: -+ pr_err("PCI: failed to allocate memory\n"); -+ ret_val = -ENOMEM; -+ goto end; -+ -+end: -+ if (gdev_org && (gdev_org != gdev)) -+ pci_free_guestdev(gdev_org); -+ if (gdev) -+ pci_free_guestdev(gdev); -+ return ret_val; -+} -+ -+static int __init pci_make_sbdf_guestdev(char* str, int options) -+{ -+ struct guestdev *gdev; -+ int seg, bus, dev, func; -+ -+ if (sscanf(str, "%x:%x:%x.%x", &seg, &bus, &dev, &func) != 4) { -+ seg = 0; -+ if (sscanf(str, "%x:%x.%x", &bus, &dev, &func) != 3) -+ return -EINVAL; -+ } -+ gdev = kmalloc(sizeof(*gdev), GFP_KERNEL); -+ if (!gdev) { -+ pr_err("PCI: failed to allocate memory\n"); -+ return -ENOMEM; -+ } -+ INIT_LIST_HEAD(&gdev->root_list); -+ gdev->flags = GUESTDEV_FLAG_SBDF; -+ gdev->options = options; -+ gdev->u.sbdf.seg = seg; -+ gdev->u.sbdf.bus = bus; -+ gdev->u.sbdf.dev = dev; -+ gdev->u.sbdf.func = func; -+ list_add_tail(&gdev->root_list, &guestdev_list); -+ return 0; -+} -+ -+static int __init pci_parse_options(const char *str) -+{ -+ int options = 0; -+ char *ep; -+ -+ while (str) { -+ str++; -+ ep = strchr(str, '+'); -+ if (ep) -+ ep = '\0'; /* Chop */ -+ -+ if (!strcmp(str, "iomul")) -+ options |= GUESTDEV_OPT_IOMUL; -+ -+ str = ep; -+ } -+ return options; -+} -+ -+/* Parse guestdev parameter */ -+static int __init pci_parse_guestdev(void) -+{ -+ int len; -+ char *sp, *ep, *op; -+ int options; -+ struct list_head *head; -+ struct guestdev *gdev; -+ char path_str[GUESTDEV_STR_MAX]; -+ int ret_val = 0; -+ -+ len = 
strlen(guestdev_param); -+ if (len == 0) -+ return 0; -+ -+ sp = guestdev_param; -+ -+ do { -+ ep = strchr(sp, ','); -+ /* Chop */ -+ if (ep) -+ *ep = '\0'; -+ options = 0; -+ op = strchr(sp, '+'); -+ if (op && (!ep || op < ep)) { -+ options = pci_parse_options(op); -+ *op = '\0'; /* Chop */ -+ } -+ ret_val = pci_make_sbdf_guestdev(sp, options); -+ if (ret_val == -EINVAL) { -+ if (pci_check_extended_guestdev_format(sp)) { -+ ret_val = pci_make_devicepath_guestdev( -+ sp, options); -+ if (ret_val && ret_val != -EINVAL) -+ break; -+ } -+ } else if (ret_val) -+ break; -+ -+ if (ep) -+ ep++; -+ sp = ep; -+ } while (ep); -+ -+ list_for_each(head, &guestdev_list) { -+ gdev = list_entry(head, struct guestdev, root_list); -+ pci_make_guestdev_str(gdev, path_str, GUESTDEV_STR_MAX); -+ pr_debug("PCI: %s has been reserved for guest domain.\n", -+ path_str); -+ } -+ return 0; -+} -+ -+arch_initcall(pci_parse_guestdev); -+ -+/* Get command line */ -+static int __init pci_guestdev_setup(char *str) -+{ -+ if (strlen(str) >= COMMAND_LINE_SIZE) -+ return 0; -+ strlcpy(guestdev_param, str, sizeof(guestdev_param)); -+ return 1; -+} -+ -+__setup("guestdev=", pci_guestdev_setup); -+ -+/* Free sbdf and nodes */ -+static void pci_free_sbdf(struct pcidev_sbdf *sbdf) -+{ -+ struct pcidev_sbdf_node *node, *next; -+ -+ node = sbdf->child; -+ while (node) { -+ next = node->child; -+ kfree(node); -+ node = next; -+ } -+ /* Skip kfree(sbdf) */ -+} -+ -+/* Does PCI device belong to sub tree specified by guestdev with device path? 
*/ -+typedef int (*pci_node_match_t)(const struct devicepath_node *gdev_node, -+ const struct pcidev_sbdf_node *sbdf_node, -+ int options); -+ -+static int pci_node_match(const struct devicepath_node *gdev_node, -+ const struct pcidev_sbdf_node *sbdf_node, -+ int options_unused) -+{ -+ return (gdev_node->dev == sbdf_node->dev && -+ gdev_node->func == sbdf_node->func); -+} -+ -+static int pci_is_in_devicepath_sub_tree(struct guestdev *gdev, -+ struct pcidev_sbdf *sbdf, -+ pci_node_match_t match) -+{ -+ int seg, bbn; -+ struct devicepath_node *gdev_node; -+ struct pcidev_sbdf_node *sbdf_node; -+ -+ if (!gdev || !sbdf) -+ return FALSE; -+ -+ BUG_ON(!(gdev->flags & GUESTDEV_FLAG_DEVICEPATH)); -+ -+ /* Compare seg and bbn */ -+ if (gdev->u.devicepath.seg == INVALID_SEG || -+ gdev->u.devicepath.bbn == INVALID_BBN) { -+ if (acpi_pci_get_root_seg_bbn(gdev->u.devicepath.hid, -+ gdev->u.devicepath.uid, &seg, &bbn)) { -+ gdev->u.devicepath.seg = seg; -+ gdev->u.devicepath.bbn = bbn; -+ } else -+ return FALSE; -+ } -+ -+ if (gdev->u.devicepath.seg != sbdf->seg || -+ gdev->u.devicepath.bbn != sbdf->bus) -+ return FALSE; -+ -+ gdev_node = gdev->u.devicepath.child; -+ sbdf_node = sbdf->child; -+ -+ /* Compare dev and func */ -+ while (gdev_node) { -+ if (!sbdf_node) -+ return FALSE; -+ if (!match(gdev_node, sbdf_node, gdev->options)) -+ return FALSE; -+ gdev_node = gdev_node->child; -+ sbdf_node = sbdf_node->child; -+ } -+ return TRUE; -+} -+ -+/* Get sbdf from device */ -+static int pci_get_sbdf_from_pcidev( -+ struct pci_dev *dev, struct pcidev_sbdf *sbdf) -+{ -+ struct pcidev_sbdf_node *node; -+ -+ if (!dev) -+ return FALSE; -+ -+ for(;;) { -+ node = kzalloc(sizeof(*node), GFP_KERNEL); -+ if (!node) { -+ pr_err("PCI: failed to allocate memory\n"); -+ goto err_end; -+ } -+ node->dev = PCI_SLOT(dev->devfn); -+ node->func = PCI_FUNC(dev->devfn); -+ -+ if (!sbdf->child) -+ sbdf->child = node; -+ else { -+ node->child = sbdf->child; -+ sbdf->child = node; -+ } -+ if (!dev->bus) -+ 
goto err_end; -+ if (!dev->bus->self) -+ break; -+ dev = dev->bus->self; -+ } -+ if (sscanf(dev_name(&dev->dev), "%04x:%02x", &sbdf->seg, &sbdf->bus) != 2) -+ goto err_end; -+ return TRUE; -+ -+err_end: -+ pci_free_sbdf(sbdf); -+ return FALSE; -+} -+ -+/* Does PCI device belong to sub tree specified by guestdev with sbdf? */ -+typedef int (*pci_sbdf_match_t)(const struct guestdev *gdev, -+ const struct pci_dev *dev); -+ -+static int pci_sbdf_match(const struct guestdev *gdev, -+ const struct pci_dev *dev) -+{ -+ int seg, bus; -+ -+ if (sscanf(dev_name(&dev->dev), "%04x:%02x", &seg, &bus) != 2) -+ return FALSE; -+ -+ return gdev->u.sbdf.seg == seg && -+ gdev->u.sbdf.bus == bus && -+ gdev->u.sbdf.dev == PCI_SLOT(dev->devfn) && -+ gdev->u.sbdf.func == PCI_FUNC(dev->devfn); -+} -+ -+static int pci_is_in_sbdf_sub_tree(struct guestdev *gdev, struct pci_dev *dev, -+ pci_sbdf_match_t match) -+{ -+ BUG_ON(!(gdev->flags & GUESTDEV_FLAG_SBDF)); -+ for (;;) { -+ if (match(gdev, dev)) -+ return TRUE; -+ if (!dev->bus || !dev->bus->self) -+ break; -+ dev = dev->bus->self; -+ } -+ return FALSE; -+} -+ -+/* Does PCI device belong to sub tree specified by guestdev parameter? 
*/ -+static int __pci_is_guestdev(struct pci_dev *dev, pci_node_match_t node_match, -+ pci_sbdf_match_t sbdf_match) -+{ -+ struct guestdev *gdev; -+ struct pcidev_sbdf pcidev_sbdf, *sbdf = NULL; -+ struct list_head *head; -+ int result = FALSE; -+ -+ if (!dev) -+ return FALSE; -+ -+ list_for_each(head, &guestdev_list) { -+ gdev = list_entry(head, struct guestdev, root_list); -+ switch (gdev->flags & GUESTDEV_FLAG_TYPE_MASK) { -+ case GUESTDEV_FLAG_DEVICEPATH: -+ if (sbdf == NULL) { -+ sbdf = &pcidev_sbdf; -+ memset(sbdf, 0 ,sizeof(*sbdf)); -+ if (!pci_get_sbdf_from_pcidev(dev, sbdf)) -+ goto out; -+ } -+ if (pci_is_in_devicepath_sub_tree(gdev, sbdf, -+ node_match)) { -+ result = TRUE; -+ goto out; -+ } -+ break; -+ case GUESTDEV_FLAG_SBDF: -+ if (pci_is_in_sbdf_sub_tree(gdev, dev, sbdf_match)) { -+ result = TRUE; -+ goto out; -+ } -+ break; -+ default: -+ BUG(); -+ } -+ } -+out: -+ if (sbdf) -+ pci_free_sbdf(sbdf); -+ return result; -+} -+ -+int pci_is_guestdev(struct pci_dev *dev) -+{ -+ return __pci_is_guestdev(dev, pci_node_match, pci_sbdf_match); -+} -+EXPORT_SYMBOL_GPL(pci_is_guestdev); -+ -+static int reassign_resources; -+ -+static int __init pci_set_reassign_resources(char *str) -+{ -+ if (str && !strcmp(str, "all")) -+ reassign_resources = -1; -+ else -+ reassign_resources = 1; -+ -+ return 1; -+} -+__setup("reassign_resources", pci_set_reassign_resources); -+ -+int pci_is_guestdev_to_reassign(struct pci_dev *dev) -+{ -+ if (reassign_resources < 0) -+ return TRUE; -+ if (reassign_resources) -+ return pci_is_guestdev(dev); -+ return FALSE; -+} -+ -+#if defined(CONFIG_PCI_IOMULTI) || defined(CONFIG_PCI_IOMULTI_MODULE) -+static int pci_iomul_node_match(const struct devicepath_node *gdev_node, -+ const struct pcidev_sbdf_node *sbdf_node, -+ int options) -+{ -+ return (options & GUESTDEV_OPT_IOMUL) && -+ ((gdev_node->child != NULL && -+ sbdf_node->child != NULL && -+ gdev_node->dev == sbdf_node->dev && -+ gdev_node->func == sbdf_node->func) || -+ 
(gdev_node->child == NULL && -+ sbdf_node->child == NULL && -+ gdev_node->dev == sbdf_node->dev)); -+} -+ -+static int pci_iomul_sbdf_match(const struct guestdev *gdev, -+ const struct pci_dev *dev) -+{ -+ int seg, bus; -+ -+ if (sscanf(dev_name(&dev->dev), "%04x:%02x", &seg, &bus) != 2) -+ return FALSE; -+ -+ return (gdev->options & GUESTDEV_OPT_IOMUL) && -+ gdev->u.sbdf.seg == seg && -+ gdev->u.sbdf.bus == bus && -+ gdev->u.sbdf.dev == PCI_SLOT(dev->devfn); -+} -+ -+int pci_is_iomuldev(struct pci_dev *dev) -+{ -+ return __pci_is_guestdev(dev, -+ pci_iomul_node_match, pci_iomul_sbdf_match); -+} -+#endif /* CONFIG_PCI_IOMULTI */ -+ -+/* Check whether the devicepath exists under the pci root bus */ -+static int __init pci_check_devicepath_exists( -+ struct guestdev *gdev, struct pci_bus *bus) -+{ -+ struct devicepath_node *node; -+ struct pci_dev *dev; -+ -+ BUG_ON(!(gdev->flags & GUESTDEV_FLAG_DEVICEPATH)); -+ -+ node = gdev->u.devicepath.child; -+ while (node) { -+ if (!bus) -+ return FALSE; -+ dev = pci_get_slot(bus, PCI_DEVFN(node->dev, node->func)); -+ if (!dev) -+ return FALSE; -+ bus = dev->subordinate; -+ node = node->child; -+ pci_dev_put(dev); -+ } -+ return TRUE; -+} -+ -+/* Check whether the guestdev exists in the PCI device tree */ -+static int __init pci_check_guestdev_exists(void) -+{ -+ struct list_head *head; -+ struct guestdev *gdev; -+ int seg, bbn; -+ struct pci_bus *bus; -+ struct pci_dev *dev; -+ char path_str[GUESTDEV_STR_MAX]; -+ -+ list_for_each(head, &guestdev_list) { -+ gdev = list_entry(head, struct guestdev, root_list); -+ switch (gdev->flags & GUESTDEV_FLAG_TYPE_MASK) { -+ case GUESTDEV_FLAG_DEVICEPATH: -+ if (gdev->u.devicepath.seg == INVALID_SEG || -+ gdev->u.devicepath.bbn == INVALID_BBN) { -+ if (acpi_pci_get_root_seg_bbn( -+ gdev->u.devicepath.hid, -+ gdev->u.devicepath.uid, &seg, &bbn)) { -+ gdev->u.devicepath.seg = seg; -+ gdev->u.devicepath.bbn = bbn; -+ } else { -+ pci_make_guestdev_str(gdev, -+ path_str, GUESTDEV_STR_MAX); -+ 
pr_info("PCI: " -+ "device %s does not exist\n", -+ path_str); -+ continue; -+ } -+ } -+ -+ bus = pci_find_bus(gdev->u.devicepath.seg, -+ gdev->u.devicepath.bbn); -+ if (!bus || !pci_check_devicepath_exists(gdev, bus)) { -+ pci_make_guestdev_str(gdev, path_str, -+ GUESTDEV_STR_MAX); -+ pr_info("PCI: device %s does not exist\n", -+ path_str); -+ } -+ break; -+ case GUESTDEV_FLAG_SBDF: -+ bus = pci_find_bus(gdev->u.sbdf.seg, gdev->u.sbdf.bus); -+ if (bus) { -+ dev = pci_get_slot(bus, -+ PCI_DEVFN(gdev->u.sbdf.dev, -+ gdev->u.sbdf.func)); -+ if (dev) { -+ pci_dev_put(dev); -+ continue; -+ } -+ } -+ pci_make_guestdev_str(gdev, path_str, GUESTDEV_STR_MAX); -+ pr_info("PCI: device %s does not exist\n", path_str); -+ break; -+ default: -+ BUG(); -+ } -+ } -+ return 0; -+} -+ -+fs_initcall(pci_check_guestdev_exists); -+ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/drivers/pci/iomulti.c 2011-01-31 14:31:28.000000000 +0100 -@@ -0,0 +1,898 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+ * Copyright (c) 2009 Isaku Yamahata -+ * VA Linux Systems Japan K.K. 
-+ */ -+ -+#include "iomulti.h" -+#include "pci.h" -+#include -+#include -+#include -+ -+#define PCI_BUS_MAX 255 -+#define PCI_DEV_MAX 31 -+ -+/* see pci_resource_len */ -+static inline resource_size_t pci_iomul_len(const struct resource* r) -+{ -+ if (r->start == 0 && r->start == r->end) -+ return 0; -+ return r->end - r->start + 1; -+} -+ -+#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1)) -+/* stolen from pbus_size_io() */ -+static unsigned long pdev_size_io(struct pci_dev *pdev) -+{ -+ unsigned long size = 0, size1 = 0; -+ int i; -+ -+ for (i = 0; i < PCI_NUM_RESOURCES; i++) { -+ struct resource *r = &pdev->resource[i]; -+ unsigned long r_size; -+ -+ if (!(r->flags & IORESOURCE_IO)) -+ continue; -+ -+ r_size = r->end - r->start + 1; -+ -+ if (r_size < 0x400) -+ /* Might be re-aligned for ISA */ -+ size += r_size; -+ else -+ size1 += r_size; -+ } -+ -+/* To be fixed in 2.5: we should have sort of HAVE_ISA -+ flag in the struct pci_bus. */ -+#if defined(CONFIG_ISA) || defined(CONFIG_EISA) -+ size = (size & 0xff) + ((size & ~0xffUL) << 2); -+#endif -+ size = ROUND_UP(size + size1, 4096); -+ return size; -+} -+ -+/* -+ * primary bus number of PCI-PCI bridge in switch on which -+ * this slots sits. -+ * i.e. the primary bus number of PCI-PCI bridge of downstream port -+ * or root port in switch. -+ * the secondary bus number of PCI-PCI bridge of upstream port -+ * in switch. 
-+ */ -+static inline unsigned char pci_dev_switch_busnr(struct pci_dev *pdev) -+{ -+ if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) -+ return pdev->bus->primary; -+ return pdev->bus->number; -+} -+ -+static LIST_HEAD(switch_list); -+static DEFINE_MUTEX(switch_list_lock); -+ -+/*****************************************************************************/ -+int pci_iomul_switch_io_allocated(const struct pci_iomul_switch *sw) -+{ -+ return !(sw->io_base == 0 || sw->io_base > sw->io_limit); -+} -+EXPORT_SYMBOL_GPL(pci_iomul_switch_io_allocated); -+ -+static struct pci_iomul_switch *pci_iomul_find_switch_locked(int segment, -+ uint8_t bus) -+{ -+ struct pci_iomul_switch *sw; -+ -+ BUG_ON(!mutex_is_locked(&switch_list_lock)); -+ list_for_each_entry(sw, &switch_list, list) { -+ if (sw->segment == segment && sw->bus == bus) -+ return sw; -+ } -+ return NULL; -+} -+ -+static struct pci_iomul_slot *pci_iomul_find_slot_locked( -+ struct pci_iomul_switch *sw, uint8_t busnr, uint8_t dev) -+{ -+ struct pci_iomul_slot *slot; -+ -+ BUG_ON(!mutex_is_locked(&sw->lock)); -+ list_for_each_entry(slot, &sw->slots, sibling) { -+ if (slot->bus == busnr && slot->dev == dev) -+ return slot; -+ } -+ return NULL; -+} -+ -+static void pci_iomul_switch_get(struct pci_iomul_switch *sw); -+/* on successfull exit, sw->lock is locked for use slot and -+ * refrence count of sw is incremented. 
-+ */ -+void pci_iomul_get_lock_switch(struct pci_dev *pdev, -+ struct pci_iomul_switch **swp, -+ struct pci_iomul_slot **slot) -+{ -+ mutex_lock(&switch_list_lock); -+ -+ *swp = pci_iomul_find_switch_locked(pci_domain_nr(pdev->bus), -+ pci_dev_switch_busnr(pdev)); -+ if (*swp == NULL) { -+ *slot = NULL; -+ goto out; -+ } -+ -+ mutex_lock(&(*swp)->lock); -+ *slot = pci_iomul_find_slot_locked(*swp, pdev->bus->number, -+ PCI_SLOT(pdev->devfn)); -+ if (*slot == NULL) { -+ mutex_unlock(&(*swp)->lock); -+ *swp = NULL; -+ } else { -+ pci_iomul_switch_get(*swp); -+ } -+out: -+ mutex_unlock(&switch_list_lock); -+} -+EXPORT_SYMBOL_GPL(pci_iomul_get_lock_switch); -+ -+static struct pci_iomul_switch *pci_iomul_switch_alloc(int segment, -+ uint8_t bus) -+{ -+ struct pci_iomul_switch *sw; -+ -+ BUG_ON(!mutex_is_locked(&switch_list_lock)); -+ -+ sw = kmalloc(sizeof(*sw), GFP_KERNEL); -+ -+ mutex_init(&sw->lock); -+ kref_init(&sw->kref); -+ sw->io_region = NULL; -+ sw->count = 0; -+ sw->current_pdev = NULL; -+ sw->segment = segment; -+ sw->bus = bus; -+ sw->io_base = 0; -+ sw->io_limit = 0; -+ sw->func = NULL; -+ INIT_LIST_HEAD(&sw->slots); -+ -+ return sw; -+} -+ -+static void pci_iomul_switch_add_locked(struct pci_iomul_switch *sw) -+{ -+ BUG_ON(!mutex_is_locked(&switch_list_lock)); -+ list_add(&sw->list, &switch_list); -+} -+ -+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) -+static void pci_iomul_switch_del_locked(struct pci_iomul_switch *sw) -+{ -+ BUG_ON(!mutex_is_locked(&switch_list_lock)); -+ list_del(&sw->list); -+} -+#endif -+ -+static int __devinit pci_iomul_slot_init(struct pci_dev *pdev, -+ struct pci_iomul_slot *slot) -+{ -+ u16 rpcap; -+ u16 cap; -+ -+ rpcap = pci_find_capability(pdev, PCI_CAP_ID_EXP); -+ if (!rpcap) { -+ /* pci device isn't supported */ -+ pr_info("PCI: sharing io port of non PCIe device %s " -+ "isn't supported. 
ignoring.\n", -+ pci_name(pdev)); -+ return -ENOSYS; -+ } -+ -+ pci_read_config_word(pdev, rpcap + PCI_CAP_FLAGS, &cap); -+ switch ((cap & PCI_EXP_FLAGS_TYPE) >> 4) { -+ case PCI_EXP_TYPE_RC_END: -+ pr_info("PCI: io port sharing of root complex integrated " -+ "endpoint %s isn't supported. ignoring.\n", -+ pci_name(pdev)); -+ return -ENOSYS; -+ case PCI_EXP_TYPE_ENDPOINT: -+ case PCI_EXP_TYPE_LEG_END: -+ break; -+ default: -+ pr_info("PCI: io port sharing of non endpoint %s " -+ "doesn't make sense. ignoring.\n", -+ pci_name(pdev)); -+ return -EINVAL; -+ } -+ -+ kref_init(&slot->kref); -+ slot->switch_busnr = pci_dev_switch_busnr(pdev); -+ slot->segment = pci_domain_nr(pdev->bus); -+ slot->bus = pdev->bus->number; -+ slot->dev = PCI_SLOT(pdev->devfn); -+ -+ return 0; -+} -+ -+static struct pci_iomul_slot *__devinit -+pci_iomul_slot_alloc(struct pci_dev *pdev) -+{ -+ struct pci_iomul_slot *slot; -+ -+ slot = kzalloc(sizeof(*slot), GFP_KERNEL); -+ if (slot == NULL) -+ return NULL; -+ -+ if (pci_iomul_slot_init(pdev, slot) != 0) { -+ kfree(slot); -+ return NULL; -+ } -+ return slot; -+} -+ -+static void pci_iomul_slot_add_locked(struct pci_iomul_switch *sw, -+ struct pci_iomul_slot *slot) -+{ -+ BUG_ON(!mutex_is_locked(&sw->lock)); -+ list_add(&slot->sibling, &sw->slots); -+} -+ -+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) -+static void pci_iomul_slot_del_locked(struct pci_iomul_switch *sw, -+ struct pci_iomul_slot *slot) -+{ -+ BUG_ON(!mutex_is_locked(&sw->lock)); -+ list_del(&slot->sibling); -+} -+#endif -+ -+/*****************************************************************************/ -+static int pci_get_sbd(const char *str, -+ int *segment__, uint8_t *bus__, uint8_t *dev__) -+{ -+ int segment; -+ int bus; -+ int dev; -+ -+ if (sscanf(str, "%x:%x:%x", &segment, &bus, &dev) != 3) { -+ if (sscanf(str, "%x:%x", &bus, &dev) == 2) -+ segment = 0; -+ else -+ return -EINVAL; -+ } -+ -+ if (segment < 0 || INT_MAX <= segment) -+ return -EINVAL; 
-+ if (bus < 0 || PCI_BUS_MAX < bus) -+ return -EINVAL; -+ if (dev < 0 || PCI_DEV_MAX < dev) -+ return -EINVAL; -+ -+ *segment__ = segment; -+ *bus__ = bus; -+ *dev__ = dev; -+ return 0; -+} -+ -+static char iomul_param[COMMAND_LINE_SIZE]; -+#define TOKEN_MAX 10 /* SSSS:BB:DD length is 10 */ -+static int pci_is_iomul_dev_param(struct pci_dev *pdev) -+{ -+ int len; -+ char *p; -+ char *next_str; -+ -+ if (!strcmp(iomul_param, "all")) -+ return 1; -+ for (p = &iomul_param[0]; *p != '\0'; p = next_str + 1) { -+ next_str = strchr(p, ','); -+ if (next_str != NULL) -+ len = next_str - p; -+ else -+ len = strlen(p); -+ -+ if (len > 0 && len <= TOKEN_MAX) { -+ char tmp[TOKEN_MAX+1]; -+ int seg; -+ uint8_t bus; -+ uint8_t dev; -+ -+ strlcpy(tmp, p, len); -+ if (pci_get_sbd(tmp, &seg, &bus, &dev) == 0 && -+ pci_domain_nr(pdev->bus) == seg && -+ pdev->bus->number == bus && -+ PCI_SLOT(pdev->devfn) == dev) -+ return 1; -+ } -+ if (next_str == NULL) -+ break; -+ } -+ -+ /* check guestdev=+iomul option */ -+ return pci_is_iomuldev(pdev); -+} -+ -+/* -+ * Format: [:]:[,[:]:[,...] -+ */ -+static int __init pci_iomul_param_setup(char *str) -+{ -+ if (strlen(str) >= COMMAND_LINE_SIZE) -+ return 0; -+ -+ /* parse it after pci bus scanning */ -+ strlcpy(iomul_param, str, sizeof(iomul_param)); -+ return 1; -+} -+__setup("guestiomuldev=", pci_iomul_param_setup); -+ -+/*****************************************************************************/ -+static void __devinit pci_iomul_set_bridge_io_window(struct pci_dev *bridge, -+ uint32_t io_base, -+ uint32_t io_limit) -+{ -+ uint16_t l; -+ uint32_t upper16; -+ -+ io_base >>= 12; -+ io_base <<= 4; -+ io_limit >>= 12; -+ io_limit <<= 4; -+ l = (io_base & 0xff) | ((io_limit & 0xff) << 8); -+ upper16 = ((io_base & 0xffff00) >> 8) | -+ (((io_limit & 0xffff00) >> 8) << 16); -+ -+ /* Temporarily disable the I/O range before updating PCI_IO_BASE. 
*/ -+ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); -+ /* Update lower 16 bits of I/O base/limit. */ -+ pci_write_config_word(bridge, PCI_IO_BASE, l); -+ /* Update upper 16 bits of I/O base/limit. */ -+ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, upper16); -+} -+ -+static void __devinit pci_disable_bridge_io_window(struct pci_dev *bridge) -+{ -+ /* set base = 0xffffff limit = 0x0 */ -+ pci_iomul_set_bridge_io_window(bridge, 0xffffff, 0); -+} -+ -+static int __devinit pci_iomul_func_scan(struct pci_dev *pdev, -+ struct pci_iomul_slot *slot, -+ uint8_t func) -+{ -+ struct pci_iomul_func *f; -+ unsigned int i; -+ -+ f = kzalloc(sizeof(*f), GFP_KERNEL); -+ if (f == NULL) -+ return -ENOMEM; -+ -+ f->segment = slot->segment; -+ f->bus = slot->bus; -+ f->devfn = PCI_DEVFN(slot->dev, func); -+ f->io_size = pdev_size_io(pdev); -+ -+ for (i = 0; i < PCI_NUM_BARS; i++) { -+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) -+ continue; -+ if (pci_resource_len(pdev, i) == 0) -+ continue; -+ -+ f->io_bar |= 1 << i; -+ f->resource[i] = pdev->resource[i]; -+ } -+ -+ if (f->io_bar) -+ slot->func[func] = f; -+ else -+ kfree(f); -+ return 0; -+} -+ -+/* -+ * This is tricky part. -+ * fake PCI resource assignment routines by setting flags to 0. -+ * PCI resource allocate routines think the resource should -+ * be allocated by checking flags. 0 means this resource isn't used. -+ * See pbus_size_io() and pdev_sort_resources(). -+ * -+ * After allocated resources, flags (IORESOURCE_IO) is exported -+ * to other part including user process. -+ * So we have to set flags to IORESOURCE_IO, but at the same time -+ * we must prevent those resources from reassigning when pci hot plug. -+ * To achieve that, set r->parent to dummy resource. 
-+ */ -+static void __devinit pci_iomul_disable_resource(struct resource *r) -+{ -+ /* don't allocate this resource */ -+ r->flags = 0; -+} -+ -+static void __devinit pci_iomul_reenable_resource( -+ struct resource *dummy_parent, struct resource *r) -+{ -+ int ret; -+ -+ dummy_parent->start = r->start; -+ dummy_parent->end = r->end; -+ dummy_parent->flags = r->flags; -+ dummy_parent->name = "PCI IOMUL dummy resource"; -+ -+ ret = request_resource(dummy_parent, r); -+ BUG_ON(ret); -+} -+ -+static void __devinit pci_iomul_fixup_ioresource(struct pci_dev *pdev, -+ struct pci_iomul_func *func, -+ int reassign, int dealloc) -+{ -+ uint8_t i; -+ struct resource *r; -+ -+ pr_info("PCI: deallocating io resource[%s]. io size 0x%lx\n", -+ pci_name(pdev), func->io_size); -+ for (i = 0; i < PCI_NUM_BARS; i++) { -+ r = &pdev->resource[i]; -+ if (!(func->io_bar & (1 << i))) -+ continue; -+ -+ if (reassign) { -+ r->end -= r->start; -+ r->start = 0; -+ pci_update_resource(pdev, i); -+ func->resource[i] = *r; -+ } -+ -+ if (dealloc) -+ /* don't allocate this resource */ -+ pci_iomul_disable_resource(r); -+ } -+ -+ /* parent PCI-PCI bridge */ -+ if (!reassign) -+ return; -+ pdev = pdev->bus->self; -+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST) -+ return; -+ pci_disable_bridge_io_window(pdev); -+ for (i = 0; i < PCI_NUM_RESOURCES; i++) { -+ r = &pdev->resource[i]; -+ if (!(r->flags & IORESOURCE_IO)) -+ continue; -+ -+ r->end -= r->start; -+ r->start = 0; -+ if (i < PCI_BRIDGE_RESOURCES) -+ pci_update_resource(pdev, i); -+ } -+} -+ -+static void __devinit __quirk_iomul_dealloc_ioresource( -+ struct pci_iomul_switch *sw, -+ struct pci_dev *pdev, struct pci_iomul_slot *slot) -+{ -+ struct pci_iomul_func *f; -+ struct pci_iomul_func *__f; -+ -+ if (pci_iomul_func_scan(pdev, slot, PCI_FUNC(pdev->devfn)) != 0) -+ return; -+ -+ f = slot->func[PCI_FUNC(pdev->devfn)]; -+ if (f == NULL) -+ return; -+ -+ __f = sw->func; -+ /* sw->io_base == 0 means that we are called at boot time. 
-+ * != 0 means that we are called by php after boot. */ -+ if (sw->io_base == 0 && -+ (__f == NULL || __f->io_size < f->io_size)) { -+ if (__f != NULL) { -+ struct pci_bus *__pbus; -+ struct pci_dev *__pdev; -+ -+ __pbus = pci_find_bus(__f->segment, __f->bus); -+ BUG_ON(__pbus == NULL); -+ __pdev = pci_get_slot(__pbus, __f->devfn); -+ BUG_ON(__pdev == NULL); -+ pci_iomul_fixup_ioresource(__pdev, __f, 0, 1); -+ pci_dev_put(__pdev); -+ } -+ -+ pci_iomul_fixup_ioresource(pdev, f, 1, 0); -+ sw->func = f; -+ } else { -+ pci_iomul_fixup_ioresource(pdev, f, 1, 1); -+ } -+} -+ -+static void __devinit quirk_iomul_dealloc_ioresource(struct pci_dev *pdev) -+{ -+ struct pci_iomul_switch *sw; -+ struct pci_iomul_slot *slot; -+ -+ if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) -+ return; -+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST) -+ return; /* PCI Host Bridge isn't a target device */ -+ if (!pci_is_iomul_dev_param(pdev)) -+ return; -+ -+ mutex_lock(&switch_list_lock); -+ sw = pci_iomul_find_switch_locked(pci_domain_nr(pdev->bus), -+ pci_dev_switch_busnr(pdev)); -+ if (sw == NULL) { -+ sw = pci_iomul_switch_alloc(pci_domain_nr(pdev->bus), -+ pci_dev_switch_busnr(pdev)); -+ if (sw == NULL) { -+ mutex_unlock(&switch_list_lock); -+ pr_warn("PCI: can't allocate memory" -+ "for sw of IO multiplexing %s", -+ pci_name(pdev)); -+ return; -+ } -+ pci_iomul_switch_add_locked(sw); -+ } -+ pci_iomul_switch_get(sw); -+ mutex_unlock(&switch_list_lock); -+ -+ mutex_lock(&sw->lock); -+ slot = pci_iomul_find_slot_locked(sw, pdev->bus->number, -+ PCI_SLOT(pdev->devfn)); -+ if (slot == NULL) { -+ slot = pci_iomul_slot_alloc(pdev); -+ if (slot == NULL) { -+ mutex_unlock(&sw->lock); -+ pci_iomul_switch_put(sw); -+ pr_warn("PCI: can't allocate memory " -+ "for IO multiplexing %s", pci_name(pdev)); -+ return; -+ } -+ pci_iomul_slot_add_locked(sw, slot); -+ } -+ -+ pr_info("PCI: disable device and release io resource[%s].\n", -+ pci_name(pdev)); -+ pci_disable_device(pdev); -+ -+ 
__quirk_iomul_dealloc_ioresource(sw, pdev, slot); -+ -+ mutex_unlock(&sw->lock); -+ pci_iomul_switch_put(sw); -+} -+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, -+ quirk_iomul_dealloc_ioresource); -+ -+static void __devinit pci_iomul_read_bridge_io(struct pci_iomul_switch *sw) -+{ -+ struct pci_iomul_func *f = sw->func; -+ -+ struct pci_bus *pbus; -+ struct pci_dev *pdev; -+ struct pci_dev *bridge; -+ -+ uint16_t l; -+ uint16_t base_upper16; -+ uint16_t limit_upper16; -+ uint32_t io_base; -+ uint32_t io_limit; -+ -+ pbus = pci_find_bus(f->segment, f->bus); -+ BUG_ON(pbus == NULL); -+ -+ pdev = pci_get_slot(pbus, f->devfn); -+ BUG_ON(pdev == NULL); -+ -+ bridge = pdev->bus->self; -+ pci_read_config_word(bridge, PCI_IO_BASE, &l); -+ pci_read_config_word(bridge, PCI_IO_BASE_UPPER16, &base_upper16); -+ pci_read_config_word(bridge, PCI_IO_LIMIT_UPPER16, &limit_upper16); -+ -+ io_base = (l & 0xf0) | ((uint32_t)base_upper16 << 8); -+ io_base <<= 8; -+ io_limit = (l >> 8) | ((uint32_t)limit_upper16 << 8); -+ io_limit <<= 8; -+ io_limit |= 0xfff; -+ -+ sw->io_base = io_base; -+ sw->io_limit = io_limit; -+ -+ pci_dev_put(pdev); -+ pr_info("PCI: bridge %s base 0x%x limit 0x%x\n", -+ pci_name(bridge), sw->io_base, sw->io_limit); -+} -+ -+static void __devinit pci_iomul_setup_brige(struct pci_dev *bridge, -+ uint32_t io_base, -+ uint32_t io_limit) -+{ -+ uint16_t cmd; -+ -+ if ((bridge->class >> 8) == PCI_CLASS_BRIDGE_HOST) -+ return; -+ -+ pci_iomul_set_bridge_io_window(bridge, io_base, io_limit); -+ -+ /* and forcibly enables IO */ -+ pci_read_config_word(bridge, PCI_COMMAND, &cmd); -+ if (!(cmd & PCI_COMMAND_IO)) { -+ cmd |= PCI_COMMAND_IO; -+ pr_info("PCI: forcibly enabling IO %s\n", pci_name(bridge)); -+ pci_write_config_word(bridge, PCI_COMMAND, cmd); -+ } -+} -+ -+struct __bar { -+ unsigned long size; -+ uint8_t bar; -+}; -+ -+/* decending order */ -+static int __devinit pci_iomul_bar_cmp(const void *lhs__, const void *rhs__) -+{ -+ const struct __bar *lhs = (struct 
__bar*)lhs__; -+ const struct __bar *rhs = (struct __bar*)rhs__; -+ return - (lhs->size - rhs->size); -+} -+ -+static void __devinit pci_iomul_setup_dev(struct pci_dev *pdev, -+ struct pci_iomul_func *f, -+ uint32_t io_base) -+{ -+ struct __bar bars[PCI_NUM_BARS]; -+ int i; -+ uint8_t num_bars = 0; -+ struct resource *r; -+ -+ pr_info("PCI: Forcibly assign IO %s from 0x%x\n", -+ pci_name(pdev), io_base); -+ -+ for (i = 0; i < PCI_NUM_BARS; i++) { -+ if (!(f->io_bar & (1 << i))) -+ continue; -+ -+ r = &f->resource[i]; -+ bars[num_bars].size = pci_iomul_len(r); -+ bars[num_bars].bar = i; -+ -+ num_bars++; -+ } -+ -+ sort(bars, num_bars, sizeof(bars[0]), &pci_iomul_bar_cmp, NULL); -+ -+ for (i = 0; i < num_bars; i++) { -+ struct resource *fr = &f->resource[bars[i].bar]; -+ r = &pdev->resource[bars[i].bar]; -+ -+ BUG_ON(r->start != 0); -+ r->start += io_base; -+ r->end += io_base; -+ -+ fr->start = r->start; -+ fr->end = r->end; -+ -+ /* pci_update_resource() check flags. */ -+ r->flags = fr->flags; -+ pci_update_resource(pdev, bars[i].bar); -+ pci_iomul_reenable_resource(&f->dummy_parent, r); -+ -+ io_base += bars[i].size; -+ } -+} -+ -+static void __devinit pci_iomul_release_io_resource( -+ struct pci_dev *pdev, struct pci_iomul_switch *sw, -+ struct pci_iomul_slot *slot, struct pci_iomul_func *f) -+{ -+ int i; -+ struct resource *r; -+ -+ for (i = 0; i < PCI_NUM_BARS; i++) { -+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO && -+ pdev->resource[i].parent != NULL) { -+ r = &pdev->resource[i]; -+ f->resource[i] = *r; -+ release_resource(r); -+ pci_iomul_reenable_resource(&f->dummy_parent, r); -+ } -+ } -+ -+ /* parent PCI-PCI bridge */ -+ pdev = pdev->bus->self; -+ if ((pdev->class >> 8) != PCI_CLASS_BRIDGE_HOST) { -+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { -+ struct resource *parent = pdev->resource[i].parent; -+ -+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO && -+ parent != NULL) { -+ r = &pdev->resource[i]; -+ -+ sw->io_resource.flags = 
r->flags; -+ sw->io_resource.start = sw->io_base; -+ sw->io_resource.end = sw->io_limit; -+ sw->io_resource.name = "PCI IO Multiplexer"; -+ -+ release_resource(r); -+ pci_iomul_reenable_resource( -+ &slot->dummy_parent[i - PCI_BRIDGE_RESOURCES], r); -+ -+ if (request_resource(parent, -+ &sw->io_resource)) -+ pr_err("PCI IOMul: can't allocate " -+ "resource. [0x%x, 0x%x]", -+ sw->io_base, sw->io_limit); -+ } -+ } -+ } -+} -+ -+static void __devinit quirk_iomul_reassign_ioresource(struct pci_dev *pdev) -+{ -+ struct pci_iomul_switch *sw; -+ struct pci_iomul_slot *slot; -+ struct pci_iomul_func *sf; -+ struct pci_iomul_func *f; -+ -+ pci_iomul_get_lock_switch(pdev, &sw, &slot); -+ if (sw == NULL || slot == NULL) -+ return; -+ -+ if (sw->io_base == 0) -+ pci_iomul_read_bridge_io(sw); -+ if (!pci_iomul_switch_io_allocated(sw)) -+ goto out; -+ -+ sf = sw->func; -+ f = slot->func[PCI_FUNC(pdev->devfn)]; -+ if (f == NULL) -+ /* (sf == NULL || f == NULL) case -+ * can happen when all the specified devices -+ * don't have io space -+ */ -+ goto out; -+ -+ if (sf != NULL && -+ (pci_domain_nr(pdev->bus) != sf->segment || -+ pdev->bus->number != sf->bus || -+ PCI_SLOT(pdev->devfn) != PCI_SLOT(sf->devfn)) && -+ PCI_FUNC(pdev->devfn) == 0) { -+ pci_iomul_setup_brige(pdev->bus->self, -+ sw->io_base, sw->io_limit); -+ } -+ -+ BUG_ON(f->io_size > sw->io_limit - sw->io_base + 1); -+ if (/* f == sf */ sf != NULL && -+ pci_domain_nr(pdev->bus) == sf->segment && -+ pdev->bus->number == sf->bus && -+ pdev->devfn == sf->devfn) -+ pci_iomul_release_io_resource(pdev, sw, slot, f); -+ else -+ pci_iomul_setup_dev(pdev, f, sw->io_base); -+ -+out: -+ mutex_unlock(&sw->lock); -+ pci_iomul_switch_put(sw); -+} -+ -+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, -+ quirk_iomul_reassign_ioresource); -+ -+/*****************************************************************************/ -+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) -+static int __devinit 
__pci_iomul_notifier_del_device(struct pci_dev *pdev) -+{ -+ struct pci_iomul_switch *sw; -+ struct pci_iomul_slot *slot; -+ int i; -+ -+ pci_iomul_get_lock_switch(pdev, &sw, &slot); -+ if (sw == NULL || slot == NULL) -+ return 0; -+ -+ if (sw->func == slot->func[PCI_FUNC(pdev->devfn)]) -+ sw->func = NULL; -+ kfree(slot->func[PCI_FUNC(pdev->devfn)]); -+ slot->func[PCI_FUNC(pdev->devfn)] = NULL; -+ for (i = 0; i < PCI_NUM_FUNC; i++) { -+ if (slot->func[i] != NULL) -+ goto out; -+ } -+ -+ pci_iomul_slot_del_locked(sw, slot); -+ pci_iomul_slot_put(slot); -+ -+out: -+ mutex_unlock(&sw->lock); -+ pci_iomul_switch_put(sw); -+ return 0; -+} -+ -+static int __devinit __pci_iomul_notifier_del_switch(struct pci_dev *pdev) -+{ -+ struct pci_iomul_switch *sw; -+ -+ mutex_lock(&switch_list_lock); -+ sw = pci_iomul_find_switch_locked(pci_domain_nr(pdev->bus), -+ pdev->bus->number); -+ if (sw == NULL) -+ goto out; -+ -+ pci_iomul_switch_del_locked(sw); -+ -+ mutex_lock(&sw->lock); -+ if (sw->io_resource.parent) -+ release_resource(&sw->io_resource); -+ sw->io_base = 0; /* to tell this switch is removed */ -+ sw->io_limit = 0; -+ BUG_ON(!list_empty(&sw->slots)); -+ mutex_unlock(&sw->lock); -+ -+out: -+ mutex_unlock(&switch_list_lock); -+ pci_iomul_switch_put(sw); -+ return 0; -+} -+ -+static int __devinit pci_iomul_notifier_del_device(struct pci_dev *pdev) -+{ -+ int ret; -+ switch (pdev->hdr_type) { -+ case PCI_HEADER_TYPE_NORMAL: -+ ret = __pci_iomul_notifier_del_device(pdev); -+ break; -+ case PCI_HEADER_TYPE_BRIDGE: -+ ret = __pci_iomul_notifier_del_switch(pdev); -+ break; -+ default: -+ pr_warn("PCI IOMUL: device %s has unknown " -+ "header type %02x, ignoring.\n", -+ pci_name(pdev), pdev->hdr_type); -+ ret = -EIO; -+ break; -+ } -+ return ret; -+} -+ -+static int __devinit pci_iomul_notifier(struct notifier_block *nb, -+ unsigned long action, void *data) -+{ -+ struct device *dev = data; -+ struct pci_dev *pdev = to_pci_dev(dev); -+ -+ switch (action) { -+ case 
BUS_NOTIFY_ADD_DEVICE: -+ quirk_iomul_reassign_ioresource(pdev); -+ break; -+ case BUS_NOTIFY_DEL_DEVICE: -+ return pci_iomul_notifier_del_device(pdev); -+ default: -+ /* nothing */ -+ break; -+ } -+ -+ return 0; -+} -+ -+static struct notifier_block __devinitdata pci_iomul_nb = { -+ .notifier_call = pci_iomul_notifier, -+}; -+ -+static int __init pci_iomul_hotplug_init(void) -+{ -+ bus_register_notifier(&pci_bus_type, &pci_iomul_nb); -+ return 0; -+} -+late_initcall(pci_iomul_hotplug_init); -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/drivers/pci/iomulti.h 2011-01-31 14:31:28.000000000 +0100 -@@ -0,0 +1,122 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+ * Copyright (c) 2009 Isaku Yamahata -+ * VA Linux Systems Japan K.K. -+ */ -+ -+#include -+#include -+#include -+ -+#define PCI_NUM_BARS 6 -+#define PCI_NUM_FUNC 8 -+ -+struct pci_iomul_func { -+ int segment; -+ uint8_t bus; -+ uint8_t devfn; -+ -+ /* only start and end are used */ -+ unsigned long io_size; -+ uint8_t io_bar; -+ struct resource resource[PCI_NUM_BARS]; -+ struct resource dummy_parent; -+}; -+ -+struct pci_iomul_switch { -+ struct list_head list; /* bus_list_lock protects */ -+ -+ /* -+ * This lock the following entry and following -+ * pci_iomul_slot/pci_iomul_func. 
-+ */ -+ struct mutex lock; -+ struct kref kref; -+ -+ struct resource io_resource; -+ struct resource *io_region; -+ unsigned int count; -+ struct pci_dev *current_pdev; -+ -+ int segment; -+ uint8_t bus; -+ -+ uint32_t io_base; -+ uint32_t io_limit; -+ -+ /* func which has the largeset io size*/ -+ struct pci_iomul_func *func; -+ -+ struct list_head slots; -+}; -+ -+static inline void pci_iomul_switch_get(struct pci_iomul_switch *sw) -+{ -+ kref_get(&sw->kref); -+} -+ -+static inline void pci_iomul_switch_release(struct kref *kref) -+{ -+ struct pci_iomul_switch *sw = container_of(kref, -+ struct pci_iomul_switch, -+ kref); -+ kfree(sw); -+} -+ -+static inline void pci_iomul_switch_put(struct pci_iomul_switch *sw) -+{ -+ kref_put(&sw->kref, &pci_iomul_switch_release); -+} -+ -+struct pci_iomul_slot { -+ struct list_head sibling; -+ struct kref kref; -+ /* -+ * busnr -+ * when pcie, the primary busnr of the PCI-PCI bridge on which -+ * this devices sits. -+ */ -+ uint8_t switch_busnr; -+ struct resource dummy_parent[PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES]; -+ -+ /* device */ -+ int segment; -+ uint8_t bus; -+ uint8_t dev; -+ -+ struct pci_iomul_func *func[PCI_NUM_FUNC]; -+}; -+ -+static inline void pci_iomul_slot_get(struct pci_iomul_slot *slot) -+{ -+ kref_get(&slot->kref); -+} -+ -+static inline void pci_iomul_slot_release(struct kref *kref) -+{ -+ struct pci_iomul_slot *slot = container_of(kref, struct pci_iomul_slot, -+ kref); -+ kfree(slot); -+} -+ -+static inline void pci_iomul_slot_put(struct pci_iomul_slot *slot) -+{ -+ kref_put(&slot->kref, &pci_iomul_slot_release); -+} -+ -+int pci_iomul_switch_io_allocated(const struct pci_iomul_switch *); -+void pci_iomul_get_lock_switch(struct pci_dev *, struct pci_iomul_switch **, -+ struct pci_iomul_slot **); ---- head-2011-03-11.orig/drivers/pci/pci.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/pci/pci.c 2011-01-31 14:31:28.000000000 +0100 -@@ -2984,6 +2984,13 @@ resource_size_t 
pci_specified_resource_a - */ - int pci_is_reassigndev(struct pci_dev *dev) - { -+#ifdef CONFIG_PCI_GUESTDEV -+ int result; -+ -+ result = pci_is_guestdev_to_reassign(dev); -+ if (result) -+ return result; -+#endif /* CONFIG_PCI_GUESTDEV */ - return (pci_specified_resource_alignment(dev) != 0); - } - ---- head-2011-03-11.orig/drivers/pci/pci.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/pci/pci.h 2011-01-31 14:31:28.000000000 +0100 -@@ -350,4 +350,11 @@ static inline int pci_dev_specific_reset - } - #endif - -+#ifdef CONFIG_PCI_GUESTDEV -+extern int pci_is_guestdev_to_reassign(struct pci_dev *dev); -+extern int pci_is_iomuldev(struct pci_dev *dev); -+#else -+#define pci_is_iomuldev(dev) 0 -+#endif -+ - #endif /* DRIVERS_PCI_H */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/drivers/pci/pci-iomul.c 2011-01-31 14:31:28.000000000 +0100 -@@ -0,0 +1,437 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+ * Copyright (c) 2009 Isaku Yamahata -+ * VA Linux Systems Japan K.K. 
-+ */ -+ -+#include "iomulti.h" -+#include -+#include -+#include -+#include -+#include -+ -+struct pci_iomul_data { -+ struct mutex lock; -+ -+ struct pci_dev *pdev; -+ struct pci_iomul_switch *sw; -+ struct pci_iomul_slot *slot; /* slot::kref */ -+ struct pci_iomul_func **func; /* when dereferencing, -+ sw->lock is necessary */ -+}; -+ -+static int pci_iomul_func_ioport(struct pci_iomul_func *func, -+ uint8_t bar, uint64_t offset, int *port) -+{ -+ if (!(func->io_bar & (1 << bar))) -+ return -EINVAL; -+ -+ *port = func->resource[bar].start + offset; -+ if (*port < func->resource[bar].start || -+ *port > func->resource[bar].end) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static inline int pci_iomul_valid(struct pci_iomul_data *iomul) -+{ -+ BUG_ON(!mutex_is_locked(&iomul->lock)); -+ BUG_ON(!mutex_is_locked(&iomul->sw->lock)); -+ return pci_iomul_switch_io_allocated(iomul->sw) && -+ *iomul->func != NULL; -+} -+ -+static void __pci_iomul_enable_io(struct pci_dev *pdev) -+{ -+ uint16_t cmd; -+ -+ pci_dev_get(pdev); -+ pci_read_config_word(pdev, PCI_COMMAND, &cmd); -+ cmd |= PCI_COMMAND_IO; -+ pci_write_config_word(pdev, PCI_COMMAND, cmd); -+} -+ -+static void __pci_iomul_disable_io(struct pci_iomul_data *iomul, -+ struct pci_dev *pdev) -+{ -+ uint16_t cmd; -+ -+ if (!pci_iomul_valid(iomul)) -+ return; -+ -+ pci_read_config_word(pdev, PCI_COMMAND, &cmd); -+ cmd &= ~PCI_COMMAND_IO; -+ pci_write_config_word(pdev, PCI_COMMAND, cmd); -+ pci_dev_put(pdev); -+} -+ -+static int pci_iomul_open(struct inode *inode, struct file *filp) -+{ -+ struct pci_iomul_data *iomul; -+ iomul = kmalloc(sizeof(*iomul), GFP_KERNEL); -+ if (iomul == NULL) -+ return -ENOMEM; -+ -+ mutex_init(&iomul->lock); -+ iomul->pdev = NULL; -+ iomul->sw = NULL; -+ iomul->slot = NULL; -+ iomul->func = NULL; -+ filp->private_data = (void*)iomul; -+ -+ return nonseekable_open(inode, filp); -+} -+ -+static int pci_iomul_release(struct inode *inode, struct file *filp) -+{ -+ struct pci_iomul_data *iomul = -+ 
(struct pci_iomul_data*)filp->private_data; -+ struct pci_iomul_switch *sw; -+ struct pci_iomul_slot *slot = NULL; -+ -+ mutex_lock(&iomul->lock); -+ sw = iomul->sw; -+ slot = iomul->slot; -+ if (iomul->pdev != NULL) { -+ if (sw != NULL) { -+ mutex_lock(&sw->lock); -+ if (sw->current_pdev == iomul->pdev) { -+ __pci_iomul_disable_io(iomul, -+ sw->current_pdev); -+ sw->current_pdev = NULL; -+ } -+ sw->count--; -+ if (sw->count == 0) { -+ release_region(sw->io_region->start, sw->io_region->end - sw->io_region->start + 1); -+ sw->io_region = NULL; -+ } -+ mutex_unlock(&sw->lock); -+ } -+ pci_dev_put(iomul->pdev); -+ } -+ mutex_unlock(&iomul->lock); -+ -+ if (slot != NULL) -+ pci_iomul_slot_put(slot); -+ if (sw != NULL) -+ pci_iomul_switch_put(sw); -+ kfree(iomul); -+ return 0; -+} -+ -+static long pci_iomul_setup(struct pci_iomul_data *iomul, -+ struct pci_iomul_setup __user *arg) -+{ -+ long error = 0; -+ struct pci_iomul_setup setup; -+ struct pci_iomul_switch *sw = NULL; -+ struct pci_iomul_slot *slot; -+ struct pci_bus *pbus; -+ struct pci_dev *pdev; -+ -+ if (copy_from_user(&setup, arg, sizeof(setup))) -+ return -EFAULT; -+ -+ pbus = pci_find_bus(setup.segment, setup.bus); -+ if (pbus == NULL) -+ return -ENODEV; -+ pdev = pci_get_slot(pbus, setup.dev); -+ if (pdev == NULL) -+ return -ENODEV; -+ -+ mutex_lock(&iomul->lock); -+ if (iomul->sw != NULL) { -+ error = -EBUSY; -+ goto out0; -+ } -+ -+ pci_iomul_get_lock_switch(pdev, &sw, &slot); -+ if (sw == NULL || slot == NULL) { -+ error = -ENODEV; -+ goto out0; -+ } -+ if (!pci_iomul_switch_io_allocated(sw)) { -+ error = -ENODEV; -+ goto out; -+ } -+ -+ if (slot->func[setup.func] == NULL) { -+ error = -ENODEV; -+ goto out; -+ } -+ -+ if (sw->count == 0) { -+ BUG_ON(sw->io_region != NULL); -+ sw->io_region = -+ request_region(sw->io_base, -+ sw->io_limit - sw->io_base + 1, -+ "PCI IO Multiplexer driver"); -+ if (sw->io_region == NULL) { -+ mutex_unlock(&sw->lock); -+ error = -EBUSY; -+ goto out; -+ } -+ } -+ 
sw->count++; -+ pci_iomul_slot_get(slot); -+ -+ iomul->pdev = pdev; -+ iomul->sw = sw; -+ iomul->slot = slot; -+ iomul->func = &slot->func[setup.func]; -+ -+out: -+ mutex_unlock(&sw->lock); -+out0: -+ mutex_unlock(&iomul->lock); -+ if (error != 0) { -+ if (sw != NULL) -+ pci_iomul_switch_put(sw); -+ pci_dev_put(pdev); -+ } -+ return error; -+} -+ -+static int pci_iomul_lock(struct pci_iomul_data *iomul, -+ struct pci_iomul_switch **sw, -+ struct pci_iomul_func **func) -+{ -+ mutex_lock(&iomul->lock); -+ *sw = iomul->sw; -+ if (*sw == NULL) { -+ mutex_unlock(&iomul->lock); -+ return -ENODEV; -+ } -+ mutex_lock(&(*sw)->lock); -+ if (!pci_iomul_valid(iomul)) { -+ mutex_unlock(&(*sw)->lock); -+ mutex_unlock(&iomul->lock); -+ return -ENODEV; -+ } -+ *func = *iomul->func; -+ -+ return 0; -+} -+ -+static long pci_iomul_disable_io(struct pci_iomul_data *iomul) -+{ -+ long error = 0; -+ struct pci_iomul_switch *sw; -+ struct pci_iomul_func *dummy_func; -+ struct pci_dev *pdev; -+ -+ if (pci_iomul_lock(iomul, &sw, &dummy_func) < 0) -+ return -ENODEV; -+ -+ pdev = iomul->pdev; -+ if (pdev == NULL) -+ error = -ENODEV; -+ -+ if (pdev != NULL && sw->current_pdev == pdev) { -+ __pci_iomul_disable_io(iomul, pdev); -+ sw->current_pdev = NULL; -+ } -+ -+ mutex_unlock(&sw->lock); -+ mutex_unlock(&iomul->lock); -+ return error; -+} -+ -+static void pci_iomul_switch_to( -+ struct pci_iomul_data *iomul, struct pci_iomul_switch *sw, -+ struct pci_dev *next_pdev) -+{ -+ if (sw->current_pdev == next_pdev) -+ /* nothing to do */ -+ return; -+ -+ if (sw->current_pdev != NULL) -+ __pci_iomul_disable_io(iomul, sw->current_pdev); -+ -+ __pci_iomul_enable_io(next_pdev); -+ sw->current_pdev = next_pdev; -+} -+ -+static long pci_iomul_in(struct pci_iomul_data *iomul, -+ struct pci_iomul_in __user *arg) -+{ -+ struct pci_iomul_in in; -+ struct pci_iomul_switch *sw; -+ struct pci_iomul_func *func; -+ -+ long error = 0; -+ int port; -+ uint32_t value = 0; -+ -+ if (copy_from_user(&in, arg, 
sizeof(in))) -+ return -EFAULT; -+ -+ if (pci_iomul_lock(iomul, &sw, &func) < 0) -+ return -ENODEV; -+ -+ error = pci_iomul_func_ioport(func, in.bar, in.offset, &port); -+ if (error) -+ goto out; -+ -+ pci_iomul_switch_to(iomul, sw, iomul->pdev); -+ switch (in.size) { -+ case 4: -+ value = inl(port); -+ break; -+ case 2: -+ value = inw(port); -+ break; -+ case 1: -+ value = inb(port); -+ break; -+ default: -+ error = -EINVAL; -+ break; -+ } -+ -+out: -+ mutex_unlock(&sw->lock); -+ mutex_unlock(&iomul->lock); -+ -+ if (error == 0 && put_user(value, &arg->value)) -+ return -EFAULT; -+ return error; -+} -+ -+static long pci_iomul_out(struct pci_iomul_data *iomul, -+ struct pci_iomul_out __user *arg) -+{ -+ struct pci_iomul_in out; -+ struct pci_iomul_switch *sw; -+ struct pci_iomul_func *func; -+ -+ long error = 0; -+ int port; -+ -+ if (copy_from_user(&out, arg, sizeof(out))) -+ return -EFAULT; -+ -+ if (pci_iomul_lock(iomul, &sw, &func) < 0) -+ return -ENODEV; -+ -+ error = pci_iomul_func_ioport(func, out.bar, out.offset, &port); -+ if (error) -+ goto out; -+ -+ pci_iomul_switch_to(iomul, sw, iomul->pdev); -+ switch (out.size) { -+ case 4: -+ outl(out.value, port); -+ break; -+ case 2: -+ outw(out.value, port); -+ break; -+ case 1: -+ outb(out.value, port); -+ break; -+ default: -+ error = -EINVAL; -+ break; -+ } -+ -+out: -+ mutex_unlock(&sw->lock); -+ mutex_unlock(&iomul->lock); -+ return error; -+} -+ -+static long pci_iomul_ioctl(struct file *filp, -+ unsigned int cmd, unsigned long arg) -+{ -+ long error; -+ struct pci_iomul_data *iomul = -+ (struct pci_iomul_data*)filp->private_data; -+ -+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) -+ return -EPERM; -+ -+ switch (cmd) { -+ case PCI_IOMUL_SETUP: -+ error = pci_iomul_setup(iomul, -+ (struct pci_iomul_setup __user *)arg); -+ break; -+ case PCI_IOMUL_DISABLE_IO: -+ error = pci_iomul_disable_io(iomul); -+ break; -+ case PCI_IOMUL_IN: -+ error = pci_iomul_in(iomul, (struct pci_iomul_in __user *)arg); -+ 
break; -+ case PCI_IOMUL_OUT: -+ error = pci_iomul_out(iomul, -+ (struct pci_iomul_out __user *)arg); -+ break; -+ default: -+ error = -ENOSYS; -+ break; -+ } -+ -+ return error; -+} -+ -+static const struct file_operations pci_iomul_fops = { -+ .owner = THIS_MODULE, -+ -+ .open = pci_iomul_open, -+ .release = pci_iomul_release, -+ -+ .unlocked_ioctl = pci_iomul_ioctl, -+}; -+ -+static struct miscdevice pci_iomul_miscdev = { -+ .minor = MISC_DYNAMIC_MINOR, -+ .name = "pci_iomul", -+ .nodename = "xen/pci_iomul", -+ .fops = &pci_iomul_fops, -+}; -+ -+static int __init pci_iomul_init(void) -+{ -+ int error; -+ -+ error = misc_register(&pci_iomul_miscdev); -+ if (error != 0) { -+ pr_alert("Couldn't register /dev/xen/pci_iomul"); -+ return error; -+ } -+ pr_info("PCI IO multiplexer device installed\n"); -+ return 0; -+} -+ -+#ifdef MODULE -+static void __exit pci_iomul_cleanup(void) -+{ -+ misc_deregister(&pci_iomul_miscdev); -+} -+module_exit(pci_iomul_cleanup); -+#endif -+ -+/* -+ * This must be called after pci fixup final which is called by -+ * device_initcall(pci_init). 
-+ */ -+late_initcall(pci_iomul_init); -+ -+MODULE_ALIAS("devname:xen/pci_iomul"); -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Isaku Yamahata "); -+MODULE_DESCRIPTION("PCI IO space multiplexing driver"); ---- head-2011-03-11.orig/include/linux/acpi.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/linux/acpi.h 2011-01-31 14:31:28.000000000 +0100 -@@ -248,6 +248,8 @@ int acpi_check_region(resource_size_t st - - int acpi_resources_are_enforced(void); - -+int acpi_pci_get_root_seg_bbn(char *hid, char *uid, int *seg, int *bbn); -+ - #ifdef CONFIG_PM_SLEEP - void __init acpi_no_s4_hw_signature(void); - void __init acpi_old_suspend_ordering(void); ---- head-2011-03-11.orig/include/linux/pci.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/linux/pci.h 2011-01-31 14:31:28.000000000 +0100 -@@ -1538,5 +1538,11 @@ int pci_vpd_find_tag(const u8 *buf, unsi - int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, - unsigned int len, const char *kw); - -+#ifdef CONFIG_PCI_GUESTDEV -+int pci_is_guestdev(struct pci_dev *dev); -+#else -+#define pci_is_guestdev(dev) 0 -+#endif -+ - #endif /* __KERNEL__ */ - #endif /* LINUX_PCI_H */ ---- head-2011-03-11.orig/include/xen/Kbuild 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/xen/Kbuild 2011-01-31 14:31:28.000000000 +0100 -@@ -1,2 +1,3 @@ - header-y += evtchn.h - header-y += privcmd.h -+header-y += public/ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/include/xen/public/Kbuild 2011-01-31 14:31:28.000000000 +0100 -@@ -0,0 +1 @@ -+header-y += iomulti.h ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/include/xen/public/iomulti.h 2011-01-31 14:31:28.000000000 +0100 -@@ -0,0 +1,50 @@ -+#ifndef __LINUX_PUBLIC_IOMULTI_H__ -+#define __LINUX_PUBLIC_IOMULTI_H__ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software 
Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+ * Copyright (c) 2009 Isaku Yamahata -+ * VA Linux Systems Japan K.K. -+ */ -+ -+struct pci_iomul_setup { -+ uint16_t segment; -+ uint8_t bus; -+ uint8_t dev; -+ uint8_t func; -+}; -+ -+struct pci_iomul_in { -+ uint8_t bar; -+ uint64_t offset; -+ -+ uint8_t size; -+ uint32_t value; -+}; -+ -+struct pci_iomul_out { -+ uint8_t bar; -+ uint64_t offset; -+ -+ uint8_t size; -+ uint32_t value; -+}; -+ -+#define PCI_IOMUL_SETUP _IOW ('P', 0, struct pci_iomul_setup) -+#define PCI_IOMUL_DISABLE_IO _IO ('P', 1) -+#define PCI_IOMUL_IN _IOWR('P', 2, struct pci_iomul_in) -+#define PCI_IOMUL_OUT _IOW ('P', 3, struct pci_iomul_out) -+ -+#endif /* __LINUX_PUBLIC_IOMULTI_H__ */ diff --git a/patches.xen/pci-reserve b/patches.xen/pci-reserve deleted file mode 100644 index a4cae76..0000000 --- a/patches.xen/pci-reserve +++ /dev/null @@ -1,236 +0,0 @@ -Subject: linux/pci: reserve io/memory space for bridge -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1010:10eae161c153) -Patch-mainline: n/a - -reserve io/memory space for bridge which will be used later -by PCI hotplug. - -Signed-off-by: Isaku Yamahata -Acked-by: jbeulich@novell.com - ---- head-2011-03-11.orig/Documentation/kernel-parameters.txt 2011-03-11 10:49:08.000000000 +0100 -+++ head-2011-03-11/Documentation/kernel-parameters.txt 2011-03-11 10:49:17.000000000 +0100 -@@ -2010,6 +2010,13 @@ bytes respectively. 
Such letter suffixes - off: Turn ECRC off - on: Turn ECRC on. - -+ pci_reserve= [PCI] -+ Format: [[+IO][+MEM]][,...] -+ Format of sbdf: [:]:. -+ Specifies the least reserved io size or memory size -+ which is assigned to PCI bridge even when no child -+ pci device exists. This is useful with PCI hotplug. -+ - pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power - Management. - off Disable ASPM. ---- head-2011-03-11.orig/drivers/pci/Kconfig 2011-01-31 14:31:27.000000000 +0100 -+++ head-2011-03-11/drivers/pci/Kconfig 2011-01-31 14:32:40.000000000 +0100 -@@ -45,6 +45,13 @@ config PCI_IOMULTI - help - Say Y here if you need io multiplexing. - -+config PCI_RESERVE -+ bool "PCI IO/MEMORY space reserve" -+ depends on PCI && XEN_PRIVILEGED_GUEST -+ default y -+ help -+ Say Y here if you need PCI IO/MEMORY space reserve -+ - config PCI_STUB - tristate "PCI Stub driver" - depends on PCI ---- head-2011-03-11.orig/drivers/pci/Makefile 2011-01-31 14:31:28.000000000 +0100 -+++ head-2011-03-11/drivers/pci/Makefile 2011-01-31 14:32:40.000000000 +0100 -@@ -11,6 +11,7 @@ obj-$(CONFIG_PCI_GUESTDEV) += guestdev.o - obj-$(CONFIG_PCI_IOMULTI) += pci-iomul.o - iomul-$(CONFIG_PCI_IOMULTI) := iomulti.o - obj-y += $(iomul-y) $(iomul-m) -+obj-$(CONFIG_PCI_RESERVE) += reserve.o - - obj-$(CONFIG_PCI_QUIRKS) += quirks.o - ---- head-2011-03-11.orig/drivers/pci/pci.h 2011-01-31 14:31:28.000000000 +0100 -+++ head-2011-03-11/drivers/pci/pci.h 2011-01-31 14:32:40.000000000 +0100 -@@ -357,4 +357,19 @@ extern int pci_is_iomuldev(struct pci_de - #define pci_is_iomuldev(dev) 0 - #endif - -+#ifdef CONFIG_PCI_RESERVE -+unsigned long pci_reserve_size_io(struct pci_bus *bus); -+unsigned long pci_reserve_size_mem(struct pci_bus *bus); -+#else -+static inline unsigned long pci_reserve_size_io(struct pci_bus *bus) -+{ -+ return 0; -+} -+ -+static inline unsigned long pci_reserve_size_mem(struct pci_bus *bus) -+{ -+ return 0; -+} -+#endif /* CONFIG_PCI_RESERVE */ -+ - #endif /* DRIVERS_PCI_H */ 
---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/drivers/pci/reserve.c 2011-01-31 14:32:40.000000000 +0100 -@@ -0,0 +1,137 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -+ * -+ * Copyright (c) 2009 Isaku Yamahata -+ * VA Linux Systems Japan K.K. -+ */ -+ -+#include -+#include -+ -+#include -+ -+static char pci_reserve_param[COMMAND_LINE_SIZE]; -+ -+/* pci_reserve= [PCI] -+ * Format: [[+IO][+MEM]][,...] -+ * Format of sbdf: [:]:. 
-+ */ -+static int pci_reserve_parse_size(const char *str, -+ unsigned long *io_size, -+ unsigned long *mem_size) -+{ -+ if (sscanf(str, "io%lx", io_size) == 1 || -+ sscanf(str, "IO%lx", io_size) == 1) -+ return 0; -+ -+ if (sscanf(str, "mem%lx", mem_size) == 1 || -+ sscanf(str, "MEM%lx", mem_size) == 1) -+ return 0; -+ -+ return -EINVAL; -+} -+ -+static int pci_reserve_parse_one(const char *str, -+ int *seg, int *bus, int *dev, int *func, -+ unsigned long *io_size, -+ unsigned long *mem_size) -+{ -+ char *p; -+ -+ *io_size = 0; -+ *mem_size = 0; -+ -+ if (sscanf(str, "%x:%x:%x.%x", seg, bus, dev, func) != 4) { -+ *seg = 0; -+ if (sscanf(str, "%x:%x.%x", bus, dev, func) != 3) { -+ return -EINVAL; -+ } -+ } -+ -+ p = strchr(str, '+'); -+ if (p == NULL) -+ return -EINVAL; -+ if (pci_reserve_parse_size(++p, io_size, mem_size)) -+ return -EINVAL; -+ -+ p = strchr(p, '+'); -+ return p ? pci_reserve_parse_size(p + 1, io_size, mem_size) : 0; -+} -+ -+static unsigned long pci_reserve_size(struct pci_bus *pbus, int flags) -+{ -+ char *sp; -+ char *ep; -+ -+ int seg; -+ int bus; -+ int dev; -+ int func; -+ -+ unsigned long io_size; -+ unsigned long mem_size; -+ -+ sp = pci_reserve_param; -+ -+ do { -+ ep = strchr(sp, ','); -+ if (ep) -+ *ep = '\0'; /* chomp */ -+ -+ if (pci_reserve_parse_one(sp, &seg, &bus, &dev, &func, -+ &io_size, &mem_size) == 0) { -+ if (pci_domain_nr(pbus) == seg && -+ pbus->number == bus && -+ PCI_SLOT(pbus->self->devfn) == dev && -+ PCI_FUNC(pbus->self->devfn) == func) { -+ switch (flags) { -+ case IORESOURCE_IO: -+ return io_size; -+ case IORESOURCE_MEM: -+ return mem_size; -+ default: -+ break; -+ } -+ } -+ } -+ -+ if (ep) { -+ *ep = ','; /* restore chomp'ed ',' for later */ -+ ep++; -+ } -+ sp = ep; -+ } while (ep); -+ -+ return 0; -+} -+ -+unsigned long pci_reserve_size_io(struct pci_bus *pbus) -+{ -+ return pci_reserve_size(pbus, IORESOURCE_IO); -+} -+ -+unsigned long pci_reserve_size_mem(struct pci_bus *pbus) -+{ -+ return pci_reserve_size(pbus, 
IORESOURCE_MEM); -+} -+ -+static int __init pci_reserve_setup(char *str) -+{ -+ if (strlen(str) >= sizeof(pci_reserve_param)) -+ return 0; -+ strlcpy(pci_reserve_param, str, sizeof(pci_reserve_param)); -+ return 1; -+} -+__setup("pci_reserve=", pci_reserve_setup); ---- head-2011-03-11.orig/drivers/pci/setup-bus.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/pci/setup-bus.c 2011-01-31 14:32:40.000000000 +0100 -@@ -448,6 +448,9 @@ static void pbus_size_io(struct pci_bus - size = ALIGN(size + size1, 4096); - if (size < old_size) - size = old_size; -+ size1 = pci_reserve_size_io(bus); -+ if (size < size1) -+ size = ALIGN(size1, 4096); - if (!size) { - if (b_res->start || b_res->end) - dev_info(&bus->self->dev, "disabling bridge window " -@@ -537,7 +540,8 @@ static int pbus_size_mem(struct pci_bus - min_align = align1 >> 1; - align += aligns[order]; - } -- size = ALIGN(size, min_align); -+ size = ALIGN(max(size, (resource_size_t)pci_reserve_size_mem(bus)), -+ min_align); - if (!size) { - if (b_res->start || b_res->end) - dev_info(&bus->self->dev, "disabling bridge window " diff --git a/patches.xen/sfc-driverlink b/patches.xen/sfc-driverlink deleted file mode 100644 index bc3cf71..0000000 --- a/patches.xen/sfc-driverlink +++ /dev/null @@ -1,1133 +0,0 @@ -From: David Riddoch -commit d96c061bfd1839e34e136de0555564520acc97af -Author: Steve Hodgson -Date: Mon Jul 14 15:38:47 2008 +0100 - -Subject: sfc: Driverlink API for exporting hardware features to client drivers -References: FATE#303479 -Patch-mainline: n/a -Acked-by: jbeulich@novell.com - ---- head-2009-11-06.orig/drivers/net/sfc/Makefile 2009-11-06 10:29:51.000000000 +0100 -+++ head-2009-11-06/drivers/net/sfc/Makefile 2009-07-28 10:04:25.000000000 +0200 -@@ -1,6 +1,7 @@ - sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \ - falcon_xmac.o selftest.o ethtool.o xfp_phy.o \ -- mdio_10g.o tenxpress.o boards.o sfe4001.o -+ mdio_10g.o tenxpress.o boards.o sfe4001.o \ -+ driverlink.o - sfc-$(CONFIG_SFC_MTD) 
+= mtd.o - - obj-$(CONFIG_SFC) += sfc.o ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-11-06/drivers/net/sfc/driverlink.c 2009-07-28 10:04:25.000000000 +0200 -@@ -0,0 +1,367 @@ -+/**************************************************************************** -+ * Driver for Solarflare Solarstorm network controllers and boards -+ * Copyright 2005 Fen Systems Ltd. -+ * Copyright 2005-2008 Solarflare Communications Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ */ -+ -+#include -+#include -+#include -+#include -+#include "net_driver.h" -+#include "efx.h" -+#include "driverlink_api.h" -+#include "driverlink.h" -+ -+/* Protects @efx_driverlink_lock and @efx_driver_list */ -+static DEFINE_MUTEX(efx_driverlink_lock); -+ -+/* List of all registered drivers */ -+static LIST_HEAD(efx_driver_list); -+ -+/* List of all registered Efx ports */ -+static LIST_HEAD(efx_port_list); -+ -+/** -+ * Driver link handle used internally to track devices -+ * @efx_dev: driverlink device handle exported to consumers -+ * @efx: efx_nic backing the driverlink device -+ * @port_node: per-device list head -+ * @driver_node: per-driver list head -+ */ -+struct efx_dl_handle { -+ struct efx_dl_device efx_dev; -+ struct efx_nic *efx; -+ struct list_head port_node; -+ struct list_head driver_node; -+}; -+ -+static struct efx_dl_handle *efx_dl_handle(struct efx_dl_device *efx_dev) -+{ -+ return container_of(efx_dev, struct efx_dl_handle, efx_dev); -+} -+ -+/* Remove an Efx device, and call the driver's remove() callback if -+ * present. The caller must hold @efx_driverlink_lock. 
*/ -+static void efx_dl_del_device(struct efx_dl_device *efx_dev) -+{ -+ struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev); -+ -+ EFX_INFO(efx_handle->efx, "%s driverlink client unregistering\n", -+ efx_dev->driver->name); -+ -+ if (efx_dev->driver->remove) -+ efx_dev->driver->remove(efx_dev); -+ -+ list_del(&efx_handle->driver_node); -+ list_del(&efx_handle->port_node); -+ -+ kfree(efx_handle); -+} -+ -+/* Attempt to probe the given device with the driver, creating a -+ * new &struct efx_dl_device. If the probe routine returns an error, -+ * then the &struct efx_dl_device is destroyed */ -+static void efx_dl_try_add_device(struct efx_nic *efx, -+ struct efx_dl_driver *driver) -+{ -+ struct efx_dl_handle *efx_handle; -+ struct efx_dl_device *efx_dev; -+ int rc; -+ -+ efx_handle = kzalloc(sizeof(*efx_handle), GFP_KERNEL); -+ if (!efx_handle) -+ goto fail; -+ efx_dev = &efx_handle->efx_dev; -+ efx_handle->efx = efx; -+ efx_dev->driver = driver; -+ efx_dev->pci_dev = efx->pci_dev; -+ INIT_LIST_HEAD(&efx_handle->port_node); -+ INIT_LIST_HEAD(&efx_handle->driver_node); -+ -+ rc = driver->probe(efx_dev, efx->net_dev, -+ efx->dl_info, efx->silicon_rev); -+ if (rc) -+ goto fail; -+ -+ list_add_tail(&efx_handle->driver_node, &driver->device_list); -+ list_add_tail(&efx_handle->port_node, &efx->dl_device_list); -+ -+ EFX_INFO(efx, "%s driverlink client registered\n", driver->name); -+ return; -+ -+ fail: -+ EFX_INFO(efx, "%s driverlink client skipped\n", driver->name); -+ -+ kfree(efx_handle); -+} -+ -+/* Unregister a driver from the driverlink layer, calling the -+ * driver's remove() callback for every attached device */ -+void efx_dl_unregister_driver(struct efx_dl_driver *driver) -+{ -+ struct efx_dl_handle *efx_handle, *efx_handle_n; -+ -+ printk(KERN_INFO "Efx driverlink unregistering %s driver\n", -+ driver->name); -+ -+ mutex_lock(&efx_driverlink_lock); -+ -+ list_for_each_entry_safe(efx_handle, efx_handle_n, -+ &driver->device_list, driver_node) -+ 
efx_dl_del_device(&efx_handle->efx_dev); -+ -+ list_del(&driver->node); -+ -+ mutex_unlock(&efx_driverlink_lock); -+} -+EXPORT_SYMBOL(efx_dl_unregister_driver); -+ -+/* Register a new driver with the driverlink layer. The driver's -+ * probe routine will be called for every attached nic. */ -+int efx_dl_register_driver(struct efx_dl_driver *driver) -+{ -+ struct efx_nic *efx; -+ int rc; -+ -+ printk(KERN_INFO "Efx driverlink registering %s driver\n", -+ driver->name); -+ -+ INIT_LIST_HEAD(&driver->node); -+ INIT_LIST_HEAD(&driver->device_list); -+ -+ rc = mutex_lock_interruptible(&efx_driverlink_lock); -+ if (rc) -+ return rc; -+ -+ list_add_tail(&driver->node, &efx_driver_list); -+ list_for_each_entry(efx, &efx_port_list, dl_node) -+ efx_dl_try_add_device(efx, driver); -+ -+ mutex_unlock(&efx_driverlink_lock); -+ -+ return 0; -+} -+EXPORT_SYMBOL(efx_dl_register_driver); -+ -+void efx_dl_unregister_nic(struct efx_nic *efx) -+{ -+ struct efx_dl_handle *efx_handle, *efx_handle_n; -+ -+ mutex_lock(&efx_driverlink_lock); -+ -+ list_for_each_entry_safe_reverse(efx_handle, efx_handle_n, -+ &efx->dl_device_list, -+ port_node) -+ efx_dl_del_device(&efx_handle->efx_dev); -+ -+ list_del(&efx->dl_node); -+ -+ mutex_unlock(&efx_driverlink_lock); -+} -+ -+int efx_dl_register_nic(struct efx_nic *efx) -+{ -+ struct efx_dl_driver *driver; -+ int rc; -+ -+ rc = mutex_lock_interruptible(&efx_driverlink_lock); -+ if (rc) -+ return rc; -+ -+ list_add_tail(&efx->dl_node, &efx_port_list); -+ list_for_each_entry(driver, &efx_driver_list, node) -+ efx_dl_try_add_device(efx, driver); -+ -+ mutex_unlock(&efx_driverlink_lock); -+ -+ return 0; -+} -+ -+/* Dummy callback implementations. -+ * To avoid a branch point on the fast-path, the callbacks are always -+ * implemented - they are never NULL. 
-+ */ -+static enum efx_veto efx_dummy_tx_packet_callback(struct efx_dl_device *efx_dev, -+ struct sk_buff *skb) -+{ -+ return EFX_ALLOW_PACKET; -+} -+ -+static enum efx_veto efx_dummy_rx_packet_callback(struct efx_dl_device *efx_dev, -+ const char *pkt_buf, int len) -+{ -+ return EFX_ALLOW_PACKET; -+} -+ -+static int efx_dummy_request_mtu_callback(struct efx_dl_device *efx_dev, -+ int new_mtu) -+{ -+ return 0; -+} -+ -+static void efx_dummy_mtu_changed_callback(struct efx_dl_device *efx_dev, -+ int mtu) -+{ -+ return; -+} -+ -+static void efx_dummy_event_callback(struct efx_dl_device *efx_dev, void *event) -+{ -+ return; -+} -+ -+struct efx_dl_callbacks efx_default_callbacks = { -+ .tx_packet = efx_dummy_tx_packet_callback, -+ .rx_packet = efx_dummy_rx_packet_callback, -+ .request_mtu = efx_dummy_request_mtu_callback, -+ .mtu_changed = efx_dummy_mtu_changed_callback, -+ .event = efx_dummy_event_callback, -+}; -+ -+void efx_dl_unregister_callbacks(struct efx_dl_device *efx_dev, -+ struct efx_dl_callbacks *callbacks) -+{ -+ struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev); -+ struct efx_nic *efx = efx_handle->efx; -+ -+ efx_suspend(efx); -+ -+ EFX_INFO(efx, "removing callback hooks into %s driver\n", -+ efx_dev->driver->name); -+ -+ if (callbacks->tx_packet) { -+ BUG_ON(efx->dl_cb_dev.tx_packet != efx_dev); -+ efx->dl_cb.tx_packet = efx_default_callbacks.tx_packet; -+ efx->dl_cb_dev.tx_packet = NULL; -+ } -+ if (callbacks->rx_packet) { -+ BUG_ON(efx->dl_cb_dev.rx_packet != efx_dev); -+ efx->dl_cb.rx_packet = efx_default_callbacks.rx_packet; -+ efx->dl_cb_dev.rx_packet = NULL; -+ } -+ if (callbacks->request_mtu) { -+ BUG_ON(efx->dl_cb_dev.request_mtu != efx_dev); -+ efx->dl_cb.request_mtu = efx_default_callbacks.request_mtu; -+ efx->dl_cb_dev.request_mtu = NULL; -+ } -+ if (callbacks->mtu_changed) { -+ BUG_ON(efx->dl_cb_dev.mtu_changed != efx_dev); -+ efx->dl_cb.mtu_changed = efx_default_callbacks.mtu_changed; -+ efx->dl_cb_dev.mtu_changed = NULL; -+ } -+ if 
(callbacks->event) { -+ BUG_ON(efx->dl_cb_dev.event != efx_dev); -+ efx->dl_cb.event = efx_default_callbacks.event; -+ efx->dl_cb_dev.event = NULL; -+ } -+ -+ efx_resume(efx); -+} -+EXPORT_SYMBOL(efx_dl_unregister_callbacks); -+ -+int efx_dl_register_callbacks(struct efx_dl_device *efx_dev, -+ struct efx_dl_callbacks *callbacks) -+{ -+ struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev); -+ struct efx_nic *efx = efx_handle->efx; -+ int rc = 0; -+ -+ efx_suspend(efx); -+ -+ /* Check that the requested callbacks are not already hooked. */ -+ if ((callbacks->tx_packet && efx->dl_cb_dev.tx_packet) || -+ (callbacks->rx_packet && efx->dl_cb_dev.rx_packet) || -+ (callbacks->request_mtu && efx->dl_cb_dev.request_mtu) || -+ (callbacks->mtu_changed && efx->dl_cb_dev.mtu_changed) || -+ (callbacks->event && efx->dl_cb_dev.event)) { -+ rc = -EBUSY; -+ goto out; -+ } -+ -+ EFX_INFO(efx, "adding callback hooks to %s driver\n", -+ efx_dev->driver->name); -+ -+ /* Hook in the requested callbacks, leaving any NULL members -+ * referencing the members of @efx_default_callbacks */ -+ if (callbacks->tx_packet) { -+ efx->dl_cb.tx_packet = callbacks->tx_packet; -+ efx->dl_cb_dev.tx_packet = efx_dev; -+ } -+ if (callbacks->rx_packet) { -+ efx->dl_cb.rx_packet = callbacks->rx_packet; -+ efx->dl_cb_dev.rx_packet = efx_dev; -+ } -+ if (callbacks->request_mtu) { -+ efx->dl_cb.request_mtu = callbacks->request_mtu; -+ efx->dl_cb_dev.request_mtu = efx_dev; -+ } -+ if (callbacks->mtu_changed) { -+ efx->dl_cb.mtu_changed = callbacks->mtu_changed; -+ efx->dl_cb_dev.mtu_changed = efx_dev; -+ } -+ if (callbacks->event) { -+ efx->dl_cb.event = callbacks->event; -+ efx->dl_cb_dev.event = efx_dev; -+ } -+ -+ out: -+ efx_resume(efx); -+ -+ return rc; -+} -+EXPORT_SYMBOL(efx_dl_register_callbacks); -+ -+void efx_dl_schedule_reset(struct efx_dl_device *efx_dev) -+{ -+ struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev); -+ struct efx_nic *efx = efx_handle->efx; -+ -+ efx_schedule_reset(efx, 
RESET_TYPE_ALL); -+} -+EXPORT_SYMBOL(efx_dl_schedule_reset); -+ -+void efx_dl_reset_unlock(void) -+{ -+ mutex_unlock(&efx_driverlink_lock); -+} -+ -+/* Suspend ready for reset, serialising against all the driverlink interfacse -+ * and calling the suspend() callback of every registered driver */ -+void efx_dl_reset_suspend(struct efx_nic *efx) -+{ -+ struct efx_dl_handle *efx_handle; -+ struct efx_dl_device *efx_dev; -+ -+ mutex_lock(&efx_driverlink_lock); -+ -+ list_for_each_entry_reverse(efx_handle, -+ &efx->dl_device_list, -+ port_node) { -+ efx_dev = &efx_handle->efx_dev; -+ if (efx_dev->driver->reset_suspend) -+ efx_dev->driver->reset_suspend(efx_dev); -+ } -+} -+ -+/* Resume after a reset, calling the resume() callback of every registered -+ * driver, and releasing @Efx_driverlink_lock acquired in -+ * efx_dl_reset_resume() */ -+void efx_dl_reset_resume(struct efx_nic *efx, int ok) -+{ -+ struct efx_dl_handle *efx_handle; -+ struct efx_dl_device *efx_dev; -+ -+ list_for_each_entry(efx_handle, &efx->dl_device_list, -+ port_node) { -+ efx_dev = &efx_handle->efx_dev; -+ if (efx_dev->driver->reset_resume) -+ efx_dev->driver->reset_resume(efx_dev, ok); -+ } -+ -+ mutex_unlock(&efx_driverlink_lock); -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-11-06/drivers/net/sfc/driverlink.h 2009-07-28 10:04:25.000000000 +0200 -@@ -0,0 +1,43 @@ -+/**************************************************************************** -+ * Driver for Solarflare Solarstorm network controllers and boards -+ * Copyright 2005 Fen Systems Ltd. -+ * Copyright 2006-2008 Solarflare Communications Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. 
-+ */ -+ -+#ifndef EFX_DRIVERLINK_H -+#define EFX_DRIVERLINK_H -+ -+/* Forward declarations */ -+struct efx_dl_device; -+struct efx_nic; -+ -+/* Efx callback devices -+ * -+ * A list of the devices that own each callback. The partner to -+ * struct efx_dl_callbacks. -+ */ -+struct efx_dl_cb_devices { -+ struct efx_dl_device *tx_packet; -+ struct efx_dl_device *rx_packet; -+ struct efx_dl_device *request_mtu; -+ struct efx_dl_device *mtu_changed; -+ struct efx_dl_device *event; -+}; -+ -+extern struct efx_dl_callbacks efx_default_callbacks; -+ -+#define EFX_DL_CALLBACK(_port, _name, ...) \ -+ (_port)->dl_cb._name((_port)->dl_cb_dev._name, __VA_ARGS__) -+ -+extern int efx_dl_register_nic(struct efx_nic *efx); -+extern void efx_dl_unregister_nic(struct efx_nic *efx); -+ -+/* Suspend and resume client drivers over a hardware reset */ -+extern void efx_dl_reset_suspend(struct efx_nic *efx); -+extern void efx_dl_reset_resume(struct efx_nic *efx, int ok); -+ -+#endif /* EFX_DRIVERLINK_H */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-11-06/drivers/net/sfc/driverlink_api.h 2009-07-28 10:04:25.000000000 +0200 -@@ -0,0 +1,303 @@ -+/**************************************************************************** -+ * Driver for Solarflare Solarstorm network controllers and boards -+ * Copyright 2005-2006 Fen Systems Ltd. -+ * Copyright 2005-2008 Solarflare Communications Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. 
-+ */ -+ -+#ifndef EFX_DRIVERLINK_API_H -+#define EFX_DRIVERLINK_API_H -+ -+#include -+ -+/* Forward declarations */ -+struct pci_dev; -+struct net_device; -+struct sk_buff; -+struct efx_dl_device; -+struct efx_dl_device_info; -+ -+/* An extra safeguard in addition to symbol versioning */ -+#define EFX_DRIVERLINK_API_VERSION 2 -+ -+/** -+ * struct efx_dl_driver - An Efx driverlink device driver -+ * -+ * A driverlink client defines and initializes as many instances of -+ * efx_dl_driver as required, registering each one with -+ * efx_dl_register_driver(). -+ * -+ * @name: Name of the driver -+ * @probe: Called when device added -+ * The client should use the @def_info linked list and @silicon_rev -+ * to determine if they wish to attach to this device. -+ * Context: process, driverlink semaphore held -+ * @remove: Called when device removed -+ * The client must ensure the finish all operations with this -+ * device before returning from this method. -+ * Context: process, driverlink semaphore held -+ * @reset_suspend: Called before device is reset -+ * Called immediately before a hardware reset. The client must stop all -+ * hardware processing before returning from this method. Callbacks will -+ * be inactive when this method is called. -+ * Context: process, driverlink semaphore held. rtnl_lock may be held -+ * @reset_resume: Called after device is reset -+ * Called after a hardware reset. If @ok is true, the client should -+ * state and resume normal operations. If @ok is false, the client should -+ * abandon use of the hardware resources. remove() will still be called. -+ * Context: process, driverlink semaphore held. 
rtnl_lock may be held -+ */ -+struct efx_dl_driver { -+ const char *name; -+ -+ int (*probe) (struct efx_dl_device *efx_dl_dev, -+ const struct net_device *net_dev, -+ const struct efx_dl_device_info *dev_info, -+ const char *silicon_rev); -+ void (*remove) (struct efx_dl_device *efx_dev); -+ void (*reset_suspend) (struct efx_dl_device *efx_dev); -+ void (*reset_resume) (struct efx_dl_device *efx_dev, int ok); -+ -+/* private: */ -+ struct list_head node; -+ struct list_head device_list; -+}; -+ -+/** -+ * enum efx_dl_device_info_type - Device information identifier. -+ * -+ * Used to identify each item in the &struct efx_dl_device_info linked list -+ * provided to each driverlink client in the probe() @dev_info member. -+ * -+ * @EFX_DL_FALCON_RESOURCES: Information type is &struct efx_dl_falcon_resources -+ */ -+enum efx_dl_device_info_type { -+ /** Falcon resources available for export */ -+ EFX_DL_FALCON_RESOURCES = 0, -+}; -+ -+/** -+ * struct efx_dl_device_info - device information structure -+ * -+ * @next: Link to next structure, if any -+ * @type: Type code for this structure -+ */ -+struct efx_dl_device_info { -+ struct efx_dl_device_info *next; -+ enum efx_dl_device_info_type type; -+}; -+ -+/** -+ * enum efx_dl_falcon_resource_flags - Falcon resource information flags. -+ * -+ * Flags that describe hardware variations for the current Falcon device. -+ * -+ * @EFX_DL_FALCON_DUAL_FUNC: Port is dual-function. -+ * Certain silicon revisions have two pci functions, and require -+ * certain hardware resources to be accessed via the secondary -+ * function -+ * @EFX_DL_FALCON_USE_MSI: Port is initialised to use MSI/MSI-X interrupts. -+ * Falcon supports traditional legacy interrupts and MSI/MSI-X -+ * interrupts. 
The choice is made at run time by the sfc driver, and -+ * notified to the clients by this enumeration -+ */ -+enum efx_dl_falcon_resource_flags { -+ EFX_DL_FALCON_DUAL_FUNC = 0x1, -+ EFX_DL_FALCON_USE_MSI = 0x2, -+}; -+ -+/** -+ * struct efx_dl_falcon_resources - Falcon resource information. -+ * -+ * This structure describes Falcon hardware resources available for -+ * use by a driverlink driver. -+ * -+ * @hdr: Resource linked list header -+ * @biu_lock: Register access lock. -+ * Some Falcon revisions require register access for configuration -+ * registers to be serialised between ports and PCI functions. -+ * The sfc driver will provide the appropriate lock semantics for -+ * the underlying hardware. -+ * @buffer_table_min: First available buffer table entry -+ * @buffer_table_lim: Last available buffer table entry + 1 -+ * @evq_timer_min: First available event queue with timer -+ * @evq_timer_lim: Last available event queue with timer + 1 -+ * @evq_int_min: First available event queue with interrupt -+ * @evq_int_lim: Last available event queue with interrupt + 1 -+ * @rxq_min: First available RX queue -+ * @rxq_lim: Last available RX queue + 1 -+ * @txq_min: First available TX queue -+ * @txq_lim: Last available TX queue + 1 -+ * @flags: Hardware variation flags -+ */ -+struct efx_dl_falcon_resources { -+ struct efx_dl_device_info hdr; -+ spinlock_t *biu_lock; -+ unsigned buffer_table_min; -+ unsigned buffer_table_lim; -+ unsigned evq_timer_min; -+ unsigned evq_timer_lim; -+ unsigned evq_int_min; -+ unsigned evq_int_lim; -+ unsigned rxq_min; -+ unsigned rxq_lim; -+ unsigned txq_min; -+ unsigned txq_lim; -+ enum efx_dl_falcon_resource_flags flags; -+}; -+ -+/** -+ * struct efx_dl_device - An Efx driverlink device. -+ * -+ * @pci_dev: PCI device used by the sfc driver. -+ * @priv: Driver private data -+ * Driverlink clients can use this to store a pointer to their -+ * internal per-device data structure. 
Each (driver, device) -+ * tuple has a separate &struct efx_dl_device, so clients can use -+ * this @priv field independently. -+ * @driver: Efx driverlink driver for this device -+ */ -+struct efx_dl_device { -+ struct pci_dev *pci_dev; -+ void *priv; -+ struct efx_dl_driver *driver; -+}; -+ -+/** -+ * enum efx_veto - Packet veto request flag. -+ * -+ * This is the return type for the rx_packet() and tx_packet() methods -+ * in &struct efx_dl_callbacks. -+ * -+ * @EFX_ALLOW_PACKET: Packet may be transmitted/received -+ * @EFX_VETO_PACKET: Packet must not be transmitted/received -+ */ -+enum efx_veto { -+ EFX_ALLOW_PACKET = 0, -+ EFX_VETO_PACKET = 1, -+}; -+ -+/** -+ * struct efx_dl_callbacks - Efx callbacks -+ * -+ * This is a tighly controlled set of simple callbacks, that are attached -+ * to the sfc driver via efx_dl_register_callbacks(). They export just enough -+ * state to allow clients to make use of the available hardware resources. -+ * -+ * For efficiency, only one client can hook each callback. Since these -+ * callbacks are called on packet transmit and reception paths, and the -+ * sfc driver may have multiple tx and rx queues per port, clients should -+ * avoid acquiring locks or allocating memory. -+ * -+ * @tx_packet: Called when packet is about to be transmitted -+ * Called for every packet about to be transmitted, providing means -+ * for the client to snoop traffic, and veto transmission by returning -+ * %EFX_VETO_PACKET (the sfc driver will subsequently free the skb). -+ * Context: tasklet, netif_tx_lock held -+ * @rx_packet: Called when packet is received -+ * Called for every received packet (after LRO), allowing the client -+ * to snoop every received packet (on every rx queue), and veto -+ * reception by returning %EFX_VETO_PACKET. -+ * Context: tasklet -+ * @request_mtu: Called to request MTU change. -+ * Called whenever the user requests the net_dev mtu to be changed. -+ * If the client returns an error, the mtu change is aborted. 
The sfc -+ * driver guarantees that no other callbacks are running. -+ * Context: process, rtnl_lock held. -+ * @mtu_changed: Called when MTU has been changed. -+ * Called after the mtu has been successfully changed, always after -+ * a previous call to request_mtu(). The sfc driver guarantees that no -+ * other callbacks are running. -+ * Context: process, rtnl_lock held. -+ * @event: Called when a hardware NIC event is not understood by the sfc driver. -+ * Context: tasklet. -+ */ -+struct efx_dl_callbacks { -+ enum efx_veto (*tx_packet) (struct efx_dl_device *efx_dev, -+ struct sk_buff *skb); -+ enum efx_veto (*rx_packet) (struct efx_dl_device *efx_dev, -+ const char *pkt_hdr, int pkt_len); -+ int (*request_mtu) (struct efx_dl_device *efx_dev, int new_mtu); -+ void (*mtu_changed) (struct efx_dl_device *efx_dev, int mtu); -+ void (*event) (struct efx_dl_device *efx_dev, void *p_event); -+}; -+ -+/* Include API version number in symbol used for efx_dl_register_driver */ -+#define efx_dl_stringify_1(x, y) x ## y -+#define efx_dl_stringify_2(x, y) efx_dl_stringify_1(x, y) -+#define efx_dl_register_driver \ -+ efx_dl_stringify_2(efx_dl_register_driver_api_ver_, \ -+ EFX_DRIVERLINK_API_VERSION) -+ -+/* Exported driverlink api used to register and unregister the client driver -+ * and any callbacks [only one per port allowed], and to allow a client driver -+ * to request reset to recover from an error condition. -+ * -+ * All of these functions acquire the driverlink semaphore, so must not be -+ * called from an efx_dl_driver or efx_dl_callbacks member, and must be called -+ * from process context. 
-+ */ -+extern int efx_dl_register_driver(struct efx_dl_driver *driver); -+ -+extern void efx_dl_unregister_driver(struct efx_dl_driver *driver); -+ -+extern int efx_dl_register_callbacks(struct efx_dl_device *efx_dev, -+ struct efx_dl_callbacks *callbacks); -+ -+extern void efx_dl_unregister_callbacks(struct efx_dl_device *efx_dev, -+ struct efx_dl_callbacks *callbacks); -+ -+/* Schedule a reset without grabbing any locks */ -+extern void efx_dl_schedule_reset(struct efx_dl_device *efx_dev); -+ -+/** -+ * efx_dl_for_each_device_info_matching - iterate an efx_dl_device_info list -+ * @_dev_info: Pointer to first &struct efx_dl_device_info -+ * @_type: Type code to look for -+ * @_info_type: Structure type corresponding to type code -+ * @_field: Name of &struct efx_dl_device_info field in the type -+ * @_p: Iterator variable -+ * -+ * Example: -+ * struct efx_dl_falcon_resources *res; -+ * efx_dl_for_each_device_info_matching(dev_info, EFX_DL_FALCON_RESOURCES, -+ * struct efx_dl_falcon_resources, -+ * hdr, res) { -+ * if (res->flags & EFX_DL_FALCON_DUAL_FUNC) -+ * .... -+ * } -+ */ -+#define efx_dl_for_each_device_info_matching(_dev_info, _type, \ -+ _info_type, _field, _p) \ -+ for ((_p) = container_of((_dev_info), _info_type, _field); \ -+ (_p) != NULL; \ -+ (_p) = container_of((_p)->_field.next, _info_type, _field))\ -+ if ((_p)->_field.type != _type) \ -+ continue; \ -+ else -+ -+/** -+ * efx_dl_search_device_info - search an efx_dl_device_info list -+ * @_dev_info: Pointer to first &struct efx_dl_device_info -+ * @_type: Type code to look for -+ * @_info_type: Structure type corresponding to type code -+ * @_field: Name of &struct efx_dl_device_info member in this type -+ * @_p: Result variable -+ * -+ * Example: -+ * struct efx_dl_falcon_resources *res; -+ * efx_dl_search_device_info(dev_info, EFX_DL_FALCON_RESOURCES, -+ * struct efx_dl_falcon_resources, hdr, res); -+ * if (res) -+ * .... 
-+ */ -+#define efx_dl_search_device_info(_dev_info, _type, _info_type, \ -+ _field, _p) \ -+ efx_dl_for_each_device_info_matching((_dev_info), (_type), \ -+ _info_type, _field, (_p)) \ -+ break; -+ -+#endif /* EFX_DRIVERLINK_API_H */ ---- head-2009-11-06.orig/drivers/net/sfc/efx.c 2009-11-06 10:29:51.000000000 +0100 -+++ head-2009-11-06/drivers/net/sfc/efx.c 2009-10-12 13:40:25.000000000 +0200 -@@ -1487,12 +1487,21 @@ static int efx_change_mtu(struct net_dev - - efx_stop_all(efx); - -+ /* Ask driverlink client if we can change MTU */ -+ rc = EFX_DL_CALLBACK(efx, request_mtu, new_mtu); -+ if (rc) -+ goto out; -+ - EFX_LOG(efx, "changing MTU to %d\n", new_mtu); - - efx_fini_channels(efx); - net_dev->mtu = new_mtu; - efx_init_channels(efx); - -+ /* Notify driverlink client of new MTU */ -+ EFX_DL_CALLBACK(efx, mtu_changed, new_mtu); -+ -+ out: - efx_start_all(efx); - return rc; - } -@@ -1680,6 +1689,23 @@ static void efx_unregister_netdev(struct - * Device reset and suspend - * - **************************************************************************/ -+/* Serialise access to the driverlink callbacks, by quiescing event processing -+ * (without flushing the descriptor queues), and acquiring the rtnl_lock */ -+void efx_suspend(struct efx_nic *efx) -+{ -+ EFX_LOG(efx, "suspending operations\n"); -+ -+ rtnl_lock(); -+ efx_stop_all(efx); -+} -+ -+void efx_resume(struct efx_nic *efx) -+{ -+ EFX_LOG(efx, "resuming operations\n"); -+ -+ efx_start_all(efx); -+ rtnl_unlock(); -+} - - /* Tears down the entire software state and most of the hardware state - * before reset. */ -@@ -1760,8 +1786,8 @@ static int efx_reset(struct efx_nic *efx - enum reset_type method = efx->reset_pending; - int rc = 0; - -- /* Serialise with kernel interfaces */ - rtnl_lock(); -+ efx_dl_reset_suspend(efx); - - /* If we're not RUNNING then don't reset. 
Leave the reset_pending - * flag set so that efx_pci_probe_main will be retried */ -@@ -1807,6 +1833,7 @@ out_disable: - } - - out_unlock: -+ efx_dl_reset_resume(efx, 1); - rtnl_unlock(); - return rc; - } -@@ -1951,6 +1978,9 @@ static int efx_init_struct(struct efx_ni - efx->mac_op = &efx_dummy_mac_operations; - efx->phy_op = &efx_dummy_phy_operations; - efx->mdio.dev = net_dev; -+ INIT_LIST_HEAD(&efx->dl_node); -+ INIT_LIST_HEAD(&efx->dl_device_list); -+ efx->dl_cb = efx_default_callbacks; - INIT_WORK(&efx->phy_work, efx_phy_work); - INIT_WORK(&efx->mac_work, efx_mac_work); - atomic_set(&efx->netif_stop_count, 1); -@@ -2054,6 +2084,7 @@ static void efx_pci_remove(struct pci_de - efx = pci_get_drvdata(pci_dev); - if (!efx) - return; -+ efx_dl_unregister_nic(efx); - - /* Mark the NIC as fini, then stop the interface */ - rtnl_lock(); -@@ -2230,9 +2261,16 @@ static int __devinit efx_pci_probe(struc - if (rc) - goto fail5; - -+ /* Register with driverlink layer */ -+ rc = efx_dl_register_nic(efx); -+ if (rc) -+ goto fail6; -+ - EFX_LOG(efx, "initialisation successful\n"); - return 0; - -+ fail6: -+ efx_unregister_netdev(efx); - fail5: - efx_pci_remove_main(efx); - fail4: ---- head-2009-11-06.orig/drivers/net/sfc/falcon.c 2009-11-06 10:29:51.000000000 +0100 -+++ head-2009-11-06/drivers/net/sfc/falcon.c 2009-07-28 10:04:25.000000000 +0200 -@@ -36,14 +36,14 @@ - - /** - * struct falcon_nic_data - Falcon NIC state -- * @next_buffer_table: First available buffer table id -+ * @resources: Resource information for driverlink client - * @pci_dev2: The secondary PCI device if present - * @i2c_data: Operations and state for I2C bit-bashing algorithm - * @int_error_count: Number of internal errors seen recently - * @int_error_expire: Time at which error count will be expired - */ - struct falcon_nic_data { -- unsigned next_buffer_table; -+ struct efx_dl_falcon_resources resources; - struct pci_dev *pci_dev2; - struct i2c_algo_bit_data i2c_data; - -@@ -336,8 +336,8 @@ static int 
falcon_alloc_special_buffer(s - memset(buffer->addr, 0xff, len); - - /* Select new buffer ID */ -- buffer->index = nic_data->next_buffer_table; -- nic_data->next_buffer_table += buffer->entries; -+ buffer->index = nic_data->resources.buffer_table_min; -+ nic_data->resources.buffer_table_min += buffer->entries; - - EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " - "(virt %p phys %llx)\n", buffer->index, -@@ -960,10 +960,12 @@ static void falcon_handle_driver_event(s - case TX_DESCQ_FLS_DONE_EV_DECODE: - EFX_TRACE(efx, "channel %d TXQ %d flushed\n", - channel->channel, ev_sub_data); -+ EFX_DL_CALLBACK(efx, event, event); - break; - case RX_DESCQ_FLS_DONE_EV_DECODE: - EFX_TRACE(efx, "channel %d RXQ %d flushed\n", - channel->channel, ev_sub_data); -+ EFX_DL_CALLBACK(efx, event, event); - break; - case EVQ_INIT_DONE_EV_DECODE: - EFX_LOG(efx, "channel %d EVQ %d initialised\n", -@@ -972,14 +974,17 @@ static void falcon_handle_driver_event(s - case SRM_UPD_DONE_EV_DECODE: - EFX_TRACE(efx, "channel %d SRAM update done\n", - channel->channel); -+ EFX_DL_CALLBACK(efx, event, event); - break; - case WAKE_UP_EV_DECODE: - EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", - channel->channel, ev_sub_data); -+ EFX_DL_CALLBACK(efx, event, event); - break; - case TIMER_EV_DECODE: - EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", - channel->channel, ev_sub_data); -+ EFX_DL_CALLBACK(efx, event, event); - break; - case RX_RECOVERY_EV_DECODE: - EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. 
" -@@ -1004,6 +1009,7 @@ static void falcon_handle_driver_event(s - EFX_TRACE(efx, "channel %d unknown driver event code %d " - "data %04x\n", channel->channel, ev_sub_code, - ev_sub_data); -+ EFX_DL_CALLBACK(efx, event, event); - break; - } - } -@@ -2744,6 +2750,59 @@ static int falcon_probe_nvconfig(struct - return rc; - } - -+/* Looks at available SRAM resources and silicon revision, and works out -+ * how many queues we can support, and where things like descriptor caches -+ * should live. */ -+static int falcon_dimension_resources(struct efx_nic *efx) -+{ -+ unsigned internal_dcs_entries; -+ struct falcon_nic_data *nic_data = efx->nic_data; -+ struct efx_dl_falcon_resources *res = &nic_data->resources; -+ -+ /* Fill out the driverlink resource list */ -+ res->hdr.type = EFX_DL_FALCON_RESOURCES; -+ res->biu_lock = &efx->biu_lock; -+ efx->dl_info = &res->hdr; -+ -+ /* NB. The minimum values get increased as this driver initialises -+ * its resources, so this should prevent any overlap. -+ */ -+ switch (falcon_rev(efx)) { -+ case FALCON_REV_A1: -+ res->rxq_min = 16; -+ res->txq_min = 16; -+ res->evq_int_min = 4; -+ res->evq_int_lim = 5; -+ res->evq_timer_min = 5; -+ res->evq_timer_lim = 4096; -+ internal_dcs_entries = 8192; -+ break; -+ case FALCON_REV_B0: -+ default: -+ res->rxq_min = 0; -+ res->txq_min = 0; -+ res->evq_int_min = 0; -+ res->evq_int_lim = 64; -+ res->evq_timer_min = 64; -+ res->evq_timer_lim = 4096; -+ internal_dcs_entries = 4096; -+ break; -+ } -+ -+ /* Internal SRAM only for now */ -+ res->rxq_lim = internal_dcs_entries / RX_DC_ENTRIES; -+ res->txq_lim = internal_dcs_entries / TX_DC_ENTRIES; -+ res->buffer_table_lim = 8192; -+ -+ if (FALCON_IS_DUAL_FUNC(efx)) -+ res->flags |= EFX_DL_FALCON_DUAL_FUNC; -+ -+ if (EFX_INT_MODE_USE_MSI(efx)) -+ res->flags |= EFX_DL_FALCON_USE_MSI; -+ -+ return 0; -+} -+ - /* Probe the NIC variant (revision, ASIC vs FPGA, function count, port - * count, port speed). Set workaround and feature flags accordingly. 
- */ -@@ -2771,9 +2830,11 @@ static int falcon_probe_nic_variant(stru - EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); - return -ENODEV; - } -+ efx->silicon_rev = "falcon/a1"; - break; - - case FALCON_REV_B0: -+ efx->silicon_rev = "falcon/b0"; - break; - - default: -@@ -2883,6 +2944,10 @@ int falcon_probe_nic(struct efx_nic *efx - if (rc) - goto fail5; - -+ rc = falcon_dimension_resources(efx); -+ if (rc) -+ goto fail6; -+ - /* Initialise I2C adapter */ - efx->i2c_adap.owner = THIS_MODULE; - nic_data->i2c_data = falcon_i2c_bit_operations; -@@ -2892,10 +2957,12 @@ int falcon_probe_nic(struct efx_nic *efx - strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name)); - rc = i2c_bit_add_bus(&efx->i2c_adap); - if (rc) -- goto fail5; -+ goto fail6; - - return 0; - -+ fail6: -+ efx->dl_info = NULL; - fail5: - falcon_remove_spi_devices(efx); - falcon_free_buffer(efx, &efx->irq_status); -@@ -3083,6 +3150,7 @@ void falcon_remove_nic(struct efx_nic *e - /* Tear down the private nic state */ - kfree(efx->nic_data); - efx->nic_data = NULL; -+ efx->dl_info = NULL; - } - - void falcon_update_nic_stats(struct efx_nic *efx) ---- head-2009-11-06.orig/drivers/net/sfc/net_driver.h 2009-11-06 10:29:51.000000000 +0100 -+++ head-2009-11-06/drivers/net/sfc/net_driver.h 2009-07-28 10:04:25.000000000 +0200 -@@ -29,6 +29,8 @@ - - #include "enum.h" - #include "bitfield.h" -+#include "driverlink_api.h" -+#include "driverlink.h" - - /************************************************************************** - * -@@ -754,6 +756,12 @@ union efx_multicast_hash { - * @loopback_mode: Loopback status - * @loopback_modes: Supported loopback mode bitmask - * @loopback_selftest: Offline self-test private state -+ * @silicon_rev: Silicon revision description for driverlink -+ * @dl_info: Linked list of hardware parameters exposed through driverlink -+ * @dl_node: Driverlink port list -+ * @dl_device_list: Driverlink device list -+ * @dl_cb: Driverlink callbacks table -+ * @dl_cb_dev: 
Driverlink callback owner devices - * - * The @priv field of the corresponding &struct net_device points to - * this. -@@ -844,6 +852,13 @@ struct efx_nic { - unsigned int loopback_modes; - - void *loopback_selftest; -+ -+ const char *silicon_rev; -+ struct efx_dl_device_info *dl_info; -+ struct list_head dl_node; -+ struct list_head dl_device_list; -+ struct efx_dl_callbacks dl_cb; -+ struct efx_dl_cb_devices dl_cb_dev; - }; - - static inline int efx_dev_registered(struct efx_nic *efx) ---- head-2009-11-06.orig/drivers/net/sfc/rx.c 2009-11-06 10:29:51.000000000 +0100 -+++ head-2009-11-06/drivers/net/sfc/rx.c 2009-11-06 10:32:03.000000000 +0100 -@@ -447,7 +447,21 @@ static void efx_rx_packet_lro(struct efx - struct efx_rx_buffer *rx_buf, - bool checksummed) - { -+ struct efx_nic *efx = channel->efx; - struct napi_struct *napi = &channel->napi_str; -+ enum efx_veto veto; -+ -+ /* It would be faster if we had access to packets at the -+ * other side of generic LRO. Unfortunately, there isn't -+ * an obvious interface to this, so veto packets before LRO */ -+ veto = EFX_DL_CALLBACK(efx, rx_packet, rx_buf->data, rx_buf->len); -+ if (unlikely(veto)) { -+ EFX_TRACE(efx, "LRO RX vetoed by driverlink %s driver\n", -+ efx->dl_cb_dev.rx_packet->driver->name); -+ /* Free the buffer now */ -+ efx_free_rx_buffer(efx, rx_buf); -+ return; -+ } - - /* Pass the skb/page into the LRO engine */ - if (rx_buf->page) { -@@ -550,6 +564,7 @@ void __efx_rx_packet(struct efx_channel - struct efx_rx_buffer *rx_buf, bool checksummed) - { - struct efx_nic *efx = channel->efx; -+ enum efx_veto veto; - struct sk_buff *skb; - - /* If we're in loopback test, then pass the packet directly to the -@@ -561,6 +576,16 @@ void __efx_rx_packet(struct efx_channel - goto done; - } - -+ /* Allow callback to veto the packet */ -+ veto = EFX_DL_CALLBACK(efx, rx_packet, rx_buf->data, rx_buf->len); -+ if (unlikely(veto)) { -+ EFX_LOG(efx, "RX vetoed by driverlink %s driver\n", -+ 
efx->dl_cb_dev.rx_packet->driver->name); -+ /* Free the buffer now */ -+ efx_free_rx_buffer(efx, rx_buf); -+ goto done; -+ } -+ - if (rx_buf->skb) { - prefetch(skb_shinfo(rx_buf->skb)); - ---- head-2009-11-06.orig/drivers/net/sfc/tx.c 2009-11-06 10:29:51.000000000 +0100 -+++ head-2009-11-06/drivers/net/sfc/tx.c 2009-10-12 13:40:32.000000000 +0200 -@@ -374,6 +374,7 @@ netdev_tx_t efx_hard_start_xmit(struct s - { - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_tx_queue *tx_queue; -+ enum efx_veto veto; - - if (unlikely(efx->port_inhibited)) - return NETDEV_TX_BUSY; -@@ -383,6 +384,17 @@ netdev_tx_t efx_hard_start_xmit(struct s - else - tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM]; - -+ /* See if driverlink wants to veto the packet. */ -+ veto = EFX_DL_CALLBACK(efx, tx_packet, skb); -+ if (unlikely(veto)) { -+ EFX_TRACE(efx, "TX queue %d packet vetoed by " -+ "driverlink %s driver\n", tx_queue->queue, -+ efx->dl_cb_dev.tx_packet->driver->name); -+ /* Free the skb; nothing else will do it */ -+ dev_kfree_skb_any(skb); -+ return NETDEV_TX_OK; -+ } -+ - return efx_xmit(efx, tx_queue, skb); - } - diff --git a/patches.xen/sfc-driverlink-conditional b/patches.xen/sfc-driverlink-conditional deleted file mode 100644 index c3a264f..0000000 --- a/patches.xen/sfc-driverlink-conditional +++ /dev/null @@ -1,248 +0,0 @@ -From: jbeulich@novell.com -Subject: conditionalize driverlink additions to Solarflare driver -Patch-mainline: n/a -References: FATE#303479 - -At once converted the EFX_TRACE() invocations after vetoed RX/TX -callbacks to ...LOG() ones, which is consistent with Solarflare's -current code according to David Riddoch (2008-09-12). - ---- head-2009-11-06.orig/drivers/net/sfc/Kconfig 2009-04-21 11:02:22.000000000 +0200 -+++ head-2009-11-06/drivers/net/sfc/Kconfig 2009-10-12 13:41:03.000000000 +0200 -@@ -12,8 +12,12 @@ config SFC - To compile this driver as a module, choose M here. The module - will be called sfc. 
- -+config SFC_DRIVERLINK -+ bool -+ - config SFC_RESOURCE - depends on SFC && X86 -+ select SFC_DRIVERLINK - tristate "Solarflare Solarstorm SFC4000 resource driver" - help - This module provides the SFC resource manager driver. ---- head-2009-11-06.orig/drivers/net/sfc/Makefile 2009-02-06 12:42:18.000000000 +0100 -+++ head-2009-11-06/drivers/net/sfc/Makefile 2009-10-12 13:41:03.000000000 +0200 -@@ -1,7 +1,7 @@ - sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \ - falcon_xmac.o selftest.o ethtool.o xfp_phy.o \ -- mdio_10g.o tenxpress.o boards.o sfe4001.o \ -- driverlink.o -+ mdio_10g.o tenxpress.o boards.o sfe4001.o -+sfc-$(CONFIG_SFC_DRIVERLINK) += driverlink.o - sfc-$(CONFIG_SFC_MTD) += mtd.o - - obj-$(CONFIG_SFC) += sfc.o ---- head-2009-11-06.orig/drivers/net/sfc/driverlink.c 2009-07-28 10:04:25.000000000 +0200 -+++ head-2009-11-06/drivers/net/sfc/driverlink.c 2009-10-12 13:41:03.000000000 +0200 -@@ -14,7 +14,6 @@ - #include - #include "net_driver.h" - #include "efx.h" --#include "driverlink_api.h" - #include "driverlink.h" - - /* Protects @efx_driverlink_lock and @efx_driver_list */ ---- head-2009-11-06.orig/drivers/net/sfc/driverlink.h 2009-07-28 10:04:25.000000000 +0200 -+++ head-2009-11-06/drivers/net/sfc/driverlink.h 2009-10-12 13:41:03.000000000 +0200 -@@ -15,6 +15,10 @@ - struct efx_dl_device; - struct efx_nic; - -+#ifdef CONFIG_SFC_DRIVERLINK -+ -+#include "driverlink_api.h" -+ - /* Efx callback devices - * - * A list of the devices that own each callback. The partner to -@@ -40,4 +44,23 @@ extern void efx_dl_unregister_nic(struct - extern void efx_dl_reset_suspend(struct efx_nic *efx); - extern void efx_dl_reset_resume(struct efx_nic *efx, int ok); - -+#define EFX_DL_LOG EFX_LOG -+ -+#else /* CONFIG_SFC_DRIVERLINK */ -+ -+enum efx_veto { EFX_ALLOW_PACKET = 0 }; -+ -+static inline int efx_nop_callback(struct efx_nic *efx) { return 0; } -+#define EFX_DL_CALLBACK(port, name, ...) 
efx_nop_callback(port) -+ -+static inline int efx_dl_register_nic(struct efx_nic *efx) { return 0; } -+static inline void efx_dl_unregister_nic(struct efx_nic *efx) {} -+ -+static inline void efx_dl_reset_suspend(struct efx_nic *efx) {} -+static inline void efx_dl_reset_resume(struct efx_nic *efx, int ok) {} -+ -+#define EFX_DL_LOG(efx, fmt, args...) ((void)(efx)) -+ -+#endif /* CONFIG_SFC_DRIVERLINK */ -+ - #endif /* EFX_DRIVERLINK_H */ ---- head-2009-11-06.orig/drivers/net/sfc/efx.c 2009-10-12 13:40:25.000000000 +0200 -+++ head-2009-11-06/drivers/net/sfc/efx.c 2009-10-12 13:41:03.000000000 +0200 -@@ -1689,6 +1689,7 @@ static void efx_unregister_netdev(struct - * Device reset and suspend - * - **************************************************************************/ -+#ifdef CONFIG_SFC_DRIVERLINK - /* Serialise access to the driverlink callbacks, by quiescing event processing - * (without flushing the descriptor queues), and acquiring the rtnl_lock */ - void efx_suspend(struct efx_nic *efx) -@@ -1706,6 +1707,7 @@ void efx_resume(struct efx_nic *efx) - efx_start_all(efx); - rtnl_unlock(); - } -+#endif - - /* Tears down the entire software state and most of the hardware state - * before reset. 
*/ -@@ -1978,9 +1980,11 @@ static int efx_init_struct(struct efx_ni - efx->mac_op = &efx_dummy_mac_operations; - efx->phy_op = &efx_dummy_phy_operations; - efx->mdio.dev = net_dev; -+#ifdef CONFIG_SFC_DRIVERLINK - INIT_LIST_HEAD(&efx->dl_node); - INIT_LIST_HEAD(&efx->dl_device_list); - efx->dl_cb = efx_default_callbacks; -+#endif - INIT_WORK(&efx->phy_work, efx_phy_work); - INIT_WORK(&efx->mac_work, efx_mac_work); - atomic_set(&efx->netif_stop_count, 1); ---- head-2009-11-06.orig/drivers/net/sfc/falcon.c 2009-07-28 10:04:25.000000000 +0200 -+++ head-2009-11-06/drivers/net/sfc/falcon.c 2009-10-12 13:41:03.000000000 +0200 -@@ -36,6 +36,7 @@ - - /** - * struct falcon_nic_data - Falcon NIC state -+ * @next_buffer_table: First available buffer table id - * @resources: Resource information for driverlink client - * @pci_dev2: The secondary PCI device if present - * @i2c_data: Operations and state for I2C bit-bashing algorithm -@@ -43,7 +44,11 @@ - * @int_error_expire: Time at which error count will be expired - */ - struct falcon_nic_data { -+#ifndef CONFIG_SFC_DRIVERLINK -+ unsigned next_buffer_table; -+#else - struct efx_dl_falcon_resources resources; -+#endif - struct pci_dev *pci_dev2; - struct i2c_algo_bit_data i2c_data; - -@@ -336,8 +341,13 @@ static int falcon_alloc_special_buffer(s - memset(buffer->addr, 0xff, len); - - /* Select new buffer ID */ -+#ifndef CONFIG_SFC_DRIVERLINK -+ buffer->index = nic_data->next_buffer_table; -+ nic_data->next_buffer_table += buffer->entries; -+#else - buffer->index = nic_data->resources.buffer_table_min; - nic_data->resources.buffer_table_min += buffer->entries; -+#endif - - EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " - "(virt %p phys %llx)\n", buffer->index, -@@ -2755,6 +2765,7 @@ static int falcon_probe_nvconfig(struct - * should live. 
*/ - static int falcon_dimension_resources(struct efx_nic *efx) - { -+#ifdef CONFIG_SFC_DRIVERLINK - unsigned internal_dcs_entries; - struct falcon_nic_data *nic_data = efx->nic_data; - struct efx_dl_falcon_resources *res = &nic_data->resources; -@@ -2799,6 +2810,7 @@ static int falcon_dimension_resources(st - - if (EFX_INT_MODE_USE_MSI(efx)) - res->flags |= EFX_DL_FALCON_USE_MSI; -+#endif - - return 0; - } -@@ -2962,7 +2974,9 @@ int falcon_probe_nic(struct efx_nic *efx - return 0; - - fail6: -+#ifdef CONFIG_SFC_DRIVERLINK - efx->dl_info = NULL; -+#endif - fail5: - falcon_remove_spi_devices(efx); - falcon_free_buffer(efx, &efx->irq_status); -@@ -3150,7 +3164,9 @@ void falcon_remove_nic(struct efx_nic *e - /* Tear down the private nic state */ - kfree(efx->nic_data); - efx->nic_data = NULL; -+#ifdef CONFIG_SFC_DRIVERLINK - efx->dl_info = NULL; -+#endif - } - - void falcon_update_nic_stats(struct efx_nic *efx) ---- head-2009-11-06.orig/drivers/net/sfc/net_driver.h 2009-07-28 10:04:25.000000000 +0200 -+++ head-2009-11-06/drivers/net/sfc/net_driver.h 2009-10-12 13:41:03.000000000 +0200 -@@ -29,7 +29,6 @@ - - #include "enum.h" - #include "bitfield.h" --#include "driverlink_api.h" - #include "driverlink.h" - - /************************************************************************** -@@ -854,11 +853,13 @@ struct efx_nic { - void *loopback_selftest; - - const char *silicon_rev; -+#ifdef CONFIG_SFC_DRIVERLINK - struct efx_dl_device_info *dl_info; - struct list_head dl_node; - struct list_head dl_device_list; - struct efx_dl_callbacks dl_cb; - struct efx_dl_cb_devices dl_cb_dev; -+#endif - }; - - static inline int efx_dev_registered(struct efx_nic *efx) ---- head-2009-11-06.orig/drivers/net/sfc/rx.c 2009-11-06 10:32:03.000000000 +0100 -+++ head-2009-11-06/drivers/net/sfc/rx.c 2009-11-06 10:32:24.000000000 +0100 -@@ -456,8 +456,8 @@ static void efx_rx_packet_lro(struct efx - * an obvious interface to this, so veto packets before LRO */ - veto = EFX_DL_CALLBACK(efx, 
rx_packet, rx_buf->data, rx_buf->len); - if (unlikely(veto)) { -- EFX_TRACE(efx, "LRO RX vetoed by driverlink %s driver\n", -- efx->dl_cb_dev.rx_packet->driver->name); -+ EFX_DL_LOG(efx, "LRO RX vetoed by driverlink %s driver\n", -+ efx->dl_cb_dev.rx_packet->driver->name); - /* Free the buffer now */ - efx_free_rx_buffer(efx, rx_buf); - return; -@@ -579,8 +579,8 @@ void __efx_rx_packet(struct efx_channel - /* Allow callback to veto the packet */ - veto = EFX_DL_CALLBACK(efx, rx_packet, rx_buf->data, rx_buf->len); - if (unlikely(veto)) { -- EFX_LOG(efx, "RX vetoed by driverlink %s driver\n", -- efx->dl_cb_dev.rx_packet->driver->name); -+ EFX_DL_LOG(efx, "RX vetoed by driverlink %s driver\n", -+ efx->dl_cb_dev.rx_packet->driver->name); - /* Free the buffer now */ - efx_free_rx_buffer(efx, rx_buf); - goto done; ---- head-2009-11-06.orig/drivers/net/sfc/tx.c 2009-10-12 13:40:32.000000000 +0200 -+++ head-2009-11-06/drivers/net/sfc/tx.c 2009-10-12 13:41:03.000000000 +0200 -@@ -387,9 +387,9 @@ netdev_tx_t efx_hard_start_xmit(struct s - /* See if driverlink wants to veto the packet. 
*/ - veto = EFX_DL_CALLBACK(efx, tx_packet, skb); - if (unlikely(veto)) { -- EFX_TRACE(efx, "TX queue %d packet vetoed by " -- "driverlink %s driver\n", tx_queue->queue, -- efx->dl_cb_dev.tx_packet->driver->name); -+ EFX_DL_LOG(efx, "TX queue %d packet vetoed by " -+ "driverlink %s driver\n", tx_queue->queue, -+ efx->dl_cb_dev.tx_packet->driver->name); - /* Free the skb; nothing else will do it */ - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; diff --git a/patches.xen/sfc-endianness b/patches.xen/sfc-endianness deleted file mode 100644 index 1f6dc3d..0000000 --- a/patches.xen/sfc-endianness +++ /dev/null @@ -1,18 +0,0 @@ -From: jbeulich@novell.com -Subject: fix building with gcc 4.4 -Patch-mainline: n/a - ---- head-2009-05-19.orig/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h 2008-07-17 16:18:07.000000000 +0200 -+++ head-2009-05-19/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h 2009-05-19 15:44:02.000000000 +0200 -@@ -42,9 +42,9 @@ - - #include - --#ifdef __LITTLE_ENDIAN -+#if defined(__LITTLE_ENDIAN) - #define EFHW_IS_LITTLE_ENDIAN --#elif __BIG_ENDIAN -+#elif defined(__BIG_ENDIAN) - #define EFHW_IS_BIG_ENDIAN - #else - #error Unknown endianness diff --git a/patches.xen/sfc-external-sram b/patches.xen/sfc-external-sram deleted file mode 100644 index 176e5c7..0000000 --- a/patches.xen/sfc-external-sram +++ /dev/null @@ -1,299 +0,0 @@ -From: Kieran Mansley -Subject: enable access to Falcon's external SRAM -References: bnc#489105 -Patch-mainline: n/a - -Include ability to reference external SRAM on Solarflare Falcon NICs to -allow event queues to be accessed by virtualised guests. 
- -Acked-by: jbeulich@novell.com - ---- head-2009-07-28.orig/drivers/net/sfc/falcon.c 2009-07-28 10:05:40.000000000 +0200 -+++ head-2009-07-28/drivers/net/sfc/falcon.c 2009-07-28 10:06:53.000000000 +0200 -@@ -36,6 +36,9 @@ - - /** - * struct falcon_nic_data - Falcon NIC state -+ * @sram_cfg: SRAM configuration value -+ * @tx_dc_base: Base address in SRAM of TX queue descriptor caches -+ * @rx_dc_base: Base address in SRAM of RX queue descriptor caches - * @next_buffer_table: First available buffer table id - * @resources: Resource information for driverlink client - * @pci_dev2: The secondary PCI device if present -@@ -44,6 +47,9 @@ - * @int_error_expire: Time at which error count will be expired - */ - struct falcon_nic_data { -+ int sram_cfg; -+ unsigned tx_dc_base; -+ unsigned rx_dc_base; - #ifndef CONFIG_SFC_DRIVERLINK - unsigned next_buffer_table; - #else -@@ -74,11 +80,11 @@ static int disable_dma_stats; - */ - #define TX_DC_ENTRIES 16 - #define TX_DC_ENTRIES_ORDER 0 --#define TX_DC_BASE 0x130000 -+#define TX_DC_INTERNAL_BASE 0x130000 - - #define RX_DC_ENTRIES 64 - #define RX_DC_ENTRIES_ORDER 2 --#define RX_DC_BASE 0x100000 -+#define RX_DC_INTERNAL_BASE 0x100000 - - static const unsigned int - /* "Large" EEPROM device: Atmel AT25640 or similar -@@ -468,9 +474,17 @@ void falcon_push_buffers(struct efx_tx_q - int falcon_probe_tx(struct efx_tx_queue *tx_queue) - { - struct efx_nic *efx = tx_queue->efx; -- return falcon_alloc_special_buffer(efx, &tx_queue->txd, -- FALCON_TXD_RING_SIZE * -- sizeof(efx_qword_t)); -+ int rc = falcon_alloc_special_buffer(efx, &tx_queue->txd, -+ FALCON_TXD_RING_SIZE * -+ sizeof(efx_qword_t)); -+#ifdef CONFIG_SFC_DRIVERLINK -+ if (rc == 0) { -+ struct falcon_nic_data *nic_data = efx->nic_data; -+ nic_data->resources.txq_min = max(nic_data->resources.txq_min, -+ (unsigned)tx_queue->queue + 1); -+ } -+#endif -+ return rc; - } - - void falcon_init_tx(struct efx_tx_queue *tx_queue) -@@ -610,9 +624,17 @@ void falcon_notify_rx_desc(struct 
efx_rx - int falcon_probe_rx(struct efx_rx_queue *rx_queue) - { - struct efx_nic *efx = rx_queue->efx; -- return falcon_alloc_special_buffer(efx, &rx_queue->rxd, -- FALCON_RXD_RING_SIZE * -- sizeof(efx_qword_t)); -+ int rc = falcon_alloc_special_buffer(efx, &rx_queue->rxd, -+ FALCON_RXD_RING_SIZE * -+ sizeof(efx_qword_t)); -+#ifdef CONFIG_SFC_DRIVERLINK -+ if (rc == 0) { -+ struct falcon_nic_data *nic_data = efx->nic_data; -+ nic_data->resources.rxq_min = max(nic_data->resources.rxq_min, -+ (unsigned)rx_queue->queue + 1); -+ } -+#endif -+ return rc; - } - - void falcon_init_rx(struct efx_rx_queue *rx_queue) -@@ -1120,9 +1142,18 @@ int falcon_probe_eventq(struct efx_chann - { - struct efx_nic *efx = channel->efx; - unsigned int evq_size; -+ int rc; - - evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t); -- return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); -+ rc = falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); -+#ifdef CONFIG_SFC_DRIVERLINK -+ if (rc == 0) { -+ struct falcon_nic_data *nic_data = efx->nic_data; -+ nic_data->resources.evq_int_min = max(nic_data->resources.evq_int_min, -+ (unsigned)channel->channel + 1); -+ } -+#endif -+ return rc; - } - - void falcon_init_eventq(struct efx_channel *channel) -@@ -2618,19 +2649,22 @@ fail5: - */ - static int falcon_reset_sram(struct efx_nic *efx) - { -+ struct falcon_nic_data *nic_data = efx->nic_data; - efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker; -- int count; -+ int count, onchip, sram_cfg_val; - - /* Set the SRAM wake/sleep GPIO appropriately. */ -+ onchip = (nic_data->sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY); - falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); - EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1); -- EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1); -+ EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, onchip); - falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); - - /* Initiate SRAM reset */ -+ sram_cfg_val = onchip ? 
0 : nic_data->sram_cfg; - EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, - SRAM_OOB_BT_INIT_EN, 1, -- SRM_NUM_BANKS_AND_BANK_SIZE, 0); -+ SRM_NUM_BANKS_AND_BANK_SIZE, sram_cfg_val); - falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); - - /* Wait for SRAM reset to complete */ -@@ -2702,8 +2736,10 @@ static void falcon_remove_spi_devices(st - /* Extract non-volatile configuration */ - static int falcon_probe_nvconfig(struct efx_nic *efx) - { -+ struct falcon_nic_data *nic_data = efx->nic_data; - struct falcon_nvconfig *nvconfig; - int board_rev; -+ bool onchip_sram; - int rc; - - nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); -@@ -2716,6 +2752,7 @@ static int falcon_probe_nvconfig(struct - efx->phy_type = PHY_TYPE_NONE; - efx->mdio.prtad = MDIO_PRTAD_NONE; - board_rev = 0; -+ onchip_sram = true; - rc = 0; - } else if (rc) { - goto fail1; -@@ -2726,6 +2763,13 @@ static int falcon_probe_nvconfig(struct - efx->phy_type = v2->port0_phy_type; - efx->mdio.prtad = v2->port0_phy_addr; - board_rev = le16_to_cpu(v2->board_revision); -+#ifdef CONFIG_SFC_DRIVERLINK -+ onchip_sram = EFX_OWORD_FIELD(nvconfig->nic_stat_reg, -+ ONCHIP_SRAM); -+#else -+ /* We have no use for external SRAM */ -+ onchip_sram = true; -+#endif - - if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { - __le32 fl = v3->spi_device_type[EE_SPI_FLASH]; -@@ -2750,6 +2794,21 @@ static int falcon_probe_nvconfig(struct - - efx_set_board_info(efx, board_rev); - -+ /* Read the SRAM configuration. The register is initialised -+ * automatically but might may been reset since boot. 
-+ */ -+ if (onchip_sram) { -+ nic_data->sram_cfg = SRM_NB_BSZ_ONCHIP_ONLY; -+ } else { -+ nic_data->sram_cfg = -+ EFX_OWORD_FIELD(nvconfig->srm_cfg_reg, -+ SRM_NUM_BANKS_AND_BANK_SIZE); -+ WARN_ON(nic_data->sram_cfg == SRM_NB_BSZ_RESERVED); -+ /* Replace invalid setting with the smallest defaults */ -+ if (nic_data->sram_cfg == SRM_NB_BSZ_DEFAULT) -+ nic_data->sram_cfg = SRM_NB_BSZ_1BANKS_2M; -+ } -+ - kfree(nvconfig); - return 0; - -@@ -2765,9 +2824,9 @@ static int falcon_probe_nvconfig(struct - * should live. */ - static int falcon_dimension_resources(struct efx_nic *efx) - { -+ struct falcon_nic_data *nic_data = efx->nic_data; - #ifdef CONFIG_SFC_DRIVERLINK - unsigned internal_dcs_entries; -- struct falcon_nic_data *nic_data = efx->nic_data; - struct efx_dl_falcon_resources *res = &nic_data->resources; - - /* Fill out the driverlink resource list */ -@@ -2800,16 +2859,64 @@ static int falcon_dimension_resources(st - break; - } - -- /* Internal SRAM only for now */ -- res->rxq_lim = internal_dcs_entries / RX_DC_ENTRIES; -- res->txq_lim = internal_dcs_entries / TX_DC_ENTRIES; -- res->buffer_table_lim = 8192; -+ if (nic_data->sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY) { -+ res->rxq_lim = internal_dcs_entries / RX_DC_ENTRIES; -+ res->txq_lim = internal_dcs_entries / TX_DC_ENTRIES; -+ res->buffer_table_lim = 8192; -+ nic_data->tx_dc_base = TX_DC_INTERNAL_BASE; -+ nic_data->rx_dc_base = RX_DC_INTERNAL_BASE; -+ } else { -+ unsigned sram_bytes, vnic_bytes, max_vnics, n_vnics, dcs; -+ -+ /* Determine how much SRAM we have to play with. We have -+ * to fit buffer table and descriptor caches in. 
-+ */ -+ switch (nic_data->sram_cfg) { -+ case SRM_NB_BSZ_1BANKS_2M: -+ default: -+ sram_bytes = 2 * 1024 * 1024; -+ break; -+ case SRM_NB_BSZ_1BANKS_4M: -+ case SRM_NB_BSZ_2BANKS_4M: -+ sram_bytes = 4 * 1024 * 1024; -+ break; -+ case SRM_NB_BSZ_1BANKS_8M: -+ case SRM_NB_BSZ_2BANKS_8M: -+ sram_bytes = 8 * 1024 * 1024; -+ break; -+ case SRM_NB_BSZ_2BANKS_16M: -+ sram_bytes = 16 * 1024 * 1024; -+ break; -+ } -+ /* For each VNIC allow at least 512 buffer table entries -+ * and descriptor cache for an rxq and txq. Buffer table -+ * space for evqs and dmaqs is relatively trivial, so not -+ * considered in this calculation. -+ */ -+ vnic_bytes = 512 * 8 + RX_DC_ENTRIES * 8 + TX_DC_ENTRIES * 8; -+ max_vnics = sram_bytes / vnic_bytes; -+ for (n_vnics = 1; n_vnics < res->evq_timer_min + max_vnics;) -+ n_vnics *= 2; -+ res->rxq_lim = n_vnics; -+ res->txq_lim = n_vnics; -+ -+ dcs = n_vnics * TX_DC_ENTRIES * 8; -+ nic_data->tx_dc_base = sram_bytes - dcs; -+ dcs = n_vnics * RX_DC_ENTRIES * 8; -+ nic_data->rx_dc_base = nic_data->tx_dc_base - dcs; -+ res->buffer_table_lim = nic_data->rx_dc_base / 8; -+ } - - if (FALCON_IS_DUAL_FUNC(efx)) - res->flags |= EFX_DL_FALCON_DUAL_FUNC; - - if (EFX_INT_MODE_USE_MSI(efx)) - res->flags |= EFX_DL_FALCON_USE_MSI; -+#else -+ /* We ignore external SRAM */ -+ EFX_BUG_ON_PARANOID(nic_data->sram_cfg != SRM_NB_BSZ_ONCHIP_ONLY); -+ nic_data->tx_dc_base = TX_DC_INTERNAL_BASE; -+ nic_data->rx_dc_base = RX_DC_INTERNAL_BASE; - #endif - - return 0; -@@ -2998,13 +3105,15 @@ int falcon_probe_nic(struct efx_nic *efx - */ - int falcon_init_nic(struct efx_nic *efx) - { -+ struct falcon_nic_data *nic_data = efx->nic_data; - efx_oword_t temp; - unsigned thresh; - int rc; - -- /* Use on-chip SRAM */ -+ /* Use on-chip SRAM if wanted. 
*/ - falcon_read(efx, &temp, NIC_STAT_REG); -- EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1); -+ EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, -+ nic_data->sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY); - falcon_write(efx, &temp, NIC_STAT_REG); - - /* Set the source of the GMAC clock */ -@@ -3023,9 +3132,9 @@ int falcon_init_nic(struct efx_nic *efx) - return rc; - - /* Set positions of descriptor caches in SRAM. */ -- EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8); -+ EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, nic_data->tx_dc_base / 8); - falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER); -- EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8); -+ EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, nic_data->rx_dc_base / 8); - falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER); - - /* Set TX descriptor cache size. */ diff --git a/patches.xen/sfc-resource-driver b/patches.xen/sfc-resource-driver deleted file mode 100644 index ae5345e..0000000 --- a/patches.xen/sfc-resource-driver +++ /dev/null @@ -1,15053 +0,0 @@ -From: David Riddoch -# replaces http://xenbits.xensource.com/linux-2.6.18-xen.hg c/s 421: -# HG changeset patch -# User Keir Fraser -# Date 1203330569 0 -# Node ID e4dd072db2595c420bb21d9e835416f4fd543526 -# Parent fc90e9b2c12b316b5460ece28f013e6de881af1a -Subject: Solarflare: Resource driver. -References: FATE#303479 -Patch-mainline: n/a -Acked-by: jbeulich@novell.com - ---- head-2009-04-21.orig/drivers/net/sfc/Kconfig 2009-04-21 11:01:52.000000000 +0200 -+++ head-2009-04-21/drivers/net/sfc/Kconfig 2009-04-21 11:02:22.000000000 +0200 -@@ -11,6 +11,13 @@ config SFC - - To compile this driver as a module, choose M here. The module - will be called sfc. -+ -+config SFC_RESOURCE -+ depends on SFC && X86 -+ tristate "Solarflare Solarstorm SFC4000 resource driver" -+ help -+ This module provides the SFC resource manager driver. 
-+ - config SFC_MTD - bool "Solarflare Solarstorm SFC4000 flash MTD support" - depends on SFC && MTD && !(SFC=y && MTD=m) ---- head-2009-04-21.orig/drivers/net/sfc/Makefile 2009-04-21 11:01:52.000000000 +0200 -+++ head-2009-04-21/drivers/net/sfc/Makefile 2009-02-06 12:42:18.000000000 +0100 -@@ -5,3 +5,5 @@ sfc-y += efx.o falcon.o tx.o rx.o falc - sfc-$(CONFIG_SFC_MTD) += mtd.o - - obj-$(CONFIG_SFC) += sfc.o -+ -+obj-$(CONFIG_SFC_RESOURCE) += sfc_resource/ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/Makefile 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,14 @@ -+obj-$(CONFIG_SFC_RESOURCE) := sfc_resource.o -+ -+EXTRA_CFLAGS += -D__CI_HARDWARE_CONFIG_FALCON__ -+EXTRA_CFLAGS += -D__ci_driver__ -+EXTRA_CFLAGS += -Werror -+EXTRA_CFLAGS += -Idrivers/net/sfc -Idrivers/net/sfc/sfc_resource -+ -+sfc_resource-objs := resource_driver.o iopage.o efx_vi_shm.o \ -+ driverlink_new.o kernel_proc.o kfifo.o \ -+ nic.o eventq.o falcon.o falcon_hash.o \ -+ assert_valid.o buddy.o buffer_table.o filter_resource.o \ -+ iobufset_resource.o resource_manager.o resources.o \ -+ vi_resource_alloc.o vi_resource_event.o vi_resource_flush.o \ -+ vi_resource_manager.o driver_object.o kernel_compat.o ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/assert_valid.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,92 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains functions to assert validness of resources and -+ * resource manager in DEBUG build of the resource driver. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+ -+#ifndef NDEBUG -+#include -+#include -+#include -+ -+void -+efrm_resource_manager_assert_valid(struct efrm_resource_manager *rm, -+ const char *file, int line) -+{ -+ _EFRM_ASSERT(rm, file, line); -+ _EFRM_ASSERT(rm->rm_name, file, line); -+ _EFRM_ASSERT(rm->rm_type < EFRM_RESOURCE_NUM, file, line); -+ _EFRM_ASSERT(rm->rm_dtor, file, line); -+} -+EXPORT_SYMBOL(efrm_resource_manager_assert_valid); -+ -+/* -+ * \param rs resource to validate -+ * \param ref_count_is_zero One of 3 values -+ * > 0 - check ref count is zero -+ * = 0 - check ref count is non-zero -+ * < 0 - ref count could be any value -+ */ -+void -+efrm_resource_assert_valid(struct efrm_resource *rs, int ref_count_is_zero, -+ const char *file, int line) -+{ -+ struct efrm_resource_manager *rm; -+ -+ _EFRM_ASSERT(rs, file, 
line); -+ -+ if (ref_count_is_zero >= 0) { -+ if (!(ref_count_is_zero || rs->rs_ref_count > 0) -+ || !(!ref_count_is_zero || rs->rs_ref_count == 0)) -+ EFRM_WARN("%s: check %szero ref=%d " EFRM_RESOURCE_FMT, -+ __func__, -+ ref_count_is_zero == 0 ? "non-" : "", -+ rs->rs_ref_count, -+ EFRM_RESOURCE_PRI_ARG(rs->rs_handle)); -+ -+ _EFRM_ASSERT(!(ref_count_is_zero == 0) || -+ rs->rs_ref_count != 0, file, line); -+ _EFRM_ASSERT(!(ref_count_is_zero > 0) || -+ rs->rs_ref_count == 0, file, line); -+ } -+ -+ rm = efrm_rm_table[EFRM_RESOURCE_TYPE(rs->rs_handle)]; -+ efrm_resource_manager_assert_valid(rm, file, line); -+} -+EXPORT_SYMBOL(efrm_resource_assert_valid); -+ -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/buddy.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,220 @@ -+ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains implementation of a buddy allocator. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include /* get uintXX types on win32 */ -+#include -+#include -+#include -+ -+#if 1 -+#define DEBUG_ALLOC(x) -+#else -+#define DEBUG_ALLOC(x) x -+ -+static inline void efrm_buddy_dump(struct efrm_buddy_allocator *b) -+{ -+ unsigned o; -+ -+ EFRM_NOTICE("%s: dump allocator with order %u", -+ __func__, b->order); -+ for (o = 0; o <= b->order; o++) { -+ struct list_head *l = &b->free_lists[o]; -+ while (l->next != &b->free_lists[o]) { -+ l = l->next; -+ EFRM_NOTICE("%s: order %x: %zx", __func__, o, -+ l - b->links); -+ } -+ } -+} -+#endif -+ -+/* -+ * The purpose of the following inline functions is to give the -+ * understandable names to the simple actions. 
-+ */ -+static inline void -+efrm_buddy_free_list_add(struct efrm_buddy_allocator *b, -+ unsigned order, unsigned addr) -+{ -+ list_add(&b->links[addr], &b->free_lists[order]); -+ b->orders[addr] = (uint8_t) order; -+} -+static inline void -+efrm_buddy_free_list_del(struct efrm_buddy_allocator *b, unsigned addr) -+{ -+ list_del(&b->links[addr]); -+ b->links[addr].next = NULL; -+} -+static inline int -+efrm_buddy_free_list_empty(struct efrm_buddy_allocator *b, unsigned order) -+{ -+ return list_empty(&b->free_lists[order]); -+} -+static inline unsigned -+efrm_buddy_free_list_pop(struct efrm_buddy_allocator *b, unsigned order) -+{ -+ struct list_head *l = list_pop(&b->free_lists[order]); -+ l->next = NULL; -+ return (unsigned)(l - b->links); -+} -+static inline int -+efrm_buddy_addr_in_free_list(struct efrm_buddy_allocator *b, unsigned addr) -+{ -+ return b->links[addr].next != NULL; -+} -+static inline unsigned -+efrm_buddy_free_list_first(struct efrm_buddy_allocator *b, unsigned order) -+{ -+ return (unsigned)(b->free_lists[order].next - b->links); -+} -+ -+int efrm_buddy_ctor(struct efrm_buddy_allocator *b, unsigned order) -+{ -+ unsigned o; -+ unsigned size = 1 << order; -+ -+ DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __func__, order)); -+ EFRM_ASSERT(b); -+ EFRM_ASSERT(order <= sizeof(unsigned) * 8 - 1); -+ -+ b->order = order; -+ b->free_lists = vmalloc((order + 1) * sizeof(struct list_head)); -+ if (b->free_lists == NULL) -+ goto fail1; -+ -+ b->links = vmalloc(size * sizeof(struct list_head)); -+ if (b->links == NULL) -+ goto fail2; -+ -+ b->orders = vmalloc(size); -+ if (b->orders == NULL) -+ goto fail3; -+ -+ memset(b->links, 0, size * sizeof(struct list_head)); -+ -+ for (o = 0; o <= b->order; ++o) -+ INIT_LIST_HEAD(b->free_lists + o); -+ -+ efrm_buddy_free_list_add(b, b->order, 0); -+ -+ return 0; -+ -+fail3: -+ vfree(b->links); -+fail2: -+ vfree(b->free_lists); -+fail1: -+ return -ENOMEM; -+} -+ -+void efrm_buddy_dtor(struct efrm_buddy_allocator *b) -+{ -+ 
EFRM_ASSERT(b); -+ -+ vfree(b->free_lists); -+ vfree(b->links); -+ vfree(b->orders); -+} -+ -+int efrm_buddy_alloc(struct efrm_buddy_allocator *b, unsigned order) -+{ -+ unsigned smallest; -+ unsigned addr; -+ -+ DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __func__, order)); -+ EFRM_ASSERT(b); -+ -+ /* Find smallest chunk that is big enough. ?? Can optimise this by -+ ** keeping array of pointers to smallest chunk for each order. -+ */ -+ smallest = order; -+ while (smallest <= b->order && -+ efrm_buddy_free_list_empty(b, smallest)) -+ ++smallest; -+ -+ if (smallest > b->order) { -+ DEBUG_ALLOC(EFRM_NOTICE -+ ("buddy - alloc order %d failed - max order %d", -+ order, b->order);); -+ return -ENOMEM; -+ } -+ -+ /* Split blocks until we get one of the correct size. */ -+ addr = efrm_buddy_free_list_pop(b, smallest); -+ -+ DEBUG_ALLOC(EFRM_NOTICE("buddy - alloc %x order %d cut from order %d", -+ addr, order, smallest);); -+ while (smallest-- > order) -+ efrm_buddy_free_list_add(b, smallest, addr + (1 << smallest)); -+ -+ EFRM_DO_DEBUG(b->orders[addr] = (uint8_t) order); -+ -+ EFRM_ASSERT(addr < 1u << b->order); -+ return addr; -+} -+ -+void -+efrm_buddy_free(struct efrm_buddy_allocator *b, unsigned addr, -+ unsigned order) -+{ -+ unsigned buddy_addr; -+ -+ DEBUG_ALLOC(EFRM_NOTICE("%s(%u, %u)", __func__, addr, order)); -+ EFRM_ASSERT(b); -+ EFRM_ASSERT(order <= b->order); -+ EFRM_ASSERT((unsigned long)addr + ((unsigned long)1 << order) <= -+ (unsigned long)1 << b->order); -+ EFRM_ASSERT(!efrm_buddy_addr_in_free_list(b, addr)); -+ EFRM_ASSERT(b->orders[addr] == order); -+ -+ /* merge free blocks */ -+ while (order < b->order) { -+ buddy_addr = addr ^ (1 << order); -+ if (!efrm_buddy_addr_in_free_list(b, buddy_addr) || -+ b->orders[buddy_addr] != order) -+ break; -+ efrm_buddy_free_list_del(b, buddy_addr); -+ if (buddy_addr < addr) -+ addr = buddy_addr; -+ ++order; -+ } -+ -+ DEBUG_ALLOC(EFRM_NOTICE -+ ("buddy - free %x merged into order %d", addr, order);); -+ 
efrm_buddy_free_list_add(b, order, addr); -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/buffer_table.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,209 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains abstraction of the buffer table on the NIC. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+/* -+** Might be worth keeping a bitmap of which entries are clear. Then we -+** wouldn't need to clear them all again when we free an allocation. -+*/ -+ -+#include -+#include -+#include -+#include -+#include -+ -+/*! Comment? */ -+struct efrm_buffer_table { -+ spinlock_t lock; -+ struct efrm_buddy_allocator buddy; -+}; -+ -+/* Efab buffer state. 
*/ -+static struct efrm_buffer_table efrm_buffers; -+ -+int efrm_buffer_table_ctor(unsigned low, unsigned high) -+{ -+ int log2_n_entries, rc, i; -+ -+ EFRM_ASSERT(high > 0); -+ EFRM_ASSERT(low < high); -+ -+ EFRM_TRACE("%s: low=%u high=%u", __func__, low, high); -+ EFRM_NOTICE("%s: low=%u high=%u", __func__, low, high); -+ -+ log2_n_entries = fls(high - 1); -+ -+ rc = efrm_buddy_ctor(&efrm_buffers.buddy, log2_n_entries); -+ if (rc < 0) { -+ EFRM_ERR("efrm_buffer_table_ctor: efrm_buddy_ctor(%d) " -+ "failed (%d)", log2_n_entries, rc); -+ return rc; -+ } -+ for (i = 0; i < (1 << log2_n_entries); ++i) { -+ rc = efrm_buddy_alloc(&efrm_buffers.buddy, 0); -+ EFRM_ASSERT(rc >= 0); -+ EFRM_ASSERT(rc < (1 << log2_n_entries)); -+ } -+ for (i = low; i < (int) high; ++i) -+ efrm_buddy_free(&efrm_buffers.buddy, i, 0); -+ -+ spin_lock_init(&efrm_buffers.lock); -+ -+ EFRM_TRACE("%s: done", __func__); -+ -+ return 0; -+} -+ -+void efrm_buffer_table_dtor(void) -+{ -+ /* ?? debug check that all allocations have been freed? */ -+ -+ spin_lock_destroy(&efrm_buffers.lock); -+ efrm_buddy_dtor(&efrm_buffers.buddy); -+ -+ EFRM_TRACE("%s: done", __func__); -+} -+ -+/**********************************************************************/ -+ -+int -+efrm_buffer_table_alloc(unsigned order, -+ struct efhw_buffer_table_allocation *a) -+{ -+ irq_flags_t lock_flags; -+ int rc; -+ -+ EFRM_ASSERT(&efrm_buffers.buddy); -+ EFRM_ASSERT(a); -+ -+ /* Round up to multiple of two, as the buffer clear logic works in -+ * pairs when not in "full" mode. 
*/ -+ order = max_t(unsigned, order, 1); -+ -+ spin_lock_irqsave(&efrm_buffers.lock, lock_flags); -+ rc = efrm_buddy_alloc(&efrm_buffers.buddy, order); -+ spin_unlock_irqrestore(&efrm_buffers.lock, lock_flags); -+ -+ if (rc < 0) { -+ EFRM_ERR("efrm_buffer_table_alloc: failed (n=%ld) rc %d", -+ 1ul << order, rc); -+ return rc; -+ } -+ -+ EFRM_TRACE("efrm_buffer_table_alloc: base=%d n=%ld", -+ rc, 1ul << order); -+ a->order = order; -+ a->base = (unsigned)rc; -+ return 0; -+} -+ -+void efrm_buffer_table_free(struct efhw_buffer_table_allocation *a) -+{ -+ irq_flags_t lock_flags; -+ struct efhw_nic *nic; -+ int nic_i; -+ -+ EFRM_ASSERT(&efrm_buffers.buddy); -+ EFRM_ASSERT(a); -+ EFRM_ASSERT(a->base != -1); -+ EFRM_ASSERT((unsigned long)a->base + (1ul << a->order) <= -+ efrm_buddy_size(&efrm_buffers.buddy)); -+ -+ EFRM_TRACE("efrm_buffer_table_free: base=%d n=%ld", -+ a->base, (1ul << a->order)); -+ -+ EFRM_FOR_EACH_NIC(nic_i, nic) -+ efhw_nic_buffer_table_clear(nic, a->base, 1ul << a->order); -+ -+ spin_lock_irqsave(&efrm_buffers.lock, lock_flags); -+ efrm_buddy_free(&efrm_buffers.buddy, a->base, a->order); -+ spin_unlock_irqrestore(&efrm_buffers.lock, lock_flags); -+ -+ EFRM_DO_DEBUG(a->base = a->order = -1); -+} -+ -+/**********************************************************************/ -+ -+void -+efrm_buffer_table_set(struct efhw_buffer_table_allocation *a, -+ struct efhw_nic *nic, -+ unsigned i, dma_addr_t dma_addr, int owner) -+{ -+ EFRM_ASSERT(a); -+ EFRM_ASSERT(i < (unsigned)1 << a->order); -+ -+ efhw_nic_buffer_table_set(nic, dma_addr, EFHW_NIC_PAGE_SIZE, -+ 0, owner, a->base + i); -+} -+ -+ -+int efrm_buffer_table_size(void) -+{ -+ return efrm_buddy_size(&efrm_buffers.buddy); -+} -+ -+/**********************************************************************/ -+ -+int -+efrm_page_register(struct efhw_nic *nic, dma_addr_t dma_addr, int owner, -+ efhw_buffer_addr_t *buf_addr_out) -+{ -+ struct efhw_buffer_table_allocation alloc; -+ int rc; -+ -+ rc = 
efrm_buffer_table_alloc(0, &alloc); -+ if (rc == 0) { -+ efrm_buffer_table_set(&alloc, nic, 0, dma_addr, owner); -+ efrm_buffer_table_commit(); -+ *buf_addr_out = EFHW_BUFFER_ADDR(alloc.base, 0); -+ } -+ return rc; -+} -+EXPORT_SYMBOL(efrm_page_register); -+ -+void efrm_page_unregister(efhw_buffer_addr_t buf_addr) -+{ -+ struct efhw_buffer_table_allocation alloc; -+ -+ alloc.order = 0; -+ alloc.base = EFHW_BUFFER_PAGE(buf_addr); -+ efrm_buffer_table_free(&alloc); -+} -+EXPORT_SYMBOL(efrm_page_unregister); -+ -+void efrm_buffer_table_commit(void) -+{ -+ struct efhw_nic *nic; -+ int nic_i; -+ -+ EFRM_FOR_EACH_NIC(nic_i, nic) -+ efhw_nic_buffer_table_commit(nic); -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,188 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides EtherFabric NIC hardware interface. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_DRIVER_EFAB_HARDWARE_H__ -+#define __CI_DRIVER_EFAB_HARDWARE_H__ -+ -+#include "ci/driver/efab/hardware/workarounds.h" -+#include -+ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Common EtherFabric definitions -+ * -+ *---------------------------------------------------------------------------*/ -+ -+#include -+#include -+#include -+ -+/*---------------------------------------------------------------------------- -+ * -+ * EtherFabric varients -+ * -+ *---------------------------------------------------------------------------*/ -+ -+#include -+ -+/*---------------------------------------------------------------------------- -+ * -+ * EtherFabric Portable Hardware Layer defines -+ * -+ *---------------------------------------------------------------------------*/ -+ -+ /*-------------- Initialisation ------------ */ -+#define efhw_nic_close_hardware(nic) \ -+ ((nic)->efhw_func->close_hardware(nic)) -+ -+#define efhw_nic_init_hardware(nic, ev_handlers, mac_addr, non_irq_evq) \ -+ ((nic)->efhw_func->init_hardware((nic), (ev_handlers), (mac_addr), \ -+ (non_irq_evq))) -+ -+/*-------------- Interrupt support ------------ */ -+/** Handle interrupt. Return 0 if not handled, 1 if handled. 
*/ -+#define efhw_nic_interrupt(nic) \ -+ ((nic)->efhw_func->interrupt(nic)) -+ -+#define efhw_nic_interrupt_enable(nic) \ -+ ((nic)->efhw_func->interrupt_enable(nic)) -+ -+#define efhw_nic_interrupt_disable(nic) \ -+ ((nic)->efhw_func->interrupt_disable(nic)) -+ -+#define efhw_nic_set_interrupt_moderation(nic, evq, val) \ -+ ((nic)->efhw_func->set_interrupt_moderation(nic, evq, val)) -+ -+/*-------------- Event support ------------ */ -+ -+#define efhw_nic_event_queue_enable(nic, evq, size, q_base, buf_base, \ -+ interrupting) \ -+ ((nic)->efhw_func->event_queue_enable((nic), (evq), (size), (q_base), \ -+ (buf_base), (interrupting))) -+ -+#define efhw_nic_event_queue_disable(nic, evq, timer_only) \ -+ ((nic)->efhw_func->event_queue_disable(nic, evq, timer_only)) -+ -+#define efhw_nic_wakeup_request(nic, q_base, index, evq) \ -+ ((nic)->efhw_func->wakeup_request(nic, q_base, index, evq)) -+ -+#define efhw_nic_sw_event(nic, data, ev) \ -+ ((nic)->efhw_func->sw_event(nic, data, ev)) -+ -+/*-------------- Filter support ------------ */ -+#define efhw_nic_ipfilter_set(nic, type, index, dmaq, \ -+ saddr, sport, daddr, dport) \ -+ ((nic)->efhw_func->ipfilter_set(nic, type, index, dmaq, \ -+ saddr, sport, daddr, dport)) -+ -+#define efhw_nic_ipfilter_clear(nic, index) \ -+ ((nic)->efhw_func->ipfilter_clear(nic, index)) -+ -+/*-------------- DMA support ------------ */ -+#define efhw_nic_dmaq_tx_q_init(nic, dmaq, evq, owner, tag, \ -+ dmaq_size, index, flags) \ -+ ((nic)->efhw_func->dmaq_tx_q_init(nic, dmaq, evq, owner, tag, \ -+ dmaq_size, index, flags)) -+ -+#define efhw_nic_dmaq_rx_q_init(nic, dmaq, evq, owner, tag, \ -+ dmaq_size, index, flags) \ -+ ((nic)->efhw_func->dmaq_rx_q_init(nic, dmaq, evq, owner, tag, \ -+ dmaq_size, index, flags)) -+ -+#define efhw_nic_dmaq_tx_q_disable(nic, dmaq) \ -+ ((nic)->efhw_func->dmaq_tx_q_disable(nic, dmaq)) -+ -+#define efhw_nic_dmaq_rx_q_disable(nic, dmaq) \ -+ ((nic)->efhw_func->dmaq_rx_q_disable(nic, dmaq)) -+ -+#define 
efhw_nic_flush_tx_dma_channel(nic, dmaq) \ -+ ((nic)->efhw_func->flush_tx_dma_channel(nic, dmaq)) -+ -+#define efhw_nic_flush_rx_dma_channel(nic, dmaq) \ -+ ((nic)->efhw_func->flush_rx_dma_channel(nic, dmaq)) -+ -+/*-------------- MAC Low level interface ---- */ -+#define efhw_gmac_get_mac_addr(nic) \ -+ ((nic)->gmac->get_mac_addr((nic)->gmac)) -+ -+/*-------------- Buffer table -------------- */ -+#define efhw_nic_buffer_table_set(nic, addr, bufsz, region, \ -+ own_id, buf_id) \ -+ ((nic)->efhw_func->buffer_table_set(nic, addr, bufsz, region, \ -+ own_id, buf_id)) -+ -+#define efhw_nic_buffer_table_set_n(nic, buf_id, addr, bufsz, \ -+ region, n_pages, own_id) \ -+ ((nic)->efhw_func->buffer_table_set_n(nic, buf_id, addr, bufsz, \ -+ region, n_pages, own_id)) -+ -+#define efhw_nic_buffer_table_clear(nic, id, num) \ -+ ((nic)->efhw_func->buffer_table_clear(nic, id, num)) -+ -+#define efhw_nic_buffer_table_commit(nic) \ -+ ((nic)->efhw_func->buffer_table_commit(nic)) -+ -+/*-------------- New filter API ------------ */ -+#define efhw_nic_filter_set(nic, spec, index_out) \ -+ ((nic)->efhw_func->filter_set(nic, spec, index_out)) -+ -+#define efhw_nic_filter_clear(nic, type, index_out) \ -+ ((nic)->efhw_func->filter_clear(nic, type, index_out)) -+ -+ -+/* --- DMA --- */ -+#define EFHW_DMA_ADDRMASK (0xffffffffffffffffULL) -+ -+/* --- Buffers --- */ -+#define EFHW_BUFFER_ADDR FALCON_BUFFER_4K_ADDR -+#define EFHW_BUFFER_PAGE FALCON_BUFFER_4K_PAGE -+#define EFHW_BUFFER_OFF FALCON_BUFFER_4K_OFF -+ -+/* --- Filters --- */ -+#define EFHW_IP_FILTER_NUM FALCON_FILTER_TBL_NUM -+ -+#define EFHW_MAX_PAGE_SIZE FALCON_MAX_PAGE_SIZE -+ -+#if PAGE_SIZE <= EFHW_MAX_PAGE_SIZE -+#define EFHW_NIC_PAGE_SIZE PAGE_SIZE -+#else -+#define EFHW_NIC_PAGE_SIZE EFHW_MAX_PAGE_SIZE -+#endif -+#define EFHW_NIC_PAGE_MASK (~(EFHW_NIC_PAGE_SIZE-1)) -+ -+#endif /* __CI_DRIVER_EFAB_HARDWARE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ 
head-2009-04-21/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/common.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,68 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides EtherFabric NIC hardware interface common -+ * definitions. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_DRIVER_EFAB_HARDWARE_COMMON_H__ -+#define __CI_DRIVER_EFAB_HARDWARE_COMMON_H__ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * EtherFabric constants -+ * -+ *---------------------------------------------------------------------------*/ -+ -+#define EFHW_1K 0x00000400u -+#define EFHW_2K 0x00000800u -+#define EFHW_4K 0x00001000u -+#define EFHW_8K 0x00002000u -+#define EFHW_16K 0x00004000u -+#define EFHW_32K 0x00008000u -+#define EFHW_64K 0x00010000u -+#define EFHW_128K 0x00020000u -+#define EFHW_256K 0x00040000u -+#define EFHW_512K 0x00080000u -+#define EFHW_1M 0x00100000u -+#define EFHW_2M 0x00200000u -+#define EFHW_4M 0x00400000u -+#define EFHW_8M 0x00800000u -+#define EFHW_16M 0x01000000u -+#define EFHW_32M 0x02000000u -+#define EFHW_48M 0x03000000u -+#define EFHW_64M 0x04000000u -+#define EFHW_128M 0x08000000u -+#define EFHW_256M 0x10000000u -+#define EFHW_512M 0x20000000u -+#define EFHW_1G 0x40000000u -+#define EFHW_2G 0x80000000u -+#define EFHW_4G 0x100000000ULL -+#define EFHW_8G 0x200000000ULL -+ -+#endif /* __CI_DRIVER_EFAB_HARDWARE_COMMON_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,422 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) specific -+ * definitions. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_DRIVER_EFAB_HARDWARE_FALCON_H__ -+#define __CI_DRIVER_EFAB_HARDWARE_FALCON_H__ -+ -+/*---------------------------------------------------------------------------- -+ * Compile options -+ *---------------------------------------------------------------------------*/ -+ -+/* Falcon has an 8K maximum page size. 
*/ -+#define FALCON_MAX_PAGE_SIZE EFHW_8K -+ -+/* include the register definitions */ -+#include -+#include -+#include -+#include -+ -+#define FALCON_DMA_TX_DESC_BYTES 8 -+#define FALCON_DMA_RX_PHYS_DESC_BYTES 8 -+#define FALCON_DMA_RX_BUF_DESC_BYTES 4 -+ -+ -+/* ---- efhw_event_t helpers --- */ -+ -+#ifndef EFHW_IS_LITTLE_ENDIAN -+#error This needs lots of cpu_to_le64s() in -+#endif -+ -+/*!\ TODO look at whether there is an efficiency gain to be had by -+ treating the event codes to 32bit masks as is done for EF1 -+ -+ These masks apply to the full 64 bits of the event to extract the -+ event code - followed by the common event codes to expect -+ */ -+#define __FALCON_OPEN_MASK(WIDTH) ((((uint64_t)1) << (WIDTH)) - 1) -+#define FALCON_EVENT_CODE_MASK \ -+ (__FALCON_OPEN_MASK(EV_CODE_WIDTH) << EV_CODE_LBN) -+#define FALCON_EVENT_EV_Q_ID_MASK \ -+ (__FALCON_OPEN_MASK(DRIVER_EV_EVQ_ID_WIDTH) << DRIVER_EV_EVQ_ID_LBN) -+#define FALCON_EVENT_TX_FLUSH_Q_ID_MASK \ -+ (__FALCON_OPEN_MASK(DRIVER_EV_TX_DESCQ_ID_WIDTH) << \ -+ DRIVER_EV_TX_DESCQ_ID_LBN) -+#define FALCON_EVENT_RX_FLUSH_Q_ID_MASK \ -+ (__FALCON_OPEN_MASK(DRIVER_EV_RX_DESCQ_ID_WIDTH) << \ -+ DRIVER_EV_RX_DESCQ_ID_LBN) -+#define FALCON_EVENT_DRV_SUBCODE_MASK \ -+ (__FALCON_OPEN_MASK(DRIVER_EV_SUB_CODE_WIDTH) << \ -+ DRIVER_EV_SUB_CODE_LBN) -+ -+#define FALCON_EVENT_FMT "[ev:%x:%08x:%08x]" -+#define FALCON_EVENT_PRI_ARG(e) \ -+ ((unsigned)(((e).u64 & FALCON_EVENT_CODE_MASK) >> EV_CODE_LBN)), \ -+ ((unsigned)((e).u64 >> 32)), ((unsigned)((e).u64 & 0xFFFFFFFF)) -+ -+#define FALCON_EVENT_CODE(evp) ((evp)->u64 & FALCON_EVENT_CODE_MASK) -+#define FALCON_EVENT_WAKE_EVQ_ID(evp) \ -+ (((evp)->u64 & FALCON_EVENT_EV_Q_ID_MASK) >> DRIVER_EV_EVQ_ID_LBN) -+#define FALCON_EVENT_TX_FLUSH_Q_ID(evp) \ -+ (((evp)->u64 & FALCON_EVENT_TX_FLUSH_Q_ID_MASK) >> \ -+ DRIVER_EV_TX_DESCQ_ID_LBN) -+#define FALCON_EVENT_RX_FLUSH_Q_ID(evp) \ -+ (((evp)->u64 & FALCON_EVENT_RX_FLUSH_Q_ID_MASK) >> \ -+ DRIVER_EV_RX_DESCQ_ID_LBN) -+#define 
FALCON_EVENT_DRIVER_SUBCODE(evp) \ -+ (((evp)->u64 & FALCON_EVENT_DRV_SUBCODE_MASK) >> \ -+ DRIVER_EV_SUB_CODE_LBN) -+ -+#define FALCON_EVENT_CODE_CHAR ((uint64_t)DRIVER_EV_DECODE << EV_CODE_LBN) -+#define FALCON_EVENT_CODE_SW ((uint64_t)DRV_GEN_EV_DECODE << EV_CODE_LBN) -+ -+ -+/* so this is the size in bytes of an awful lot of things */ -+#define FALCON_REGISTER128 (16) -+ -+/* we define some unique dummy values as a debug aid */ -+#ifdef _WIN32 -+#define FALCON_ATOMIC_BASE 0xdeadbeef00000000ui64 -+#else -+#define FALCON_ATOMIC_BASE 0xdeadbeef00000000ULL -+#endif -+#define FALCON_ATOMIC_UPD_REG (FALCON_ATOMIC_BASE | 0x1) -+#define FALCON_ATOMIC_PTR_TBL_REG (FALCON_ATOMIC_BASE | 0x2) -+#define FALCON_ATOMIC_SRPM_UDP_EVQ_REG (FALCON_ATOMIC_BASE | 0x3) -+#define FALCON_ATOMIC_RX_FLUSH_DESCQ (FALCON_ATOMIC_BASE | 0x4) -+#define FALCON_ATOMIC_TX_FLUSH_DESCQ (FALCON_ATOMIC_BASE | 0x5) -+#define FALCON_ATOMIC_INT_EN_REG (FALCON_ATOMIC_BASE | 0x6) -+#define FALCON_ATOMIC_TIMER_CMD_REG (FALCON_ATOMIC_BASE | 0x7) -+#define FALCON_ATOMIC_PACE_REG (FALCON_ATOMIC_BASE | 0x8) -+#define FALCON_ATOMIC_INT_ACK_REG (FALCON_ATOMIC_BASE | 0x9) -+/* XXX It crashed with odd value in FALCON_ATOMIC_INT_ADR_REG */ -+#define FALCON_ATOMIC_INT_ADR_REG (FALCON_ATOMIC_BASE | 0xa) -+ -+/*---------------------------------------------------------------------------- -+ * -+ * PCI control blocks for Falcon - -+ * (P) primary is for NET -+ * (S) secondary is for CHAR -+ * -+ *---------------------------------------------------------------------------*/ -+ -+#define FALCON_P_CTR_AP_BAR 2 -+#define FALCON_S_CTR_AP_BAR 0 -+#define FALCON_S_DEVID 0x6703 -+ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Falcon constants -+ * -+ *---------------------------------------------------------------------------*/ -+ -+/* Note: the following constants have moved to values in struct efhw_nic: -+ * FALCON_EVQ_TBL_NUM -> nic->num_evqs -+ * FALCON_DMAQ_NUM -> 
nic->num_dmaqs -+ * FALCON_TIMERS_NUM -> nic->num_times -+ * These replacement constants are used as sanity checks in assertions in -+ * certain functions that don't have access to struct efhw_nic. -+ */ -+#define FALCON_DMAQ_NUM_SANITY (EFHW_4K) -+#define FALCON_EVQ_TBL_NUM_SANITY (EFHW_4K) -+#define FALCON_TIMERS_NUM_SANITY (EFHW_4K) -+ -+/* This value is an upper limit on the total number of filter table -+ * entries. The actual size of filter table is determined at runtime, as -+ * it can vary. -+ */ -+#define FALCON_FILTER_TBL_NUM (EFHW_8K) -+ -+/* max number of buffers which can be pushed before commiting */ -+#define FALCON_BUFFER_UPD_MAX (128) -+ -+/* We can tell falcon to write its RX buffers in 32 byte quantums, -+ and since we pad packets 2 bytes to the right we can't use -+ a full page (not unless we use jumbo mode for all queues) -+ -+ NOTE: tests/nic/dma.c assumes that the value here is the real NIC -+ value, so we explicitly round it down to the nearest 32 bytes */ -+ -+/* #define FALCON_RX_USR_BUF_SIZE round_down(4096-2,32) */ -+#define FALCON_RX_USR_BUF_SIZE 4064 -+ -+#define FALCON_EVQ_RPTR_REG_P0 0x400 -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Falcon requires user-space descriptor pushes to be: -+ * dword[0-2]; wiob(); dword[3] -+ * -+ * Driver register access must be locked against other threads from -+ * the same driver but can be in any order: i.e dword[0-3]; wiob() -+ * -+ * The following helpers ensure that valid dword orderings are exercised -+ * -+ *---------------------------------------------------------------------------*/ -+ -+/* A union to allow writting 64bit values as 32bit values, without -+ * hitting the compilers aliasing rules. 
We hope the compiler optimises -+ * away the copy's anyway */ -+union __u64to32 { -+ uint64_t u64; -+ struct { -+#ifdef EFHW_IS_LITTLE_ENDIAN -+ uint32_t a; -+ uint32_t b; -+#else -+ uint32_t b; -+ uint32_t a; -+#endif -+ } s; -+}; -+ -+static inline void -+falcon_write_ddd_d(volatile char __iomem *kva, -+ uint32_t d0, uint32_t d1, uint32_t d2, uint32_t d3) -+{ -+ writel(d0, kva + 0); -+ writel(d1, kva + 4); -+ writel(d2, kva + 8); -+ mmiowb(); -+ writel(d3, kva + 12); -+} -+ -+static inline void falcon_write_q(volatile char __iomem *kva, uint64_t q) -+{ -+ union __u64to32 u; -+ u.u64 = q; -+ -+ writel(u.s.a, kva); -+ mmiowb(); -+ writel(u.s.b, kva + 4); -+} -+ -+static inline void falcon_read_q(volatile char __iomem *addr, uint64_t *q0) -+{ -+ /* It is essential that we read dword0 first, so that -+ * the shadow register is updated with the latest value -+ * and we get a self consistent value. -+ */ -+ union __u64to32 u; -+ u.s.a = readl(addr); -+ rmb(); -+ u.s.b = readl(addr + 4); -+ -+ *q0 = u.u64; -+} -+ -+static inline void -+falcon_write_qq(volatile char __iomem *kva, uint64_t q0, uint64_t q1) -+{ -+ writeq(q0, kva + 0); -+ falcon_write_q(kva + 8, q1); -+} -+ -+static inline void -+falcon_read_qq(volatile char __iomem *addr, uint64_t *q0, uint64_t *q1) -+{ -+ falcon_read_q(addr, q0); -+ *q1 = readq(addr + 8); -+} -+ -+ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Buffer virtual addresses (4K buffers) -+ * -+ *---------------------------------------------------------------------------*/ -+ -+/* Form a buffer virtual address from buffer ID and offset. If the offset -+** is larger than the buffer size, then the buffer indexed will be -+** calculated appropriately. It is the responsibility of the caller to -+** ensure that they have valid buffers programmed at that address. 
-+*/ -+#define FALCON_VADDR_8K_S (13) -+#define FALCON_VADDR_4K_S (12) -+#define FALCON_VADDR_M 0xfffff /* post shift mask */ -+ -+#define FALCON_BUFFER_8K_ADDR(id, off) (((id) << FALCON_VADDR_8K_S) + (off)) -+#define FALCON_BUFFER_8K_PAGE(vaddr) \ -+ (((vaddr) >> FALCON_VADDR_8K_S) & FALCON_VADDR_M) -+#define FALCON_BUFFER_8K_OFF(vaddr) \ -+ ((vaddr) & __FALCON_MASK32(FALCON_VADDR_8K_S)) -+ -+#define FALCON_BUFFER_4K_ADDR(id, off) (((id) << FALCON_VADDR_4K_S) + (off)) -+#define FALCON_BUFFER_4K_PAGE(vaddr) \ -+ (((vaddr) >> FALCON_VADDR_4K_S) & FALCON_VADDR_M) -+#define FALCON_BUFFER_4K_OFF(vaddr) \ -+ ((vaddr) & __FALCON_MASK32(FALCON_VADDR_4K_S)) -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Timer helpers -+ * -+ *---------------------------------------------------------------------------*/ -+ -+static inline int falcon_timer_page_addr(uint idx) -+{ -+ -+ EFHW_ASSERT(TIMER_CMD_REG_KER_OFST == -+ (TIMER_CMD_REG_PAGE4_OFST - 4 * EFHW_8K)); -+ -+ EFHW_ASSERT(idx < FALCON_TIMERS_NUM_SANITY); -+ -+ if (idx < 4) -+ return TIMER_CMD_REG_KER_OFST + (idx * EFHW_8K); -+ else if (idx < 1024) -+ return TIMER_CMD_REG_PAGE4_OFST + ((idx - 4) * EFHW_8K); -+ else -+ return TIMER_CMD_REG_PAGE123K_OFST + ((idx - 1024) * EFHW_8K); -+} -+ -+#define FALCON_TIMER_PAGE_MASK (EFHW_8K-1) -+ -+static inline int falcon_timer_page_offset(uint idx) -+{ -+ return falcon_timer_page_addr(idx) & FALCON_TIMER_PAGE_MASK; -+} -+ -+/*---------------------------------------------------------------------------- -+ * -+ * DMA Queue helpers -+ * -+ *---------------------------------------------------------------------------*/ -+ -+/* iSCSI queue for A1; see bug 5427 for more details. */ -+#define FALCON_A1_ISCSI_DMAQ 4 -+ -+/*! 
returns an address within a bar of the TX DMA doorbell */ -+static inline uint falcon_tx_dma_page_addr(uint dmaq_idx) -+{ -+ uint page; -+ -+ EFHW_ASSERT((((TX_DESC_UPD_REG_PAGE123K_OFST) & (EFHW_8K - 1)) == -+ (((TX_DESC_UPD_REG_PAGE4_OFST) & (EFHW_8K - 1))))); -+ -+ EFHW_ASSERT(dmaq_idx < FALCON_DMAQ_NUM_SANITY); -+ -+ if (dmaq_idx < 1024) -+ page = TX_DESC_UPD_REG_PAGE4_OFST + ((dmaq_idx - 4) * EFHW_8K); -+ else -+ page = -+ TX_DESC_UPD_REG_PAGE123K_OFST + -+ ((dmaq_idx - 1024) * EFHW_8K); -+ -+ return page; -+} -+ -+/*! returns an address within a bar of the RX DMA doorbell */ -+static inline uint falcon_rx_dma_page_addr(uint dmaq_idx) -+{ -+ uint page; -+ -+ EFHW_ASSERT((((RX_DESC_UPD_REG_PAGE123K_OFST) & (EFHW_8K - 1)) == -+ ((RX_DESC_UPD_REG_PAGE4_OFST) & (EFHW_8K - 1)))); -+ -+ EFHW_ASSERT(dmaq_idx < FALCON_DMAQ_NUM_SANITY); -+ -+ if (dmaq_idx < 1024) -+ page = RX_DESC_UPD_REG_PAGE4_OFST + ((dmaq_idx - 4) * EFHW_8K); -+ else -+ page = -+ RX_DESC_UPD_REG_PAGE123K_OFST + -+ ((dmaq_idx - 1024) * EFHW_8K); -+ -+ return page; -+} -+ -+/*! "page"=NIC-dependent register set size */ -+#define FALCON_DMA_PAGE_MASK (EFHW_8K-1) -+ -+/*! returns an address within a bar of the start of the "page" -+ containing the TX DMA doorbell */ -+static inline int falcon_tx_dma_page_base(uint dma_idx) -+{ -+ return falcon_tx_dma_page_addr(dma_idx) & ~FALCON_DMA_PAGE_MASK; -+} -+ -+/*! returns an address within a bar of the start of the "page" -+ containing the RX DMA doorbell */ -+static inline int falcon_rx_dma_page_base(uint dma_idx) -+{ -+ return falcon_rx_dma_page_addr(dma_idx) & ~FALCON_DMA_PAGE_MASK; -+} -+ -+/*! returns an offset within a "page" of the TX DMA doorbell */ -+static inline int falcon_tx_dma_page_offset(uint dma_idx) -+{ -+ return falcon_tx_dma_page_addr(dma_idx) & FALCON_DMA_PAGE_MASK; -+} -+ -+/*! 
returns an offset within a "page" of the RX DMA doorbell */ -+static inline int falcon_rx_dma_page_offset(uint dma_idx) -+{ -+ return falcon_rx_dma_page_addr(dma_idx) & FALCON_DMA_PAGE_MASK; -+} -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Events -+ * -+ *---------------------------------------------------------------------------*/ -+ -+/* Falcon nails down the event queue mappings */ -+#define FALCON_EVQ_KERNEL0 (0) /* hardwired for net driver */ -+#define FALCON_EVQ_CHAR (4) /* char driver's event queue */ -+ -+/* reserved by the drivers */ -+#define FALCON_EVQ_TBL_RESERVED (8) -+ -+/* default DMA-Q sizes */ -+#define FALCON_DMA_Q_DEFAULT_TX_SIZE 512 -+ -+#define FALCON_DMA_Q_DEFAULT_RX_SIZE 512 -+ -+#define FALCON_DMA_Q_DEFAULT_MMAP \ -+ (FALCON_DMA_Q_DEFAULT_TX_SIZE * (FALCON_DMA_TX_DESC_BYTES * 2)) -+ -+/*---------------------------------------------------------------------------- -+ * -+ * DEBUG - Analyser trigger -+ * -+ *---------------------------------------------------------------------------*/ -+ -+static inline void -+falcon_deadbeef(volatile char __iomem *efhw_kva, unsigned what) -+{ -+ writel(what, efhw_kva + 0x300); -+ mmiowb(); -+} -+#endif /* __CI_DRIVER_EFAB_HARDWARE_FALCON_H__ */ -+/*! \cidoxg_end */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_core.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,1147 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) core register -+ * definitions. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#define FALCON_EXTENDED_P_BAR 1 -+ -+/*************---- Bus Interface Unit Registers C Header ----*************/ -+#define IOM_IND_ADR_REG_OFST 0x0 /* IO-mapped indirect access address -+ register */ -+ #define IOM_AUTO_ADR_INC_EN_LBN 16 -+ #define IOM_AUTO_ADR_INC_EN_WIDTH 1 -+ #define IOM_IND_ADR_LBN 0 -+ #define IOM_IND_ADR_WIDTH 16 -+#define IOM_IND_DAT_REG_OFST 0x4 /* IO-mapped indirect access data register */ -+ #define IOM_IND_DAT_LBN 0 -+ #define IOM_IND_DAT_WIDTH 32 -+#define ADR_REGION_REG_KER_OFST 0x0 /* Address region register */ -+#define ADR_REGION_REG_OFST 0x0 /* Address region register */ -+ #define ADR_REGION3_LBN 96 -+ #define ADR_REGION3_WIDTH 18 -+ #define ADR_REGION2_LBN 64 -+ #define ADR_REGION2_WIDTH 18 -+ #define ADR_REGION1_LBN 32 -+ #define ADR_REGION1_WIDTH 18 -+ #define ADR_REGION0_LBN 0 -+ #define ADR_REGION0_WIDTH 18 -+#define INT_EN_REG_KER_OFST 0x10 /* Kernel driver Interrupt enable register */ -+ #define KER_INT_CHAR_LBN 4 -+ #define 
KER_INT_CHAR_WIDTH 1 -+ #define KER_INT_KER_LBN 3 -+ #define KER_INT_KER_WIDTH 1 -+ #define ILL_ADR_ERR_INT_EN_KER_LBN 2 -+ #define ILL_ADR_ERR_INT_EN_KER_WIDTH 1 -+ #define SRM_PERR_INT_EN_KER_LBN 1 -+ #define SRM_PERR_INT_EN_KER_WIDTH 1 -+ #define DRV_INT_EN_KER_LBN 0 -+ #define DRV_INT_EN_KER_WIDTH 1 -+#define INT_EN_REG_CHAR_OFST 0x20 /* Char Driver interrupt enable register */ -+ #define CHAR_INT_CHAR_LBN 4 -+ #define CHAR_INT_CHAR_WIDTH 1 -+ #define CHAR_INT_KER_LBN 3 -+ #define CHAR_INT_KER_WIDTH 1 -+ #define ILL_ADR_ERR_INT_EN_CHAR_LBN 2 -+ #define ILL_ADR_ERR_INT_EN_CHAR_WIDTH 1 -+ #define SRM_PERR_INT_EN_CHAR_LBN 1 -+ #define SRM_PERR_INT_EN_CHAR_WIDTH 1 -+ #define DRV_INT_EN_CHAR_LBN 0 -+ #define DRV_INT_EN_CHAR_WIDTH 1 -+#define INT_ADR_REG_KER_OFST 0x30 /* Interrupt host address for Kernel driver */ -+ #define INT_ADR_KER_LBN 0 -+ #define INT_ADR_KER_WIDTH 64 -+ #define DRV_INT_KER_LBN 32 -+ #define DRV_INT_KER_WIDTH 1 -+ #define EV_FF_HALF_INT_KER_LBN 3 -+ #define EV_FF_HALF_INT_KER_WIDTH 1 -+ #define EV_FF_FULL_INT_KER_LBN 2 -+ #define EV_FF_FULL_INT_KER_WIDTH 1 -+ #define ILL_ADR_ERR_INT_KER_LBN 1 -+ #define ILL_ADR_ERR_INT_KER_WIDTH 1 -+ #define SRAM_PERR_INT_KER_LBN 0 -+ #define SRAM_PERR_INT_KER_WIDTH 1 -+#define INT_ADR_REG_CHAR_OFST 0x40 /* Interrupt host address for Char driver */ -+ #define INT_ADR_CHAR_LBN 0 -+ #define INT_ADR_CHAR_WIDTH 64 -+ #define DRV_INT_CHAR_LBN 32 -+ #define DRV_INT_CHAR_WIDTH 1 -+ #define EV_FF_HALF_INT_CHAR_LBN 3 -+ #define EV_FF_HALF_INT_CHAR_WIDTH 1 -+ #define EV_FF_FULL_INT_CHAR_LBN 2 -+ #define EV_FF_FULL_INT_CHAR_WIDTH 1 -+ #define ILL_ADR_ERR_INT_CHAR_LBN 1 -+ #define ILL_ADR_ERR_INT_CHAR_WIDTH 1 -+ #define SRAM_PERR_INT_CHAR_LBN 0 -+ #define SRAM_PERR_INT_CHAR_WIDTH 1 -+#define INT_ISR0_B0_OFST 0x90 /* B0 only */ -+#define INT_ISR1_B0_OFST 0xA0 -+#define INT_ACK_REG_KER_A1_OFST 0x50 /* Kernel interrupt acknowledge register */ -+ #define RESERVED_LBN 0 -+ #define RESERVED_WIDTH 32 -+#define 
INT_ACK_REG_CHAR_A1_OFST 0x60 /* CHAR interrupt acknowledge register */ -+ #define RESERVED_LBN 0 -+ #define RESERVED_WIDTH 32 -+/*************---- Global CSR Registers C Header ----*************/ -+#define NIC_STAT_REG_KER_OFST 0x200 /* ASIC strap status register */ -+#define NIC_STAT_REG_OFST 0x200 /* ASIC strap status register */ -+ #define ONCHIP_SRAM_LBN 16 -+ #define ONCHIP_SRAM_WIDTH 0 -+ #define STRAP_PINS_LBN 0 -+ #define STRAP_PINS_WIDTH 3 -+#define GPIO_CTL_REG_KER_OFST 0x210 /* GPIO control register */ -+#define GPIO_CTL_REG_OFST 0x210 /* GPIO control register */ -+ #define GPIO_OEN_LBN 24 -+ #define GPIO_OEN_WIDTH 4 -+ #define GPIO_OUT_LBN 16 -+ #define GPIO_OUT_WIDTH 4 -+ #define GPIO_IN_LBN 8 -+ #define GPIO_IN_WIDTH 4 -+ #define GPIO_PWRUP_VALUE_LBN 0 -+ #define GPIO_PWRUP_VALUE_WIDTH 4 -+#define GLB_CTL_REG_KER_OFST 0x220 /* Global control register */ -+#define GLB_CTL_REG_OFST 0x220 /* Global control register */ -+ #define SWRST_LBN 0 -+ #define SWRST_WIDTH 1 -+#define FATAL_INTR_REG_KER_OFST 0x230 /* Fatal interrupt register for Kernel */ -+ #define PCI_BUSERR_INT_KER_EN_LBN 43 -+ #define PCI_BUSERR_INT_KER_EN_WIDTH 1 -+ #define SRAM_OOB_INT_KER_EN_LBN 42 -+ #define SRAM_OOB_INT_KER_EN_WIDTH 1 -+ #define BUFID_OOB_INT_KER_EN_LBN 41 -+ #define BUFID_OOB_INT_KER_EN_WIDTH 1 -+ #define MEM_PERR_INT_KER_EN_LBN 40 -+ #define MEM_PERR_INT_KER_EN_WIDTH 1 -+ #define RBUF_OWN_INT_KER_EN_LBN 39 -+ #define RBUF_OWN_INT_KER_EN_WIDTH 1 -+ #define TBUF_OWN_INT_KER_EN_LBN 38 -+ #define TBUF_OWN_INT_KER_EN_WIDTH 1 -+ #define RDESCQ_OWN_INT_KER_EN_LBN 37 -+ #define RDESCQ_OWN_INT_KER_EN_WIDTH 1 -+ #define TDESCQ_OWN_INT_KER_EN_LBN 36 -+ #define TDESCQ_OWN_INT_KER_EN_WIDTH 1 -+ #define EVQ_OWN_INT_KER_EN_LBN 35 -+ #define EVQ_OWN_INT_KER_EN_WIDTH 1 -+ #define EVFF_OFLO_INT_KER_EN_LBN 34 -+ #define EVFF_OFLO_INT_KER_EN_WIDTH 1 -+ #define ILL_ADR_INT_KER_EN_LBN 33 -+ #define ILL_ADR_INT_KER_EN_WIDTH 1 -+ #define SRM_PERR_INT_KER_EN_LBN 32 -+ #define 
SRM_PERR_INT_KER_EN_WIDTH 1 -+ #define PCI_BUSERR_INT_KER_LBN 11 -+ #define PCI_BUSERR_INT_KER_WIDTH 1 -+ #define SRAM_OOB_INT_KER_LBN 10 -+ #define SRAM_OOB_INT_KER_WIDTH 1 -+ #define BUFID_OOB_INT_KER_LBN 9 -+ #define BUFID_OOB_INT_KER_WIDTH 1 -+ #define MEM_PERR_INT_KER_LBN 8 -+ #define MEM_PERR_INT_KER_WIDTH 1 -+ #define RBUF_OWN_INT_KER_LBN 7 -+ #define RBUF_OWN_INT_KER_WIDTH 1 -+ #define TBUF_OWN_INT_KER_LBN 6 -+ #define TBUF_OWN_INT_KER_WIDTH 1 -+ #define RDESCQ_OWN_INT_KER_LBN 5 -+ #define RDESCQ_OWN_INT_KER_WIDTH 1 -+ #define TDESCQ_OWN_INT_KER_LBN 4 -+ #define TDESCQ_OWN_INT_KER_WIDTH 1 -+ #define EVQ_OWN_INT_KER_LBN 3 -+ #define EVQ_OWN_INT_KER_WIDTH 1 -+ #define EVFF_OFLO_INT_KER_LBN 2 -+ #define EVFF_OFLO_INT_KER_WIDTH 1 -+ #define ILL_ADR_INT_KER_LBN 1 -+ #define ILL_ADR_INT_KER_WIDTH 1 -+ #define SRM_PERR_INT_KER_LBN 0 -+ #define SRM_PERR_INT_KER_WIDTH 1 -+#define FATAL_INTR_REG_OFST 0x240 /* Fatal interrupt register for Char */ -+ #define PCI_BUSERR_INT_CHAR_EN_LBN 43 -+ #define PCI_BUSERR_INT_CHAR_EN_WIDTH 1 -+ #define SRAM_OOB_INT_CHAR_EN_LBN 42 -+ #define SRAM_OOB_INT_CHAR_EN_WIDTH 1 -+ #define BUFID_OOB_INT_CHAR_EN_LBN 41 -+ #define BUFID_OOB_INT_CHAR_EN_WIDTH 1 -+ #define MEM_PERR_INT_CHAR_EN_LBN 40 -+ #define MEM_PERR_INT_CHAR_EN_WIDTH 1 -+ #define RBUF_OWN_INT_CHAR_EN_LBN 39 -+ #define RBUF_OWN_INT_CHAR_EN_WIDTH 1 -+ #define TBUF_OWN_INT_CHAR_EN_LBN 38 -+ #define TBUF_OWN_INT_CHAR_EN_WIDTH 1 -+ #define RDESCQ_OWN_INT_CHAR_EN_LBN 37 -+ #define RDESCQ_OWN_INT_CHAR_EN_WIDTH 1 -+ #define TDESCQ_OWN_INT_CHAR_EN_LBN 36 -+ #define TDESCQ_OWN_INT_CHAR_EN_WIDTH 1 -+ #define EVQ_OWN_INT_CHAR_EN_LBN 35 -+ #define EVQ_OWN_INT_CHAR_EN_WIDTH 1 -+ #define EVFF_OFLO_INT_CHAR_EN_LBN 34 -+ #define EVFF_OFLO_INT_CHAR_EN_WIDTH 1 -+ #define ILL_ADR_INT_CHAR_EN_LBN 33 -+ #define ILL_ADR_INT_CHAR_EN_WIDTH 1 -+ #define SRM_PERR_INT_CHAR_EN_LBN 32 -+ #define SRM_PERR_INT_CHAR_EN_WIDTH 1 -+ #define FATAL_INTR_REG_EN_BITS 0xffffffffffffffffULL -+ #define 
PCI_BUSERR_INT_CHAR_LBN 11 -+ #define PCI_BUSERR_INT_CHAR_WIDTH 1 -+ #define SRAM_OOB_INT_CHAR_LBN 10 -+ #define SRAM_OOB_INT_CHAR_WIDTH 1 -+ #define BUFID_OOB_INT_CHAR_LBN 9 -+ #define BUFID_OOB_INT_CHAR_WIDTH 1 -+ #define MEM_PERR_INT_CHAR_LBN 8 -+ #define MEM_PERR_INT_CHAR_WIDTH 1 -+ #define RBUF_OWN_INT_CHAR_LBN 7 -+ #define RBUF_OWN_INT_CHAR_WIDTH 1 -+ #define TBUF_OWN_INT_CHAR_LBN 6 -+ #define TBUF_OWN_INT_CHAR_WIDTH 1 -+ #define RDESCQ_OWN_INT_CHAR_LBN 5 -+ #define RDESCQ_OWN_INT_CHAR_WIDTH 1 -+ #define TDESCQ_OWN_INT_CHAR_LBN 4 -+ #define TDESCQ_OWN_INT_CHAR_WIDTH 1 -+ #define EVQ_OWN_INT_CHAR_LBN 3 -+ #define EVQ_OWN_INT_CHAR_WIDTH 1 -+ #define EVFF_OFLO_INT_CHAR_LBN 2 -+ #define EVFF_OFLO_INT_CHAR_WIDTH 1 -+ #define ILL_ADR_INT_CHAR_LBN 1 -+ #define ILL_ADR_INT_CHAR_WIDTH 1 -+ #define SRM_PERR_INT_CHAR_LBN 0 -+ #define SRM_PERR_INT_CHAR_WIDTH 1 -+#define DP_CTRL_REG_OFST 0x250 /* Datapath control register */ -+ #define FLS_EVQ_ID_LBN 0 -+ #define FLS_EVQ_ID_WIDTH 12 -+#define MEM_STAT_REG_KER_OFST 0x260 /* Memory status register */ -+#define MEM_STAT_REG_OFST 0x260 /* Memory status register */ -+ #define MEM_PERR_VEC_LBN 53 -+ #define MEM_PERR_VEC_WIDTH 38 -+ #define MBIST_CORR_LBN 38 -+ #define MBIST_CORR_WIDTH 15 -+ #define MBIST_ERR_LBN 0 -+ #define MBIST_ERR_WIDTH 38 -+#define DEBUG_REG_KER_OFST 0x270 /* Debug register */ -+#define DEBUG_REG_OFST 0x270 /* Debug register */ -+ #define DEBUG_BLK_SEL2_LBN 47 -+ #define DEBUG_BLK_SEL2_WIDTH 3 -+ #define DEBUG_BLK_SEL1_LBN 44 -+ #define DEBUG_BLK_SEL1_WIDTH 3 -+ #define DEBUG_BLK_SEL0_LBN 41 -+ #define DEBUG_BLK_SEL0_WIDTH 3 -+ #define MISC_DEBUG_ADDR_LBN 36 -+ #define MISC_DEBUG_ADDR_WIDTH 5 -+ #define SERDES_DEBUG_ADDR_LBN 31 -+ #define SERDES_DEBUG_ADDR_WIDTH 5 -+ #define EM_DEBUG_ADDR_LBN 26 -+ #define EM_DEBUG_ADDR_WIDTH 5 -+ #define SR_DEBUG_ADDR_LBN 21 -+ #define SR_DEBUG_ADDR_WIDTH 5 -+ #define EV_DEBUG_ADDR_LBN 16 -+ #define EV_DEBUG_ADDR_WIDTH 5 -+ #define RX_DEBUG_ADDR_LBN 11 -+ #define 
RX_DEBUG_ADDR_WIDTH 5 -+ #define TX_DEBUG_ADDR_LBN 6 -+ #define TX_DEBUG_ADDR_WIDTH 5 -+ #define BIU_DEBUG_ADDR_LBN 1 -+ #define BIU_DEBUG_ADDR_WIDTH 5 -+ #define DEBUG_EN_LBN 0 -+ #define DEBUG_EN_WIDTH 1 -+#define DRIVER_REG0_KER_OFST 0x280 /* Driver scratch register 0 */ -+#define DRIVER_REG0_OFST 0x280 /* Driver scratch register 0 */ -+ #define DRIVER_DW0_LBN 0 -+ #define DRIVER_DW0_WIDTH 32 -+#define DRIVER_REG1_KER_OFST 0x290 /* Driver scratch register 1 */ -+#define DRIVER_REG1_OFST 0x290 /* Driver scratch register 1 */ -+ #define DRIVER_DW1_LBN 0 -+ #define DRIVER_DW1_WIDTH 32 -+#define DRIVER_REG2_KER_OFST 0x2A0 /* Driver scratch register 2 */ -+#define DRIVER_REG2_OFST 0x2A0 /* Driver scratch register 2 */ -+ #define DRIVER_DW2_LBN 0 -+ #define DRIVER_DW2_WIDTH 32 -+#define DRIVER_REG3_KER_OFST 0x2B0 /* Driver scratch register 3 */ -+#define DRIVER_REG3_OFST 0x2B0 /* Driver scratch register 3 */ -+ #define DRIVER_DW3_LBN 0 -+ #define DRIVER_DW3_WIDTH 32 -+#define DRIVER_REG4_KER_OFST 0x2C0 /* Driver scratch register 4 */ -+#define DRIVER_REG4_OFST 0x2C0 /* Driver scratch register 4 */ -+ #define DRIVER_DW4_LBN 0 -+ #define DRIVER_DW4_WIDTH 32 -+#define DRIVER_REG5_KER_OFST 0x2D0 /* Driver scratch register 5 */ -+#define DRIVER_REG5_OFST 0x2D0 /* Driver scratch register 5 */ -+ #define DRIVER_DW5_LBN 0 -+ #define DRIVER_DW5_WIDTH 32 -+#define DRIVER_REG6_KER_OFST 0x2E0 /* Driver scratch register 6 */ -+#define DRIVER_REG6_OFST 0x2E0 /* Driver scratch register 6 */ -+ #define DRIVER_DW6_LBN 0 -+ #define DRIVER_DW6_WIDTH 32 -+#define DRIVER_REG7_KER_OFST 0x2F0 /* Driver scratch register 7 */ -+#define DRIVER_REG7_OFST 0x2F0 /* Driver scratch register 7 */ -+ #define DRIVER_DW7_LBN 0 -+ #define DRIVER_DW7_WIDTH 32 -+#define ALTERA_BUILD_REG_OFST 0x300 /* Altera build register */ -+#define ALTERA_BUILD_REG_OFST 0x300 /* Altera build register */ -+ #define ALTERA_BUILD_VER_LBN 0 -+ #define ALTERA_BUILD_VER_WIDTH 32 -+ -+/* so called CSR spare register -+ - 
contains separate parity enable bits for the various internal memory -+ blocks */ -+#define MEM_PARITY_ERR_EN_REG_KER 0x310 -+#define MEM_PARITY_ALL_BLOCKS_EN_LBN 64 -+#define MEM_PARITY_ALL_BLOCKS_EN_WIDTH 38 -+#define MEM_PARITY_TX_DATA_EN_LBN 72 -+#define MEM_PARITY_TX_DATA_EN_WIDTH 2 -+ -+/*************---- Event & Timer Module Registers C Header ----*************/ -+ -+#if FALCON_EXTENDED_P_BAR -+#define EVQ_RPTR_REG_KER_OFST 0x11B00 /* Event queue read pointer register */ -+#else -+#define EVQ_RPTR_REG_KER_OFST 0x1B00 /* Event queue read pointer register */ -+#endif -+ -+#define EVQ_RPTR_REG_OFST 0xFA0000 /* Event queue read pointer register -+ array. */ -+ #define EVQ_RPTR_LBN 0 -+ #define EVQ_RPTR_WIDTH 15 -+ -+#if FALCON_EXTENDED_P_BAR -+#define EVQ_PTR_TBL_KER_OFST 0x11A00 /* Event queue pointer table for kernel -+ access */ -+#else -+#define EVQ_PTR_TBL_KER_OFST 0x1A00 /* Event queue pointer table for kernel -+ access */ -+#endif -+ -+#define EVQ_PTR_TBL_CHAR_OFST 0xF60000 /* Event queue pointer table for char -+ direct access */ -+ #define EVQ_WKUP_OR_INT_EN_LBN 39 -+ #define EVQ_WKUP_OR_INT_EN_WIDTH 1 -+ #define EVQ_NXT_WPTR_LBN 24 -+ #define EVQ_NXT_WPTR_WIDTH 15 -+ #define EVQ_EN_LBN 23 -+ #define EVQ_EN_WIDTH 1 -+ #define EVQ_SIZE_LBN 20 -+ #define EVQ_SIZE_WIDTH 3 -+ #define EVQ_BUF_BASE_ID_LBN 0 -+ #define EVQ_BUF_BASE_ID_WIDTH 20 -+#define TIMER_CMD_REG_KER_OFST 0x420 /* Timer table for kernel access. -+ Page-mapped */ -+#define TIMER_CMD_REG_PAGE4_OFST 0x8420 /* Timer table for user-level access. -+ Page-mapped. For lowest 1K queues. -+ */ -+#define TIMER_CMD_REG_PAGE123K_OFST 0x1000420 /* Timer table for user-level -+ access. Page-mapped. -+ For upper 3K queues. 
*/ -+#define TIMER_TBL_OFST 0xF70000 /* Timer table for char driver direct access */ -+ #define TIMER_MODE_LBN 12 -+ #define TIMER_MODE_WIDTH 2 -+ #define TIMER_VAL_LBN 0 -+ #define TIMER_VAL_WIDTH 12 -+ #define TIMER_MODE_INT_HLDOFF 2 -+ #define EVQ_BUF_SIZE_LBN 0 -+ #define EVQ_BUF_SIZE_WIDTH 1 -+#define DRV_EV_REG_KER_OFST 0x440 /* Driver generated event register */ -+#define DRV_EV_REG_OFST 0x440 /* Driver generated event register */ -+ #define DRV_EV_QID_LBN 64 -+ #define DRV_EV_QID_WIDTH 12 -+ #define DRV_EV_DATA_LBN 0 -+ #define DRV_EV_DATA_WIDTH 64 -+#define EVQ_CTL_REG_KER_OFST 0x450 /* Event queue control register */ -+#define EVQ_CTL_REG_OFST 0x450 /* Event queue control register */ -+ #define RX_EVQ_WAKEUP_MASK_B0_LBN 15 -+ #define RX_EVQ_WAKEUP_MASK_B0_WIDTH 6 -+ #define EVQ_OWNERR_CTL_LBN 14 -+ #define EVQ_OWNERR_CTL_WIDTH 1 -+ #define EVQ_FIFO_AF_TH_LBN 8 -+ #define EVQ_FIFO_AF_TH_WIDTH 6 -+ #define EVQ_FIFO_NOTAF_TH_LBN 0 -+ #define EVQ_FIFO_NOTAF_TH_WIDTH 6 -+/*************---- SRAM Module Registers C Header ----*************/ -+#define BUF_TBL_CFG_REG_KER_OFST 0x600 /* Buffer table configuration register */ -+#define BUF_TBL_CFG_REG_OFST 0x600 /* Buffer table configuration register */ -+ #define BUF_TBL_MODE_LBN 3 -+ #define BUF_TBL_MODE_WIDTH 1 -+#define SRM_RX_DC_CFG_REG_KER_OFST 0x610 /* SRAM receive descriptor cache -+ configuration register */ -+#define SRM_RX_DC_CFG_REG_OFST 0x610 /* SRAM receive descriptor cache -+ configuration register */ -+ #define SRM_RX_DC_BASE_ADR_LBN 0 -+ #define SRM_RX_DC_BASE_ADR_WIDTH 21 -+#define SRM_TX_DC_CFG_REG_KER_OFST 0x620 /* SRAM transmit descriptor cache -+ configuration register */ -+#define SRM_TX_DC_CFG_REG_OFST 0x620 /* SRAM transmit descriptor cache -+ configuration register */ -+ #define SRM_TX_DC_BASE_ADR_LBN 0 -+ #define SRM_TX_DC_BASE_ADR_WIDTH 21 -+#define SRM_CFG_REG_KER_OFST 0x630 /* SRAM configuration register */ -+#define SRM_CFG_REG_OFST 0x630 /* SRAM configuration register */ -+ #define 
SRAM_OOB_ADR_INTEN_LBN 5 -+ #define SRAM_OOB_ADR_INTEN_WIDTH 1 -+ #define SRAM_OOB_BUF_INTEN_LBN 4 -+ #define SRAM_OOB_BUF_INTEN_WIDTH 1 -+ #define SRAM_BT_INIT_EN_LBN 3 -+ #define SRAM_BT_INIT_EN_WIDTH 1 -+ #define SRM_NUM_BANK_LBN 2 -+ #define SRM_NUM_BANK_WIDTH 1 -+ #define SRM_BANK_SIZE_LBN 0 -+ #define SRM_BANK_SIZE_WIDTH 2 -+#define BUF_TBL_UPD_REG_KER_OFST 0x650 /* Buffer table update register */ -+#define BUF_TBL_UPD_REG_OFST 0x650 /* Buffer table update register */ -+ #define BUF_UPD_CMD_LBN 63 -+ #define BUF_UPD_CMD_WIDTH 1 -+ #define BUF_CLR_CMD_LBN 62 -+ #define BUF_CLR_CMD_WIDTH 1 -+ #define BUF_CLR_END_ID_LBN 32 -+ #define BUF_CLR_END_ID_WIDTH 20 -+ #define BUF_CLR_START_ID_LBN 0 -+ #define BUF_CLR_START_ID_WIDTH 20 -+#define SRM_UPD_EVQ_REG_KER_OFST 0x660 /* Buffer table update register */ -+#define SRM_UPD_EVQ_REG_OFST 0x660 /* Buffer table update register */ -+ #define SRM_UPD_EVQ_ID_LBN 0 -+ #define SRM_UPD_EVQ_ID_WIDTH 12 -+#define SRAM_PARITY_REG_KER_OFST 0x670 /* SRAM parity register. */ -+#define SRAM_PARITY_REG_OFST 0x670 /* SRAM parity register. 
*/ -+ #define FORCE_SRAM_PERR_LBN 0 -+ #define FORCE_SRAM_PERR_WIDTH 1 -+ -+#if FALCON_EXTENDED_P_BAR -+#define BUF_HALF_TBL_KER_OFST 0x18000 /* Buffer table in half buffer table -+ mode direct access by kernel driver */ -+#else -+#define BUF_HALF_TBL_KER_OFST 0x8000 /* Buffer table in half buffer table -+ mode direct access by kernel driver */ -+#endif -+ -+ -+#define BUF_HALF_TBL_OFST 0x800000 /* Buffer table in half buffer table mode -+ direct access by char driver */ -+ #define BUF_ADR_HBUF_ODD_LBN 44 -+ #define BUF_ADR_HBUF_ODD_WIDTH 20 -+ #define BUF_OWNER_ID_HBUF_ODD_LBN 32 -+ #define BUF_OWNER_ID_HBUF_ODD_WIDTH 12 -+ #define BUF_ADR_HBUF_EVEN_LBN 12 -+ #define BUF_ADR_HBUF_EVEN_WIDTH 20 -+ #define BUF_OWNER_ID_HBUF_EVEN_LBN 0 -+ #define BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 -+ -+ -+#if FALCON_EXTENDED_P_BAR -+#define BUF_FULL_TBL_KER_OFST 0x18000 /* Buffer table in full buffer table -+ mode direct access by kernel driver */ -+#else -+#define BUF_FULL_TBL_KER_OFST 0x8000 /* Buffer table in full buffer table mode -+ direct access by kernel driver */ -+#endif -+ -+ -+ -+ -+#define BUF_FULL_TBL_OFST 0x800000 /* Buffer table in full buffer table mode -+ direct access by char driver */ -+ #define IP_DAT_BUF_SIZE_LBN 50 -+ #define IP_DAT_BUF_SIZE_WIDTH 1 -+ #define BUF_ADR_REGION_LBN 48 -+ #define BUF_ADR_REGION_WIDTH 2 -+ #define BUF_ADR_FBUF_LBN 14 -+ #define BUF_ADR_FBUF_WIDTH 34 -+ #define BUF_OWNER_ID_FBUF_LBN 0 -+ #define BUF_OWNER_ID_FBUF_WIDTH 14 -+#define SRM_DBG_REG_OFST 0x3000000 /* SRAM debug access */ -+ #define SRM_DBG_LBN 0 -+ #define SRM_DBG_WIDTH 64 -+/*************---- RX Datapath Registers C Header ----*************/ -+ -+#define RX_CFG_REG_KER_OFST 0x800 /* Receive configuration register */ -+#define RX_CFG_REG_OFST 0x800 /* Receive configuration register */ -+ -+#if !defined(FALCON_64K_RXFIFO) && !defined(FALCON_PRE_02020029) -+# if !defined(FALCON_128K_RXFIFO) -+# define FALCON_128K_RXFIFO -+# endif -+#endif -+ -+#if defined(FALCON_128K_RXFIFO) -+ 
-+/* new for B0 */ -+ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 48 -+ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1 -+ #define RX_INGR_EN_B0_LBN 47 -+ #define RX_INGR_EN_B0_WIDTH 1 -+ #define RX_TOEP_IPV4_B0_LBN 46 -+ #define RX_TOEP_IPV4_B0_WIDTH 1 -+ #define RX_HASH_ALG_B0_LBN 45 -+ #define RX_HASH_ALG_B0_WIDTH 1 -+ #define RX_HASH_INSERT_HDR_B0_LBN 44 -+ #define RX_HASH_INSERT_HDR_B0_WIDTH 1 -+/* moved for B0 */ -+ #define RX_DESC_PUSH_EN_B0_LBN 43 -+ #define RX_DESC_PUSH_EN_B0_WIDTH 1 -+ #define RX_RDW_PATCH_EN_LBN 42 /* Non head of line blocking */ -+ #define RX_RDW_PATCH_EN_WIDTH 1 -+ #define RX_PCI_BURST_SIZE_B0_LBN 39 -+ #define RX_PCI_BURST_SIZE_B0_WIDTH 3 -+ #define RX_OWNERR_CTL_B0_LBN 38 -+ #define RX_OWNERR_CTL_B0_WIDTH 1 -+ #define RX_XON_TX_TH_B0_LBN 33 -+ #define RX_XON_TX_TH_B0_WIDTH 5 -+ #define RX_XOFF_TX_TH_B0_LBN 28 -+ #define RX_XOFF_TX_TH_B0_WIDTH 5 -+ #define RX_USR_BUF_SIZE_B0_LBN 19 -+ #define RX_USR_BUF_SIZE_B0_WIDTH 9 -+ #define RX_XON_MAC_TH_B0_LBN 10 -+ #define RX_XON_MAC_TH_B0_WIDTH 9 -+ #define RX_XOFF_MAC_TH_B0_LBN 1 -+ #define RX_XOFF_MAC_TH_B0_WIDTH 9 -+ #define RX_XOFF_MAC_EN_B0_LBN 0 -+ #define RX_XOFF_MAC_EN_B0_WIDTH 1 -+ -+#elif !defined(FALCON_PRE_02020029) -+/* new for B0 */ -+ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 46 -+ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1 -+ #define RX_INGR_EN_B0_LBN 45 -+ #define RX_INGR_EN_B0_WIDTH 1 -+ #define RX_TOEP_IPV4_B0_LBN 44 -+ #define RX_TOEP_IPV4_B0_WIDTH 1 -+ #define RX_HASH_ALG_B0_LBN 43 -+ #define RX_HASH_ALG_B0_WIDTH 41 -+ #define RX_HASH_INSERT_HDR_B0_LBN 42 -+ #define RX_HASH_INSERT_HDR_B0_WIDTH 1 -+/* moved for B0 */ -+ #define RX_DESC_PUSH_EN_B0_LBN 41 -+ #define RX_DESC_PUSH_EN_B0_WIDTH 1 -+ #define RX_PCI_BURST_SIZE_B0_LBN 37 -+ #define RX_PCI_BURST_SIZE_B0_WIDTH 3 -+ #define RX_OWNERR_CTL_B0_LBN 36 -+ #define RX_OWNERR_CTL_B0_WIDTH 1 -+ #define RX_XON_TX_TH_B0_LBN 31 -+ #define RX_XON_TX_TH_B0_WIDTH 5 -+ #define RX_XOFF_TX_TH_B0_LBN 26 -+ #define RX_XOFF_TX_TH_B0_WIDTH 5 -+ #define 
RX_USR_BUF_SIZE_B0_LBN 17 -+ #define RX_USR_BUF_SIZE_B0_WIDTH 9 -+ #define RX_XON_MAC_TH_B0_LBN 9 -+ #define RX_XON_MAC_TH_B0_WIDTH 8 -+ #define RX_XOFF_MAC_TH_B0_LBN 1 -+ #define RX_XOFF_MAC_TH_B0_WIDTH 8 -+ #define RX_XOFF_MAC_EN_B0_LBN 0 -+ #define RX_XOFF_MAC_EN_B0_WIDTH 1 -+ -+#else -+/* new for B0 */ -+ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 44 -+ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1 -+ #define RX_INGR_EN_B0_LBN 43 -+ #define RX_INGR_EN_B0_WIDTH 1 -+ #define RX_TOEP_IPV4_B0_LBN 42 -+ #define RX_TOEP_IPV4_B0_WIDTH 1 -+ #define RX_HASH_ALG_B0_LBN 41 -+ #define RX_HASH_ALG_B0_WIDTH 41 -+ #define RX_HASH_INSERT_HDR_B0_LBN 40 -+ #define RX_HASH_INSERT_HDR_B0_WIDTH 1 -+/* moved for B0 */ -+ #define RX_DESC_PUSH_EN_B0_LBN 35 -+ #define RX_DESC_PUSH_EN_B0_WIDTH 1 -+ #define RX_PCI_BURST_SIZE_B0_LBN 35 -+ #define RX_PCI_BURST_SIZE_B0_WIDTH 2 -+ #define RX_OWNERR_CTL_B0_LBN 34 -+ #define RX_OWNERR_CTL_B0_WIDTH 1 -+ #define RX_XON_TX_TH_B0_LBN 29 -+ #define RX_XON_TX_TH_B0_WIDTH 5 -+ #define RX_XOFF_TX_TH_B0_LBN 24 -+ #define RX_XOFF_TX_TH_B0_WIDTH 5 -+ #define RX_USR_BUF_SIZE_B0_LBN 15 -+ #define RX_USR_BUF_SIZE_B0_WIDTH 9 -+ #define RX_XON_MAC_TH_B0_LBN 8 -+ #define RX_XON_MAC_TH_B0_WIDTH 7 -+ #define RX_XOFF_MAC_TH_B0_LBN 1 -+ #define RX_XOFF_MAC_TH_B0_WIDTH 7 -+ #define RX_XOFF_MAC_EN_B0_LBN 0 -+ #define RX_XOFF_MAC_EN_B0_WIDTH 1 -+ -+#endif -+ -+/* A0/A1 */ -+ #define RX_PUSH_EN_A1_LBN 35 -+ #define RX_PUSH_EN_A1_WIDTH 1 -+ #define RX_PCI_BURST_SIZE_A1_LBN 31 -+ #define RX_PCI_BURST_SIZE_A1_WIDTH 3 -+ #define RX_OWNERR_CTL_A1_LBN 30 -+ #define RX_OWNERR_CTL_A1_WIDTH 1 -+ #define RX_XON_TX_TH_A1_LBN 25 -+ #define RX_XON_TX_TH_A1_WIDTH 5 -+ #define RX_XOFF_TX_TH_A1_LBN 20 -+ #define RX_XOFF_TX_TH_A1_WIDTH 5 -+ #define RX_USR_BUF_SIZE_A1_LBN 11 -+ #define RX_USR_BUF_SIZE_A1_WIDTH 9 -+ #define RX_XON_MAC_TH_A1_LBN 6 -+ #define RX_XON_MAC_TH_A1_WIDTH 5 -+ #define RX_XOFF_MAC_TH_A1_LBN 1 -+ #define RX_XOFF_MAC_TH_A1_WIDTH 5 -+ #define RX_XOFF_MAC_EN_A1_LBN 0 -+ #define 
RX_XOFF_MAC_EN_A1_WIDTH 1 -+ -+#define RX_FILTER_CTL_REG_OFST 0x810 /* Receive filter control registers */ -+ #define SCATTER_ENBL_NO_MATCH_Q_B0_LBN 40 -+ #define SCATTER_ENBL_NO_MATCH_Q_B0_WIDTH 1 -+ #define UDP_FULL_SRCH_LIMIT_LBN 32 -+ #define UDP_FULL_SRCH_LIMIT_WIDTH 8 -+ #define NUM_KER_LBN 24 -+ #define NUM_KER_WIDTH 2 -+ #define UDP_WILD_SRCH_LIMIT_LBN 16 -+ #define UDP_WILD_SRCH_LIMIT_WIDTH 8 -+ #define TCP_WILD_SRCH_LIMIT_LBN 8 -+ #define TCP_WILD_SRCH_LIMIT_WIDTH 8 -+ #define TCP_FULL_SRCH_LIMIT_LBN 0 -+ #define TCP_FULL_SRCH_LIMIT_WIDTH 8 -+#define RX_FLUSH_DESCQ_REG_KER_OFST 0x820 /* Receive flush descriptor queue -+ register */ -+#define RX_FLUSH_DESCQ_REG_OFST 0x820 /* Receive flush descriptor queue -+ register */ -+ #define RX_FLUSH_DESCQ_CMD_LBN 24 -+ #define RX_FLUSH_DESCQ_CMD_WIDTH 1 -+ #define RX_FLUSH_EVQ_ID_LBN 12 -+ #define RX_FLUSH_EVQ_ID_WIDTH 12 -+ #define RX_FLUSH_DESCQ_LBN 0 -+ #define RX_FLUSH_DESCQ_WIDTH 12 -+#define RX_DESC_UPD_REG_KER_OFST 0x830 /* Kernel receive descriptor update -+ register. Page-mapped */ -+#define RX_DESC_UPD_REG_PAGE4_OFST 0x8830 /* Char & user receive descriptor -+ update register. Page-mapped. -+ For lowest 1K queues. */ -+#define RX_DESC_UPD_REG_PAGE123K_OFST 0x1000830 /* Char & user receive -+ descriptor update register. -+ Page-mapped. For upper -+ 3K queues. 
*/ -+ #define RX_DESC_WPTR_LBN 96 -+ #define RX_DESC_WPTR_WIDTH 12 -+ #define RX_DESC_PUSH_CMD_LBN 95 -+ #define RX_DESC_PUSH_CMD_WIDTH 1 -+ #define RX_DESC_LBN 0 -+ #define RX_DESC_WIDTH 64 -+ #define RX_KER_DESC_LBN 0 -+ #define RX_KER_DESC_WIDTH 64 -+ #define RX_USR_DESC_LBN 0 -+ #define RX_USR_DESC_WIDTH 32 -+#define RX_DC_CFG_REG_KER_OFST 0x840 /* Receive descriptor cache -+ configuration register */ -+#define RX_DC_CFG_REG_OFST 0x840 /* Receive descriptor cache -+ configuration register */ -+ #define RX_DC_SIZE_LBN 0 -+ #define RX_DC_SIZE_WIDTH 2 -+#define RX_DC_PF_WM_REG_KER_OFST 0x850 /* Receive descriptor cache pre-fetch -+ watermark register */ -+#define RX_DC_PF_WM_REG_OFST 0x850 /* Receive descriptor cache pre-fetch -+ watermark register */ -+ #define RX_DC_PF_LWM_LO_LBN 0 -+ #define RX_DC_PF_LWM_LO_WIDTH 6 -+ -+#define RX_RSS_TKEY_B0_OFST 0x860 /* RSS Toeplitz hash key (B0 only) */ -+ -+#define RX_NODESC_DROP_REG 0x880 -+ #define RX_NODESC_DROP_CNT_LBN 0 -+ #define RX_NODESC_DROP_CNT_WIDTH 16 -+ -+#define XM_TX_CFG_REG_OFST 0x1230 -+ #define XM_AUTO_PAD_LBN 5 -+ #define XM_AUTO_PAD_WIDTH 1 -+ -+#define RX_FILTER_TBL0_OFST 0xF00000 /* Receive filter table - even entries */ -+ #define RSS_EN_0_B0_LBN 110 -+ #define RSS_EN_0_B0_WIDTH 1 -+ #define SCATTER_EN_0_B0_LBN 109 -+ #define SCATTER_EN_0_B0_WIDTH 1 -+ #define TCP_UDP_0_LBN 108 -+ #define TCP_UDP_0_WIDTH 1 -+ #define RXQ_ID_0_LBN 96 -+ #define RXQ_ID_0_WIDTH 12 -+ #define DEST_IP_0_LBN 64 -+ #define DEST_IP_0_WIDTH 32 -+ #define DEST_PORT_TCP_0_LBN 48 -+ #define DEST_PORT_TCP_0_WIDTH 16 -+ #define SRC_IP_0_LBN 16 -+ #define SRC_IP_0_WIDTH 32 -+ #define SRC_TCP_DEST_UDP_0_LBN 0 -+ #define SRC_TCP_DEST_UDP_0_WIDTH 16 -+#define RX_FILTER_TBL1_OFST 0xF00010 /* Receive filter table - odd entries */ -+ #define RSS_EN_1_B0_LBN 110 -+ #define RSS_EN_1_B0_WIDTH 1 -+ #define SCATTER_EN_1_B0_LBN 109 -+ #define SCATTER_EN_1_B0_WIDTH 1 -+ #define TCP_UDP_1_LBN 108 -+ #define TCP_UDP_1_WIDTH 1 -+ #define 
RXQ_ID_1_LBN 96 -+ #define RXQ_ID_1_WIDTH 12 -+ #define DEST_IP_1_LBN 64 -+ #define DEST_IP_1_WIDTH 32 -+ #define DEST_PORT_TCP_1_LBN 48 -+ #define DEST_PORT_TCP_1_WIDTH 16 -+ #define SRC_IP_1_LBN 16 -+ #define SRC_IP_1_WIDTH 32 -+ #define SRC_TCP_DEST_UDP_1_LBN 0 -+ #define SRC_TCP_DEST_UDP_1_WIDTH 16 -+ -+#if FALCON_EXTENDED_P_BAR -+#define RX_DESC_PTR_TBL_KER_OFST 0x11800 /* Receive descriptor pointer -+ kernel access */ -+#else -+#define RX_DESC_PTR_TBL_KER_OFST 0x1800 /* Receive descriptor pointer -+ kernel access */ -+#endif -+ -+ -+#define RX_DESC_PTR_TBL_OFST 0xF40000 /* Receive descriptor pointer table */ -+ #define RX_ISCSI_DDIG_EN_LBN 88 -+ #define RX_ISCSI_DDIG_EN_WIDTH 1 -+ #define RX_ISCSI_HDIG_EN_LBN 87 -+ #define RX_ISCSI_HDIG_EN_WIDTH 1 -+ #define RX_DESC_PREF_ACT_LBN 86 -+ #define RX_DESC_PREF_ACT_WIDTH 1 -+ #define RX_DC_HW_RPTR_LBN 80 -+ #define RX_DC_HW_RPTR_WIDTH 6 -+ #define RX_DESCQ_HW_RPTR_LBN 68 -+ #define RX_DESCQ_HW_RPTR_WIDTH 12 -+ #define RX_DESCQ_SW_WPTR_LBN 56 -+ #define RX_DESCQ_SW_WPTR_WIDTH 12 -+ #define RX_DESCQ_BUF_BASE_ID_LBN 36 -+ #define RX_DESCQ_BUF_BASE_ID_WIDTH 20 -+ #define RX_DESCQ_EVQ_ID_LBN 24 -+ #define RX_DESCQ_EVQ_ID_WIDTH 12 -+ #define RX_DESCQ_OWNER_ID_LBN 10 -+ #define RX_DESCQ_OWNER_ID_WIDTH 14 -+ #define RX_DESCQ_LABEL_LBN 5 -+ #define RX_DESCQ_LABEL_WIDTH 5 -+ #define RX_DESCQ_SIZE_LBN 3 -+ #define RX_DESCQ_SIZE_WIDTH 2 -+ #define RX_DESCQ_TYPE_LBN 2 -+ #define RX_DESCQ_TYPE_WIDTH 1 -+ #define RX_DESCQ_JUMBO_LBN 1 -+ #define RX_DESCQ_JUMBO_WIDTH 1 -+ #define RX_DESCQ_EN_LBN 0 -+ #define RX_DESCQ_EN_WIDTH 1 -+ -+ -+#define RX_RSS_INDIR_TBL_B0_OFST 0xFB0000 /* RSS indirection table (B0 only) */ -+ #define RX_RSS_INDIR_ENT_B0_LBN 0 -+ #define RX_RSS_INDIR_ENT_B0_WIDTH 6 -+ -+/*************---- TX Datapath Registers C Header ----*************/ -+#define TX_FLUSH_DESCQ_REG_KER_OFST 0xA00 /* Transmit flush descriptor -+ queue register */ -+#define TX_FLUSH_DESCQ_REG_OFST 0xA00 /* Transmit flush descriptor queue -+ 
register */ -+ #define TX_FLUSH_DESCQ_CMD_LBN 12 -+ #define TX_FLUSH_DESCQ_CMD_WIDTH 1 -+ #define TX_FLUSH_DESCQ_LBN 0 -+ #define TX_FLUSH_DESCQ_WIDTH 12 -+#define TX_DESC_UPD_REG_KER_OFST 0xA10 /* Kernel transmit descriptor update -+ register. Page-mapped */ -+#define TX_DESC_UPD_REG_PAGE4_OFST 0x8A10 /* Char & user transmit descriptor -+ update register. Page-mapped */ -+#define TX_DESC_UPD_REG_PAGE123K_OFST 0x1000A10 /* Char & user transmit -+ descriptor update register. -+ Page-mapped */ -+ #define TX_DESC_WPTR_LBN 96 -+ #define TX_DESC_WPTR_WIDTH 12 -+ #define TX_DESC_PUSH_CMD_LBN 95 -+ #define TX_DESC_PUSH_CMD_WIDTH 1 -+ #define TX_DESC_LBN 0 -+ #define TX_DESC_WIDTH 95 -+ #define TX_KER_DESC_LBN 0 -+ #define TX_KER_DESC_WIDTH 64 -+ #define TX_USR_DESC_LBN 0 -+ #define TX_USR_DESC_WIDTH 64 -+#define TX_DC_CFG_REG_KER_OFST 0xA20 /* Transmit descriptor cache -+ configuration register */ -+#define TX_DC_CFG_REG_OFST 0xA20 /* Transmit descriptor cache configuration -+ register */ -+ #define TX_DC_SIZE_LBN 0 -+ #define TX_DC_SIZE_WIDTH 2 -+ -+#if FALCON_EXTENDED_P_BAR -+#define TX_DESC_PTR_TBL_KER_OFST 0x11900 /* Transmit descriptor pointer. */ -+#else -+#define TX_DESC_PTR_TBL_KER_OFST 0x1900 /* Transmit descriptor pointer. 
*/ -+#endif -+ -+ -+#define TX_DESC_PTR_TBL_OFST 0xF50000 /* Transmit descriptor pointer */ -+ #define TX_NON_IP_DROP_DIS_B0_LBN 91 -+ #define TX_NON_IP_DROP_DIS_B0_WIDTH 1 -+ #define TX_IP_CHKSM_DIS_B0_LBN 90 -+ #define TX_IP_CHKSM_DIS_B0_WIDTH 1 -+ #define TX_TCP_CHKSM_DIS_B0_LBN 89 -+ #define TX_TCP_CHKSM_DIS_B0_WIDTH 1 -+ #define TX_DESCQ_EN_LBN 88 -+ #define TX_DESCQ_EN_WIDTH 1 -+ #define TX_ISCSI_DDIG_EN_LBN 87 -+ #define TX_ISCSI_DDIG_EN_WIDTH 1 -+ #define TX_ISCSI_HDIG_EN_LBN 86 -+ #define TX_ISCSI_HDIG_EN_WIDTH 1 -+ #define TX_DC_HW_RPTR_LBN 80 -+ #define TX_DC_HW_RPTR_WIDTH 6 -+ #define TX_DESCQ_HW_RPTR_LBN 68 -+ #define TX_DESCQ_HW_RPTR_WIDTH 12 -+ #define TX_DESCQ_SW_WPTR_LBN 56 -+ #define TX_DESCQ_SW_WPTR_WIDTH 12 -+ #define TX_DESCQ_BUF_BASE_ID_LBN 36 -+ #define TX_DESCQ_BUF_BASE_ID_WIDTH 20 -+ #define TX_DESCQ_EVQ_ID_LBN 24 -+ #define TX_DESCQ_EVQ_ID_WIDTH 12 -+ #define TX_DESCQ_OWNER_ID_LBN 10 -+ #define TX_DESCQ_OWNER_ID_WIDTH 14 -+ #define TX_DESCQ_LABEL_LBN 5 -+ #define TX_DESCQ_LABEL_WIDTH 5 -+ #define TX_DESCQ_SIZE_LBN 3 -+ #define TX_DESCQ_SIZE_WIDTH 2 -+ #define TX_DESCQ_TYPE_LBN 1 -+ #define TX_DESCQ_TYPE_WIDTH 2 -+ #define TX_DESCQ_FLUSH_LBN 0 -+ #define TX_DESCQ_FLUSH_WIDTH 1 -+#define TX_CFG_REG_KER_OFST 0xA50 /* Transmit configuration register */ -+#define TX_CFG_REG_OFST 0xA50 /* Transmit configuration register */ -+ #define TX_IP_ID_P1_OFS_LBN 32 -+ #define TX_IP_ID_P1_OFS_WIDTH 15 -+ #define TX_IP_ID_P0_OFS_LBN 16 -+ #define TX_IP_ID_P0_OFS_WIDTH 15 -+ #define TX_TURBO_EN_LBN 3 -+ #define TX_TURBO_EN_WIDTH 1 -+ #define TX_OWNERR_CTL_LBN 2 -+ #define TX_OWNERR_CTL_WIDTH 2 -+ #define TX_NON_IP_DROP_DIS_LBN 1 -+ #define TX_NON_IP_DROP_DIS_WIDTH 1 -+ #define TX_IP_ID_REP_EN_LBN 0 -+ #define TX_IP_ID_REP_EN_WIDTH 1 -+#define TX_RESERVED_REG_KER_OFST 0xA80 /* Transmit configuration register */ -+#define TX_RESERVED_REG_OFST 0xA80 /* Transmit configuration register */ -+ #define TX_CSR_PUSH_EN_LBN 89 -+ #define TX_CSR_PUSH_EN_WIDTH 1 -+ 
#define TX_RX_SPACER_LBN 64 -+ #define TX_RX_SPACER_WIDTH 8 -+ #define TX_SW_EV_EN_LBN 59 -+ #define TX_SW_EV_EN_WIDTH 1 -+ #define TX_RX_SPACER_EN_LBN 57 -+ #define TX_RX_SPACER_EN_WIDTH 1 -+ #define TX_CSR_PREF_WD_TMR_LBN 24 -+ #define TX_CSR_PREF_WD_TMR_WIDTH 16 -+ #define TX_CSR_ONLY1TAG_LBN 21 -+ #define TX_CSR_ONLY1TAG_WIDTH 1 -+ #define TX_PREF_THRESHOLD_LBN 19 -+ #define TX_PREF_THRESHOLD_WIDTH 2 -+ #define TX_ONE_PKT_PER_Q_LBN 18 -+ #define TX_ONE_PKT_PER_Q_WIDTH 1 -+ #define TX_DIS_NON_IP_EV_LBN 17 -+ #define TX_DIS_NON_IP_EV_WIDTH 1 -+ #define TX_DMA_SPACER_LBN 8 -+ #define TX_DMA_SPACER_WIDTH 8 -+ #define TX_FLUSH_MIN_LEN_EN_B0_LBN 7 -+ #define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1 -+ #define TX_TCP_DIS_A1_LBN 7 -+ #define TX_TCP_DIS_A1_WIDTH 1 -+ #define TX_IP_DIS_A1_LBN 6 -+ #define TX_IP_DIS_A1_WIDTH 1 -+ #define TX_MAX_CPL_LBN 2 -+ #define TX_MAX_CPL_WIDTH 2 -+ #define TX_MAX_PREF_LBN 0 -+ #define TX_MAX_PREF_WIDTH 2 -+#define TX_VLAN_REG_OFST 0xAE0 /* Transmit VLAN tag register */ -+ #define TX_VLAN_EN_LBN 127 -+ #define TX_VLAN_EN_WIDTH 1 -+ #define TX_VLAN7_PORT1_EN_LBN 125 -+ #define TX_VLAN7_PORT1_EN_WIDTH 1 -+ #define TX_VLAN7_PORT0_EN_LBN 124 -+ #define TX_VLAN7_PORT0_EN_WIDTH 1 -+ #define TX_VLAN7_LBN 112 -+ #define TX_VLAN7_WIDTH 12 -+ #define TX_VLAN6_PORT1_EN_LBN 109 -+ #define TX_VLAN6_PORT1_EN_WIDTH 1 -+ #define TX_VLAN6_PORT0_EN_LBN 108 -+ #define TX_VLAN6_PORT0_EN_WIDTH 1 -+ #define TX_VLAN6_LBN 96 -+ #define TX_VLAN6_WIDTH 12 -+ #define TX_VLAN5_PORT1_EN_LBN 93 -+ #define TX_VLAN5_PORT1_EN_WIDTH 1 -+ #define TX_VLAN5_PORT0_EN_LBN 92 -+ #define TX_VLAN5_PORT0_EN_WIDTH 1 -+ #define TX_VLAN5_LBN 80 -+ #define TX_VLAN5_WIDTH 12 -+ #define TX_VLAN4_PORT1_EN_LBN 77 -+ #define TX_VLAN4_PORT1_EN_WIDTH 1 -+ #define TX_VLAN4_PORT0_EN_LBN 76 -+ #define TX_VLAN4_PORT0_EN_WIDTH 1 -+ #define TX_VLAN4_LBN 64 -+ #define TX_VLAN4_WIDTH 12 -+ #define TX_VLAN3_PORT1_EN_LBN 61 -+ #define TX_VLAN3_PORT1_EN_WIDTH 1 -+ #define TX_VLAN3_PORT0_EN_LBN 60 -+ #define 
TX_VLAN3_PORT0_EN_WIDTH 1 -+ #define TX_VLAN3_LBN 48 -+ #define TX_VLAN3_WIDTH 12 -+ #define TX_VLAN2_PORT1_EN_LBN 45 -+ #define TX_VLAN2_PORT1_EN_WIDTH 1 -+ #define TX_VLAN2_PORT0_EN_LBN 44 -+ #define TX_VLAN2_PORT0_EN_WIDTH 1 -+ #define TX_VLAN2_LBN 32 -+ #define TX_VLAN2_WIDTH 12 -+ #define TX_VLAN1_PORT1_EN_LBN 29 -+ #define TX_VLAN1_PORT1_EN_WIDTH 1 -+ #define TX_VLAN1_PORT0_EN_LBN 28 -+ #define TX_VLAN1_PORT0_EN_WIDTH 1 -+ #define TX_VLAN1_LBN 16 -+ #define TX_VLAN1_WIDTH 12 -+ #define TX_VLAN0_PORT1_EN_LBN 13 -+ #define TX_VLAN0_PORT1_EN_WIDTH 1 -+ #define TX_VLAN0_PORT0_EN_LBN 12 -+ #define TX_VLAN0_PORT0_EN_WIDTH 1 -+ #define TX_VLAN0_LBN 0 -+ #define TX_VLAN0_WIDTH 12 -+#define TX_FIL_CTL_REG_OFST 0xAF0 /* Transmit filter control register */ -+ #define TX_MADR1_FIL_EN_LBN 65 -+ #define TX_MADR1_FIL_EN_WIDTH 1 -+ #define TX_MADR0_FIL_EN_LBN 64 -+ #define TX_MADR0_FIL_EN_WIDTH 1 -+ #define TX_IPFIL31_PORT1_EN_LBN 63 -+ #define TX_IPFIL31_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL31_PORT0_EN_LBN 62 -+ #define TX_IPFIL31_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL30_PORT1_EN_LBN 61 -+ #define TX_IPFIL30_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL30_PORT0_EN_LBN 60 -+ #define TX_IPFIL30_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL29_PORT1_EN_LBN 59 -+ #define TX_IPFIL29_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL29_PORT0_EN_LBN 58 -+ #define TX_IPFIL29_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL28_PORT1_EN_LBN 57 -+ #define TX_IPFIL28_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL28_PORT0_EN_LBN 56 -+ #define TX_IPFIL28_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL27_PORT1_EN_LBN 55 -+ #define TX_IPFIL27_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL27_PORT0_EN_LBN 54 -+ #define TX_IPFIL27_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL26_PORT1_EN_LBN 53 -+ #define TX_IPFIL26_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL26_PORT0_EN_LBN 52 -+ #define TX_IPFIL26_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL25_PORT1_EN_LBN 51 -+ #define TX_IPFIL25_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL25_PORT0_EN_LBN 50 -+ #define TX_IPFIL25_PORT0_EN_WIDTH 1 -+ #define 
TX_IPFIL24_PORT1_EN_LBN 49 -+ #define TX_IPFIL24_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL24_PORT0_EN_LBN 48 -+ #define TX_IPFIL24_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL23_PORT1_EN_LBN 47 -+ #define TX_IPFIL23_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL23_PORT0_EN_LBN 46 -+ #define TX_IPFIL23_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL22_PORT1_EN_LBN 45 -+ #define TX_IPFIL22_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL22_PORT0_EN_LBN 44 -+ #define TX_IPFIL22_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL21_PORT1_EN_LBN 43 -+ #define TX_IPFIL21_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL21_PORT0_EN_LBN 42 -+ #define TX_IPFIL21_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL20_PORT1_EN_LBN 41 -+ #define TX_IPFIL20_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL20_PORT0_EN_LBN 40 -+ #define TX_IPFIL20_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL19_PORT1_EN_LBN 39 -+ #define TX_IPFIL19_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL19_PORT0_EN_LBN 38 -+ #define TX_IPFIL19_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL18_PORT1_EN_LBN 37 -+ #define TX_IPFIL18_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL18_PORT0_EN_LBN 36 -+ #define TX_IPFIL18_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL17_PORT1_EN_LBN 35 -+ #define TX_IPFIL17_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL17_PORT0_EN_LBN 34 -+ #define TX_IPFIL17_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL16_PORT1_EN_LBN 33 -+ #define TX_IPFIL16_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL16_PORT0_EN_LBN 32 -+ #define TX_IPFIL16_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL15_PORT1_EN_LBN 31 -+ #define TX_IPFIL15_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL15_PORT0_EN_LBN 30 -+ #define TX_IPFIL15_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL14_PORT1_EN_LBN 29 -+ #define TX_IPFIL14_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL14_PORT0_EN_LBN 28 -+ #define TX_IPFIL14_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL13_PORT1_EN_LBN 27 -+ #define TX_IPFIL13_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL13_PORT0_EN_LBN 26 -+ #define TX_IPFIL13_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL12_PORT1_EN_LBN 25 -+ #define TX_IPFIL12_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL12_PORT0_EN_LBN 24 -+ #define TX_IPFIL12_PORT0_EN_WIDTH 1 -+ 
#define TX_IPFIL11_PORT1_EN_LBN 23 -+ #define TX_IPFIL11_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL11_PORT0_EN_LBN 22 -+ #define TX_IPFIL11_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL10_PORT1_EN_LBN 21 -+ #define TX_IPFIL10_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL10_PORT0_EN_LBN 20 -+ #define TX_IPFIL10_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL9_PORT1_EN_LBN 19 -+ #define TX_IPFIL9_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL9_PORT0_EN_LBN 18 -+ #define TX_IPFIL9_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL8_PORT1_EN_LBN 17 -+ #define TX_IPFIL8_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL8_PORT0_EN_LBN 16 -+ #define TX_IPFIL8_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL7_PORT1_EN_LBN 15 -+ #define TX_IPFIL7_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL7_PORT0_EN_LBN 14 -+ #define TX_IPFIL7_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL6_PORT1_EN_LBN 13 -+ #define TX_IPFIL6_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL6_PORT0_EN_LBN 12 -+ #define TX_IPFIL6_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL5_PORT1_EN_LBN 11 -+ #define TX_IPFIL5_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL5_PORT0_EN_LBN 10 -+ #define TX_IPFIL5_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL4_PORT1_EN_LBN 9 -+ #define TX_IPFIL4_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL4_PORT0_EN_LBN 8 -+ #define TX_IPFIL4_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL3_PORT1_EN_LBN 7 -+ #define TX_IPFIL3_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL3_PORT0_EN_LBN 6 -+ #define TX_IPFIL3_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL2_PORT1_EN_LBN 5 -+ #define TX_IPFIL2_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL2_PORT0_EN_LBN 4 -+ #define TX_IPFIL2_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL1_PORT1_EN_LBN 3 -+ #define TX_IPFIL1_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL1_PORT0_EN_LBN 2 -+ #define TX_IPFIL1_PORT0_EN_WIDTH 1 -+ #define TX_IPFIL0_PORT1_EN_LBN 1 -+ #define TX_IPFIL0_PORT1_EN_WIDTH 1 -+ #define TX_IPFIL0_PORT0_EN_LBN 0 -+ #define TX_IPFIL0_PORT0_EN_WIDTH 1 -+#define TX_IPFIL_TBL_OFST 0xB00 /* Transmit IP source address filter table */ -+ #define TX_IPFIL_MASK_LBN 32 -+ #define TX_IPFIL_MASK_WIDTH 32 -+ #define TX_IP_SRC_ADR_LBN 0 -+ #define 
TX_IP_SRC_ADR_WIDTH 32 -+#define TX_PACE_REG_A1_OFST 0xF80000 /* Transmit pace control register */ -+#define TX_PACE_REG_B0_OFST 0xA90 /* Transmit pace control register */ -+ #define TX_PACE_SB_NOTAF_LBN 19 -+ #define TX_PACE_SB_NOTAF_WIDTH 10 -+ #define TX_PACE_SB_AF_LBN 9 -+ #define TX_PACE_SB_AF_WIDTH 10 -+ #define TX_PACE_FB_BASE_LBN 5 -+ #define TX_PACE_FB_BASE_WIDTH 4 -+ #define TX_PACE_BIN_TH_LBN 0 -+ #define TX_PACE_BIN_TH_WIDTH 5 -+#define TX_PACE_TBL_A1_OFST 0xF80040 /* Transmit pacing table */ -+#define TX_PACE_TBL_FIRST_QUEUE_A1 4 -+#define TX_PACE_TBL_B0_OFST 0xF80000 /* Transmit pacing table */ -+#define TX_PACE_TBL_FIRST_QUEUE_B0 0 -+ #define TX_PACE_LBN 0 -+ #define TX_PACE_WIDTH 5 -+ -+/*************---- EE/Flash Registers C Header ----*************/ -+#define EE_SPI_HCMD_REG_KER_OFST 0x100 /* SPI host command register */ -+#define EE_SPI_HCMD_REG_OFST 0x100 /* SPI host command register */ -+ #define EE_SPI_HCMD_CMD_EN_LBN 31 -+ #define EE_SPI_HCMD_CMD_EN_WIDTH 1 -+ #define EE_WR_TIMER_ACTIVE_LBN 28 -+ #define EE_WR_TIMER_ACTIVE_WIDTH 1 -+ #define EE_SPI_HCMD_SF_SEL_LBN 24 -+ #define EE_SPI_HCMD_SF_SEL_WIDTH 1 -+ #define EE_SPI_HCMD_DABCNT_LBN 16 -+ #define EE_SPI_HCMD_DABCNT_WIDTH 5 -+ #define EE_SPI_HCMD_READ_LBN 15 -+ #define EE_SPI_HCMD_READ_WIDTH 1 -+ #define EE_SPI_HCMD_DUBCNT_LBN 12 -+ #define EE_SPI_HCMD_DUBCNT_WIDTH 2 -+ #define EE_SPI_HCMD_ADBCNT_LBN 8 -+ #define EE_SPI_HCMD_ADBCNT_WIDTH 2 -+ #define EE_SPI_HCMD_ENC_LBN 0 -+ #define EE_SPI_HCMD_ENC_WIDTH 8 -+#define EE_SPI_HADR_REG_KER_OFST 0X110 /* SPI host address register */ -+#define EE_SPI_HADR_REG_OFST 0X110 /* SPI host address register */ -+ #define EE_SPI_HADR_DUBYTE_LBN 24 -+ #define EE_SPI_HADR_DUBYTE_WIDTH 8 -+ #define EE_SPI_HADR_ADR_LBN 0 -+ #define EE_SPI_HADR_ADR_WIDTH 24 -+#define EE_SPI_HDATA_REG_KER_OFST 0x120 /* SPI host data register */ -+#define EE_SPI_HDATA_REG_OFST 0x120 /* SPI host data register */ -+ #define EE_SPI_HDATA3_LBN 96 -+ #define EE_SPI_HDATA3_WIDTH 32 
-+ #define EE_SPI_HDATA2_LBN 64 -+ #define EE_SPI_HDATA2_WIDTH 32 -+ #define EE_SPI_HDATA1_LBN 32 -+ #define EE_SPI_HDATA1_WIDTH 32 -+ #define EE_SPI_HDATA0_LBN 0 -+ #define EE_SPI_HDATA0_WIDTH 32 -+#define EE_BASE_PAGE_REG_KER_OFST 0x130 /* Expansion ROM base mirror register */ -+#define EE_BASE_PAGE_REG_OFST 0x130 /* Expansion ROM base mirror register */ -+ #define EE_EXP_ROM_WINDOW_BASE_LBN 16 -+ #define EE_EXP_ROM_WINDOW_BASE_WIDTH 13 -+ #define EE_EXPROM_MASK_LBN 0 -+ #define EE_EXPROM_MASK_WIDTH 13 -+#define EE_VPD_CFG0_REG_KER_OFST 0X140 /* SPI/VPD configuration register */ -+#define EE_VPD_CFG0_REG_OFST 0X140 /* SPI/VPD configuration register */ -+ #define EE_SF_FASTRD_EN_LBN 127 -+ #define EE_SF_FASTRD_EN_WIDTH 1 -+ #define EE_SF_CLOCK_DIV_LBN 120 -+ #define EE_SF_CLOCK_DIV_WIDTH 7 -+ #define EE_VPD_WIP_POLL_LBN 119 -+ #define EE_VPD_WIP_POLL_WIDTH 1 -+ #define EE_VPDW_LENGTH_LBN 80 -+ #define EE_VPDW_LENGTH_WIDTH 15 -+ #define EE_VPDW_BASE_LBN 64 -+ #define EE_VPDW_BASE_WIDTH 15 -+ #define EE_VPD_WR_CMD_EN_LBN 56 -+ #define EE_VPD_WR_CMD_EN_WIDTH 8 -+ #define EE_VPD_BASE_LBN 32 -+ #define EE_VPD_BASE_WIDTH 24 -+ #define EE_VPD_LENGTH_LBN 16 -+ #define EE_VPD_LENGTH_WIDTH 13 -+ #define EE_VPD_AD_SIZE_LBN 8 -+ #define EE_VPD_AD_SIZE_WIDTH 5 -+ #define EE_VPD_ACCESS_ON_LBN 5 -+ #define EE_VPD_ACCESS_ON_WIDTH 1 -+#define EE_VPD_SW_CNTL_REG_KER_OFST 0X150 /* VPD access SW control register */ -+#define EE_VPD_SW_CNTL_REG_OFST 0X150 /* VPD access SW control register */ -+ #define EE_VPD_CYCLE_PENDING_LBN 31 -+ #define EE_VPD_CYCLE_PENDING_WIDTH 1 -+ #define EE_VPD_CYC_WRITE_LBN 28 -+ #define EE_VPD_CYC_WRITE_WIDTH 1 -+ #define EE_VPD_CYC_ADR_LBN 0 -+ #define EE_VPD_CYC_ADR_WIDTH 15 -+#define EE_VPD_SW_DATA_REG_KER_OFST 0x160 /* VPD access SW data register */ -+#define EE_VPD_SW_DATA_REG_OFST 0x160 /* VPD access SW data register */ -+ #define EE_VPD_CYC_DAT_LBN 0 -+ #define EE_VPD_CYC_DAT_WIDTH 32 ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ 
head-2009-04-21/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_desc.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,75 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) descriptor -+ * definitions. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+/*************---- Descriptors C Headers ----*************/ -+/* Receive Kernel IP Descriptor */ -+ #define RX_KER_BUF_SIZE_LBN 48 -+ #define RX_KER_BUF_SIZE_WIDTH 14 -+ #define RX_KER_BUF_REGION_LBN 46 -+ #define RX_KER_BUF_REGION_WIDTH 2 -+ #define RX_KER_BUF_REGION0_DECODE 0 -+ #define RX_KER_BUF_REGION1_DECODE 1 -+ #define RX_KER_BUF_REGION2_DECODE 2 -+ #define RX_KER_BUF_REGION3_DECODE 3 -+ #define RX_KER_BUF_ADR_LBN 0 -+ #define RX_KER_BUF_ADR_WIDTH 46 -+/* Receive User IP Descriptor */ -+ #define RX_USR_2BYTE_OFS_LBN 20 -+ #define RX_USR_2BYTE_OFS_WIDTH 12 -+ #define RX_USR_BUF_ID_LBN 0 -+ #define RX_USR_BUF_ID_WIDTH 20 -+/* Transmit Kernel IP Descriptor */ -+ #define TX_KER_PORT_LBN 63 -+ #define TX_KER_PORT_WIDTH 1 -+ #define TX_KER_CONT_LBN 62 -+ #define TX_KER_CONT_WIDTH 1 -+ #define TX_KER_BYTE_CNT_LBN 48 -+ #define TX_KER_BYTE_CNT_WIDTH 14 -+ #define TX_KER_BUF_REGION_LBN 46 -+ #define TX_KER_BUF_REGION_WIDTH 2 -+ #define TX_KER_BUF_REGION0_DECODE 0 -+ #define TX_KER_BUF_REGION1_DECODE 1 -+ #define TX_KER_BUF_REGION2_DECODE 2 -+ #define TX_KER_BUF_REGION3_DECODE 3 -+ #define TX_KER_BUF_ADR_LBN 0 -+ #define TX_KER_BUF_ADR_WIDTH 46 -+/* Transmit User IP Descriptor */ -+ #define TX_USR_PORT_LBN 47 -+ #define TX_USR_PORT_WIDTH 1 -+ #define TX_USR_CONT_LBN 46 -+ #define TX_USR_CONT_WIDTH 1 -+ #define TX_USR_BYTE_CNT_LBN 33 -+ #define TX_USR_BYTE_CNT_WIDTH 13 -+ #define TX_USR_BUF_ID_LBN 13 -+ #define TX_USR_BUF_ID_WIDTH 20 -+ #define TX_USR_BYTE_OFS_LBN 0 -+ #define TX_USR_BYTE_OFS_WIDTH 13 ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_event.h 2008-07-17 
16:18:07.000000000 +0200 -@@ -0,0 +1,155 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) event -+ * definitions. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+/*************---- Events Format C Header ----*************/ -+/*************---- Event entry ----*************/ -+ #define EV_CODE_LBN 60 -+ #define EV_CODE_WIDTH 4 -+ #define RX_IP_EV_DECODE 0 -+ #define TX_IP_EV_DECODE 2 -+ #define DRIVER_EV_DECODE 5 -+ #define GLOBAL_EV_DECODE 6 -+ #define DRV_GEN_EV_DECODE 7 -+ #define EV_DATA_LBN 0 -+ #define EV_DATA_WIDTH 60 -+/******---- Receive IP events for both Kernel & User event queues ----******/ -+ #define RX_EV_PKT_OK_LBN 56 -+ #define RX_EV_PKT_OK_WIDTH 1 -+ #define RX_EV_BUF_OWNER_ID_ERR_LBN 54 -+ #define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 -+ #define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 -+ #define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 -+ #define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 -+ #define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 -+ #define RX_EV_ETH_CRC_ERR_LBN 50 -+ #define RX_EV_ETH_CRC_ERR_WIDTH 1 -+ #define RX_EV_FRM_TRUNC_LBN 49 -+ #define RX_EV_FRM_TRUNC_WIDTH 1 -+ #define RX_EV_DRIB_NIB_LBN 48 -+ #define RX_EV_DRIB_NIB_WIDTH 1 -+ #define RX_EV_TOBE_DISC_LBN 47 -+ #define RX_EV_TOBE_DISC_WIDTH 1 -+ #define RX_EV_PKT_TYPE_LBN 44 -+ #define RX_EV_PKT_TYPE_WIDTH 3 -+ #define RX_EV_PKT_TYPE_ETH_DECODE 0 -+ #define RX_EV_PKT_TYPE_LLC_DECODE 1 -+ #define RX_EV_PKT_TYPE_JUMBO_DECODE 2 -+ #define RX_EV_PKT_TYPE_VLAN_DECODE 3 -+ #define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4 -+ #define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5 -+ #define RX_EV_HDR_TYPE_LBN 42 -+ #define RX_EV_HDR_TYPE_WIDTH 2 -+ #define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0 -+ #define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1 -+ #define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2 -+ #define RX_EV_HDR_TYPE_NON_IP_DECODE 3 -+ #define RX_EV_DESC_Q_EMPTY_LBN 41 -+ #define RX_EV_DESC_Q_EMPTY_WIDTH 1 -+ #define 
RX_EV_MCAST_HASH_MATCH_LBN 40 -+ #define RX_EV_MCAST_HASH_MATCH_WIDTH 1 -+ #define RX_EV_MCAST_PKT_LBN 39 -+ #define RX_EV_MCAST_PKT_WIDTH 1 -+ #define RX_EV_Q_LABEL_LBN 32 -+ #define RX_EV_Q_LABEL_WIDTH 5 -+ #define RX_JUMBO_CONT_LBN 31 -+ #define RX_JUMBO_CONT_WIDTH 1 -+ #define RX_SOP_LBN 15 -+ #define RX_SOP_WIDTH 1 -+ #define RX_PORT_LBN 30 -+ #define RX_PORT_WIDTH 1 -+ #define RX_EV_BYTE_CNT_LBN 16 -+ #define RX_EV_BYTE_CNT_WIDTH 14 -+ #define RX_iSCSI_PKT_OK_LBN 14 -+ #define RX_iSCSI_PKT_OK_WIDTH 1 -+ #define RX_ISCSI_DDIG_ERR_LBN 13 -+ #define RX_ISCSI_DDIG_ERR_WIDTH 1 -+ #define RX_ISCSI_HDIG_ERR_LBN 12 -+ #define RX_ISCSI_HDIG_ERR_WIDTH 1 -+ #define RX_EV_DESC_PTR_LBN 0 -+ #define RX_EV_DESC_PTR_WIDTH 12 -+/******---- Transmit IP events for both Kernel & User event queues ----******/ -+ #define TX_EV_PKT_ERR_LBN 38 -+ #define TX_EV_PKT_ERR_WIDTH 1 -+ #define TX_EV_PKT_TOO_BIG_LBN 37 -+ #define TX_EV_PKT_TOO_BIG_WIDTH 1 -+ #define TX_EV_Q_LABEL_LBN 32 -+ #define TX_EV_Q_LABEL_WIDTH 5 -+ #define TX_EV_PORT_LBN 16 -+ #define TX_EV_PORT_WIDTH 1 -+ #define TX_EV_WQ_FF_FULL_LBN 15 -+ #define TX_EV_WQ_FF_FULL_WIDTH 1 -+ #define TX_EV_BUF_OWNER_ID_ERR_LBN 14 -+ #define TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 -+ #define TX_EV_COMP_LBN 12 -+ #define TX_EV_COMP_WIDTH 1 -+ #define TX_EV_DESC_PTR_LBN 0 -+ #define TX_EV_DESC_PTR_WIDTH 12 -+/*************---- Char or Kernel driver events ----*************/ -+ #define DRIVER_EV_SUB_CODE_LBN 56 -+ #define DRIVER_EV_SUB_CODE_WIDTH 4 -+ #define TX_DESCQ_FLS_DONE_EV_DECODE 0x0 -+ #define RX_DESCQ_FLS_DONE_EV_DECODE 0x1 -+ #define EVQ_INIT_DONE_EV_DECODE 0x2 -+ #define EVQ_NOT_EN_EV_DECODE 0x3 -+ #define RX_DESCQ_FLSFF_OVFL_EV_DECODE 0x4 -+ #define SRM_UPD_DONE_EV_DECODE 0x5 -+ #define WAKE_UP_EV_DECODE 0x6 -+ #define TX_PKT_NON_TCP_UDP_DECODE 0x9 -+ #define TIMER_EV_DECODE 0xA -+ #define RX_DSC_ERROR_EV_DECODE 0xE -+ #define DRIVER_EV_TX_DESCQ_ID_LBN 0 -+ #define DRIVER_EV_TX_DESCQ_ID_WIDTH 12 -+ #define DRIVER_EV_RX_DESCQ_ID_LBN 0 
-+ #define DRIVER_EV_RX_DESCQ_ID_WIDTH 12 -+ #define DRIVER_EV_EVQ_ID_LBN 0 -+ #define DRIVER_EV_EVQ_ID_WIDTH 12 -+ #define DRIVER_TMR_ID_LBN 0 -+ #define DRIVER_TMR_ID_WIDTH 12 -+ #define DRIVER_EV_SRM_UPD_LBN 0 -+ #define DRIVER_EV_SRM_UPD_WIDTH 2 -+ #define SRM_CLR_EV_DECODE 0 -+ #define SRM_UPD_EV_DECODE 1 -+ #define SRM_ILLCLR_EV_DECODE 2 -+/********---- Global events. Sent to both event queue 0 and 4. ----********/ -+ #define XFP_PHY_INTR_LBN 10 -+ #define XFP_PHY_INTR_WIDTH 1 -+ #define XG_PHY_INTR_LBN 9 -+ #define XG_PHY_INTR_WIDTH 1 -+ #define G_PHY1_INTR_LBN 8 -+ #define G_PHY1_INTR_WIDTH 1 -+ #define G_PHY0_INTR_LBN 7 -+ #define G_PHY0_INTR_WIDTH 1 -+/*************---- Driver generated events ----*************/ -+ #define DRV_GEN_EV_CODE_LBN 60 -+ #define DRV_GEN_EV_CODE_WIDTH 4 -+ #define DRV_GEN_EV_DATA_LBN 0 -+ #define DRV_GEN_EV_DATA_WIDTH 60 ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_intr_vec.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,44 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) interrupt -+ * vector definitions. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+/*************---- Interrupt Vector Format C Header ----*************/ -+#define DW0_OFST 0x0 /* Double-word 0: Event queue FIFO interrupts */ -+ #define EVQ_FIFO_HF_LBN 1 -+ #define EVQ_FIFO_HF_WIDTH 1 -+ #define EVQ_FIFO_AF_LBN 0 -+ #define EVQ_FIFO_AF_WIDTH 1 -+#define DW1_OFST 0x4 /* Double-word 1: Interrupt indicator */ -+ #define INT_FLAG_LBN 0 -+ #define INT_FLAG_WIDTH 1 -+#define DW2_OFST 0x8 /* Double-word 2: Fatal interrupts */ -+ #define FATAL_INT_LBN 0 -+ #define FATAL_INT_WIDTH 1 ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/workarounds.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,67 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides workaround settings for EtherFabric NICs. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_DRIVER_EFAB_WORKAROUNDS_H__ -+#define __CI_DRIVER_EFAB_WORKAROUNDS_H__ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Hardware workarounds which have global scope -+ * -+ *---------------------------------------------------------------------------*/ -+ -+#if defined(__CI_HARDWARE_CONFIG_FALCON_B0__) -+/*------------------------------- B0 ---------------------------------------*/ -+ -+#define BUG2175_WORKAROUND 0 /* TX event batching for dual port operation. 
-+ This removes the effect (dup TX events) -+ of the fix -+ (TX event per packet + batch events) */ -+#define BUG5302_WORKAROUND 0 /* unstick TX DMAQ after out-of-range wr ptr */ -+#define BUG5762_WORKAROUND 0 /* Set all queues to jumbo mode */ -+#define BUG5391_WORKAROUND 0 /* Misaligned TX can't span 512-byte boundary */ -+#define BUG7916_WORKAROUND 0 /* RX flush gets lost */ -+ -+#else -+/*------------------------------- A0/A1 ------------------------------------*/ -+ -+#define BUG2175_WORKAROUND 1 /* TX event batching for dual port operation. -+ This removes the effect (dup TX events) -+ of the fix -+ (TX event per packet + batch events) */ -+#define BUG5302_WORKAROUND 1 /* unstick TX DMAQ after out-of-range wr ptr */ -+#define BUG5762_WORKAROUND 1 /* Set all queues to jumbo mode */ -+#define BUG5391_WORKAROUND 1 /* Misaligned TX can't span 512-byte boundary */ -+#define BUG7916_WORKAROUND 1 /* RX flush gets lost */ -+ -+#endif /* B0/A01 */ -+ -+#endif /* __CI_DRIVER_EFAB_WORKAROUNDS_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/driver/resource/efx_vi.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,273 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains public EFX VI API to Solarflare resource manager. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_DRIVER_RESOURCE_EFX_VI_H__ -+#define __CI_DRIVER_RESOURCE_EFX_VI_H__ -+ -+/* Default size of event queue in the efx_vi resource. Copied from -+ * CI_CFG_NETIF_EVENTQ_SIZE */ -+#define EFX_VI_EVENTQ_SIZE_DEFAULT 1024 -+ -+extern int efx_vi_eventq_size; -+ -+/************************************************************************** -+ * efx_vi_state types, allocation and free -+ **************************************************************************/ -+ -+/*! Handle for refering to a efx_vi */ -+struct efx_vi_state; -+ -+/*! -+ * Allocate an efx_vi, including event queue and pt_endpoint -+ * -+ * \param vih_out Pointer to a handle that is set on success -+ * \param ifindex Index of the network interface desired -+ * \return Zero on success (and vih_out set), non-zero on failure. -+ */ -+extern int -+efx_vi_alloc(struct efx_vi_state **vih_out, int ifindex); -+ -+/*! -+ * Free a previously allocated efx_vi -+ * -+ * \param vih The handle of the efx_vi to free -+ */ -+extern void -+efx_vi_free(struct efx_vi_state *vih); -+ -+/*! -+ * Reset a previously allocated efx_vi -+ * -+ * \param vih The handle of the efx_vi to reset -+ */ -+extern void -+efx_vi_reset(struct efx_vi_state *vih); -+ -+/************************************************************************** -+ * efx_vi_eventq types and functions -+ **************************************************************************/ -+ -+/*! 
-+ * Register a function to receive callbacks when event queue timeouts -+ * or wakeups occur. Only one function per efx_vi can be registered -+ * at once. -+ * -+ * \param vih The handle to identify the efx_vi -+ * \param callback The function to callback -+ * \param context An argument to pass to the callback function -+ * \return Zero on success, non-zero on failure. -+ */ -+extern int -+efx_vi_eventq_register_callback(struct efx_vi_state *vih, -+ void (*callback)(void *context, int is_timeout), -+ void *context); -+ -+/*! -+ * Remove the current eventq timeout or wakeup callback function -+ * -+ * \param vih The handle to identify the efx_vi -+ * \return Zero on success, non-zero on failure -+ */ -+extern int -+efx_vi_eventq_kill_callback(struct efx_vi_state *vih); -+ -+/************************************************************************** -+ * efx_vi_dma_map types and functions -+ **************************************************************************/ -+ -+/*! -+ * Handle for refering to a efx_vi -+ */ -+struct efx_vi_dma_map_state; -+ -+/*! -+ * Map a list of buffer pages so they are registered with the hardware -+ * -+ * \param vih The handle to identify the efx_vi -+ * \param addrs An array of page pointers to map -+ * \param n_addrs Length of the page pointer array. Must be a power of two. -+ * \param dmh_out Set on success to a handle used to refer to this mapping -+ * \return Zero on success, non-zero on failure. -+ */ -+extern int -+efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages, -+ int n_pages, struct efx_vi_dma_map_state **dmh_out); -+extern int -+efx_vi_dma_map_addrs(struct efx_vi_state *vih, -+ unsigned long long *dev_bus_addrs, int n_pages, -+ struct efx_vi_dma_map_state **dmh_out); -+ -+/*! -+ * Unmap a previously mapped set of pages so they are no longer registered -+ * with the hardware. 
-+ * -+ * \param vih The handle to identify the efx_vi -+ * \param dmh The handle to identify the dma mapping -+ */ -+extern void -+efx_vi_dma_unmap_pages(struct efx_vi_state *vih, -+ struct efx_vi_dma_map_state *dmh); -+extern void -+efx_vi_dma_unmap_addrs(struct efx_vi_state *vih, -+ struct efx_vi_dma_map_state *dmh); -+ -+/*! -+ * Retrieve the buffer address of the mapping -+ * -+ * \param vih The handle to identify the efx_vi -+ * \param dmh The handle to identify the buffer mapping -+ * \return The buffer address on success, or zero on failure -+ */ -+extern unsigned -+efx_vi_dma_get_map_addr(struct efx_vi_state *vih, -+ struct efx_vi_dma_map_state *dmh); -+ -+/************************************************************************** -+ * efx_vi filter functions -+ **************************************************************************/ -+ -+#define EFX_VI_STATIC_FILTERS 32 -+ -+/*! Handle to refer to a filter instance */ -+struct filter_resource_t; -+ -+/*! -+ * Allocate and add a filter -+ * -+ * \param vih The handle to identify the efx_vi -+ * \param protocol The protocol of the new filter: UDP or TCP -+ * \param ip_addr_be32 The local ip address of the filter -+ * \param port_le16 The local port of the filter -+ * \param fh_out Set on success to be a handle to refer to this filter -+ * \return Zero on success, non-zero on failure. -+ */ -+extern int -+efx_vi_filter(struct efx_vi_state *vih, int protocol, unsigned ip_addr_be32, -+ int port_le16, struct filter_resource_t **fh_out); -+ -+/*! 
-+ * Remove a filter and free resources associated with it -+ * -+ * \param vih The handle to identify the efx_vi -+ * \param fh The handle to identify the filter -+ * \return Zero on success, non-zero on failure -+ */ -+extern int -+efx_vi_filter_stop(struct efx_vi_state *vih, struct filter_resource_t *fh); -+ -+/************************************************************************** -+ * efx_vi hw resources types and functions -+ **************************************************************************/ -+ -+/*! Constants for the type field in efx_vi_hw_resource */ -+#define EFX_VI_HW_RESOURCE_TXDMAQ 0x0 /* PFN of TX DMA Q */ -+#define EFX_VI_HW_RESOURCE_RXDMAQ 0x1 /* PFN of RX DMA Q */ -+#define EFX_VI_HW_RESOURCE_EVQTIMER 0x4 /* Address of event q timer */ -+ -+/* Address of event q pointer (EF1) */ -+#define EFX_VI_HW_RESOURCE_EVQPTR 0x5 -+/* Address of register pointer (Falcon A) */ -+#define EFX_VI_HW_RESOURCE_EVQRPTR 0x6 -+/* Offset of register pointer (Falcon B) */ -+#define EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET 0x7 -+/* Address of mem KVA */ -+#define EFX_VI_HW_RESOURCE_EVQMEMKVA 0x8 -+/* PFN of doorbell page (Falcon) */ -+#define EFX_VI_HW_RESOURCE_BELLPAGE 0x9 -+ -+/*! How large an array to allocate for the get_() functions - smaller -+ than the total number of constants as some are mutually exclusive */ -+#define EFX_VI_HW_RESOURCE_MAXSIZE 0x7 -+ -+/*! Constants for the mem_type field in efx_vi_hw_resource */ -+#define EFX_VI_HW_RESOURCE_IOBUFFER 0 /* Host memory */ -+#define EFX_VI_HW_RESOURCE_PERIPHERAL 1 /* Card memory/registers */ -+ -+/*! 
-+ * Data structure providing information on a hardware resource mapping -+ */ -+struct efx_vi_hw_resource { -+ u8 type; /*!< What this resource represents */ -+ u8 mem_type; /*!< What type of memory is it in, eg, -+ * host or iomem */ -+ u8 more_to_follow; /*!< Is this part of a multi-region resource */ -+ u32 length; /*!< Length of the resource in bytes */ -+ unsigned long address; /*!< Address of this resource */ -+}; -+ -+/*! -+ * Metadata concerning the list of hardware resource mappings -+ */ -+struct efx_vi_hw_resource_metadata { -+ int evq_order; -+ int evq_offs; -+ int evq_capacity; -+ int instance; -+ unsigned rx_capacity; -+ unsigned tx_capacity; -+ int nic_arch; -+ int nic_revision; -+ char nic_variant; -+}; -+ -+/*! -+ * Obtain a list of hardware resource mappings, using virtual addresses -+ * -+ * \param vih The handle to identify the efx_vi -+ * \param mdata Pointer to a structure to receive the metadata -+ * \param hw_res_array An array to receive the list of hardware resources -+ * \param length The length of hw_res_array. Updated on success to contain -+ * the number of entries in the supplied array that were used. -+ * \return Zero on success, non-zero on failure -+ */ -+extern int -+efx_vi_hw_resource_get_virt(struct efx_vi_state *vih, -+ struct efx_vi_hw_resource_metadata *mdata, -+ struct efx_vi_hw_resource *hw_res_array, -+ int *length); -+ -+/*! -+ * Obtain a list of hardware resource mappings, using physical addresses -+ * -+ * \param vih The handle to identify the efx_vi -+ * \param mdata Pointer to a structure to receive the metadata -+ * \param hw_res_array An array to receive the list of hardware resources -+ * \param length The length of hw_res_array. Updated on success to contain -+ * the number of entries in the supplied array that were used. 
-+ * \return Zero on success, non-zero on failure -+ */ -+extern int -+efx_vi_hw_resource_get_phys(struct efx_vi_state *vih, -+ struct efx_vi_hw_resource_metadata *mdata, -+ struct efx_vi_hw_resource *hw_res_array, -+ int *length); -+ -+#endif /* __CI_DRIVER_RESOURCE_EFX_VI_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/driver/resource/linux_efhw_nic.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,69 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains definition of the public type struct linux_efhw_nic. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_DRIVER_RESOURCE_LINUX_RESOURCE__ -+#define __CI_DRIVER_RESOURCE_LINUX_RESOURCE__ -+ -+#include -+#include -+ -+ -+/************************************************************************ -+ * Per-nic structure in the resource driver * -+ ************************************************************************/ -+ -+struct linux_efhw_nic { -+ struct efrm_nic efrm_nic; -+ -+ struct pci_dev *pci_dev; /*!< pci descriptor */ -+ struct tasklet_struct tasklet; /*!< for interrupt bottom half */ -+ -+ /* Physical addresses of the control aperture bar. */ -+ unsigned long ctr_ap_pci_addr; -+ -+ /*! Callbacks for driverlink, when needed. */ -+ struct efx_dl_callbacks *dl_callbacks; -+ -+ /*! Event handlers. */ -+ struct efhw_ev_handler *ev_handlers; -+ -+}; -+ -+#define linux_efhw_nic(_efhw_nic) \ -+ container_of(_efhw_nic, struct linux_efhw_nic, efrm_nic.efhw_nic) -+ -+#endif /* __CI_DRIVER_RESOURCE_LINUX_RESOURCE__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/checks.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,118 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides helpers to turn bit shifts into dword shifts and -+ * check that the bit fields haven't overflown the dword etc. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_CHECK_H__ -+#define __CI_EFHW_CHECK_H__ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Helpers to turn bit shifts into dword shifts and check that the bit fields -+ * haven't overflown the dword etc. Aim is to preserve consistency with the -+ * autogenerated headers - once stable we could hard code. 
-+ * -+ *---------------------------------------------------------------------------*/ -+ -+/* mask constructors */ -+#define __FALCON_MASK(WIDTH, T) ((((T)1) << (WIDTH)) - 1) -+#define __FALCON_MASK32(WIDTH) __FALCON_MASK((WIDTH), uint32_t) -+#define __FALCON_MASK64(WIDTH) __FALCON_MASK((WIDTH), uint64_t) -+ -+#define __FALCON_MASKFIELD32(LBN, WIDTH) \ -+ ((uint32_t)(__FALCON_MASK32(WIDTH) << (LBN))) -+ -+/* constructors for fields which span the first and second dwords */ -+#define __LW(LBN) (32 - LBN) -+#define __LOW(v, LBN, WIDTH) \ -+ ((uint32_t)(((v) & __FALCON_MASK64(__LW((LBN)))) << (LBN))) -+#define __HIGH(v, LBN, WIDTH) \ -+ ((uint32_t)(((v) >> __LW((LBN))) & \ -+ __FALCON_MASK64((WIDTH - __LW((LBN)))))) -+/* constructors for fields within the second dword */ -+#define __DW2(LBN) ((LBN) - 32) -+ -+/* constructors for fields which span the second and third dwords */ -+#define __LW2(LBN) (64 - LBN) -+#define __LOW2(v, LBN, WIDTH) \ -+ ((uint32_t)(((v) & __FALCON_MASK64(__LW2((LBN)))) << ((LBN) - 32))) -+#define __HIGH2(v, LBN, WIDTH) \ -+ ((uint32_t)(((v) >> __LW2((LBN))) & \ -+ __FALCON_MASK64((WIDTH - __LW2((LBN)))))) -+ -+/* constructors for fields within the third dword */ -+#define __DW3(LBN) ((LBN) - 64) -+ -+/* constructors for fields which span the third and fourth dwords */ -+#define __LW3(LBN) (96 - LBN) -+#define __LOW3(v, LBN, WIDTH) \ -+ ((uint32_t)(((v) & __FALCON_MASK64(__LW3((LBN)))) << ((LBN) - 64))) -+#define __HIGH3(v, LBN, WIDTH) \ -+ ((ci_unit32)(((v) >> __LW3((LBN))) & \ -+ __FALCON_MASK64((WIDTH - __LW3((LBN)))))) -+ -+/* constructors for fields within the fourth dword */ -+#define __DW4(LBN) ((LBN) - 96) -+ -+/* checks that the autogenerated headers are consistent with our model */ -+#define __WIDTHCHCK(a, b) EFHW_ASSERT((a) == (b)) -+#define __RANGECHCK(v, WIDTH) \ -+ EFHW_ASSERT(((uint64_t)(v) & ~(__FALCON_MASK64((WIDTH)))) == 0) -+ -+/* fields within the first dword */ -+#define __DWCHCK(LBN, WIDTH) \ -+ EFHW_ASSERT(((LBN) >= 0) 
&& (((LBN)+(WIDTH)) <= 32)) -+ -+/* fields which span the first and second dwords */ -+#define __LWCHK(LBN, WIDTH) EFHW_ASSERT(WIDTH >= __LW(LBN)) -+ -+/* fields within the second dword */ -+#define __DW2CHCK(LBN, WIDTH) \ -+ EFHW_ASSERT(((LBN) >= 32) && (((LBN)+(WIDTH)) <= 64)) -+ -+/* fields which span the second and third dwords */ -+#define __LW2CHK(LBN, WIDTH) EFHW_ASSERT(WIDTH >= __LW2(LBN)) -+ -+/* fields within the third dword */ -+#define __DW3CHCK(LBN, WIDTH) \ -+ EFHW_ASSERT(((LBN) >= 64) && (((LBN)+(WIDTH)) <= 96)) -+ -+/* fields which span the third and fourth dwords */ -+#define __LW3CHK(LBN, WIDTH) EFHW_ASSERT(WIDTH >= __LW3(LBN)) -+ -+/* fields within the fourth dword */ -+#define __DW4CHCK(LBN, WIDTH) \ -+ EFHW_ASSERT(((LBN) >= 96) && (((LBN)+(WIDTH)) <= 128)) -+ -+/* fields in the first qword */ -+#define __QWCHCK(LBN, WIDTH) \ -+ EFHW_ASSERT(((LBN) >= 0) && (((LBN)+(WIDTH)) <= 64)) -+ -+#endif /* __CI_EFHW_CHECK_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/common.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,93 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides API of the efhw library which may be used both from -+ * the kernel and from the user-space code. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_COMMON_H__ -+#define __CI_EFHW_COMMON_H__ -+ -+#include -+ -+typedef uint32_t efhw_buffer_addr_t; -+#define EFHW_BUFFER_ADDR_FMT "[ba:%"PRIx32"]" -+ -+/*! Comment? */ -+typedef union { -+ uint64_t u64; -+ struct { -+ uint32_t a; -+ uint32_t b; -+ } opaque; -+} efhw_event_t; -+ -+/* Flags for TX/RX queues */ -+#define EFHW_VI_JUMBO_EN 0x01 /*! scatter RX over multiple desc */ -+#define EFHW_VI_ISCSI_RX_HDIG_EN 0x02 /*! iscsi rx header digest */ -+#define EFHW_VI_ISCSI_TX_HDIG_EN 0x04 /*! iscsi tx header digest */ -+#define EFHW_VI_ISCSI_RX_DDIG_EN 0x08 /*! iscsi rx data digest */ -+#define EFHW_VI_ISCSI_TX_DDIG_EN 0x10 /*! iscsi tx data digest */ -+#define EFHW_VI_TX_PHYS_ADDR_EN 0x20 /*! TX physical address mode */ -+#define EFHW_VI_RX_PHYS_ADDR_EN 0x40 /*! 
RX physical address mode */ -+#define EFHW_VI_RM_WITH_INTERRUPT 0x80 /*! VI with an interrupt */ -+#define EFHW_VI_TX_IP_CSUM_DIS 0x100 /*! enable ip checksum generation */ -+#define EFHW_VI_TX_TCPUDP_CSUM_DIS 0x200 /*! enable tcp/udp checksum -+ generation */ -+#define EFHW_VI_TX_TCPUDP_ONLY 0x400 /*! drop non-tcp/udp packets */ -+ -+/* Types of hardware filter */ -+/* Each of these values implicitly selects scatter filters on B0 - or in -+ EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK if a non-scatter filter is required */ -+#define EFHW_IP_FILTER_TYPE_UDP_WILDCARD (0) /* dest host only */ -+#define EFHW_IP_FILTER_TYPE_UDP_FULL (1) /* dest host and port */ -+#define EFHW_IP_FILTER_TYPE_TCP_WILDCARD (2) /* dest based filter */ -+#define EFHW_IP_FILTER_TYPE_TCP_FULL (3) /* src filter */ -+/* Same again, but with RSS (for B0 only) */ -+#define EFHW_IP_FILTER_TYPE_UDP_WILDCARD_RSS_B0 (4) -+#define EFHW_IP_FILTER_TYPE_UDP_FULL_RSS_B0 (5) -+#define EFHW_IP_FILTER_TYPE_TCP_WILDCARD_RSS_B0 (6) -+#define EFHW_IP_FILTER_TYPE_TCP_FULL_RSS_B0 (7) -+ -+#define EFHW_IP_FILTER_TYPE_FULL_MASK (0x1) /* Mask for full / wildcard */ -+#define EFHW_IP_FILTER_TYPE_TCP_MASK (0x2) /* Mask for TCP type */ -+#define EFHW_IP_FILTER_TYPE_RSS_B0_MASK (0x4) /* Mask for B0 RSS enable */ -+#define EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK (0x8) /* Mask for B0 SCATTER dsbl */ -+ -+#define EFHW_IP_FILTER_TYPE_MASK (0xffff) /* Mask of types above */ -+ -+#define EFHW_IP_FILTER_BROADCAST (0x10000) /* driverlink filter -+ support */ -+ -+#endif /* __CI_EFHW_COMMON_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/common_sysdep.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,61 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides 
version-independent Linux kernel API for -+ * userland-to-kernel interfaces. -+ * Only kernels >=2.6.9 are supported. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_COMMON_LINUX_H__ -+#define __CI_EFHW_COMMON_LINUX_H__ -+ -+#include -+ -+/* Dirty hack, but Linux kernel does not provide DMA_ADDR_T_FMT */ -+#if BITS_PER_LONG == 64 || defined(CONFIG_HIGHMEM64G) -+#define DMA_ADDR_T_FMT "%llx" -+#else -+#define DMA_ADDR_T_FMT "%x" -+#endif -+ -+/* Linux kernel also does not provide PRIx32... Sigh. 
*/ -+#define PRIx32 "x" -+ -+#ifdef __ia64__ -+# define PRIx64 "lx" -+#else -+# define PRIx64 "llx" -+#endif -+ -+#endif /* __CI_EFHW_COMMON_LINUX_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/debug.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,84 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides debug-related API for efhw library using Linux kernel -+ * primitives. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_DEBUG_LINUX_H__ -+#define __CI_EFHW_DEBUG_LINUX_H__ -+ -+#define EFHW_PRINTK_PREFIX "[sfc efhw] " -+ -+#define EFHW_PRINTK(level, fmt, ...) \ -+ printk(level EFHW_PRINTK_PREFIX fmt "\n", __VA_ARGS__) -+ -+/* Following macros should be used with non-zero format parameters -+ * due to __VA_ARGS__ limitations. Use "%s" with __func__ if you can't -+ * find better parameters. */ -+#define EFHW_ERR(fmt, ...) EFHW_PRINTK(KERN_ERR, fmt, __VA_ARGS__) -+#define EFHW_WARN(fmt, ...) EFHW_PRINTK(KERN_WARNING, fmt, __VA_ARGS__) -+#define EFHW_NOTICE(fmt, ...) EFHW_PRINTK(KERN_NOTICE, fmt, __VA_ARGS__) -+#if 0 && !defined(NDEBUG) -+#define EFHW_TRACE(fmt, ...) EFHW_PRINTK(KERN_DEBUG, fmt, __VA_ARGS__) -+#else -+#define EFHW_TRACE(fmt, ...) -+#endif -+ -+#ifndef NDEBUG -+#define EFHW_ASSERT(cond) BUG_ON((cond) == 0) -+#define EFHW_DO_DEBUG(expr) expr -+#else -+#define EFHW_ASSERT(cond) -+#define EFHW_DO_DEBUG(expr) -+#endif -+ -+#define EFHW_TEST(expr) \ -+ do { \ -+ if (unlikely(!(expr))) \ -+ BUG(); \ -+ } while (0) -+ -+/* Build time asserts. We paste the line number into the type name -+ * so that the macro can be used more than once per file even if the -+ * compiler objects to multiple identical typedefs. Collisions -+ * between use in different header files is still possible. */ -+#ifndef EFHW_BUILD_ASSERT -+#define __EFHW_BUILD_ASSERT_NAME(_x) __EFHW_BUILD_ASSERT_ILOATHECPP(_x) -+#define __EFHW_BUILD_ASSERT_ILOATHECPP(_x) __EFHW_BUILD_ASSERT__ ##_x -+#define EFHW_BUILD_ASSERT(e) \ -+ typedef char __EFHW_BUILD_ASSERT_NAME(__LINE__)[(e) ? 
1 : -1] -+#endif -+ -+#endif /* __CI_EFHW_DEBUG_LINUX_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/efhw_config.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,43 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides some limits used in both kernel and userland code. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_EFAB_CONFIG_H__ -+#define __CI_EFHW_EFAB_CONFIG_H__ -+ -+#define EFHW_MAX_NR_DEVS 5 /* max number of efhw devices supported */ -+ -+#endif /* __CI_EFHW_EFAB_CONFIG_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/efhw_types.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,382 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides struct efhw_nic and some related types. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_EFAB_TYPES_H__ -+#define __CI_EFHW_EFAB_TYPES_H__ -+ -+#include -+#include -+#include -+#include -+ -+/*-------------------------------------------------------------------- -+ * -+ * forward type declarations -+ * -+ *--------------------------------------------------------------------*/ -+ -+struct efhw_nic; -+ -+/*-------------------------------------------------------------------- -+ * -+ * Managed interface -+ * -+ *--------------------------------------------------------------------*/ -+ -+struct efhw_buffer_table_allocation{ -+ unsigned base; -+ unsigned order; -+}; -+ -+struct eventq_resource_hardware { -+ /*!iobuffer allocated for eventq - can be larger than eventq */ -+ struct efhw_iopages iobuff; -+ unsigned iobuff_off; -+ struct efhw_buffer_table_allocation buf_tbl_alloc; -+ int capacity; /*!< capacity of event queue */ -+}; -+ -+/*-------------------------------------------------------------------- -+ * -+ * event queues and event driven callbacks -+ * -+ *--------------------------------------------------------------------*/ -+ -+struct efhw_keventq { -+ int lock; -+ caddr_t evq_base; -+ int32_t evq_ptr; -+ uint32_t evq_mask; -+ unsigned instance; -+ struct eventq_resource_hardware hw; -+ struct efhw_ev_handler *ev_handlers; -+}; -+ -+/*-------------------------------------------------------------------- -+ * -+ * filters -+ * -+ *--------------------------------------------------------------------*/ -+ -+struct efhw_filter_spec { -+ uint dmaq_id; -+ uint32_t saddr_le32; -+ uint32_t daddr_le32; -+ uint16_t sport_le16; -+ uint16_t dport_le16; -+ unsigned tcp : 1; -+ unsigned full : 1; -+ unsigned rss : 1; /* not supported on A1 */ 
-+ unsigned scatter : 1; /* not supported on A1 */ -+}; -+ -+struct efhw_filter_depth { -+ unsigned needed; -+ unsigned max; -+}; -+ -+struct efhw_filter_search_limits { -+ unsigned tcp_full; -+ unsigned tcp_wild; -+ unsigned udp_full; -+ unsigned udp_wild; -+}; -+ -+ -+/********************************************************************** -+ * Portable HW interface. *************************************** -+ **********************************************************************/ -+ -+/*-------------------------------------------------------------------- -+ * -+ * EtherFabric Functional units - configuration and control -+ * -+ *--------------------------------------------------------------------*/ -+ -+struct efhw_func_ops { -+ -+ /*-------------- Initialisation ------------ */ -+ -+ /*! close down all hardware functional units - leaves NIC in a safe -+ state for driver unload */ -+ void (*close_hardware) (struct efhw_nic *nic); -+ -+ /*! initialise all hardware functional units */ -+ int (*init_hardware) (struct efhw_nic *nic, -+ struct efhw_ev_handler *, -+ const uint8_t *mac_addr, int non_irq_evq); -+ -+ /*-------------- Interrupt support ------------ */ -+ -+ /*! Main interrupt routine -+ ** This function returns, -+ ** - zero, if the IRQ was not generated by EF1 -+ ** - non-zero, if EF1 was the source of the IRQ -+ ** -+ ** -+ ** opaque is an OS provided pointer for use by the OS callbacks -+ ** e.g in Windows used to indicate DPC scheduled -+ */ -+ int (*interrupt) (struct efhw_nic *nic); -+ -+ /*! Enable the interrupt */ -+ void (*interrupt_enable) (struct efhw_nic *nic); -+ -+ /*! Disable the interrupt */ -+ void (*interrupt_disable) (struct efhw_nic *nic); -+ -+ /*! Set interrupt moderation strategy for the given IRQ unit -+ ** val is in usec -+ */ -+ void (*set_interrupt_moderation)(struct efhw_nic *nic, int evq, -+ uint val); -+ -+ /*-------------- Event support ------------ */ -+ -+ /*! 
Enable the given event queue -+ depending on the underlying implementation (EF1 or Falcon) then -+ either a q_base_addr in host memory, or a buffer base id should -+ be proivded -+ */ -+ void (*event_queue_enable) (struct efhw_nic *nic, -+ uint evq, /* evnt queue index */ -+ uint evq_size, /* units of #entries */ -+ dma_addr_t q_base_addr, uint buf_base_id, -+ int interrupting); -+ -+ /*! Disable the given event queue (and any associated timer) */ -+ void (*event_queue_disable) (struct efhw_nic *nic, uint evq, -+ int timer_only); -+ -+ /*! request wakeup from the NIC on a given event Q */ -+ void (*wakeup_request) (struct efhw_nic *nic, dma_addr_t q_base_addr, -+ int next_i, int evq); -+ -+ /*! Push a SW event on a given eventQ */ -+ void (*sw_event) (struct efhw_nic *nic, int data, int evq); -+ -+ /*-------------- IP Filter API ------------ */ -+ -+ /*! Setup a given filter - The software can request a filter_i, -+ * but some EtherFabric implementations will override with -+ * a more suitable index -+ */ -+ int (*ipfilter_set) (struct efhw_nic *nic, int type, -+ int *filter_i, int dmaq, -+ unsigned saddr_be32, unsigned sport_be16, -+ unsigned daddr_be32, unsigned dport_be16); -+ -+ /*! Clear down a given filter */ -+ void (*ipfilter_clear) (struct efhw_nic *nic, int filter_idx); -+ -+ /*-------------- DMA support ------------ */ -+ -+ /*! Initialise NIC state for a given TX DMAQ */ -+ void (*dmaq_tx_q_init) (struct efhw_nic *nic, -+ uint dmaq, uint evq, uint owner, uint tag, -+ uint dmaq_size, uint buf_idx, uint flags); -+ -+ /*! Initialise NIC state for a given RX DMAQ */ -+ void (*dmaq_rx_q_init) (struct efhw_nic *nic, -+ uint dmaq, uint evq, uint owner, uint tag, -+ uint dmaq_size, uint buf_idx, uint flags); -+ -+ /*! Disable a given TX DMAQ */ -+ void (*dmaq_tx_q_disable) (struct efhw_nic *nic, uint dmaq); -+ -+ /*! Disable a given RX DMAQ */ -+ void (*dmaq_rx_q_disable) (struct efhw_nic *nic, uint dmaq); -+ -+ /*! 
Flush a given TX DMA channel */ -+ int (*flush_tx_dma_channel) (struct efhw_nic *nic, uint dmaq); -+ -+ /*! Flush a given RX DMA channel */ -+ int (*flush_rx_dma_channel) (struct efhw_nic *nic, uint dmaq); -+ -+ /*-------------- Buffer table Support ------------ */ -+ -+ /*! Initialise a buffer table page */ -+ void (*buffer_table_set) (struct efhw_nic *nic, -+ dma_addr_t dma_addr, -+ uint bufsz, uint region, -+ int own_id, int buffer_id); -+ -+ /*! Initialise a block of buffer table pages */ -+ void (*buffer_table_set_n) (struct efhw_nic *nic, int buffer_id, -+ dma_addr_t dma_addr, -+ uint bufsz, uint region, -+ int n_pages, int own_id); -+ -+ /*! Clear a block of buffer table pages */ -+ void (*buffer_table_clear) (struct efhw_nic *nic, int buffer_id, -+ int num); -+ -+ /*! Commit a buffer table update */ -+ void (*buffer_table_commit) (struct efhw_nic *nic); -+ -+ /*-------------- New filter API ------------ */ -+ -+ /*! Set a given filter */ -+ int (*filter_set) (struct efhw_nic *nic, struct efhw_filter_spec *spec, -+ int *filter_idx_out); -+ -+ /*! Clear a given filter */ -+ void (*filter_clear) (struct efhw_nic *nic, int filter_idx); -+}; -+ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * NIC type -+ * -+ *---------------------------------------------------------------------------*/ -+ -+struct efhw_device_type { -+ int arch; /* enum efhw_arch */ -+ char variant; /* 'A', 'B', ... */ -+ int revision; /* 0, 1, ... */ -+}; -+ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * EtherFabric NIC instance - nic.c for HW independent functions -+ * -+ *---------------------------------------------------------------------------*/ -+ -+/*! */ -+struct efhw_nic { -+ /*! zero base index in efrm_nic_tablep->nic array */ -+ int index; -+ int ifindex; /*!< OS level nic index */ -+ struct net *nd_net; -+ -+ struct efhw_device_type devtype; -+ -+ /*! Options that can be set by user. 
*/ -+ unsigned options; -+# define NIC_OPT_EFTEST 0x1 /* owner is an eftest app */ -+ -+# define NIC_OPT_DEFAULT 0 -+ -+ /*! Internal flags that indicate hardware properties at runtime. */ -+ unsigned flags; -+# define NIC_FLAG_NO_INTERRUPT 0x01 /* to be set at init time only */ -+# define NIC_FLAG_TRY_MSI 0x02 -+# define NIC_FLAG_MSI 0x04 -+# define NIC_FLAG_OS_IRQ_EN 0x08 -+ -+ unsigned mtu; /*!< MAC MTU (includes MAC hdr) */ -+ -+ /* hardware resources */ -+ -+ /*! I/O address of the start of the bar */ -+ volatile char __iomem *bar_ioaddr; -+ -+ /*! Bar number of control aperture. */ -+ unsigned ctr_ap_bar; -+ /*! Length of control aperture in bytes. */ -+ unsigned ctr_ap_bytes; -+ -+ uint8_t mac_addr[ETH_ALEN]; /*!< mac address */ -+ -+ /*! EtherFabric Functional Units -- functions */ -+ const struct efhw_func_ops *efhw_func; -+ -+ /*! This lock protects a number of misc NIC resources. It should -+ * only be used for things that can be at the bottom of the lock -+ * order. ie. You mustn't attempt to grab any other lock while -+ * holding this one. -+ */ -+ spinlock_t *reg_lock; -+ spinlock_t the_reg_lock; -+ -+ int buf_commit_outstanding; /*!< outstanding buffer commits */ -+ -+ /*! interrupt callbacks (hard-irq) */ -+ void (*irq_handler) (struct efhw_nic *, int unit); -+ -+ /*! event queues per driver */ -+ struct efhw_keventq interrupting_evq; -+ -+/* for marking when we are not using an IRQ unit -+ - 0 is a valid offset to an IRQ unit on EF1! */ -+#define EFHW_IRQ_UNIT_UNUSED 0xffff -+ /*! interrupt unit in use for the interrupting event queue */ -+ unsigned int irq_unit; -+ -+ struct efhw_keventq non_interrupting_evq; -+ -+ struct efhw_iopage irq_iobuff; /*!< Falcon SYSERR interrupt */ -+ -+ /* The new driverlink infrastructure. */ -+ struct efx_dl_device *net_driver_dev; -+ struct efx_dlfilt_cb_s *dlfilter_cb; -+ -+ /*! Bit masks of the sizes of event queues and dma queues supported -+ * by the nic. 
*/ -+ unsigned evq_sizes; -+ unsigned rxq_sizes; -+ unsigned txq_sizes; -+ -+ /* Size of filter table. */ -+ unsigned ip_filter_tbl_size; -+ -+ /* Number of filters currently used */ -+ unsigned ip_filter_tbl_used; -+ -+ /* Dynamically allocated filter state. */ -+ uint8_t *filter_in_use; -+ struct efhw_filter_spec *filter_spec_cache; -+ -+ /* Currently required and maximum filter table search depths. */ -+ struct efhw_filter_depth tcp_full_srch; -+ struct efhw_filter_depth tcp_wild_srch; -+ struct efhw_filter_depth udp_full_srch; -+ struct efhw_filter_depth udp_wild_srch; -+ -+ /* Number of event queues, DMA queues and timers. */ -+ unsigned num_evqs; -+ unsigned num_dmaqs; -+ unsigned num_timers; -+}; -+ -+ -+#define EFHW_KVA(nic) ((nic)->bar_ioaddr) -+ -+ -+#endif /* __CI_EFHW_EFHW_TYPES_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/eventq.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,72 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains API provided by efhw/eventq.c file. This file is not -+ * designed for use outside of the SFC resource driver. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_EVENTQ_H__ -+#define __CI_EFHW_EVENTQ_H__ -+ -+#include -+#include -+ -+/*! Poll the event queue. */ -+extern int efhw_keventq_poll(struct efhw_nic *, struct efhw_keventq *); -+ -+/*! Callbacks for handling events. */ -+struct efhw_ev_handler { -+ void (*wakeup_fn)(struct efhw_nic *nic, unsigned); -+ void (*timeout_fn)(struct efhw_nic *nic, unsigned); -+ void (*dmaq_flushed_fn) (struct efhw_nic *, unsigned, int); -+}; -+ -+extern int efhw_keventq_ctor(struct efhw_nic *, int instance, -+ struct efhw_keventq *, struct efhw_ev_handler *); -+extern void efhw_keventq_dtor(struct efhw_nic *, struct efhw_keventq *); -+ -+extern void efhw_handle_txdmaq_flushed(struct efhw_nic *, -+ struct efhw_ev_handler *, -+ efhw_event_t *); -+extern void efhw_handle_rxdmaq_flushed(struct efhw_nic *, -+ struct efhw_ev_handler *, -+ efhw_event_t *); -+extern void efhw_handle_wakeup_event(struct efhw_nic *, -+ struct efhw_ev_handler *, -+ efhw_event_t *); -+extern void efhw_handle_timeout_event(struct efhw_nic *, -+ struct efhw_ev_handler *, -+ efhw_event_t *); -+ -+#endif /* __CI_EFHW_EVENTQ_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/eventq_macros.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,77 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource 
management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides some event-related macros. This file is designed for -+ * use from kernel and from the userland contexts. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_EVENTQ_MACROS_H__ -+#define __CI_EFHW_EVENTQ_MACROS_H__ -+ -+#include -+ -+/*-------------------------------------------------------------------- -+ * -+ * Event Queue manipulation -+ * -+ *--------------------------------------------------------------------*/ -+ -+#define EFHW_EVENT_OFFSET(q, s, i) \ -+ (((s)->evq_ptr - (i) * (int32_t)sizeof(efhw_event_t)) \ -+ & (q)->evq_mask) -+ -+#define EFHW_EVENT_PTR(q, s, i) \ -+ ((efhw_event_t *)((q)->evq_base + EFHW_EVENT_OFFSET(q, s, i))) -+ -+#define EFHW_EVENTQ_NEXT(s) \ -+ do { ((s)->evq_ptr += sizeof(efhw_event_t)); } while (0) -+ -+#define EFHW_EVENTQ_PREV(s) \ -+ do { ((s)->evq_ptr -= sizeof(efhw_event_t)); } while (0) -+ -+/* Be worried about this on byteswapped machines */ -+/* Due to crazy chipsets, we see the event words being written in -+** arbitrary order (bug4539). So test for presence of event must ensure -+** that both halves have changed from the null. 
-+*/ -+#define EFHW_IS_EVENT(evp) \ -+ (((evp)->opaque.a != (uint32_t)-1) && \ -+ ((evp)->opaque.b != (uint32_t)-1)) -+#define EFHW_CLEAR_EVENT(evp) ((evp)->u64 = (uint64_t)-1) -+#define EFHW_CLEAR_EVENT_VALUE 0xff -+ -+#define EFHW_EVENT_OVERFLOW(evq, s) \ -+ (EFHW_IS_EVENT(EFHW_EVENT_PTR(evq, s, 1))) -+ -+#endif /* __CI_EFHW_EVENTQ_MACROS_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/falcon.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,94 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains API provided by efhw/falcon.c file. This file is not -+ * designed for use outside of the SFC resource driver. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_FALCON_H__ -+#define __CI_EFHW_FALCON_H__ -+ -+#include -+#include -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Locks - unfortunately required -+ * -+ *---------------------------------------------------------------------------*/ -+ -+#define FALCON_LOCK_DECL irq_flags_t lock_state -+#define FALCON_LOCK_LOCK(nic) \ -+ spin_lock_irqsave((nic)->reg_lock, lock_state) -+#define FALCON_LOCK_UNLOCK(nic) \ -+ spin_unlock_irqrestore((nic)->reg_lock, lock_state) -+ -+extern struct efhw_func_ops falcon_char_functional_units; -+ -+/*! specify a pace value for a TX DMA Queue */ -+extern void falcon_nic_pace(struct efhw_nic *nic, uint dmaq, uint pace); -+ -+/*! configure the pace engine */ -+extern void falcon_nic_pace_cfg(struct efhw_nic *nic, int fb_base, -+ int bin_thresh); -+ -+/*! confirm buffer table updates - should be used for items where -+ loss of data would be unacceptable. E.g for the buffers that back -+ an event or DMA queue */ -+extern void falcon_nic_buffer_table_confirm(struct efhw_nic *nic); -+ -+/*! Reset the all the TX DMA queue pointers. */ -+extern void falcon_clobber_tx_dma_ptrs(struct efhw_nic *nic, uint dmaq); -+ -+extern int -+falcon_handle_char_event(struct efhw_nic *nic, -+ struct efhw_ev_handler *h, efhw_event_t *evp); -+ -+/*! 
Acknowledge to HW that processing is complete on a given event queue */ -+extern void falcon_nic_evq_ack(struct efhw_nic *nic, uint evq, /* evq id */ -+ uint rptr, /* new read pointer update */ -+ bool wakeup /* request a wakeup event if -+ ptr's != */ -+ ); -+ -+extern void -+falcon_nic_buffer_table_set_n(struct efhw_nic *nic, int buffer_id, -+ dma_addr_t dma_addr, uint bufsz, uint region, -+ int n_pages, int own_id); -+ -+extern int falcon_nic_filter_ctor(struct efhw_nic *nic); -+ -+extern void falcon_nic_filter_dtor(struct efhw_nic *nic); -+ -+#endif /* __CI_EFHW_FALCON_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/falcon_hash.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,58 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains API provided by efhw/falcon_hash.c file. -+ * Function declared in this file are not exported from the Linux -+ * sfc_resource driver. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_FALCON_HASH_H__ -+#define __CI_EFHW_FALCON_HASH_H__ -+ -+extern unsigned int -+falcon_hash_get_ip_key(unsigned int src_ip, unsigned int src_port, -+ unsigned int dest_ip, unsigned int dest_port, -+ int tcp, int full); -+ -+extern unsigned int -+falcon_hash_function1(unsigned int key, unsigned int nfilters); -+ -+extern unsigned int -+falcon_hash_function2(unsigned int key, unsigned int nfilters); -+ -+extern unsigned int -+falcon_hash_iterator(unsigned int hash1, unsigned int hash2, -+ unsigned int n_search, unsigned int nfilters); -+ -+#endif /* __CI_EFHW_FALCON_HASH_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,69 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides version-independent Linux kernel API for header files -+ * with hardware-related definitions (in ci/driver/efab/hardware*). -+ * Only kernels >=2.6.9 are supported. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_HARDWARE_LINUX_H__ -+#define __CI_EFHW_HARDWARE_LINUX_H__ -+ -+#include -+ -+#ifdef __LITTLE_ENDIAN -+#define EFHW_IS_LITTLE_ENDIAN -+#elif __BIG_ENDIAN -+#define EFHW_IS_BIG_ENDIAN -+#else -+#error Unknown endianness -+#endif -+ -+#ifndef readq -+static inline uint64_t __readq(volatile void __iomem *addr) -+{ -+ return *(volatile uint64_t *)addr; -+} -+#define readq(x) __readq(x) -+#endif -+ -+#ifndef writeq -+static inline void __writeq(uint64_t v, volatile void __iomem *addr) -+{ -+ *(volatile uint64_t *)addr = v; -+} -+#define writeq(val, addr) __writeq((val), (addr)) -+#endif -+ -+#endif /* __CI_EFHW_HARDWARE_LINUX_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/iopage.h 2008-07-17 16:18:07.000000000 +0200 -@@ 
-0,0 +1,58 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains OS-independent API for allocating iopage types. -+ * The implementation of these functions is highly OS-dependent. -+ * This file is not designed for use outside of the SFC resource driver. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_DRIVER_RESOURCE_IOPAGE_H__ -+#define __CI_DRIVER_RESOURCE_IOPAGE_H__ -+ -+#include -+ -+/*-------------------------------------------------------------------- -+ * -+ * memory allocation -+ * -+ *--------------------------------------------------------------------*/ -+ -+extern int efhw_iopage_alloc(struct efhw_nic *, struct efhw_iopage *p); -+extern void efhw_iopage_free(struct efhw_nic *, struct efhw_iopage *p); -+ -+extern int efhw_iopages_alloc(struct efhw_nic *, struct efhw_iopages *p, -+ unsigned order); -+extern void efhw_iopages_free(struct efhw_nic *, struct efhw_iopages *p); -+ -+#endif /* __CI_DRIVER_RESOURCE_IOPAGE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/iopage_types.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,190 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides struct efhw_page and struct efhw_iopage for Linux -+ * kernel. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_IOPAGE_LINUX_H__ -+#define __CI_EFHW_IOPAGE_LINUX_H__ -+ -+#include -+#include -+#include -+#include -+ -+/*-------------------------------------------------------------------- -+ * -+ * struct efhw_page: A single page of memory. Directly mapped in the -+ * driver, and can be mapped to userlevel. -+ * -+ *--------------------------------------------------------------------*/ -+ -+struct efhw_page { -+ unsigned long kva; -+}; -+ -+static inline int efhw_page_alloc(struct efhw_page *p) -+{ -+ p->kva = __get_free_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL); -+ return p->kva ? 0 : -ENOMEM; -+} -+ -+static inline int efhw_page_alloc_zeroed(struct efhw_page *p) -+{ -+ p->kva = get_zeroed_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL); -+ return p->kva ? 
0 : -ENOMEM; -+} -+ -+static inline void efhw_page_free(struct efhw_page *p) -+{ -+ free_page(p->kva); -+ EFHW_DO_DEBUG(memset(p, 0, sizeof(*p))); -+} -+ -+static inline char *efhw_page_ptr(struct efhw_page *p) -+{ -+ return (char *)p->kva; -+} -+ -+static inline unsigned efhw_page_pfn(struct efhw_page *p) -+{ -+ return (unsigned)(__pa(p->kva) >> PAGE_SHIFT); -+} -+ -+static inline void efhw_page_mark_invalid(struct efhw_page *p) -+{ -+ p->kva = 0; -+} -+ -+static inline int efhw_page_is_valid(struct efhw_page *p) -+{ -+ return p->kva != 0; -+} -+ -+static inline void efhw_page_init_from_va(struct efhw_page *p, void *va) -+{ -+ p->kva = (unsigned long)va; -+} -+ -+/*-------------------------------------------------------------------- -+ * -+ * struct efhw_iopage: A single page of memory. Directly mapped in the driver, -+ * and can be mapped to userlevel. Can also be accessed by the NIC. -+ * -+ *--------------------------------------------------------------------*/ -+ -+struct efhw_iopage { -+ struct efhw_page p; -+ dma_addr_t dma_addr; -+}; -+ -+static inline dma_addr_t efhw_iopage_dma_addr(struct efhw_iopage *p) -+{ -+ return p->dma_addr; -+} -+ -+#define efhw_iopage_ptr(iop) efhw_page_ptr(&(iop)->p) -+#define efhw_iopage_pfn(iop) efhw_page_pfn(&(iop)->p) -+#define efhw_iopage_mark_invalid(iop) efhw_page_mark_invalid(&(iop)->p) -+#define efhw_iopage_is_valid(iop) efhw_page_is_valid(&(iop)->p) -+ -+/*-------------------------------------------------------------------- -+ * -+ * struct efhw_iopages: A set of pages that are contiguous in physical -+ * memory. Directly mapped in the driver, and can be mapped to userlevel. -+ * Can also be accessed by the NIC. -+ * -+ * NB. The O/S may be unwilling to allocate many, or even any of these. So -+ * only use this type where the NIC really needs a physically contiguous -+ * buffer. 
-+ * -+ *--------------------------------------------------------------------*/ -+ -+struct efhw_iopages { -+ caddr_t kva; -+ unsigned order; -+ dma_addr_t dma_addr; -+}; -+ -+static inline caddr_t efhw_iopages_ptr(struct efhw_iopages *p) -+{ -+ return p->kva; -+} -+ -+static inline unsigned efhw_iopages_pfn(struct efhw_iopages *p) -+{ -+ return (unsigned)(__pa(p->kva) >> PAGE_SHIFT); -+} -+ -+static inline dma_addr_t efhw_iopages_dma_addr(struct efhw_iopages *p) -+{ -+ return p->dma_addr; -+} -+ -+static inline unsigned efhw_iopages_size(struct efhw_iopages *p) -+{ -+ return 1u << (p->order + PAGE_SHIFT); -+} -+ -+/* struct efhw_iopage <-> struct efhw_iopages conversions for handling -+ * physically contiguous allocations in iobufsets for iSCSI. This allows -+ * the essential information about contiguous allocations from -+ * efhw_iopages_alloc() to be saved away in the struct efhw_iopage array in -+ * an iobufset. (Changing the iobufset resource to use a union type would -+ * involve a lot of code changes, and make the iobufset's metadata larger -+ * which could be bad as it's supposed to fit into a single page on some -+ * platforms.) 
-+ */ -+static inline void -+efhw_iopage_init_from_iopages(struct efhw_iopage *iopage, -+ struct efhw_iopages *iopages, unsigned pageno) -+{ -+ iopage->p.kva = ((unsigned long)efhw_iopages_ptr(iopages)) -+ + (pageno * PAGE_SIZE); -+ iopage->dma_addr = efhw_iopages_dma_addr(iopages) + -+ (pageno * PAGE_SIZE); -+} -+ -+static inline void -+efhw_iopages_init_from_iopage(struct efhw_iopages *iopages, -+ struct efhw_iopage *iopage, unsigned order) -+{ -+ iopages->kva = (caddr_t) efhw_iopage_ptr(iopage); -+ EFHW_ASSERT(iopages->kva); -+ iopages->order = order; -+ iopages->dma_addr = efhw_iopage_dma_addr(iopage); -+} -+ -+#endif /* __CI_EFHW_IOPAGE_LINUX_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/nic.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,62 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains API provided by efhw/nic.c file. This file is not -+ * designed for use outside of the SFC resource driver. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_NIC_H__ -+#define __CI_EFHW_NIC_H__ -+ -+#include -+#include -+ -+ -+/* Convert PCI info to device type. Returns false when device is not -+ * recognised. -+ */ -+extern int efhw_device_type_init(struct efhw_device_type *dt, -+ int vendor_id, int device_id, int revision); -+ -+/* Initialise fields that do not involve touching hardware. */ -+extern void efhw_nic_init(struct efhw_nic *nic, unsigned flags, -+ unsigned options, struct efhw_device_type dev_type); -+ -+/*! Destruct NIC resources */ -+extern void efhw_nic_dtor(struct efhw_nic *nic); -+ -+/*! Shutdown interrupts */ -+extern void efhw_nic_close_interrupts(struct efhw_nic *nic); -+ -+#endif /* __CI_EFHW_NIC_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/public.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,104 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides public API of efhw library exported from the SFC -+ * resource driver. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_PUBLIC_H__ -+#define __CI_EFHW_PUBLIC_H__ -+ -+#include -+#include -+ -+/*! Returns true if we have some EtherFabric functional units - -+ whether configured or not */ -+static inline int efhw_nic_have_functional_units(struct efhw_nic *nic) -+{ -+ return nic->efhw_func != 0; -+} -+ -+/*! Returns true if the EtherFabric functional units have been configured */ -+static inline int efhw_nic_have_hw(struct efhw_nic *nic) -+{ -+ return efhw_nic_have_functional_units(nic) && (EFHW_KVA(nic) != 0); -+} -+ -+/*! 
Helper function to allocate the iobuffer needed by an eventq -+ * - it ensures the eventq has the correct alignment for the NIC -+ * -+ * \param rm Event-queue resource manager -+ * \param instance Event-queue instance (index) -+ * \param buf_bytes Requested size of eventq -+ * \return < 0 if iobuffer allocation fails -+ */ -+int efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic, -+ struct eventq_resource_hardware *h, -+ int evq_instance, unsigned buf_bytes); -+ -+extern void falcon_nic_set_rx_usr_buf_size(struct efhw_nic *, -+ int rx_usr_buf_size); -+ -+/*! Get RX filter search limits from RX_FILTER_CTL_REG. -+ * use_raw_values = 0 to get actual depth of search, or 1 to get raw values -+ * from register. -+ */ -+extern void -+falcon_nic_get_rx_filter_search_limits(struct efhw_nic *nic, -+ struct efhw_filter_search_limits *lim, -+ int use_raw_values); -+ -+/*! Set RX filter search limits in RX_FILTER_CTL_REG. -+ * use_raw_values = 0 if specifying actual depth of search, or 1 if specifying -+ * raw values to write to the register. -+ */ -+extern void -+falcon_nic_set_rx_filter_search_limits(struct efhw_nic *nic, -+ struct efhw_filter_search_limits *lim, -+ int use_raw_values); -+ -+ -+/*! Legacy RX IP filter search depth control interface */ -+extern void -+falcon_nic_rx_filter_ctl_set(struct efhw_nic *nic, uint32_t tcp_full, -+ uint32_t tcp_wild, -+ uint32_t udp_full, uint32_t udp_wild); -+ -+/*! 
Legacy RX IP filter search depth control interface */ -+extern void -+falcon_nic_rx_filter_ctl_get(struct efhw_nic *nic, uint32_t *tcp_full, -+ uint32_t *tcp_wild, -+ uint32_t *udp_full, uint32_t *udp_wild); -+ -+#endif /* __CI_EFHW_PUBLIC_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efhw/sysdep.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,55 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides version-independent Linux kernel API for efhw library. -+ * Only kernels >=2.6.9 are supported. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFHW_SYSDEP_LINUX_H__ -+#define __CI_EFHW_SYSDEP_LINUX_H__ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include /* necessary for etherdevice.h on some kernels */ -+#include -+ -+typedef unsigned long irq_flags_t; -+ -+#define spin_lock_destroy(l_) do {} while (0) -+ -+#endif /* __CI_EFHW_SYSDEP_LINUX_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/buddy.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,68 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides private API for buddy allocator. This API is not -+ * designed for use outside of SFC resource driver. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_BUDDY_H__ -+#define __CI_EFRM_BUDDY_H__ -+ -+#include -+ -+/*! Comment? */ -+struct efrm_buddy_allocator { -+ struct list_head *free_lists; /* array[order+1] */ -+ struct list_head *links; /* array[1<order; -+} -+ -+int efrm_buddy_ctor(struct efrm_buddy_allocator *b, unsigned order); -+void efrm_buddy_dtor(struct efrm_buddy_allocator *b); -+int efrm_buddy_alloc(struct efrm_buddy_allocator *b, unsigned order); -+void efrm_buddy_free(struct efrm_buddy_allocator *b, unsigned addr, -+ unsigned order); -+ -+ -+#endif /* __CI_EFRM_BUDDY_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/buffer_table.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,81 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides private buffer table API. This API is not designed -+ * for use outside of SFC resource driver. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_BUFFER_TABLE_H__ -+#define __CI_EFRM_BUFFER_TABLE_H__ -+ -+#include -+ -+/*-------------------------------------------------------------------- -+ * -+ * NIC's buffer table. -+ * -+ *--------------------------------------------------------------------*/ -+ -+/*! Managed interface. */ -+ -+/*! construct a managed buffer table object, allocated over a region of -+ * the NICs buffer table space -+ */ -+extern int efrm_buffer_table_ctor(unsigned low, unsigned high); -+/*! destructor for above */ -+extern void efrm_buffer_table_dtor(void); -+ -+/*! 
allocate a contiguous region of buffer table space */ -+extern int efrm_buffer_table_alloc(unsigned order, -+ struct efhw_buffer_table_allocation *a); -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * buffer table operations through the HW independent API -+ * -+ *--------------------------------------------------------------------*/ -+ -+/*! free a previously allocated region of buffer table space */ -+extern void efrm_buffer_table_free(struct efhw_buffer_table_allocation *a); -+ -+/*! commit the update of a buffer table entry to every NIC */ -+extern void efrm_buffer_table_commit(void); -+ -+extern void efrm_buffer_table_set(struct efhw_buffer_table_allocation *, -+ struct efhw_nic *, -+ unsigned i, dma_addr_t dma_addr, int owner); -+ -+ -+#endif /* __CI_EFRM_BUFFER_TABLE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/debug.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,78 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides debug-related API for efrm library using Linux kernel -+ * primitives. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_DEBUG_LINUX_H__ -+#define __CI_EFRM_DEBUG_LINUX_H__ -+ -+#define EFRM_PRINTK_PREFIX "[sfc efrm] " -+ -+#define EFRM_PRINTK(level, fmt, ...) \ -+ printk(level EFRM_PRINTK_PREFIX fmt "\n", __VA_ARGS__) -+ -+/* Following macros should be used with non-zero format parameters -+ * due to __VA_ARGS__ limitations. Use "%s" with __func__ if you can't -+ * find better parameters. */ -+#define EFRM_ERR(fmt, ...) EFRM_PRINTK(KERN_ERR, fmt, __VA_ARGS__) -+#define EFRM_WARN(fmt, ...) EFRM_PRINTK(KERN_WARNING, fmt, __VA_ARGS__) -+#define EFRM_NOTICE(fmt, ...) EFRM_PRINTK(KERN_NOTICE, fmt, __VA_ARGS__) -+#if !defined(NDEBUG) -+#define EFRM_TRACE(fmt, ...) EFRM_PRINTK(KERN_DEBUG, fmt, __VA_ARGS__) -+#else -+#define EFRM_TRACE(fmt, ...) 
-+#endif -+ -+#ifndef NDEBUG -+#define EFRM_ASSERT(cond) BUG_ON((cond) == 0) -+#define _EFRM_ASSERT(cond, file, line) \ -+ do { \ -+ if (unlikely(!(cond))) { \ -+ EFRM_ERR("assertion \"%s\" failed at %s %d", \ -+ #cond, file, line); \ -+ BUG(); \ -+ } \ -+ } while (0) -+ -+#define EFRM_DO_DEBUG(expr) expr -+#define EFRM_VERIFY_EQ(expr, val) EFRM_ASSERT((expr) == (val)) -+#else -+#define EFRM_ASSERT(cond) -+#define EFRM_DO_DEBUG(expr) -+#define EFRM_VERIFY_EQ(expr, val) expr -+#endif -+ -+#endif /* __CI_EFRM_DEBUG_LINUX_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/driver_private.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,89 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides private API of efrm library to be used from the SFC -+ * resource driver. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_DRIVER_PRIVATE_H__ -+#define __CI_EFRM_DRIVER_PRIVATE_H__ -+ -+#include -+#include -+ -+/*-------------------------------------------------------------------- -+ * -+ * global variables -+ * -+ *--------------------------------------------------------------------*/ -+ -+/* Internal structure for resource driver */ -+extern struct efrm_resource_manager *efrm_rm_table[]; -+ -+/*-------------------------------------------------------------------- -+ * -+ * efrm_nic_table handling -+ * -+ *--------------------------------------------------------------------*/ -+ -+struct efrm_nic; -+ -+extern void efrm_driver_ctor(void); -+extern void efrm_driver_dtor(void); -+extern int efrm_driver_register_nic(struct efrm_nic *, int nic_index, -+ int ifindex); -+extern int efrm_driver_unregister_nic(struct efrm_nic *); -+ -+/*-------------------------------------------------------------------- -+ * -+ * create/destroy resource managers -+ * -+ *--------------------------------------------------------------------*/ -+ -+struct vi_resource_dimensions { -+ unsigned evq_int_min, evq_int_lim; -+ unsigned evq_timer_min, evq_timer_lim; -+ unsigned rxq_min, rxq_lim; -+ unsigned txq_min, txq_lim; -+}; -+ -+/*! Initialise resources */ -+extern int -+efrm_resources_init(const struct vi_resource_dimensions *, -+ int buffer_table_min, int buffer_table_lim); -+ -+/*! 
Tear down resources */ -+extern void efrm_resources_fini(void); -+ -+#endif /* __CI_EFRM_DRIVER_PRIVATE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/efrm_client.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,32 @@ -+#ifndef __EFRM_CLIENT_H__ -+#define __EFRM_CLIENT_H__ -+ -+ -+struct efrm_client; -+ -+ -+struct efrm_client_callbacks { -+ /* Called before device is reset. Callee may block. */ -+ void (*pre_reset)(struct efrm_client *, void *user_data); -+ void (*stop)(struct efrm_client *, void *user_data); -+ void (*restart)(struct efrm_client *, void *user_data); -+}; -+ -+ -+#define EFRM_IFINDEX_DEFAULT -1 -+ -+ -+/* NB. Callbacks may be invoked even before this returns. */ -+extern int efrm_client_get(int ifindex, struct efrm_client_callbacks *, -+ void *user_data, struct efrm_client **client_out); -+extern void efrm_client_put(struct efrm_client *); -+ -+extern struct efhw_nic *efrm_client_get_nic(struct efrm_client *); -+ -+#if 0 -+/* For each resource type... 
*/ -+extern void efrm_x_resource_resume(struct x_resource *); -+#endif -+ -+ -+#endif /* __EFRM_CLIENT_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/efrm_nic.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,26 @@ -+#ifndef __EFRM_NIC_H__ -+#define __EFRM_NIC_H__ -+ -+#include -+ -+ -+struct efrm_nic_per_vi { -+ unsigned long state; -+ struct vi_resource *vi; -+}; -+ -+ -+struct efrm_nic { -+ struct efhw_nic efhw_nic; -+ struct list_head link; -+ struct list_head clients; -+ struct efrm_nic_per_vi *vis; -+}; -+ -+ -+#define efrm_nic(_efhw_nic) \ -+ container_of(_efhw_nic, struct efrm_nic, efhw_nic) -+ -+ -+ -+#endif /* __EFRM_NIC_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/filter.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,122 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides public API for filter resource. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_FILTER_H__ -+#define __CI_EFRM_FILTER_H__ -+ -+#include -+#include -+ -+ -+struct filter_resource; -+struct vi_resource; -+struct efrm_client; -+ -+ -+/*! -+ * Allocate filter resource. -+ * -+ * \param vi_parent VI resource to use as parent. The function takes -+ * reference to the VI resource on success. -+ * \param frs_out pointer to return the new filter resource -+ * -+ * \return status code; if non-zero, frs_out is unchanged -+ */ -+extern int -+efrm_filter_resource_alloc(struct vi_resource *vi_parent, -+ struct filter_resource **frs_out); -+ -+extern void -+efrm_filter_resource_release(struct filter_resource *); -+ -+ -+extern int efrm_filter_resource_clear(struct filter_resource *frs); -+ -+extern int __efrm_filter_resource_set(struct filter_resource *frs, int type, -+ unsigned saddr_be32, uint16_t sport_be16, -+ unsigned daddr_be32, uint16_t dport_be16); -+ -+static inline int -+efrm_filter_resource_tcp_set(struct filter_resource *frs, -+ unsigned saddr, uint16_t sport, -+ unsigned daddr, uint16_t dport) -+{ -+ int type; -+ -+ EFRM_ASSERT((saddr && sport) || (!saddr && !sport)); -+ -+ type = -+ saddr ? 
EFHW_IP_FILTER_TYPE_TCP_FULL : -+ EFHW_IP_FILTER_TYPE_TCP_WILDCARD; -+ -+ return __efrm_filter_resource_set(frs, type, -+ saddr, sport, daddr, dport); -+} -+ -+static inline int -+efrm_filter_resource_udp_set(struct filter_resource *frs, -+ unsigned saddr, uint16_t sport, -+ unsigned daddr, uint16_t dport) -+{ -+ int type; -+ -+ EFRM_ASSERT((saddr && sport) || (!saddr && !sport)); -+ -+ type = -+ saddr ? EFHW_IP_FILTER_TYPE_UDP_FULL : -+ EFHW_IP_FILTER_TYPE_UDP_WILDCARD; -+ -+ return __efrm_filter_resource_set(frs, -+ type, saddr, sport, daddr, dport); -+} -+ -+ -+extern int -+efrm_filter_resource_instance(struct filter_resource *); -+ -+extern struct efrm_resource * -+efrm_filter_resource_to_resource(struct filter_resource *); -+ -+extern struct filter_resource * -+efrm_filter_resource_from_resource(struct efrm_resource *); -+ -+extern void -+efrm_filter_resource_free(struct filter_resource *); -+ -+ -+#endif /* __CI_EFRM_FILTER_H__ */ -+/*! \cidoxg_end */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/iobufset.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,110 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides public API for iobufset resource. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_IOBUFSET_H__ -+#define __CI_EFRM_IOBUFSET_H__ -+ -+#include -+ -+/*! Iobufset resource structture. -+ * Users should not access the structure fields directly, but use the API -+ * below. -+ * However, this structure should not be moved out of public headers, -+ * because part of API (ex. efrm_iobufset_dma_addr function) is inline and -+ * is used in the fast-path code. 
-+ */ -+struct iobufset_resource { -+ struct efrm_resource rs; -+ struct vi_resource *evq; -+ struct iobufset_resource *linked; -+ struct efhw_buffer_table_allocation buf_tbl_alloc; -+ unsigned int n_bufs; -+ unsigned int pages_per_contiguous_chunk; -+ unsigned chunk_order; -+ struct efhw_iopage bufs[1]; -+ /*!< up to n_bufs can follow this, so this must be the last member */ -+}; -+ -+#define iobufset_resource(rs1) \ -+ container_of((rs1), struct iobufset_resource, rs) -+ -+/*! -+ * Allocate iobufset resource. -+ * -+ * \param vi VI that "owns" these buffers. Grabs a reference -+ * on success. -+ * \param linked Uses memory from an existing iobufset. Grabs a -+ * reference on success. -+ * \param iobrs_out pointer to return the new filter resource -+ * -+ * \return status code; if non-zero, frs_out is unchanged -+ */ -+extern int -+efrm_iobufset_resource_alloc(int32_t n_pages, -+ int32_t pages_per_contiguous_chunk, -+ struct vi_resource *vi, -+ struct iobufset_resource *linked, -+ bool phys_addr_mode, -+ struct iobufset_resource **iobrs_out); -+ -+extern void efrm_iobufset_resource_free(struct iobufset_resource *); -+extern void efrm_iobufset_resource_release(struct iobufset_resource *); -+ -+static inline char * -+efrm_iobufset_ptr(struct iobufset_resource *rs, unsigned offs) -+{ -+ EFRM_ASSERT(offs < (unsigned)(rs->n_bufs << PAGE_SHIFT)); -+ return efhw_iopage_ptr(&rs->bufs[offs >> PAGE_SHIFT]) -+ + (offs & (PAGE_SIZE - 1)); -+} -+ -+static inline char *efrm_iobufset_page_ptr(struct iobufset_resource *rs, -+ unsigned page_i) -+{ -+ EFRM_ASSERT(page_i < (unsigned)rs->n_bufs); -+ return efhw_iopage_ptr(&rs->bufs[page_i]); -+} -+ -+static inline dma_addr_t -+efrm_iobufset_dma_addr(struct iobufset_resource *rs, unsigned offs) -+{ -+ EFRM_ASSERT(offs < (unsigned)(rs->n_bufs << PAGE_SHIFT)); -+ return efhw_iopage_dma_addr(&rs->bufs[offs >> PAGE_SHIFT]) -+ + (offs & (PAGE_SIZE - 1)); -+} -+ -+#endif /* __CI_EFRM_IOBUFSET_H__ */ ---- /dev/null 1970-01-01 
00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/nic_set.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,104 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides public API for NIC sets. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_NIC_SET_H__ -+#define __CI_EFRM_NIC_SET_H__ -+ -+#include -+#include -+#include -+ -+/*-------------------------------------------------------------------- -+ * -+ * efrm_nic_set_t - tracks which NICs something has been done on -+ * -+ *--------------------------------------------------------------------*/ -+ -+/* Internal suructure of efrm_nic_set_t should not be referenced outside of -+ * this file. Add a new accessor if you should do it. */ -+typedef struct { -+ uint32_t nics; -+} efrm_nic_set_t; -+ -+#if EFHW_MAX_NR_DEVS > 32 -+#error change efrm_nic_set to handle EFHW_MAX_NR_DEVS number of devices -+#endif -+ -+static inline bool -+efrm_nic_set_read(const efrm_nic_set_t *nic_set, unsigned index) -+{ -+ EFRM_ASSERT(nic_set); -+ EFRM_ASSERT(index < EFHW_MAX_NR_DEVS && index < 32); -+ return (nic_set->nics & (1 << index)) ? true : false; -+} -+ -+static inline void -+efrm_nic_set_write(efrm_nic_set_t *nic_set, unsigned index, bool value) -+{ -+ EFRM_ASSERT(nic_set); -+ EFRM_ASSERT(index < EFHW_MAX_NR_DEVS && index < 32); -+ EFRM_ASSERT(value == false || value == true); -+ nic_set->nics = (nic_set->nics & (~(1 << index))) + (value << index); -+} -+ -+static inline void efrm_nic_set_clear(efrm_nic_set_t *nic_set) -+{ -+ nic_set->nics = 0; -+} -+ -+static inline void efrm_nic_set_all(efrm_nic_set_t *nic_set) -+{ -+ nic_set->nics = 0xffffffff; -+} -+ -+static inline bool efrm_nic_set_is_all_clear(efrm_nic_set_t *nic_set) -+{ -+ return nic_set->nics == 0 ? 
true : false; -+} -+ -+#define EFRM_NIC_SET_FMT "%x" -+ -+static inline uint32_t efrm_nic_set_pri_arg(efrm_nic_set_t *nic_set) -+{ -+ return nic_set->nics; -+} -+ -+#define EFRM_FOR_EACH_NIC_INDEX_IN_SET(_set, _nic_i) \ -+ for ((_nic_i) = 0; (_nic_i) < EFHW_MAX_NR_DEVS; ++(_nic_i)) \ -+ if (efrm_nic_set_read((_set), (_nic_i))) -+ -+#endif /* __CI_EFRM_NIC_SET_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/nic_table.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,98 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides public API for NIC table. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_NIC_TABLE_H__ -+#define __CI_EFRM_NIC_TABLE_H__ -+ -+#include -+#include -+ -+/*-------------------------------------------------------------------- -+ * -+ * struct efrm_nic_table - top level driver object keeping all NICs - -+ * implemented in driver_object.c -+ * -+ *--------------------------------------------------------------------*/ -+ -+/*! Comment? */ -+struct efrm_nic_table { -+ /*! nics attached to this driver */ -+ struct efhw_nic *nic[EFHW_MAX_NR_DEVS]; -+ /*! pointer to an arbitrary struct efhw_nic if one exists; -+ * for code which does not care which NIC it wants but -+ * still needs one. Note you cannot assume nic[0] exists. */ -+ struct efhw_nic *a_nic; -+ uint32_t nic_count; /*!< number of nics attached to this driver */ -+ spinlock_t lock; /*!< lock for table modifications */ -+ atomic_t ref_count; /*!< refcount for users of nic table */ -+}; -+ -+/* Resource driver structures used by other drivers as well */ -+extern struct efrm_nic_table *efrm_nic_tablep; -+ -+static inline void efrm_nic_table_hold(void) -+{ -+ atomic_inc(&efrm_nic_tablep->ref_count); -+} -+ -+static inline void efrm_nic_table_rele(void) -+{ -+ atomic_dec(&efrm_nic_tablep->ref_count); -+} -+ -+static inline int efrm_nic_table_held(void) -+{ -+ return atomic_read(&efrm_nic_tablep->ref_count) != 0; -+} -+ -+/* Run code block _x multiple times with variable nic set to each -+ * registered NIC in turn. -+ * DO NOT "break" out of this loop early. 
*/ -+#define EFRM_FOR_EACH_NIC(_nic_i, _nic) \ -+ for ((_nic_i) = (efrm_nic_table_hold(), 0); \ -+ (_nic_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \ -+ (_nic_i)++) \ -+ if (((_nic) = efrm_nic_tablep->nic[_nic_i])) -+ -+#define EFRM_FOR_EACH_NIC_IN_SET(_set, _i, _nic) \ -+ for ((_i) = (efrm_nic_table_hold(), 0); \ -+ (_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \ -+ ++(_i)) \ -+ if (((_nic) = efrm_nic_tablep->nic[_i]) && \ -+ efrm_nic_set_read((_set), (_i))) -+ -+#endif /* __CI_EFRM_NIC_TABLE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/private.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,118 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides private API of efrm library -- resource handling. -+ * This API is not designed for use outside of SFC resource driver. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_PRIVATE_H__ -+#define __CI_EFRM_PRIVATE_H__ -+ -+#include -+#include -+#include -+#include -+ -+/*-------------------------------------------------------------------- -+ * -+ * create resource managers -+ * -+ *--------------------------------------------------------------------*/ -+ -+/*! Create a resource manager for various types of resources -+ */ -+extern int -+efrm_create_iobufset_resource_manager(struct efrm_resource_manager **out); -+ -+extern int -+efrm_create_filter_resource_manager(struct efrm_resource_manager **out); -+ -+extern int -+efrm_create_vi_resource_manager(struct efrm_resource_manager **out, -+ const struct vi_resource_dimensions *); -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * Instance pool management -+ * -+ *--------------------------------------------------------------------*/ -+ -+/*! Allocate instance pool. Use kfifo_vfree to destroy it. 
*/ -+static inline int -+efrm_kfifo_id_ctor(struct kfifo **ids_out, -+ unsigned int base, unsigned int limit, spinlock_t *lock) -+{ -+ unsigned int i; -+ struct kfifo *ids; -+ unsigned char *buffer; -+ unsigned int size = roundup_pow_of_two((limit - base) * sizeof(int)); -+ EFRM_ASSERT(base <= limit); -+ buffer = vmalloc(size); -+ ids = kfifo_init(buffer, size, GFP_KERNEL, lock); -+ if (IS_ERR(ids)) -+ return PTR_ERR(ids); -+ for (i = base; i < limit; i++) -+ EFRM_VERIFY_EQ(__kfifo_put(ids, (unsigned char *)&i, -+ sizeof(i)), sizeof(i)); -+ -+ *ids_out = ids; -+ return 0; -+} -+ -+/*-------------------------------------------------------------------- -+ * -+ * Various private functions -+ * -+ *--------------------------------------------------------------------*/ -+ -+/*! Initialize the fields in the provided resource manager memory area -+ * \param rm The area of memory to be initialized -+ * \param dtor A method to destroy the resource manager -+ * \param name A Textual name for the resource manager -+ * \param type The type of resource managed -+ * \param initial_table_size Initial size of the ID table -+ * \param auto_destroy Destroy resource manager on driver onload iff true -+ * -+ * A default table size is provided if the value 0 is provided. 
-+ */ -+extern int -+efrm_resource_manager_ctor(struct efrm_resource_manager *rm, -+ void (*dtor)(struct efrm_resource_manager *), -+ const char *name, unsigned type); -+ -+extern void efrm_resource_manager_dtor(struct efrm_resource_manager *rm); -+ -+ -+#endif /* __CI_EFRM_PRIVATE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/resource.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,119 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides public interface of efrm library -- resource handling. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_RESOURCE_H__ -+#define __CI_EFRM_RESOURCE_H__ -+ -+/*-------------------------------------------------------------------- -+ * -+ * headers for type dependencies -+ * -+ *--------------------------------------------------------------------*/ -+ -+#include -+#include -+#include -+#include -+ -+#ifndef __ci_driver__ -+#error "Driver-only file" -+#endif -+ -+/*-------------------------------------------------------------------- -+ * -+ * struct efrm_resource - represents an allocated resource -+ * (eg. pinned pages of memory, or resource on a NIC) -+ * -+ *--------------------------------------------------------------------*/ -+ -+/*! Representation of an allocated resource */ -+struct efrm_resource { -+ int rs_ref_count; -+ efrm_resource_handle_t rs_handle; -+ struct efrm_client *rs_client; -+ struct list_head rs_client_link; -+ struct list_head rs_manager_link; -+}; -+ -+/*-------------------------------------------------------------------- -+ * -+ * managed resource abstraction -+ * -+ *--------------------------------------------------------------------*/ -+ -+/*! Factory for resources of a specific type */ -+struct efrm_resource_manager { -+ const char *rm_name; /*!< human readable only */ -+ spinlock_t rm_lock; -+#ifndef NDEBUG -+ unsigned rm_type; -+#endif -+ int rm_resources; -+ int rm_resources_hiwat; -+ struct list_head rm_resources_list; -+ /** -+ * Destructor for the resource manager. 
Other resource managers -+ * might be already dead, although the system guarantees that -+ * managers are destructed in the order by which they were created -+ */ -+ void (*rm_dtor)(struct efrm_resource_manager *); -+}; -+ -+#ifdef NDEBUG -+# define EFRM_RESOURCE_ASSERT_VALID(rs, rc_mbz) -+# define EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm) -+#else -+/*! Check validity of resource and report on failure */ -+extern void efrm_resource_assert_valid(struct efrm_resource *, -+ int rc_may_be_zero, -+ const char *file, int line); -+# define EFRM_RESOURCE_ASSERT_VALID(rs, rc_mbz) \ -+ efrm_resource_assert_valid((rs), (rc_mbz), __FILE__, __LINE__) -+ -+/*! Check validity of resource manager and report on failure */ -+extern void efrm_resource_manager_assert_valid(struct efrm_resource_manager *, -+ const char *file, int line); -+# define EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm) \ -+ efrm_resource_manager_assert_valid((rm), __FILE__, __LINE__) -+#endif -+ -+ -+extern void efrm_resource_ref(struct efrm_resource *rs); -+extern int __efrm_resource_release(struct efrm_resource *); -+ -+ -+#endif /* __CI_EFRM_RESOURCE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/resource_id.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,104 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides public type and definitions resource handle, and the -+ * definitions of resource types. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_DRIVER_EFRM_RESOURCE_ID_H__ -+#define __CI_DRIVER_EFRM_RESOURCE_ID_H__ -+ -+/*********************************************************************** -+ * Resource handles -+ * -+ * Resource handles are intended for identifying resources at kernel -+ * level, within the context of a particular NIC. particularly because -+ * for some resource types, the low 16 bites correspond to hardware -+ * IDs. They were historically also used at user level, with a nonce -+ * stored in the bits 16 to 27 (inclusive), but that approach is -+ * deprecated (but sill alive!). -+ * -+ * The handle value 0 is used to mean "no resource". -+ * Identify resources within the context of a file descriptor at user -+ * level. 
-+ ***********************************************************************/ -+ -+typedef struct { -+ uint32_t handle; -+} efrm_resource_handle_t; -+ -+/* You may think these following functions should all have -+ * _HANDLE_ in their names, but really we are providing an abstract set -+ * of methods on a (hypothetical) efrm_resource_t object, with -+ * efrm_resource_handle_t being just the reference one holds to access -+ * the object (aka "this" or "self"). -+ */ -+ -+/* Below I use inline instead of macros where possible in order to get -+ * more type checking help from the compiler; hopefully we'll never -+ * have to rewrite these to use #define as we've found some horrible -+ * compiler on which we cannot make static inline do the Right Thing (tm). -+ * -+ * For consistency and to avoid pointless change I spell these -+ * routines as macro names (CAPTILIZE_UNDERSCORED), which also serves -+ * to remind people they are compact and inlined. -+ */ -+ -+#define EFRM_RESOURCE_FMT "[rs:%08x]" -+ -+static inline unsigned EFRM_RESOURCE_PRI_ARG(efrm_resource_handle_t h) -+{ -+ return h.handle; -+} -+ -+static inline unsigned EFRM_RESOURCE_INSTANCE(efrm_resource_handle_t h) -+{ -+ return h.handle & 0x0000ffff; -+} -+ -+static inline unsigned EFRM_RESOURCE_TYPE(efrm_resource_handle_t h) -+{ -+ return (h.handle & 0xf0000000) >> 28; -+} -+ -+/*********************************************************************** -+ * Resource type codes -+ ***********************************************************************/ -+ -+#define EFRM_RESOURCE_IOBUFSET 0x0 -+#define EFRM_RESOURCE_VI 0x1 -+#define EFRM_RESOURCE_FILTER 0x2 -+#define EFRM_RESOURCE_NUM 0x3 /* This isn't a resource! */ -+ -+#define EFRM_RESOURCE_NAME(type) \ -+ ((type) == EFRM_RESOURCE_IOBUFSET? "IOBUFSET" : \ -+ (type) == EFRM_RESOURCE_VI? "VI" : \ -+ (type) == EFRM_RESOURCE_FILTER? 
"FILTER" : \ -+ "") -+ -+#endif /* __CI_DRIVER_EFRM_RESOURCE_ID_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/sysdep.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,46 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides Linux-like system-independent API for efrm library. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_SYSDEP_H__ -+#define __CI_EFRM_SYSDEP_H__ -+ -+/* Spinlocks are defined in efhw/sysdep.h */ -+#include -+ -+#include -+ -+#endif /* __CI_EFRM_SYSDEP_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/sysdep_linux.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,93 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides version-independent Linux kernel API for efrm library. -+ * Only kernels >=2.6.9 are supported. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Kfifo API is partially stolen from linux-2.6.22/include/linux/list.h -+ * Copyright (C) 2004 Stelian Pop -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_SYSDEP_LINUX_H__ -+#define __CI_EFRM_SYSDEP_LINUX_H__ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+/******************************************************************** -+ * -+ * List API -+ * -+ ********************************************************************/ -+ -+static inline struct list_head *list_pop(struct list_head *list) -+{ -+ struct list_head *link = list->next; -+ list_del(link); -+ return link; -+} -+ -+static inline struct list_head *list_pop_tail(struct list_head *list) -+{ -+ struct list_head *link = list->prev; -+ list_del(link); -+ return link; -+} -+ -+/******************************************************************** -+ * -+ * Kfifo API -+ * -+ ********************************************************************/ -+ -+static inline void kfifo_vfree(struct kfifo *fifo) -+{ -+ vfree(fifo->buffer); -+ kfree(fifo); -+} -+ -+#endif /* __CI_EFRM_SYSDEP_LINUX_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,157 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains public API for VI resource. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_VI_RESOURCE_H__ -+#define __CI_EFRM_VI_RESOURCE_H__ -+ -+#include -+#include -+#include -+ -+struct vi_resource; -+ -+/* Make these inline instead of macros for type checking */ -+static inline struct vi_resource * -+efrm_to_vi_resource(struct efrm_resource *rs) -+{ -+ EFRM_ASSERT(EFRM_RESOURCE_TYPE(rs->rs_handle) == EFRM_RESOURCE_VI); -+ return (struct vi_resource *) rs; -+} -+static inline struct -+efrm_resource *efrm_from_vi_resource(struct vi_resource *rs) -+{ -+ return (struct efrm_resource *)rs; -+} -+ -+#define EFAB_VI_RESOURCE_INSTANCE(virs) \ -+ EFRM_RESOURCE_INSTANCE(efrm_from_vi_resource(virs)->rs_handle) -+ -+#define EFAB_VI_RESOURCE_PRI_ARG(virs) \ -+ EFRM_RESOURCE_PRI_ARG(efrm_from_vi_resource(virs)->rs_handle) -+ -+extern int -+efrm_vi_resource_alloc(struct 
efrm_client *client, -+ struct vi_resource *evq_virs, -+ uint16_t vi_flags, int32_t evq_capacity, -+ int32_t txq_capacity, int32_t rxq_capacity, -+ uint8_t tx_q_tag, uint8_t rx_q_tag, -+ struct vi_resource **virs_in_out, -+ uint32_t *out_io_mmap_bytes, -+ uint32_t *out_mem_mmap_bytes, -+ uint32_t *out_txq_capacity, -+ uint32_t *out_rxq_capacity); -+ -+extern void efrm_vi_resource_free(struct vi_resource *); -+extern void efrm_vi_resource_release(struct vi_resource *); -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * eventq handling -+ * -+ *--------------------------------------------------------------------*/ -+ -+/*! Reset an event queue and clear any associated timers */ -+extern void efrm_eventq_reset(struct vi_resource *virs); -+ -+/*! Register a kernel-level handler for the event queue. This function is -+ * called whenever a timer expires, or whenever the event queue is woken -+ * but no thread is blocked on it. -+ * -+ * This function returns -EBUSY if a callback is already installed. -+ * -+ * \param rs Event-queue resource -+ * \param handler Callback-handler -+ * \param arg Argument to pass to callback-handler -+ * \return Status code -+ */ -+extern int -+efrm_eventq_register_callback(struct vi_resource *rs, -+ void (*handler)(void *arg, int is_timeout, -+ struct efhw_nic *nic), -+ void *arg); -+ -+/*! Kill the kernel-level callback. -+ * -+ * This function stops the timer from running and unregisters the callback -+ * function. It waits for any running timeout handlers to complete before -+ * returning. -+ * -+ * \param rs Event-queue resource -+ * \return Nothing -+ */ -+extern void efrm_eventq_kill_callback(struct vi_resource *rs); -+ -+/*! Ask the NIC to generate a wakeup when an event is next delivered. */ -+extern void efrm_eventq_request_wakeup(struct vi_resource *rs, -+ unsigned current_ptr); -+ -+/*! Register a kernel-level handler for flush completions. 
-+ * \TODO Currently, it is unsafe to install a callback more than once. -+ * -+ * \param rs VI resource being flushed. -+ * \param handler Callback handler function. -+ * \param arg Argument to be passed to handler. -+ */ -+extern void -+efrm_vi_register_flush_callback(struct vi_resource *rs, -+ void (*handler)(void *), -+ void *arg); -+ -+int efrm_vi_resource_flush_retry(struct vi_resource *virs); -+ -+/*! Comment? */ -+extern int efrm_pt_flush(struct vi_resource *); -+ -+/*! Comment? */ -+extern int efrm_pt_pace(struct vi_resource *, unsigned int val); -+ -+uint32_t efrm_vi_rm_txq_bytes(struct vi_resource *virs -+ /*,struct efhw_nic *nic */); -+uint32_t efrm_vi_rm_rxq_bytes(struct vi_resource *virs -+ /*,struct efhw_nic *nic */); -+uint32_t efrm_vi_rm_evq_bytes(struct vi_resource *virs -+ /*,struct efhw_nic *nic */); -+ -+ -+/* Fill [out_vi_data] with information required to allow a VI to be init'd. -+ * [out_vi_data] must ref at least VI_MAPPINGS_SIZE bytes. -+ */ -+extern void efrm_vi_resource_mappings(struct vi_resource *, void *out_vi_data); -+ -+ -+#endif /* __CI_EFRM_VI_RESOURCE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_manager.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,155 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains type definitions for VI resource. These types -+ * may be used outside of the SFC resource driver, but such use is not -+ * recommended. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_DRIVER_EFAB_VI_RESOURCE_MANAGER_H__ -+#define __CI_DRIVER_EFAB_VI_RESOURCE_MANAGER_H__ -+ -+#include -+#include -+ -+ -+#define EFRM_VI_RM_DMA_QUEUE_COUNT 2 -+#define EFRM_VI_RM_DMA_QUEUE_TX 0 -+#define EFRM_VI_RM_DMA_QUEUE_RX 1 -+ -+/** Numbers of bits which can be set in the evq_state member of -+ * vi_resource_evq_info. */ -+enum { -+ /** This bit is set if a wakeup has been requested on the NIC. */ -+ VI_RESOURCE_EVQ_STATE_WAKEUP_PENDING, -+ /** This bit is set if the wakeup is valid for the sleeping -+ * process. */ -+ VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED, -+ /** This bit is set if a wakeup or timeout event is currently being -+ * processed. */ -+ VI_RESOURCE_EVQ_STATE_BUSY, -+}; -+#define VI_RESOURCE_EVQ_STATE(X) \ -+ (((int32_t)1) << (VI_RESOURCE_EVQ_STATE_##X)) -+ -+ -+/*! Global information for the VI resource manager. 
*/ -+struct vi_resource_manager { -+ struct efrm_resource_manager rm; -+ -+ struct kfifo *instances_with_timer; -+ int with_timer_base; -+ int with_timer_limit; -+ struct kfifo *instances_with_interrupt; -+ int with_interrupt_base; -+ int with_interrupt_limit; -+ -+ bool iscsi_dmaq_instance_is_free; -+ -+ /* We keep VI resources which need flushing on these lists. The VI -+ * is put on the outstanding list when the flush request is issued -+ * to the hardware and removed when the flush event arrives. The -+ * hardware can only handle a limited number of RX flush requests at -+ * once, so VIs are placed in the waiting list until the flush can -+ * be issued. Flushes can be requested by the client or internally -+ * by the VI resource manager. In the former case, the reference -+ * count must be non-zero for the duration of the flush and in the -+ * later case, the reference count must be zero. */ -+ struct list_head rx_flush_waiting_list; -+ struct list_head rx_flush_outstanding_list; -+ struct list_head tx_flush_outstanding_list; -+ int rx_flush_outstanding_count; -+ -+ /* once the flush has happened we push the close into the work queue -+ * so its OK on Windows to free the resources (Bug 3469). Resources -+ * on this list have zero reference count. -+ */ -+ struct list_head close_pending; -+ struct work_struct work_item; -+ struct workqueue_struct *workqueue; -+}; -+ -+struct vi_resource_nic_info { -+ struct eventq_resource_hardware evq_pages; -+ struct efhw_iopages dmaq_pages[EFRM_VI_RM_DMA_QUEUE_COUNT]; -+}; -+ -+struct vi_resource { -+ /* Some macros make the assumption that the struct efrm_resource is -+ * the first member of a struct vi_resource. */ -+ struct efrm_resource rs; -+ atomic_t evq_refs; /*!< Number of users of the event queue. 
*/ -+ -+ uint32_t bar_mmap_bytes; -+ uint32_t mem_mmap_bytes; -+ -+ int32_t evq_capacity; -+ int32_t dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_COUNT]; -+ -+ uint8_t dmaq_tag[EFRM_VI_RM_DMA_QUEUE_COUNT]; -+ uint16_t flags; -+ -+ /* we keep PT endpoints that have been destroyed on a list -+ * until we have seen their TX and RX DMAQs flush complete -+ * (see Bug 1217) -+ */ -+ struct list_head rx_flush_link; -+ struct list_head tx_flush_link; -+ int rx_flushing; -+ int rx_flush_outstanding; -+ int tx_flushing; -+ uint64_t flush_time; -+ int flush_count; -+ -+ void (*flush_callback_fn)(void *); -+ void *flush_callback_arg; -+ -+ void (*evq_callback_fn) (void *arg, int is_timeout, -+ struct efhw_nic *nic); -+ void *evq_callback_arg; -+ -+ struct vi_resource *evq_virs; /*!< EVQ for DMA queues */ -+ -+ struct efhw_buffer_table_allocation -+ dmaq_buf_tbl_alloc[EFRM_VI_RM_DMA_QUEUE_COUNT]; -+ -+ struct vi_resource_nic_info nic_info; -+}; -+ -+#undef vi_resource -+#define vi_resource(rs1) container_of((rs1), struct vi_resource, rs) -+ -+static inline dma_addr_t -+efrm_eventq_dma_addr(struct vi_resource *virs) -+{ -+ struct eventq_resource_hardware *hw; -+ hw = &virs->nic_info.evq_pages; -+ return efhw_iopages_dma_addr(&hw->iobuff) + hw->iobuff_off; -+} -+ -+#endif /* __CI_DRIVER_EFAB_VI_RESOURCE_MANAGER_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_private.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,65 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains private API for VI resource. The API is not designed -+ * to be used outside of the SFC resource driver. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __CI_EFRM_VI_RESOURCE_PRIVATE_H__ -+#define __CI_EFRM_VI_RESOURCE_PRIVATE_H__ -+ -+#include -+#include -+ -+extern struct vi_resource_manager *efrm_vi_manager; -+ -+/*************************************************************************/ -+ -+extern void efrm_vi_rm_delayed_free(struct work_struct *data); -+ -+extern void efrm_vi_rm_salvage_flushed_vis(void); -+ -+void efrm_vi_rm_free_flushed_resource(struct vi_resource *virs); -+ -+void efrm_vi_rm_init_dmaq(struct vi_resource *virs, int queue_index, -+ struct efhw_nic *nic); -+ -+/*! Wakeup handler */ -+extern void efrm_handle_wakeup_event(struct efhw_nic *nic, unsigned id); -+ -+/*! Timeout handler */ -+extern void efrm_handle_timeout_event(struct efhw_nic *nic, unsigned id); -+ -+/*! DMA flush handler */ -+extern void efrm_handle_dmaq_flushed(struct efhw_nic *nic, unsigned id, -+ int rx_flush); -+ -+/*! 
SRAM update handler */ -+extern void efrm_handle_sram_event(struct efhw_nic *nic); -+ -+#endif /* __CI_EFRM_VI_RESOURCE_PRIVATE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/driver_object.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,328 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains support for the global driver variables. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include "efrm_internal.h" -+ -+/* We use #define rather than static inline here so that the Windows -+ * "prefast" compiler can see its own locking primitive when these -+ * two function are used (and then perform extra checking where they -+ * are used) -+ * -+ * Both macros operate on an irq_flags_t -+*/ -+ -+#define efrm_driver_lock(irqlock_state) \ -+ spin_lock_irqsave(&efrm_nic_tablep->lock, irqlock_state) -+ -+#define efrm_driver_unlock(irqlock_state) \ -+ spin_unlock_irqrestore(&efrm_nic_tablep->lock, \ -+ irqlock_state); -+ -+/* These routines are all methods on the architecturally singleton -+ global variables: efrm_nic_table, efrm_rm_table. -+ -+ I hope we never find a driver model that does not allow global -+ structure variables :) (but that would break almost every driver I've -+ ever seen). -+*/ -+ -+/*! Exported driver state */ -+static struct efrm_nic_table efrm_nic_table; -+struct efrm_nic_table *efrm_nic_tablep; -+EXPORT_SYMBOL(efrm_nic_tablep); -+ -+ -+/* Internal table with resource managers. -+ * We'd like to not export it, but we are still using efrm_rm_table -+ * in the char driver. So, it is declared in the private header with -+ * a purpose. */ -+struct efrm_resource_manager *efrm_rm_table[EFRM_RESOURCE_NUM]; -+EXPORT_SYMBOL(efrm_rm_table); -+ -+ -+/* List of registered nics. 
*/ -+static LIST_HEAD(efrm_nics); -+ -+ -+void efrm_driver_ctor(void) -+{ -+ efrm_nic_tablep = &efrm_nic_table; -+ spin_lock_init(&efrm_nic_tablep->lock); -+ EFRM_TRACE("%s: driver created", __func__); -+} -+ -+void efrm_driver_dtor(void) -+{ -+ EFRM_ASSERT(!efrm_nic_table_held()); -+ -+ spin_lock_destroy(&efrm_nic_tablep->lock); -+ memset(&efrm_nic_table, 0, sizeof(efrm_nic_table)); -+ memset(&efrm_rm_table, 0, sizeof(efrm_rm_table)); -+ EFRM_TRACE("%s: driver deleted", __func__); -+} -+ -+int efrm_driver_register_nic(struct efrm_nic *rnic, int nic_index, -+ int ifindex) -+{ -+ struct efhw_nic *nic = &rnic->efhw_nic; -+ struct efrm_nic_per_vi *vis; -+ int max_vis, rc = 0; -+ irq_flags_t lock_flags; -+ -+ EFRM_ASSERT(nic_index >= 0); -+ EFRM_ASSERT(ifindex >= 0); -+ -+ max_vis = 4096; /* TODO: Get runtime value. */ -+ vis = vmalloc(max_vis * sizeof(rnic->vis[0])); -+ if (vis == NULL) { -+ EFRM_ERR("%s: Out of memory", __func__); -+ return -ENOMEM; -+ } -+ -+ efrm_driver_lock(lock_flags); -+ -+ if (efrm_nic_table_held()) { -+ EFRM_ERR("%s: driver object is in use", __func__); -+ rc = -EBUSY; -+ goto done; -+ } -+ -+ if (efrm_nic_tablep->nic_count == EFHW_MAX_NR_DEVS) { -+ EFRM_ERR("%s: filled up NIC table size %d", __func__, -+ EFHW_MAX_NR_DEVS); -+ rc = -E2BIG; -+ goto done; -+ } -+ -+ rnic->vis = vis; -+ -+ EFRM_ASSERT(efrm_nic_tablep->nic[nic_index] == NULL); -+ efrm_nic_tablep->nic[nic_index] = nic; -+ nic->index = nic_index; -+ nic->ifindex = ifindex; -+ -+ if (efrm_nic_tablep->a_nic == NULL) -+ efrm_nic_tablep->a_nic = nic; -+ -+ efrm_nic_tablep->nic_count++; -+ -+ INIT_LIST_HEAD(&rnic->clients); -+ list_add(&rnic->link, &efrm_nics); -+ -+ efrm_driver_unlock(lock_flags); -+ return 0; -+ -+done: -+ efrm_driver_unlock(lock_flags); -+ vfree(vis); -+ return rc; -+} -+ -+int efrm_driver_unregister_nic(struct efrm_nic *rnic) -+{ -+ struct efhw_nic *nic = &rnic->efhw_nic; -+ int rc = 0; -+ int nic_index = nic->index; -+ irq_flags_t lock_flags; -+ -+ 
EFRM_ASSERT(nic_index >= 0); -+ -+ efrm_driver_lock(lock_flags); -+ -+ if (efrm_nic_table_held()) { -+ EFRM_ERR("%s: driver object is in use", __func__); -+ rc = -EBUSY; -+ goto done; -+ } -+ if (!list_empty(&rnic->clients)) { -+ EFRM_ERR("%s: nic has active clients", __func__); -+ rc = -EBUSY; -+ goto done; -+ } -+ -+ EFRM_ASSERT(efrm_nic_tablep->nic[nic_index] == nic); -+ EFRM_ASSERT(list_empty(&rnic->clients)); -+ -+ list_del(&rnic->link); -+ -+ nic->index = -1; -+ efrm_nic_tablep->nic[nic_index] = NULL; -+ -+ --efrm_nic_tablep->nic_count; -+ -+ if (efrm_nic_tablep->a_nic == nic) { -+ if (efrm_nic_tablep->nic_count == 0) { -+ efrm_nic_tablep->a_nic = NULL; -+ } else { -+ for (nic_index = 0; nic_index < EFHW_MAX_NR_DEVS; -+ nic_index++) { -+ if (efrm_nic_tablep->nic[nic_index] != NULL) -+ efrm_nic_tablep->a_nic = -+ efrm_nic_tablep->nic[nic_index]; -+ } -+ EFRM_ASSERT(efrm_nic_tablep->a_nic); -+ } -+ } -+ -+done: -+ efrm_driver_unlock(lock_flags); -+ return rc; -+} -+ -+ -+int efrm_nic_pre_reset(struct efhw_nic *nic) -+{ -+ struct efrm_nic *rnic = efrm_nic(nic); -+ struct efrm_client *client; -+ struct efrm_resource *rs; -+ struct list_head *client_link; -+ struct list_head *rs_link; -+ irq_flags_t lock_flags; -+ -+ spin_lock_irqsave(&efrm_nic_tablep->lock, lock_flags); -+ list_for_each(client_link, &rnic->clients) { -+ client = container_of(client_link, struct efrm_client, link); -+ EFRM_ERR("%s: client %p", __func__, client); -+ if (client->callbacks->pre_reset) -+ client->callbacks->pre_reset(client, client->user_data); -+ list_for_each(rs_link, &client->resources) { -+ rs = container_of(rs_link, struct efrm_resource, -+ rs_client_link); -+ EFRM_ERR("%s: resource %p", __func__, rs); -+ /* TODO: mark rs defunct */ -+ } -+ } -+ spin_unlock_irqrestore(&efrm_nic_tablep->lock, lock_flags); -+ -+ return 0; -+} -+ -+ -+int efrm_nic_stop(struct efhw_nic *nic) -+{ -+ /* TODO */ -+ return 0; -+} -+ -+ -+int efrm_nic_resume(struct efhw_nic *nic) -+{ -+ /* TODO */ -+ 
return 0; -+} -+ -+ -+static void efrm_client_nullcb(struct efrm_client *client, void *user_data) -+{ -+} -+ -+static struct efrm_client_callbacks efrm_null_callbacks = { -+ efrm_client_nullcb, -+ efrm_client_nullcb, -+ efrm_client_nullcb -+}; -+ -+ -+int efrm_client_get(int ifindex, struct efrm_client_callbacks *callbacks, -+ void *user_data, struct efrm_client **client_out) -+{ -+ struct efrm_nic *n, *rnic = NULL; -+ irq_flags_t lock_flags; -+ struct list_head *link; -+ struct efrm_client *client; -+ -+ if (callbacks == NULL) -+ callbacks = &efrm_null_callbacks; -+ -+ client = kmalloc(sizeof(*client), GFP_KERNEL); -+ if (client == NULL) -+ return -ENOMEM; -+ -+ spin_lock_irqsave(&efrm_nic_tablep->lock, lock_flags); -+ list_for_each(link, &efrm_nics) { -+ n = container_of(link, struct efrm_nic, link); -+ if (n->efhw_nic.ifindex == ifindex || ifindex < 0) { -+ rnic = n; -+ break; -+ } -+ } -+ if (rnic) { -+ client->user_data = user_data; -+ client->callbacks = callbacks; -+ client->nic = &rnic->efhw_nic; -+ client->ref_count = 1; -+ INIT_LIST_HEAD(&client->resources); -+ list_add(&client->link, &rnic->clients); -+ } -+ spin_unlock_irqrestore(&efrm_nic_tablep->lock, lock_flags); -+ -+ if (rnic == NULL) -+ return -ENODEV; -+ -+ *client_out = client; -+ return 0; -+} -+EXPORT_SYMBOL(efrm_client_get); -+ -+ -+void efrm_client_put(struct efrm_client *client) -+{ -+ irq_flags_t lock_flags; -+ -+ EFRM_ASSERT(client->ref_count > 0); -+ -+ spin_lock_irqsave(&efrm_nic_tablep->lock, lock_flags); -+ if (--client->ref_count > 0) -+ client = NULL; -+ else -+ list_del(&client->link); -+ spin_unlock_irqrestore(&efrm_nic_tablep->lock, lock_flags); -+ kfree(client); -+} -+EXPORT_SYMBOL(efrm_client_put); -+ -+ -+struct efhw_nic *efrm_client_get_nic(struct efrm_client *client) -+{ -+ return client->nic; -+} -+EXPORT_SYMBOL(efrm_client_get_nic); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/driverlink_new.c 2008-07-17 
16:18:07.000000000 +0200 -@@ -0,0 +1,260 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains driverlink code which interacts with the sfc network -+ * driver. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include "linux_resource_internal.h" -+#include "driverlink_api.h" -+#include "kernel_compat.h" -+#include -+ -+#include -+#include -+#include -+ -+/* The DL driver and associated calls */ -+static int efrm_dl_probe(struct efx_dl_device *efrm_dev, -+ const struct net_device *net_dev, -+ const struct efx_dl_device_info *dev_info, -+ const char *silicon_rev); -+ -+static void efrm_dl_remove(struct efx_dl_device *efrm_dev); -+ -+static void efrm_dl_reset_suspend(struct efx_dl_device *efrm_dev); -+ -+static void efrm_dl_reset_resume(struct efx_dl_device *efrm_dev, int ok); -+ -+static void efrm_dl_mtu_changed(struct efx_dl_device *, int); -+static void efrm_dl_event_falcon(struct efx_dl_device *efx_dev, void *p_event); -+ -+static struct efx_dl_driver efrm_dl_driver = { -+ .name = "resource", -+ .probe = efrm_dl_probe, -+ .remove = efrm_dl_remove, -+ .reset_suspend = efrm_dl_reset_suspend, -+ .reset_resume = efrm_dl_reset_resume -+}; -+ -+static void -+init_vi_resource_dimensions(struct vi_resource_dimensions *rd, -+ const struct efx_dl_falcon_resources *res) -+{ -+ rd->evq_timer_min = res->evq_timer_min; -+ rd->evq_timer_lim = res->evq_timer_lim; -+ rd->evq_int_min = res->evq_int_min; -+ rd->evq_int_lim = res->evq_int_lim; -+ rd->rxq_min = res->rxq_min; -+ rd->rxq_lim = res->rxq_lim; -+ rd->txq_min = res->txq_min; -+ rd->txq_lim = res->txq_lim; -+ EFRM_TRACE -+ ("Using evq_int(%d-%d) evq_timer(%d-%d) RXQ(%d-%d) TXQ(%d-%d)", -+ res->evq_int_min, res->evq_int_lim, res->evq_timer_min, -+ res->evq_timer_lim, res->rxq_min, res->rxq_lim, res->txq_min, -+ res->txq_lim); -+} -+ -+static int -+efrm_dl_probe(struct efx_dl_device *efrm_dev, -+ const struct net_device 
*net_dev, -+ const struct efx_dl_device_info *dev_info, -+ const char *silicon_rev) -+{ -+ struct vi_resource_dimensions res_dim; -+ struct efx_dl_falcon_resources *res; -+ struct linux_efhw_nic *lnic; -+ struct pci_dev *dev; -+ struct efhw_nic *nic; -+ unsigned probe_flags = 0; -+ int non_irq_evq; -+ int rc; -+ -+ efrm_dev->priv = NULL; -+ -+ efx_dl_search_device_info(dev_info, EFX_DL_FALCON_RESOURCES, -+ struct efx_dl_falcon_resources, -+ hdr, res); -+ -+ if (res == NULL) { -+ EFRM_ERR("%s: Unable to find falcon driverlink resources", -+ __func__); -+ return -EINVAL; -+ } -+ -+ if (res->flags & EFX_DL_FALCON_USE_MSI) -+ probe_flags |= NIC_FLAG_TRY_MSI; -+ -+ dev = efrm_dev->pci_dev; -+ if (res->flags & EFX_DL_FALCON_DUAL_FUNC) { -+ unsigned vendor = dev->vendor; -+ EFRM_ASSERT(dev->bus != NULL); -+ dev = NULL; -+ -+ while ((dev = pci_get_device(vendor, FALCON_S_DEVID, dev)) -+ != NULL) { -+ EFRM_ASSERT(dev->bus != NULL); -+ /* With PCIe (since it's point to point) -+ * the slot ID is usually 0 and -+ * the bus ID changes NIC to NIC, so we really -+ * need to check both. 
*/ -+ if (PCI_SLOT(dev->devfn) == -+ PCI_SLOT(efrm_dev->pci_dev->devfn) -+ && dev->bus->number == -+ efrm_dev->pci_dev->bus->number) -+ break; -+ } -+ if (dev == NULL) { -+ EFRM_ERR("%s: Unable to find falcon secondary " -+ "PCI device.", __func__); -+ return -ENODEV; -+ } -+ pci_dev_put(dev); -+ } -+ -+ init_vi_resource_dimensions(&res_dim, res); -+ -+ EFRM_ASSERT(res_dim.evq_timer_lim > res_dim.evq_timer_min); -+ res_dim.evq_timer_lim--; -+ non_irq_evq = res_dim.evq_timer_lim; -+ -+ rc = efrm_nic_add(dev, probe_flags, net_dev->dev_addr, &lnic, -+ res->biu_lock, -+ res->buffer_table_min, res->buffer_table_lim, -+ non_irq_evq, &res_dim); -+ if (rc != 0) -+ return rc; -+ -+ nic = &lnic->efrm_nic.efhw_nic; -+ nic->mtu = net_dev->mtu + ETH_HLEN; -+ nic->net_driver_dev = efrm_dev; -+ nic->ifindex = net_dev->ifindex; -+#ifdef CONFIG_NET_NS -+ nic->nd_net = net_dev->nd_net; -+#endif -+ efrm_dev->priv = nic; -+ -+ /* Register a callback so we're told when MTU changes. -+ * We dynamically allocate efx_dl_callbacks, because -+ * the callbacks that we want depends on the NIC type. -+ */ -+ lnic->dl_callbacks = -+ kmalloc(sizeof(struct efx_dl_callbacks), GFP_KERNEL); -+ if (!lnic->dl_callbacks) { -+ EFRM_ERR("Out of memory (%s)", __func__); -+ efrm_nic_del(lnic); -+ return -ENOMEM; -+ } -+ memset(lnic->dl_callbacks, 0, sizeof(*lnic->dl_callbacks)); -+ lnic->dl_callbacks->mtu_changed = efrm_dl_mtu_changed; -+ -+ if ((res->flags & EFX_DL_FALCON_DUAL_FUNC) == 0) { -+ /* Net driver receives all management events. -+ * Register a callback to receive the ones -+ * we're interested in. 
*/ -+ lnic->dl_callbacks->event = efrm_dl_event_falcon; -+ } -+ -+ rc = efx_dl_register_callbacks(efrm_dev, lnic->dl_callbacks); -+ if (rc < 0) { -+ EFRM_ERR("%s: efx_dl_register_callbacks failed (%d)", -+ __func__, rc); -+ kfree(lnic->dl_callbacks); -+ efrm_nic_del(lnic); -+ return rc; -+ } -+ -+ return 0; -+} -+ -+/* When we unregister ourselves on module removal, this function will be -+ * called for all the devices we claimed */ -+static void efrm_dl_remove(struct efx_dl_device *efrm_dev) -+{ -+ struct efhw_nic *nic = efrm_dev->priv; -+ struct linux_efhw_nic *lnic = linux_efhw_nic(nic); -+ EFRM_TRACE("%s called", __func__); -+ if (lnic->dl_callbacks) { -+ efx_dl_unregister_callbacks(efrm_dev, lnic->dl_callbacks); -+ kfree(lnic->dl_callbacks); -+ } -+ if (efrm_dev->priv) -+ efrm_nic_del(lnic); -+ EFRM_TRACE("%s OK", __func__); -+} -+ -+static void efrm_dl_reset_suspend(struct efx_dl_device *efrm_dev) -+{ -+ EFRM_NOTICE("%s:", __func__); -+} -+ -+static void efrm_dl_reset_resume(struct efx_dl_device *efrm_dev, int ok) -+{ -+ EFRM_NOTICE("%s: ok=%d", __func__, ok); -+} -+ -+int efrm_driverlink_register(void) -+{ -+ EFRM_TRACE("%s:", __func__); -+ return efx_dl_register_driver(&efrm_dl_driver); -+} -+ -+void efrm_driverlink_unregister(void) -+{ -+ EFRM_TRACE("%s:", __func__); -+ efx_dl_unregister_driver(&efrm_dl_driver); -+} -+ -+static void efrm_dl_mtu_changed(struct efx_dl_device *efx_dev, int mtu) -+{ -+ struct efhw_nic *nic = efx_dev->priv; -+ -+ ASSERT_RTNL(); /* Since we're looking at efx_dl_device::port_net_dev */ -+ -+ EFRM_TRACE("%s: old=%d new=%d", __func__, nic->mtu, mtu + ETH_HLEN); -+ /* If this happened we must have agreed to it above */ -+ nic->mtu = mtu + ETH_HLEN; -+} -+ -+static void efrm_dl_event_falcon(struct efx_dl_device *efx_dev, void *p_event) -+{ -+ struct efhw_nic *nic = efx_dev->priv; -+ struct linux_efhw_nic *lnic = linux_efhw_nic(nic); -+ efhw_event_t *ev = p_event; -+ -+ switch (FALCON_EVENT_CODE(ev)) { -+ case FALCON_EVENT_CODE_CHAR: 
-+ falcon_handle_char_event(nic, lnic->ev_handlers, ev); -+ break; -+ default: -+ EFRM_WARN("%s: unknown event type=%x", __func__, -+ (unsigned)FALCON_EVENT_CODE(ev)); -+ break; -+ } -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/efrm_internal.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,41 @@ -+#ifndef __EFRM_INTERNAL_H__ -+#define __EFRM_INTERNAL_H__ -+ -+ -+struct filter_resource { -+ struct efrm_resource rs; -+ struct vi_resource *pt; -+ int filter_idx; -+}; -+ -+#define filter_resource(rs1) container_of((rs1), struct filter_resource, rs) -+ -+ -+struct efrm_client { -+ void *user_data; -+ struct list_head link; -+ struct efrm_client_callbacks *callbacks; -+ struct efhw_nic *nic; -+ int ref_count; -+ struct list_head resources; -+}; -+ -+ -+extern void efrm_client_add_resource(struct efrm_client *, -+ struct efrm_resource *); -+ -+extern int efrm_buffer_table_size(void); -+ -+ -+static inline void efrm_resource_init(struct efrm_resource *rs, -+ int type, int instance) -+{ -+ EFRM_ASSERT(instance >= 0); -+ EFRM_ASSERT(type >= 0 && type < EFRM_RESOURCE_NUM); -+ rs->rs_ref_count = 1; -+ rs->rs_handle.handle = (type << 28u) | -+ (((unsigned)jiffies & 0xfff) << 16) | instance; -+} -+ -+ -+#endif /* __EFRM_INTERNAL_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/efx_vi_shm.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,707 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides implementation of EFX VI API, used from Xen -+ * acceleration driver. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include "linux_resource_internal.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include "kernel_compat.h" -+ -+#if EFX_VI_STATIC_FILTERS -+struct filter_list_t { -+ struct filter_list_t *next; -+ struct filter_resource *fres; -+}; -+#endif -+ -+struct efx_vi_state { -+ struct vi_resource *vi_res; -+ -+ int ifindex; -+ struct efrm_client *efrm_client; -+ struct efhw_nic *nic; -+ -+ void (*callback_fn)(void *arg, int is_timeout); -+ void *callback_arg; -+ -+ struct completion flush_completion; -+ -+#if EFX_VI_STATIC_FILTERS -+ struct filter_list_t fres[EFX_VI_STATIC_FILTERS]; -+ struct filter_list_t *free_fres; -+ struct filter_list_t *used_fres; -+#endif -+}; -+ -+static void efx_vi_flush_complete(void *state_void) -+{ -+ struct efx_vi_state *state = (struct efx_vi_state *)state_void; -+ -+ complete(&state->flush_completion); -+} -+ -+static inline int alloc_ep(struct efx_vi_state *state) -+{ -+ int rc; -+ -+ rc = 
efrm_vi_resource_alloc(state->efrm_client, NULL, EFHW_VI_JUMBO_EN, -+ efx_vi_eventq_size, -+ FALCON_DMA_Q_DEFAULT_TX_SIZE, -+ FALCON_DMA_Q_DEFAULT_RX_SIZE, -+ 0, 0, &state->vi_res, NULL, NULL, NULL, -+ NULL); -+ if (rc < 0) { -+ EFRM_ERR("%s: ERROR efrm_vi_resource_alloc error %d", -+ __func__, rc); -+ return rc; -+ } -+ -+ efrm_vi_register_flush_callback(state->vi_res, &efx_vi_flush_complete, -+ (void *)state); -+ -+ return 0; -+} -+ -+static int free_ep(struct efx_vi_state *efx_state) -+{ -+ efrm_vi_resource_release(efx_state->vi_res); -+ -+ return 0; -+} -+ -+#if EFX_VI_STATIC_FILTERS -+static int efx_vi_alloc_static_filters(struct efx_vi_state *efx_state) -+{ -+ int i; -+ int rc; -+ -+ efx_state->free_fres = efx_state->used_fres = NULL; -+ -+ for (i = 0; i < EFX_VI_STATIC_FILTERS; i++) { -+ rc = efrm_filter_resource_alloc(efx_state->vi_res, -+ &efx_state->fres[i].fres); -+ if (rc < 0) { -+ EFRM_ERR("%s: efrm_filter_resource_alloc failed: %d", -+ __func__, rc); -+ while (i > 0) { -+ i--; -+ efrm_filter_resource_release(efx_state-> -+ fres[i].fres); -+ } -+ efx_state->free_fres = NULL; -+ return rc; -+ } -+ efx_state->fres[i].next = efx_state->free_fres; -+ efx_state->free_fres = &efx_state->fres[i]; -+ } -+ -+ return 0; -+} -+#endif -+ -+int efx_vi_alloc(struct efx_vi_state **vih_out, int ifindex) -+{ -+ struct efx_vi_state *efx_state; -+ int rc; -+ -+ efx_state = kmalloc(sizeof(struct efx_vi_state), GFP_KERNEL); -+ -+ if (!efx_state) { -+ EFRM_ERR("%s: failed to allocate memory for efx_vi_state", -+ __func__); -+ rc = -ENOMEM; -+ goto fail; -+ } -+ -+ efx_state->ifindex = ifindex; -+ rc = efrm_client_get(ifindex, NULL, NULL, &efx_state->efrm_client); -+ if (rc < 0) { -+ EFRM_ERR("%s: efrm_client_get(%d) failed: %d", __func__, -+ ifindex, rc); -+ rc = -ENODEV; -+ goto fail_no_ifindex; -+ } -+ efx_state->nic = efrm_client_get_nic(efx_state->efrm_client); -+ -+ init_completion(&efx_state->flush_completion); -+ -+ /* basically allocate_pt_endpoint() */ -+ rc = 
alloc_ep(efx_state); -+ if (rc) { -+ EFRM_ERR("%s: alloc_ep failed: %d", __func__, rc); -+ goto fail_no_pt; -+ } -+#if EFX_VI_STATIC_FILTERS -+ /* Statically allocate a set of filter resources - removes the -+ restriction on not being able to use efx_vi_filter() from -+ in_atomic() */ -+ rc = efx_vi_alloc_static_filters(efx_state); -+ if (rc) -+ goto fail_no_filters; -+#endif -+ -+ *vih_out = efx_state; -+ -+ return 0; -+#if EFX_VI_STATIC_FILTERS -+fail_no_filters: -+ free_ep(efx_state); -+#endif -+fail_no_pt: -+ efrm_client_put(efx_state->efrm_client); -+fail_no_ifindex: -+ kfree(efx_state); -+fail: -+ return rc; -+} -+EXPORT_SYMBOL(efx_vi_alloc); -+ -+void efx_vi_free(struct efx_vi_state *vih) -+{ -+ struct efx_vi_state *efx_state = vih; -+ -+ /* TODO flush dma channels, init dma queues?. See ef_free_vnic() */ -+#if EFX_VI_STATIC_FILTERS -+ int i; -+ -+ for (i = 0; i < EFX_VI_STATIC_FILTERS; i++) -+ efrm_filter_resource_release(efx_state->fres[i].fres); -+#endif -+ -+ if (efx_state->vi_res) -+ free_ep(efx_state); -+ -+ efrm_client_put(efx_state->efrm_client); -+ -+ kfree(efx_state); -+} -+EXPORT_SYMBOL(efx_vi_free); -+ -+void efx_vi_reset(struct efx_vi_state *vih) -+{ -+ struct efx_vi_state *efx_state = vih; -+ -+ efrm_pt_flush(efx_state->vi_res); -+ -+ while (wait_for_completion_timeout(&efx_state->flush_completion, HZ) -+ == 0) -+ efrm_vi_resource_flush_retry(efx_state->vi_res); -+ -+ /* Bosch the eventq */ -+ efrm_eventq_reset(efx_state->vi_res); -+ return; -+} -+EXPORT_SYMBOL(efx_vi_reset); -+ -+static void -+efx_vi_eventq_callback(void *context, int is_timeout, struct efhw_nic *nic) -+{ -+ struct efx_vi_state *efx_state = (struct efx_vi_state *)context; -+ -+ EFRM_ASSERT(efx_state->callback_fn); -+ -+ return efx_state->callback_fn(efx_state->callback_arg, is_timeout); -+} -+ -+int -+efx_vi_eventq_register_callback(struct efx_vi_state *vih, -+ void (*callback)(void *context, int is_timeout), -+ void *context) -+{ -+ struct efx_vi_state *efx_state = vih; -+ -+ 
efx_state->callback_fn = callback; -+ efx_state->callback_arg = context; -+ -+ /* Register the eventq timeout event callback */ -+ efrm_eventq_register_callback(efx_state->vi_res, -+ efx_vi_eventq_callback, efx_state); -+ -+ return 0; -+} -+EXPORT_SYMBOL(efx_vi_eventq_register_callback); -+ -+int efx_vi_eventq_kill_callback(struct efx_vi_state *vih) -+{ -+ struct efx_vi_state *efx_state = vih; -+ -+ if (efx_state->vi_res->evq_callback_fn) -+ efrm_eventq_kill_callback(efx_state->vi_res); -+ -+ efx_state->callback_fn = NULL; -+ efx_state->callback_arg = NULL; -+ -+ return 0; -+} -+EXPORT_SYMBOL(efx_vi_eventq_kill_callback); -+ -+struct efx_vi_dma_map_state { -+ struct efhw_buffer_table_allocation bt_handle; -+ int n_pages; -+ dma_addr_t *dma_addrs; -+}; -+ -+int -+efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages, -+ int n_pages, struct efx_vi_dma_map_state **dmh_out) -+{ -+ struct efx_vi_state *efx_state = vih; -+ int order = fls(n_pages - 1), rc, i, evq_id; -+ dma_addr_t dma_addr; -+ struct efx_vi_dma_map_state *dm_state; -+ -+ if (n_pages != (1 << order)) { -+ EFRM_WARN("%s: Can only allocate buffers in power of 2 " -+ "sizes (not %d)", __func__, n_pages); -+ return -EINVAL; -+ } -+ -+ dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL); -+ if (!dm_state) -+ return -ENOMEM; -+ -+ dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages, -+ GFP_KERNEL); -+ if (!dm_state->dma_addrs) { -+ kfree(dm_state); -+ return -ENOMEM; -+ } -+ -+ rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle); -+ if (rc < 0) { -+ kfree(dm_state->dma_addrs); -+ kfree(dm_state); -+ return rc; -+ } -+ -+ evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle); -+ for (i = 0; i < n_pages; i++) { -+ /* TODO do we need to get_page() here ? 
*/ -+ -+ dma_addr = pci_map_page(linux_efhw_nic(efx_state->nic)-> -+ pci_dev, pages[i], 0, PAGE_SIZE, -+ PCI_DMA_TODEVICE); -+ -+ efrm_buffer_table_set(&dm_state->bt_handle, efx_state->nic, -+ i, dma_addr, evq_id); -+ -+ dm_state->dma_addrs[i] = dma_addr; -+ -+ /* Would be nice to not have to call commit each time, but -+ * comment says there are hardware restrictions on how often -+ * you can go without it, so do this to be safe */ -+ efrm_buffer_table_commit(); -+ } -+ -+ dm_state->n_pages = n_pages; -+ -+ *dmh_out = dm_state; -+ -+ return 0; -+} -+EXPORT_SYMBOL(efx_vi_dma_map_pages); -+ -+/* Function needed as Xen can't get pages for grants in dom0, but can -+ get dma address */ -+int -+efx_vi_dma_map_addrs(struct efx_vi_state *vih, -+ unsigned long long *bus_dev_addrs, -+ int n_pages, struct efx_vi_dma_map_state **dmh_out) -+{ -+ struct efx_vi_state *efx_state = vih; -+ int order = fls(n_pages - 1), rc, i, evq_id; -+ dma_addr_t dma_addr; -+ struct efx_vi_dma_map_state *dm_state; -+ -+ if (n_pages != (1 << order)) { -+ EFRM_WARN("%s: Can only allocate buffers in power of 2 " -+ "sizes (not %d)", __func__, n_pages); -+ return -EINVAL; -+ } -+ -+ dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL); -+ if (!dm_state) -+ return -ENOMEM; -+ -+ dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages, -+ GFP_KERNEL); -+ if (!dm_state->dma_addrs) { -+ kfree(dm_state); -+ return -ENOMEM; -+ } -+ -+ rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle); -+ if (rc < 0) { -+ kfree(dm_state->dma_addrs); -+ kfree(dm_state); -+ return rc; -+ } -+ -+ evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle); -+#if 0 -+ EFRM_WARN("%s: mapping %d pages to evq %d, bt_ids %d-%d\n", -+ __func__, n_pages, evq_id, -+ dm_state->bt_handle.base, -+ dm_state->bt_handle.base + n_pages); -+#endif -+ for (i = 0; i < n_pages; i++) { -+ -+ dma_addr = (dma_addr_t)bus_dev_addrs[i]; -+ -+ efrm_buffer_table_set(&dm_state->bt_handle, efx_state->nic, -+ i, dma_addr, 
evq_id); -+ -+ dm_state->dma_addrs[i] = dma_addr; -+ -+ /* Would be nice to not have to call commit each time, but -+ * comment says there are hardware restrictions on how often -+ * you can go without it, so do this to be safe */ -+ efrm_buffer_table_commit(); -+ } -+ -+ dm_state->n_pages = n_pages; -+ -+ *dmh_out = dm_state; -+ -+ return 0; -+} -+EXPORT_SYMBOL(efx_vi_dma_map_addrs); -+ -+void -+efx_vi_dma_unmap_pages(struct efx_vi_state *vih, -+ struct efx_vi_dma_map_state *dmh) -+{ -+ struct efx_vi_state *efx_state = vih; -+ struct efx_vi_dma_map_state *dm_state = -+ (struct efx_vi_dma_map_state *)dmh; -+ int i; -+ -+ efrm_buffer_table_free(&dm_state->bt_handle); -+ -+ for (i = 0; i < dm_state->n_pages; ++i) -+ pci_unmap_page(linux_efhw_nic(efx_state->nic)->pci_dev, -+ dm_state->dma_addrs[i], PAGE_SIZE, -+ PCI_DMA_TODEVICE); -+ -+ kfree(dm_state->dma_addrs); -+ kfree(dm_state); -+ -+ return; -+} -+EXPORT_SYMBOL(efx_vi_dma_unmap_pages); -+ -+void -+efx_vi_dma_unmap_addrs(struct efx_vi_state *vih, -+ struct efx_vi_dma_map_state *dmh) -+{ -+ struct efx_vi_dma_map_state *dm_state = -+ (struct efx_vi_dma_map_state *)dmh; -+ -+ efrm_buffer_table_free(&dm_state->bt_handle); -+ -+ kfree(dm_state->dma_addrs); -+ kfree(dm_state); -+ -+ return; -+} -+EXPORT_SYMBOL(efx_vi_dma_unmap_addrs); -+ -+unsigned -+efx_vi_dma_get_map_addr(struct efx_vi_state *vih, -+ struct efx_vi_dma_map_state *dmh) -+{ -+ struct efx_vi_dma_map_state *dm_state = -+ (struct efx_vi_dma_map_state *)dmh; -+ -+ return EFHW_BUFFER_ADDR(dm_state->bt_handle.base, 0); -+} -+EXPORT_SYMBOL(efx_vi_dma_get_map_addr); -+ -+#if EFX_VI_STATIC_FILTERS -+static int -+get_filter(struct efx_vi_state *efx_state, -+ efrm_resource_handle_t pthandle, struct filter_resource **fres_out) -+{ -+ struct filter_list_t *flist; -+ if (efx_state->free_fres == NULL) -+ return -ENOMEM; -+ else { -+ flist = efx_state->free_fres; -+ efx_state->free_fres = flist->next; -+ flist->next = efx_state->used_fres; -+ efx_state->used_fres = 
flist; -+ *fres_out = flist->fres; -+ return 0; -+ } -+} -+#endif -+ -+static void -+release_filter(struct efx_vi_state *efx_state, struct filter_resource *fres) -+{ -+#if EFX_VI_STATIC_FILTERS -+ struct filter_list_t *flist = efx_state->used_fres, *prev = NULL; -+ while (flist) { -+ if (flist->fres == fres) { -+ if (prev) -+ prev->next = flist->next; -+ else -+ efx_state->used_fres = flist->next; -+ flist->next = efx_state->free_fres; -+ efx_state->free_fres = flist; -+ return; -+ } -+ prev = flist; -+ flist = flist->next; -+ } -+ EFRM_ERR("%s: couldn't find filter", __func__); -+#else -+ return efrm_filter_resource_release(fres); -+#endif -+} -+ -+int -+efx_vi_filter(struct efx_vi_state *vih, int protocol, -+ unsigned ip_addr_be32, int port_le16, -+ struct filter_resource_t **fh_out) -+{ -+ struct efx_vi_state *efx_state = vih; -+ struct filter_resource *uninitialized_var(frs); -+ int rc; -+ -+#if EFX_VI_STATIC_FILTERS -+ rc = get_filter(efx_state, efx_state->vi_res->rs.rs_handle, &frs); -+#else -+ rc = efrm_filter_resource_alloc(efx_state->vi_res, &frs); -+#endif -+ if (rc < 0) -+ return rc; -+ -+ /* Add the hardware filter. We pass in the source port and address -+ * as 0 (wildcard) to minimise the number of filters needed. 
*/ -+ if (protocol == IPPROTO_TCP) { -+ rc = efrm_filter_resource_tcp_set(frs, 0, 0, ip_addr_be32, -+ port_le16); -+ } else { -+ rc = efrm_filter_resource_udp_set(frs, 0, 0, ip_addr_be32, -+ port_le16); -+ } -+ -+ *fh_out = (struct filter_resource_t *)frs; -+ -+ return rc; -+} -+EXPORT_SYMBOL(efx_vi_filter); -+ -+int -+efx_vi_filter_stop(struct efx_vi_state *vih, struct filter_resource_t *fh) -+{ -+ struct efx_vi_state *efx_state = vih; -+ struct filter_resource *frs = (struct filter_resource *)fh; -+ int rc; -+ -+ rc = efrm_filter_resource_clear(frs); -+ release_filter(efx_state, frs); -+ -+ return rc; -+} -+EXPORT_SYMBOL(efx_vi_filter_stop); -+ -+int -+efx_vi_hw_resource_get_virt(struct efx_vi_state *vih, -+ struct efx_vi_hw_resource_metadata *mdata, -+ struct efx_vi_hw_resource *hw_res_array, -+ int *length) -+{ -+ EFRM_NOTICE("%s: TODO!", __func__); -+ -+ return 0; -+} -+EXPORT_SYMBOL(efx_vi_hw_resource_get_virt); -+ -+int -+efx_vi_hw_resource_get_phys(struct efx_vi_state *vih, -+ struct efx_vi_hw_resource_metadata *mdata, -+ struct efx_vi_hw_resource *hw_res_array, -+ int *length) -+{ -+ struct efx_vi_state *efx_state = vih; -+ struct linux_efhw_nic *lnic = linux_efhw_nic(efx_state->nic); -+ unsigned long phys = lnic->ctr_ap_pci_addr; -+ struct efrm_resource *ep_res = &efx_state->vi_res->rs; -+ unsigned ep_mmap_bytes; -+ int i; -+ -+ if (*length < EFX_VI_HW_RESOURCE_MAXSIZE) -+ return -EINVAL; -+ -+ mdata->nic_arch = efx_state->nic->devtype.arch; -+ mdata->nic_variant = efx_state->nic->devtype.variant; -+ mdata->nic_revision = efx_state->nic->devtype.revision; -+ -+ mdata->evq_order = -+ efx_state->vi_res->nic_info.evq_pages.iobuff.order; -+ mdata->evq_offs = efx_state->vi_res->nic_info.evq_pages.iobuff_off; -+ mdata->evq_capacity = efx_vi_eventq_size; -+ mdata->instance = EFRM_RESOURCE_INSTANCE(ep_res->rs_handle); -+ mdata->rx_capacity = FALCON_DMA_Q_DEFAULT_RX_SIZE; -+ mdata->tx_capacity = FALCON_DMA_Q_DEFAULT_TX_SIZE; -+ -+ ep_mmap_bytes = 
FALCON_DMA_Q_DEFAULT_MMAP; -+ EFRM_ASSERT(ep_mmap_bytes == PAGE_SIZE * 2); -+ -+#ifndef NDEBUG -+ { -+ /* Sanity about doorbells */ -+ unsigned long tx_dma_page_addr, rx_dma_page_addr; -+ -+ /* get rx doorbell address */ -+ rx_dma_page_addr = -+ phys + falcon_rx_dma_page_addr(mdata->instance); -+ /* get tx doorbell address */ -+ tx_dma_page_addr = -+ phys + falcon_tx_dma_page_addr(mdata->instance); -+ -+ /* Check the lower bits of the TX doorbell will be -+ * consistent. */ -+ EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & -+ FALCON_DMA_PAGE_MASK) == -+ (TX_DESC_UPD_REG_PAGE123K_OFST & -+ FALCON_DMA_PAGE_MASK)); -+ -+ /* Check the lower bits of the RX doorbell will be -+ * consistent. */ -+ EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST & -+ FALCON_DMA_PAGE_MASK) == -+ (RX_DESC_UPD_REG_PAGE123K_OFST & -+ FALCON_DMA_PAGE_MASK)); -+ -+ /* Check that the doorbells will be in the same page. */ -+ EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK) == -+ (RX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK)); -+ -+ /* Check that the doorbells are in the same page. */ -+ EFRM_ASSERT((tx_dma_page_addr & PAGE_MASK) == -+ (rx_dma_page_addr & PAGE_MASK)); -+ -+ /* Check that the TX doorbell offset is correct. */ -+ EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) == -+ (tx_dma_page_addr & ~PAGE_MASK)); -+ -+ /* Check that the RX doorbell offset is correct. */ -+ EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) == -+ (rx_dma_page_addr & ~PAGE_MASK)); -+ } -+#endif -+ -+ i = 0; -+ hw_res_array[i].type = EFX_VI_HW_RESOURCE_TXDMAQ; -+ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; -+ hw_res_array[i].more_to_follow = 0; -+ hw_res_array[i].length = PAGE_SIZE; -+ hw_res_array[i].address = -+ (unsigned long)efx_state->vi_res->nic_info. 
-+ dmaq_pages[EFRM_VI_RM_DMA_QUEUE_TX].kva; -+ -+ i++; -+ hw_res_array[i].type = EFX_VI_HW_RESOURCE_RXDMAQ; -+ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; -+ hw_res_array[i].more_to_follow = 0; -+ hw_res_array[i].length = PAGE_SIZE; -+ hw_res_array[i].address = -+ (unsigned long)efx_state->vi_res->nic_info. -+ dmaq_pages[EFRM_VI_RM_DMA_QUEUE_RX].kva; -+ -+ i++; -+ hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQTIMER; -+ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; -+ hw_res_array[i].more_to_follow = 0; -+ hw_res_array[i].length = PAGE_SIZE; -+ hw_res_array[i].address = -+ (unsigned long)phys + falcon_timer_page_addr(mdata->instance); -+ -+ /* NB EFX_VI_HW_RESOURCE_EVQPTR not used on Falcon */ -+ -+ i++; -+ switch (efx_state->nic->devtype.variant) { -+ case 'A': -+ hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR; -+ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; -+ hw_res_array[i].more_to_follow = 0; -+ hw_res_array[i].length = PAGE_SIZE; -+ hw_res_array[i].address = (unsigned long)phys + -+ EVQ_RPTR_REG_OFST + -+ (FALCON_REGISTER128 * mdata->instance); -+ break; -+ case 'B': -+ hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET; -+ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; -+ hw_res_array[i].more_to_follow = 0; -+ hw_res_array[i].length = PAGE_SIZE; -+ hw_res_array[i].address = -+ (unsigned long)FALCON_EVQ_RPTR_REG_P0; -+ break; -+ default: -+ EFRM_ASSERT(0); -+ break; -+ } -+ -+ i++; -+ hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQMEMKVA; -+ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_IOBUFFER; -+ hw_res_array[i].more_to_follow = 0; -+ hw_res_array[i].length = PAGE_SIZE; -+ hw_res_array[i].address = (unsigned long)efx_state->vi_res-> -+ nic_info.evq_pages.iobuff.kva; -+ -+ i++; -+ hw_res_array[i].type = EFX_VI_HW_RESOURCE_BELLPAGE; -+ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; -+ hw_res_array[i].more_to_follow = 0; -+ hw_res_array[i].length = PAGE_SIZE; -+ hw_res_array[i].address = -+ 
(unsigned long)(phys + -+ falcon_tx_dma_page_addr(mdata->instance)) -+ >> PAGE_SHIFT; -+ -+ i++; -+ -+ EFRM_ASSERT(i <= *length); -+ -+ *length = i; -+ -+ return 0; -+} -+EXPORT_SYMBOL(efx_vi_hw_resource_get_phys); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/eventq.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,321 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains event queue support. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define KEVENTQ_MAGIC 0x07111974 -+ -+/*! 
Helper function to allocate the iobuffer needed by an eventq -+ * - it ensures the eventq has the correct alignment for the NIC -+ * -+ * \param rm Event-queue resource manager -+ * \param instance Event-queue instance (index) -+ * \param buf_bytes Requested size of eventq -+ * \return < 0 if iobuffer allocation fails -+ */ -+int -+efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic, -+ struct eventq_resource_hardware *h, -+ int evq_instance, unsigned buf_bytes) -+{ -+ unsigned int page_order; -+ int rc; -+ -+ /* Allocate an iobuffer. */ -+ page_order = get_order(buf_bytes); -+ -+ h->iobuff_off = 0; -+ -+ EFHW_TRACE("allocating eventq size %x", -+ 1u << (page_order + PAGE_SHIFT)); -+ rc = efhw_iopages_alloc(nic, &h->iobuff, page_order); -+ if (rc < 0) { -+ EFHW_WARN("%s: failed to allocate %u pages", -+ __func__, 1u << page_order); -+ return rc; -+ } -+ -+ /* Set the eventq pages to match EFHW_CLEAR_EVENT() */ -+ if (EFHW_CLEAR_EVENT_VALUE) -+ memset(efhw_iopages_ptr(&h->iobuff) + h->iobuff_off, -+ EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE); -+ -+ EFHW_TRACE("%s: allocated %u pages", __func__, 1u << (page_order)); -+ -+ /* For Falcon the NIC is programmed with the base buffer address of a -+ * contiguous region of buffer space. This means that larger than a -+ * PAGE event queues can be expected to allocate even when the host's -+ * physical memory is fragmented */ -+ EFHW_ASSERT(efhw_nic_have_hw(nic)); -+ EFHW_ASSERT(page_order <= h->buf_tbl_alloc.order); -+ -+ /* Initialise the buffer table entries. */ -+ falcon_nic_buffer_table_set_n(nic, h->buf_tbl_alloc.base, -+ efhw_iopages_dma_addr(&h->iobuff) + -+ h->iobuff_off, EFHW_NIC_PAGE_SIZE, 0, -+ 1 << page_order, 0); -+ -+ if (evq_instance >= FALCON_EVQ_TBL_RESERVED) -+ falcon_nic_buffer_table_confirm(nic); -+ return 0; -+} -+ -+/********************************************************************** -+ * Kernel event queue management. -+ */ -+ -+/* Values for [struct efhw_keventq::lock] field. 
*/ -+#define KEVQ_UNLOCKED 0 -+#define KEVQ_LOCKED 1 -+#define KEVQ_RECHECK 2 -+ -+int -+efhw_keventq_ctor(struct efhw_nic *nic, int instance, -+ struct efhw_keventq *evq, -+ struct efhw_ev_handler *ev_handlers) -+{ -+ int rc; -+ unsigned buf_bytes = evq->hw.capacity * sizeof(efhw_event_t); -+ -+ evq->instance = instance; -+ evq->ev_handlers = ev_handlers; -+ -+ /* allocate an IObuffer for the eventq */ -+ rc = efhw_nic_event_queue_alloc_iobuffer(nic, &evq->hw, evq->instance, -+ buf_bytes); -+ if (rc < 0) -+ return rc; -+ -+ /* Zero the timer-value for this queue. -+ AND Tell the nic about the event queue. */ -+ efhw_nic_event_queue_enable(nic, evq->instance, evq->hw.capacity, -+ efhw_iopages_dma_addr(&evq->hw.iobuff) + -+ evq->hw.iobuff_off, -+ evq->hw.buf_tbl_alloc.base, -+ 1 /* interrupting */); -+ -+ evq->lock = KEVQ_UNLOCKED; -+ evq->evq_base = efhw_iopages_ptr(&evq->hw.iobuff) + evq->hw.iobuff_off; -+ evq->evq_ptr = 0; -+ evq->evq_mask = (evq->hw.capacity * sizeof(efhw_event_t)) - 1u; -+ -+ EFHW_TRACE("%s: [%d] base=%p end=%p", __func__, evq->instance, -+ evq->evq_base, evq->evq_base + buf_bytes); -+ -+ return 0; -+} -+ -+void efhw_keventq_dtor(struct efhw_nic *nic, struct efhw_keventq *evq) -+{ -+ EFHW_ASSERT(evq); -+ -+ EFHW_TRACE("%s: [%d]", __func__, evq->instance); -+ -+ /* Zero the timer-value for this queue. -+ And Tell NIC to stop using this event queue. 
*/ -+ efhw_nic_event_queue_disable(nic, evq->instance, 0); -+ -+ /* free the pages used by the eventq itself */ -+ efhw_iopages_free(nic, &evq->hw.iobuff); -+} -+ -+void -+efhw_handle_txdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h, -+ efhw_event_t *evp) -+{ -+ int instance = (int)FALCON_EVENT_TX_FLUSH_Q_ID(evp); -+ EFHW_TRACE("%s: instance=%d", __func__, instance); -+ -+ if (!h->dmaq_flushed_fn) { -+ EFHW_WARN("%s: no handler registered", __func__); -+ return; -+ } -+ -+ h->dmaq_flushed_fn(nic, instance, false); -+} -+ -+void -+efhw_handle_rxdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h, -+ efhw_event_t *evp) -+{ -+ unsigned instance = (unsigned)FALCON_EVENT_RX_FLUSH_Q_ID(evp); -+ EFHW_TRACE("%s: instance=%d", __func__, instance); -+ -+ if (!h->dmaq_flushed_fn) { -+ EFHW_WARN("%s: no handler registered", __func__); -+ return; -+ } -+ -+ h->dmaq_flushed_fn(nic, instance, true); -+} -+ -+void -+efhw_handle_wakeup_event(struct efhw_nic *nic, struct efhw_ev_handler *h, -+ efhw_event_t *evp) -+{ -+ unsigned instance = (unsigned)FALCON_EVENT_WAKE_EVQ_ID(evp); -+ -+ if (!h->wakeup_fn) { -+ EFHW_WARN("%s: no handler registered", __func__); -+ return; -+ } -+ -+ h->wakeup_fn(nic, instance); -+} -+ -+void -+efhw_handle_timeout_event(struct efhw_nic *nic, struct efhw_ev_handler *h, -+ efhw_event_t *evp) -+{ -+ unsigned instance = (unsigned)FALCON_EVENT_WAKE_EVQ_ID(evp); -+ -+ if (!h->timeout_fn) { -+ EFHW_WARN("%s: no handler registered", __func__); -+ return; -+ } -+ -+ h->timeout_fn(nic, instance); -+} -+ -+/********************************************************************** -+ * Kernel event queue event handling. -+ */ -+ -+int efhw_keventq_poll(struct efhw_nic *nic, struct efhw_keventq *q) -+{ -+ efhw_event_t *ev; -+ int l, count = 0; -+ -+ EFHW_ASSERT(nic); -+ EFHW_ASSERT(q); -+ EFHW_ASSERT(q->ev_handlers); -+ -+ /* Acquire the lock, or mark the queue as needing re-checking. 
*/ -+ for (;;) { -+ l = q->lock; -+ if (l == KEVQ_UNLOCKED) { -+ if ((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l) -+ break; -+ } else if (l == KEVQ_LOCKED) { -+ if ((int)cmpxchg(&q->lock, l, KEVQ_RECHECK) == l) -+ return 0; -+ } else { /* already marked for re-checking */ -+ EFHW_ASSERT(l == KEVQ_RECHECK); -+ return 0; -+ } -+ } -+ -+ if (unlikely(EFHW_EVENT_OVERFLOW(q, q))) -+ goto overflow; -+ -+ ev = EFHW_EVENT_PTR(q, q, 0); -+ -+#ifndef NDEBUG -+ if (!EFHW_IS_EVENT(ev)) -+ EFHW_TRACE("%s: %d NO EVENTS!", __func__, q->instance); -+#endif -+ -+ for (;;) { -+ /* Convention for return codes for handlers is: -+ ** 0 - no error, event consumed -+ ** 1 - no error, event not consumed -+ ** -ve - error, event not consumed -+ */ -+ if (likely(EFHW_IS_EVENT(ev))) { -+ count++; -+ -+ switch (FALCON_EVENT_CODE(ev)) { -+ -+ case FALCON_EVENT_CODE_CHAR: -+ falcon_handle_char_event(nic, q->ev_handlers, -+ ev); -+ break; -+ -+ default: -+ EFHW_ERR("efhw_keventq_poll: [%d] UNEXPECTED " -+ "EVENT:"FALCON_EVENT_FMT, -+ q->instance, -+ FALCON_EVENT_PRI_ARG(*ev)); -+ } -+ -+ EFHW_CLEAR_EVENT(ev); -+ EFHW_EVENTQ_NEXT(q); -+ -+ ev = EFHW_EVENT_PTR(q, q, 0); -+ } else { -+ /* No events left. Release the lock (checking if we -+ * need to re-poll to avoid race). */ -+ l = q->lock; -+ if (l == KEVQ_LOCKED) { -+ if ((int)cmpxchg(&q->lock, l, KEVQ_UNLOCKED) -+ == l) { -+ EFHW_TRACE -+ ("efhw_keventq_poll: %d clean exit", -+ q->instance); -+ goto clean_exit; -+ } -+ } -+ -+ /* Potentially more work to do. */ -+ l = q->lock; -+ EFHW_ASSERT(l == KEVQ_RECHECK); -+ EFHW_TEST((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l); -+ EFHW_TRACE("efhw_keventq_poll: %d re-poll required", -+ q->instance); -+ } -+ } -+ -+ /* shouldn't get here */ -+ EFHW_ASSERT(0); -+ -+overflow: -+ /* ?? Oh dear. Should we poll everything that could have possibly -+ ** happened? Or merely cry out in anguish... 
-+ */ -+ EFHW_WARN("efhw_keventq_poll: %d ***** OVERFLOW nic %d *****", -+ q->instance, nic->index); -+ -+ q->lock = KEVQ_UNLOCKED; -+ return count; -+ -+clean_exit: -+ /* Ack the processed events so that this event queue can potentially -+ raise interrupts again */ -+ falcon_nic_evq_ack(nic, q->instance, -+ (EFHW_EVENT_OFFSET(q, q, 0) / sizeof(efhw_event_t)), -+ false); -+ return count; -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/falcon.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,2525 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains Falcon hardware support. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Workarounds and options -+ * -+ *---------------------------------------------------------------------------*/ -+ -+/* Keep a software copy of the filter table and check for duplicates. */ -+#define FALCON_FULL_FILTER_CACHE 1 -+ -+/* Read filters back from the hardware to detect corruption. */ -+#define FALCON_VERIFY_FILTERS 0 -+ -+/* Options */ -+#define RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL 8 /* default search limit */ -+#define RX_FILTER_CTL_SRCH_LIMIT_TCP_WILD 8 /* default search limit */ -+#define RX_FILTER_CTL_SRCH_LIMIT_UDP_FULL 8 /* default search limit */ -+#define RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD 8 /* default search limit */ -+ -+#define FALCON_MAC_SET_TYPE_BY_SPEED 0 -+ -+/* FIXME: We should detect mode at runtime. 
*/ -+#define FALCON_BUFFER_TABLE_FULL_MODE 1 -+ -+/* "Fudge factors" - difference between programmed value and actual depth */ -+#define RX_FILTER_CTL_SRCH_FUDGE_WILD 3 /* increase the search limit */ -+#define RX_FILTER_CTL_SRCH_FUDGE_FULL 1 /* increase the search limit */ -+#define TX_FILTER_CTL_SRCH_FUDGE_WILD 3 /* increase the search limit */ -+#define TX_FILTER_CTL_SRCH_FUDGE_FULL 1 /* increase the search limit */ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Debug Macros -+ * -+ *---------------------------------------------------------------------------*/ -+ -+#define _DEBUG_SYM_ static -+ -+ /*---------------------------------------------------------------------------- -+ * -+ * Macros and forward declarations -+ * -+ *--------------------------------------------------------------------------*/ -+ -+#define FALCON_REGION_NUM 4 /* number of supported memory regions */ -+ -+#define FALCON_BUFFER_TBL_HALF_BYTES 4 -+#define FALCON_BUFFER_TBL_FULL_BYTES 8 -+ -+/* Shadow buffer table - hack for testing only */ -+#if FALCON_BUFFER_TABLE_FULL_MODE == 0 -+# define FALCON_USE_SHADOW_BUFFER_TABLE 1 -+#else -+# define FALCON_USE_SHADOW_BUFFER_TABLE 0 -+#endif -+ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Header assertion checks -+ * -+ *---------------------------------------------------------------------------*/ -+ -+#define FALCON_ASSERT_VALID() /* nothing yet */ -+ -+/* Falcon has a 128bit register model but most registers have useful -+ defaults or only implement a small number of bits. Some registers -+ can be programmed 32bits UNLOCKED all others should be interlocked -+ against other threads within the same protection domain. -+ -+ Aim is for software to perform the minimum number of writes and -+ also to minimise the read-modify-write activity (which generally -+ indicates a lack of clarity in the use model). 
-+ -+ Registers which are programmed in this module are listed below -+ together with the method of access. Care must be taken to ensure -+ remain adequate if the register spec changes. -+ -+ All 128bits programmed -+ FALCON_BUFFER_TBL_HALF -+ RX_FILTER_TBL -+ TX_DESC_PTR_TBL -+ RX_DESC_PTR_TBL -+ DRV_EV_REG -+ -+ All 64bits programmed -+ FALCON_BUFFER_TBL_FULL -+ -+ 32 bits are programmed (UNLOCKED) -+ EVQ_RPTR_REG -+ -+ Low 64bits programmed remainder are written with a random number -+ RX_DC_CFG_REG -+ TX_DC_CFG_REG -+ SRM_RX_DC_CFG_REG -+ SRM_TX_DC_CFG_REG -+ BUF_TBL_CFG_REG -+ BUF_TBL_UPD_REG -+ SRM_UPD_EVQ_REG -+ EVQ_PTR_TBL -+ TIMER_CMD_REG -+ TX_PACE_TBL -+ FATAL_INTR_REG -+ INT_EN_REG (When enabling interrupts) -+ TX_FLUSH_DESCQ_REG -+ RX_FLUSH_DESCQ -+ -+ Read Modify Write on low 32bits remainder are written with a random number -+ INT_EN_REG (When sending a driver interrupt) -+ DRIVER_REGX -+ -+ Read Modify Write on low 64bits remainder are written with a random number -+ SRM_CFG_REG_OFST -+ RX_CFG_REG_OFST -+ RX_FILTER_CTL_REG -+ -+ Read Modify Write on full 128bits -+ TXDP_RESERVED_REG (aka TXDP_UNDOCUMENTED) -+ TX_CFG_REG -+ -+*/ -+ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * DMAQ low-level register interface -+ * -+ *---------------------------------------------------------------------------*/ -+ -+static unsigned dmaq_sizes[] = { -+ 512, -+ EFHW_1K, -+ EFHW_2K, -+ EFHW_4K, -+}; -+ -+#define N_DMAQ_SIZES (sizeof(dmaq_sizes) / sizeof(dmaq_sizes[0])) -+ -+static inline ulong falcon_dma_tx_q_offset(struct efhw_nic *nic, unsigned dmaq) -+{ -+ EFHW_ASSERT(dmaq < nic->num_dmaqs); -+ return TX_DESC_PTR_TBL_OFST + dmaq * FALCON_REGISTER128; -+} -+ -+static inline uint falcon_dma_tx_q_size_index(uint dmaq_size) -+{ -+ uint i; -+ -+ /* size must be one of the various options, otherwise we assert */ -+ for (i = 0; i < N_DMAQ_SIZES; i++) { -+ if (dmaq_size == dmaq_sizes[i]) -+ break; -+ } -+ EFHW_ASSERT(i < 
N_DMAQ_SIZES); -+ return i; -+} -+ -+static void -+falcon_dmaq_tx_q_init(struct efhw_nic *nic, -+ uint dmaq, uint evq_id, uint own_id, -+ uint tag, uint dmaq_size, uint buf_idx, uint flags) -+{ -+ FALCON_LOCK_DECL; -+ uint index, desc_type; -+ uint64_t val1, val2, val3; -+ ulong offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ /* Q attributes */ -+ int iscsi_hdig_en = ((flags & EFHW_VI_ISCSI_TX_HDIG_EN) != 0); -+ int iscsi_ddig_en = ((flags & EFHW_VI_ISCSI_TX_DDIG_EN) != 0); -+ int csum_ip_dis = ((flags & EFHW_VI_TX_IP_CSUM_DIS) != 0); -+ int csum_tcp_dis = ((flags & EFHW_VI_TX_TCPUDP_CSUM_DIS) != 0); -+ int non_ip_drop_dis = ((flags & EFHW_VI_TX_TCPUDP_ONLY) == 0); -+ -+ /* initialise the TX descriptor queue pointer table */ -+ -+ /* NB physical vs buffer addressing is determined by the Queue ID. */ -+ -+ offset = falcon_dma_tx_q_offset(nic, dmaq); -+ index = falcon_dma_tx_q_size_index(dmaq_size); -+ -+ /* allow VI flag to override this queue's descriptor type */ -+ desc_type = (flags & EFHW_VI_TX_PHYS_ADDR_EN) ? 0 : 1; -+ -+ /* bug9403: It is dangerous to allow buffer-addressed queues to -+ * have owner_id=0. 
*/ -+ EFHW_ASSERT((own_id > 0) || desc_type == 0); -+ -+ /* dword 1 */ -+ __DWCHCK(TX_DESCQ_FLUSH_LBN, TX_DESCQ_FLUSH_WIDTH); -+ __DWCHCK(TX_DESCQ_TYPE_LBN, TX_DESCQ_TYPE_WIDTH); -+ __DWCHCK(TX_DESCQ_SIZE_LBN, TX_DESCQ_SIZE_WIDTH); -+ __DWCHCK(TX_DESCQ_LABEL_LBN, TX_DESCQ_LABEL_WIDTH); -+ __DWCHCK(TX_DESCQ_OWNER_ID_LBN, TX_DESCQ_OWNER_ID_WIDTH); -+ -+ __LWCHK(TX_DESCQ_EVQ_ID_LBN, TX_DESCQ_EVQ_ID_WIDTH); -+ -+ __RANGECHCK(1, TX_DESCQ_FLUSH_WIDTH); -+ __RANGECHCK(desc_type, TX_DESCQ_TYPE_WIDTH); -+ __RANGECHCK(index, TX_DESCQ_SIZE_WIDTH); -+ __RANGECHCK(tag, TX_DESCQ_LABEL_WIDTH); -+ __RANGECHCK(own_id, TX_DESCQ_OWNER_ID_WIDTH); -+ __RANGECHCK(evq_id, TX_DESCQ_EVQ_ID_WIDTH); -+ -+ val1 = ((desc_type << TX_DESCQ_TYPE_LBN) | -+ (index << TX_DESCQ_SIZE_LBN) | -+ (tag << TX_DESCQ_LABEL_LBN) | -+ (own_id << TX_DESCQ_OWNER_ID_LBN) | -+ (__LOW(evq_id, TX_DESCQ_EVQ_ID_LBN, TX_DESCQ_EVQ_ID_WIDTH))); -+ -+ /* dword 2 */ -+ __DW2CHCK(TX_DESCQ_BUF_BASE_ID_LBN, TX_DESCQ_BUF_BASE_ID_WIDTH); -+ __RANGECHCK(buf_idx, TX_DESCQ_BUF_BASE_ID_WIDTH); -+ -+ val2 = ((__HIGH(evq_id, TX_DESCQ_EVQ_ID_LBN, TX_DESCQ_EVQ_ID_WIDTH)) | -+ (buf_idx << __DW2(TX_DESCQ_BUF_BASE_ID_LBN))); -+ -+ /* dword 3 */ -+ __DW3CHCK(TX_ISCSI_HDIG_EN_LBN, TX_ISCSI_HDIG_EN_WIDTH); -+ __DW3CHCK(TX_ISCSI_DDIG_EN_LBN, TX_ISCSI_DDIG_EN_WIDTH); -+ __RANGECHCK(iscsi_hdig_en, TX_ISCSI_HDIG_EN_WIDTH); -+ __RANGECHCK(iscsi_ddig_en, TX_ISCSI_DDIG_EN_WIDTH); -+ -+ val3 = ((iscsi_hdig_en << __DW3(TX_ISCSI_HDIG_EN_LBN)) | -+ (iscsi_ddig_en << __DW3(TX_ISCSI_DDIG_EN_LBN)) | -+ (1 << __DW3(TX_DESCQ_EN_LBN))); /* queue enable bit */ -+ -+ switch (nic->devtype.variant) { -+ case 'B': -+ __DW3CHCK(TX_NON_IP_DROP_DIS_B0_LBN, -+ TX_NON_IP_DROP_DIS_B0_WIDTH); -+ __DW3CHCK(TX_IP_CHKSM_DIS_B0_LBN, TX_IP_CHKSM_DIS_B0_WIDTH); -+ __DW3CHCK(TX_TCP_CHKSM_DIS_B0_LBN, TX_TCP_CHKSM_DIS_B0_WIDTH); -+ -+ val3 |= ((non_ip_drop_dis << __DW3(TX_NON_IP_DROP_DIS_B0_LBN))| -+ (csum_ip_dis << __DW3(TX_IP_CHKSM_DIS_B0_LBN)) | -+ (csum_tcp_dis << 
__DW3(TX_TCP_CHKSM_DIS_B0_LBN))); -+ break; -+ case 'A': -+ if (csum_ip_dis || csum_tcp_dis || !non_ip_drop_dis) -+ EFHW_WARN -+ ("%s: bad settings for A1 csum_ip_dis=%d " -+ "csum_tcp_dis=%d non_ip_drop_dis=%d", -+ __func__, csum_ip_dis, -+ csum_tcp_dis, non_ip_drop_dis); -+ break; -+ default: -+ EFHW_ASSERT(0); -+ break; -+ } -+ -+ EFHW_TRACE("%s: txq %x evq %u tag %x id %x buf %x " -+ "%x:%x:%x->%" PRIx64 ":%" PRIx64 ":%" PRIx64, -+ __func__, -+ dmaq, evq_id, tag, own_id, buf_idx, dmaq_size, -+ iscsi_hdig_en, iscsi_ddig_en, val1, val2, val3); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+ return; -+} -+ -+static inline ulong -+falcon_dma_rx_q_offset(struct efhw_nic *nic, unsigned dmaq) -+{ -+ EFHW_ASSERT(dmaq < nic->num_dmaqs); -+ return RX_DESC_PTR_TBL_OFST + dmaq * FALCON_REGISTER128; -+} -+ -+static void -+falcon_dmaq_rx_q_init(struct efhw_nic *nic, -+ uint dmaq, uint evq_id, uint own_id, -+ uint tag, uint dmaq_size, uint buf_idx, uint flags) -+{ -+ FALCON_LOCK_DECL; -+ uint i, desc_type = 1; -+ uint64_t val1, val2, val3; -+ ulong offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ /* Q attributes */ -+#if BUG5762_WORKAROUND -+ int jumbo = 1; /* Queues must not have mixed types */ -+#else -+ int jumbo = ((flags & EFHW_VI_JUMBO_EN) != 0); -+#endif -+ int iscsi_hdig_en = ((flags & EFHW_VI_ISCSI_RX_HDIG_EN) != 0); -+ int iscsi_ddig_en = ((flags & EFHW_VI_ISCSI_RX_DDIG_EN) != 0); -+ -+ /* initialise the TX descriptor queue pointer table */ -+ offset = falcon_dma_rx_q_offset(nic, dmaq); -+ -+ /* size must be one of the various options, otherwise we assert */ -+ for (i = 0; i < N_DMAQ_SIZES; i++) { -+ if (dmaq_size == dmaq_sizes[i]) -+ break; -+ } -+ EFHW_ASSERT(i < N_DMAQ_SIZES); -+ -+ /* allow VI flag to override this queue's descriptor type */ -+ desc_type = (flags & EFHW_VI_RX_PHYS_ADDR_EN) 
? 0 : 1; -+ -+ /* bug9403: It is dangerous to allow buffer-addressed queues to have -+ * owner_id=0 */ -+ EFHW_ASSERT((own_id > 0) || desc_type == 0); -+ -+ /* dword 1 */ -+ __DWCHCK(RX_DESCQ_EN_LBN, RX_DESCQ_EN_WIDTH); -+ __DWCHCK(RX_DESCQ_JUMBO_LBN, RX_DESCQ_JUMBO_WIDTH); -+ __DWCHCK(RX_DESCQ_TYPE_LBN, RX_DESCQ_TYPE_WIDTH); -+ __DWCHCK(RX_DESCQ_SIZE_LBN, RX_DESCQ_SIZE_WIDTH); -+ __DWCHCK(RX_DESCQ_LABEL_LBN, RX_DESCQ_LABEL_WIDTH); -+ __DWCHCK(RX_DESCQ_OWNER_ID_LBN, RX_DESCQ_OWNER_ID_WIDTH); -+ -+ __LWCHK(RX_DESCQ_EVQ_ID_LBN, RX_DESCQ_EVQ_ID_WIDTH); -+ -+ __RANGECHCK(1, RX_DESCQ_EN_WIDTH); -+ __RANGECHCK(jumbo, RX_DESCQ_JUMBO_WIDTH); -+ __RANGECHCK(desc_type, RX_DESCQ_TYPE_WIDTH); -+ __RANGECHCK(i, RX_DESCQ_SIZE_WIDTH); -+ __RANGECHCK(tag, RX_DESCQ_LABEL_WIDTH); -+ __RANGECHCK(own_id, RX_DESCQ_OWNER_ID_WIDTH); -+ __RANGECHCK(evq_id, RX_DESCQ_EVQ_ID_WIDTH); -+ -+ val1 = ((1 << RX_DESCQ_EN_LBN) | -+ (jumbo << RX_DESCQ_JUMBO_LBN) | -+ (desc_type << RX_DESCQ_TYPE_LBN) | -+ (i << RX_DESCQ_SIZE_LBN) | -+ (tag << RX_DESCQ_LABEL_LBN) | -+ (own_id << RX_DESCQ_OWNER_ID_LBN) | -+ (__LOW(evq_id, RX_DESCQ_EVQ_ID_LBN, RX_DESCQ_EVQ_ID_WIDTH))); -+ -+ /* dword 2 */ -+ __DW2CHCK(RX_DESCQ_BUF_BASE_ID_LBN, RX_DESCQ_BUF_BASE_ID_WIDTH); -+ __RANGECHCK(buf_idx, RX_DESCQ_BUF_BASE_ID_WIDTH); -+ -+ val2 = ((__HIGH(evq_id, RX_DESCQ_EVQ_ID_LBN, RX_DESCQ_EVQ_ID_WIDTH)) | -+ (buf_idx << __DW2(RX_DESCQ_BUF_BASE_ID_LBN))); -+ -+ /* dword 3 */ -+ __DW3CHCK(RX_ISCSI_HDIG_EN_LBN, RX_ISCSI_HDIG_EN_WIDTH); -+ __DW3CHCK(RX_ISCSI_DDIG_EN_LBN, RX_ISCSI_DDIG_EN_WIDTH); -+ __RANGECHCK(iscsi_hdig_en, RX_ISCSI_HDIG_EN_WIDTH); -+ __RANGECHCK(iscsi_ddig_en, RX_ISCSI_DDIG_EN_WIDTH); -+ -+ val3 = (iscsi_hdig_en << __DW3(RX_ISCSI_HDIG_EN_LBN)) | -+ (iscsi_ddig_en << __DW3(RX_ISCSI_DDIG_EN_LBN)); -+ -+ EFHW_TRACE("%s: rxq %x evq %u tag %x id %x buf %x %s " -+ "%x:%x:%x -> %" PRIx64 ":%" PRIx64 ":%" PRIx64, -+ __func__, -+ dmaq, evq_id, tag, own_id, buf_idx, -+ jumbo ? 
"jumbo" : "normal", dmaq_size, -+ iscsi_hdig_en, iscsi_ddig_en, val1, val2, val3); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+ return; -+} -+ -+static void falcon_dmaq_tx_q_disable(struct efhw_nic *nic, uint dmaq) -+{ -+ FALCON_LOCK_DECL; -+ uint64_t val1, val2, val3; -+ ulong offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ /* initialise the TX descriptor queue pointer table */ -+ -+ offset = falcon_dma_tx_q_offset(nic, dmaq); -+ -+ /* dword 1 */ -+ __DWCHCK(TX_DESCQ_TYPE_LBN, TX_DESCQ_TYPE_WIDTH); -+ -+ val1 = ((uint64_t) 1 << TX_DESCQ_TYPE_LBN); -+ -+ /* dword 2 */ -+ val2 = 0; -+ -+ /* dword 3 */ -+ val3 = (0 << __DW3(TX_DESCQ_EN_LBN)); /* queue enable bit */ -+ -+ EFHW_TRACE("%s: %x->%" PRIx64 ":%" PRIx64 ":%" PRIx64, -+ __func__, dmaq, val1, val2, val3); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+ return; -+} -+ -+static void falcon_dmaq_rx_q_disable(struct efhw_nic *nic, uint dmaq) -+{ -+ FALCON_LOCK_DECL; -+ uint64_t val1, val2, val3; -+ ulong offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ /* initialise the TX descriptor queue pointer table */ -+ offset = falcon_dma_rx_q_offset(nic, dmaq); -+ -+ /* dword 1 */ -+ __DWCHCK(RX_DESCQ_EN_LBN, RX_DESCQ_EN_WIDTH); -+ __DWCHCK(RX_DESCQ_TYPE_LBN, RX_DESCQ_TYPE_WIDTH); -+ -+ val1 = ((0 << RX_DESCQ_EN_LBN) | (1 << RX_DESCQ_TYPE_LBN)); -+ -+ /* dword 2 */ -+ val2 = 0; -+ -+ /* dword 3 */ -+ val3 = 0; -+ -+ EFHW_TRACE("falcon_dmaq_rx_q_disable: %x->%" -+ PRIx64 ":%" PRIx64 ":%" PRIx64, -+ dmaq, val1, val2, val3); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), 
val3); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+ return; -+} -+ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Buffer Table low-level register interface -+ * -+ *---------------------------------------------------------------------------*/ -+ -+/*! Convert a (potentially) 64-bit physical address to 32-bits. Every use -+** of this function is a place where we're not 64-bit clean. -+*/ -+static inline uint32_t dma_addr_to_u32(dma_addr_t addr) -+{ -+ /* Top bits had better be zero! */ -+ EFHW_ASSERT(addr == (addr & 0xffffffff)); -+ return (uint32_t) addr; -+} -+ -+static inline uint32_t -+falcon_nic_buffer_table_entry32_mk(dma_addr_t dma_addr, int own_id) -+{ -+ uint32_t dma_addr32 = FALCON_BUFFER_4K_PAGE(dma_addr_to_u32(dma_addr)); -+ -+ /* don't do this to me */ -+ EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_LBN == BUF_ADR_HBUF_EVEN_LBN + 32); -+ EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_LBN == -+ BUF_OWNER_ID_HBUF_EVEN_LBN + 32); -+ -+ EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_WIDTH == -+ BUF_OWNER_ID_HBUF_EVEN_WIDTH); -+ EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_WIDTH == BUF_ADR_HBUF_EVEN_WIDTH); -+ -+ __DWCHCK(BUF_ADR_HBUF_EVEN_LBN, BUF_ADR_HBUF_EVEN_WIDTH); -+ __DWCHCK(BUF_OWNER_ID_HBUF_EVEN_LBN, BUF_OWNER_ID_HBUF_EVEN_WIDTH); -+ -+ __RANGECHCK(dma_addr32, BUF_ADR_HBUF_EVEN_WIDTH); -+ __RANGECHCK(own_id, BUF_OWNER_ID_HBUF_EVEN_WIDTH); -+ -+ return (dma_addr32 << BUF_ADR_HBUF_EVEN_LBN) | -+ (own_id << BUF_OWNER_ID_HBUF_EVEN_LBN); -+} -+ -+static inline uint64_t -+falcon_nic_buffer_table_entry64_mk(dma_addr_t dma_addr, -+ int bufsz, /* bytes */ -+ int region, int own_id) -+{ -+ __DW2CHCK(IP_DAT_BUF_SIZE_LBN, IP_DAT_BUF_SIZE_WIDTH); -+ __DW2CHCK(BUF_ADR_REGION_LBN, BUF_ADR_REGION_WIDTH); -+ __LWCHK(BUF_ADR_FBUF_LBN, BUF_ADR_FBUF_WIDTH); -+ __DWCHCK(BUF_OWNER_ID_FBUF_LBN, BUF_OWNER_ID_FBUF_WIDTH); -+ -+ EFHW_ASSERT((bufsz == EFHW_4K) || (bufsz == EFHW_8K)); -+ -+ dma_addr = (dma_addr >> 12) & __FALCON_MASK64(BUF_ADR_FBUF_WIDTH); -+ -+ 
__RANGECHCK(dma_addr, BUF_ADR_FBUF_WIDTH); -+ __RANGECHCK(1, IP_DAT_BUF_SIZE_WIDTH); -+ __RANGECHCK(region, BUF_ADR_REGION_WIDTH); -+ __RANGECHCK(own_id, BUF_OWNER_ID_FBUF_WIDTH); -+ -+ return ((uint64_t) (bufsz == EFHW_8K) << IP_DAT_BUF_SIZE_LBN) | -+ ((uint64_t) region << BUF_ADR_REGION_LBN) | -+ ((uint64_t) dma_addr << BUF_ADR_FBUF_LBN) | -+ ((uint64_t) own_id << BUF_OWNER_ID_FBUF_LBN); -+} -+ -+static inline void -+_falcon_nic_buffer_table_set32(struct efhw_nic *nic, -+ dma_addr_t dma_addr, uint bufsz, -+ uint region, /* not used */ -+ int own_id, int buffer_id) -+{ -+ /* programming the half table needs to be done in pairs. */ -+ uint64_t entry, val, shift; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ volatile char __iomem *offset; -+ -+ EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_LBN == BUF_ADR_HBUF_EVEN_LBN + 32); -+ EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_LBN == -+ BUF_OWNER_ID_HBUF_EVEN_LBN + 32); -+ -+ shift = (buffer_id & 1) ? 32 : 0; -+ -+ offset = (efhw_kva + BUF_HALF_TBL_OFST + -+ ((buffer_id & ~1) * FALCON_BUFFER_TBL_HALF_BYTES)); -+ -+ entry = falcon_nic_buffer_table_entry32_mk(dma_addr_to_u32(dma_addr), -+ own_id); -+ -+#if FALCON_USE_SHADOW_BUFFER_TABLE -+ val = _falcon_buffer_table[buffer_id & ~1]; -+#else -+ /* This will not work unless we've completed -+ * the buffer table updates */ -+ falcon_read_q(offset, &val); -+#endif -+ val &= ~(((uint64_t) 0xffffffff) << shift); -+ val |= (entry << shift); -+ -+ EFHW_TRACE("%s[%x]: %lx:%x:%" PRIx64 "->%x = %" -+ PRIx64, __func__, buffer_id, (unsigned long) dma_addr, -+ own_id, entry, (unsigned)(offset - efhw_kva), val); -+ -+ /* Falcon requires that access to this register is serialised */ -+ falcon_write_q(offset, val); -+ -+ /* NB. No mmiowb(). Caller should do that e.g by calling commit */ -+ -+#if FALCON_USE_SHADOW_BUFFER_TABLE -+ _falcon_buffer_table[buffer_id & ~1] = val; -+#endif -+ -+ /* Confirm the entry if the event queues haven't been set up. 
*/ -+ if (!nic->irq_handler) { -+ uint64_t new_val; -+ int count = 0; -+ while (1) { -+ mmiowb(); -+ falcon_read_q(offset, &new_val); -+ if (new_val == val) -+ break; -+ count++; -+ if (count > 1000) { -+ EFHW_WARN("%s: poll Timeout", __func__); -+ break; -+ } -+ udelay(1); -+ } -+ } -+} -+ -+static inline void -+_falcon_nic_buffer_table_set64(struct efhw_nic *nic, -+ dma_addr_t dma_addr, uint bufsz, -+ uint region, int own_id, int buffer_id) -+{ -+ volatile char __iomem *offset; -+ uint64_t entry; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ EFHW_ASSERT(region < FALCON_REGION_NUM); -+ -+ EFHW_ASSERT((bufsz == EFHW_4K) || -+ (bufsz == EFHW_8K && FALCON_BUFFER_TABLE_FULL_MODE)); -+ -+ offset = (efhw_kva + BUF_FULL_TBL_OFST + -+ (buffer_id * FALCON_BUFFER_TBL_FULL_BYTES)); -+ -+ entry = falcon_nic_buffer_table_entry64_mk(dma_addr, bufsz, region, -+ own_id); -+ -+ EFHW_TRACE("%s[%x]: %lx:bufsz=%x:region=%x:ownid=%x", -+ __func__, buffer_id, (unsigned long) dma_addr, bufsz, -+ region, own_id); -+ -+ EFHW_TRACE("%s: BUF[%x]:NIC[%x]->%" PRIx64, -+ __func__, buffer_id, -+ (unsigned int)(offset - efhw_kva), entry); -+ -+ /* Falcon requires that access to this register is serialised */ -+ falcon_write_q(offset, entry); -+ -+ /* NB. No mmiowb(). Caller should do that e.g by calling commit */ -+ -+ /* Confirm the entry if the event queues haven't been set up. 
*/ -+ if (!nic->irq_handler) { -+ uint64_t new_entry; -+ int count = 0; -+ while (1) { -+ mmiowb(); -+ falcon_read_q(offset, &new_entry); -+ if (new_entry == entry) -+ return; -+ count++; -+ if (count > 1000) { -+ EFHW_WARN("%s: poll Timeout waiting for " -+ "value %"PRIx64 -+ " (last was %"PRIx64")", -+ __func__, entry, new_entry); -+ break; -+ } -+ udelay(1); -+ } -+ } -+} -+ -+#if FALCON_BUFFER_TABLE_FULL_MODE -+#define _falcon_nic_buffer_table_set _falcon_nic_buffer_table_set64 -+#else -+#define _falcon_nic_buffer_table_set _falcon_nic_buffer_table_set32 -+#endif -+ -+static inline void _falcon_nic_buffer_table_commit(struct efhw_nic *nic) -+{ -+ /* MUST be called holding the FALCON_LOCK */ -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ uint64_t cmd; -+ -+ EFHW_BUILD_ASSERT(BUF_TBL_UPD_REG_KER_OFST == BUF_TBL_UPD_REG_OFST); -+ -+ __DW2CHCK(BUF_UPD_CMD_LBN, BUF_UPD_CMD_WIDTH); -+ __RANGECHCK(1, BUF_UPD_CMD_WIDTH); -+ -+ cmd = ((uint64_t) 1 << BUF_UPD_CMD_LBN); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ falcon_write_qq(efhw_kva + BUF_TBL_UPD_REG_OFST, -+ cmd, FALCON_ATOMIC_UPD_REG); -+ mmiowb(); -+ -+ nic->buf_commit_outstanding++; -+ EFHW_TRACE("COMMIT REQ out=%d", nic->buf_commit_outstanding); -+} -+ -+static void falcon_nic_buffer_table_commit(struct efhw_nic *nic) -+{ -+ /* nothing to do */ -+} -+ -+static inline void -+_falcon_nic_buffer_table_clear(struct efhw_nic *nic, int buffer_id, int num) -+{ -+ uint64_t cmd; -+ uint64_t start_id = buffer_id; -+ uint64_t end_id = buffer_id + num - 1; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ volatile char __iomem *offset = (efhw_kva + BUF_TBL_UPD_REG_OFST); -+ -+ EFHW_BUILD_ASSERT(BUF_TBL_UPD_REG_KER_OFST == BUF_TBL_UPD_REG_OFST); -+ -+#if !FALCON_BUFFER_TABLE_FULL_MODE -+ /* buffer_ids in half buffer mode reference pairs of buffers */ -+ EFHW_ASSERT(buffer_id % 1 == 0); -+ EFHW_ASSERT(num % 1 == 0); -+ start_id = start_id >> 1; -+ end_id = end_id >> 1; -+#endif -+ -+ 
EFHW_ASSERT(num >= 1); -+ -+ __DWCHCK(BUF_CLR_START_ID_LBN, BUF_CLR_START_ID_WIDTH); -+ __DW2CHCK(BUF_CLR_END_ID_LBN, BUF_CLR_END_ID_WIDTH); -+ -+ __DW2CHCK(BUF_CLR_CMD_LBN, BUF_CLR_CMD_WIDTH); -+ __RANGECHCK(1, BUF_CLR_CMD_WIDTH); -+ -+ __RANGECHCK(start_id, BUF_CLR_START_ID_WIDTH); -+ __RANGECHCK(end_id, BUF_CLR_END_ID_WIDTH); -+ -+ cmd = (((uint64_t) 1 << BUF_CLR_CMD_LBN) | -+ (start_id << BUF_CLR_START_ID_LBN) | -+ (end_id << BUF_CLR_END_ID_LBN)); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ falcon_write_qq(offset, cmd, FALCON_ATOMIC_UPD_REG); -+ mmiowb(); -+ -+ nic->buf_commit_outstanding++; -+ EFHW_TRACE("COMMIT CLEAR out=%d", nic->buf_commit_outstanding); -+} -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Events low-level register interface -+ * -+ *---------------------------------------------------------------------------*/ -+ -+static unsigned eventq_sizes[] = { -+ 512, -+ EFHW_1K, -+ EFHW_2K, -+ EFHW_4K, -+ EFHW_8K, -+ EFHW_16K, -+ EFHW_32K -+}; -+ -+#define N_EVENTQ_SIZES (sizeof(eventq_sizes) / sizeof(eventq_sizes[0])) -+ -+static inline void falcon_nic_srm_upd_evq(struct efhw_nic *nic, int evq) -+{ -+ /* set up the eventq which will receive events from the SRAM module. 
-+ * i.e buffer table updates and clears, TX and RX aperture table -+ * updates */ -+ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ EFHW_BUILD_ASSERT(SRM_UPD_EVQ_REG_OFST == SRM_UPD_EVQ_REG_KER_OFST); -+ -+ __DWCHCK(SRM_UPD_EVQ_ID_LBN, SRM_UPD_EVQ_ID_WIDTH); -+ __RANGECHCK(evq, SRM_UPD_EVQ_ID_WIDTH); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + SRM_UPD_EVQ_REG_OFST, -+ ((uint64_t) evq << SRM_UPD_EVQ_ID_LBN), -+ FALCON_ATOMIC_SRPM_UDP_EVQ_REG); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+static void -+falcon_nic_evq_ptr_tbl(struct efhw_nic *nic, -+ uint evq, /* evq id */ -+ uint enable, /* 1 to enable, 0 to disable */ -+ uint buf_base_id,/* Buffer table base for EVQ */ -+ uint evq_size /* Number of events */) -+{ -+ FALCON_LOCK_DECL; -+ uint i, val; -+ ulong offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ /* size must be one of the various options, otherwise we assert */ -+ for (i = 0; i < N_EVENTQ_SIZES; i++) { -+ if (evq_size <= eventq_sizes[i]) -+ break; -+ } -+ EFHW_ASSERT(i < N_EVENTQ_SIZES); -+ -+ __DWCHCK(EVQ_BUF_BASE_ID_LBN, EVQ_BUF_BASE_ID_WIDTH); -+ __DWCHCK(EVQ_SIZE_LBN, EVQ_SIZE_WIDTH); -+ __DWCHCK(EVQ_EN_LBN, EVQ_EN_WIDTH); -+ -+ __RANGECHCK(i, EVQ_SIZE_WIDTH); -+ __RANGECHCK(buf_base_id, EVQ_BUF_BASE_ID_WIDTH); -+ __RANGECHCK(1, EVQ_EN_WIDTH); -+ -+ /* if !enable then only evq needs to be correct, although valid -+ * values need to be passed in for other arguments to prevent -+ * assertions */ -+ -+ val = ((i << EVQ_SIZE_LBN) | (buf_base_id << EVQ_BUF_BASE_ID_LBN) | -+ (enable ? 
(1 << EVQ_EN_LBN) : 0)); -+ -+ EFHW_ASSERT(evq < nic->num_evqs); -+ -+ offset = EVQ_PTR_TBL_CHAR_OFST; -+ offset += evq * FALCON_REGISTER128; -+ -+ EFHW_TRACE("%s: evq %u en=%x:buf=%x:size=%x->%x at %lx", -+ __func__, evq, enable, buf_base_id, evq_size, val, -+ offset); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + offset, val, FALCON_ATOMIC_PTR_TBL_REG); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+ -+ /* caller must wait for an update done event before writing any more -+ table entries */ -+ -+ return; -+} -+ -+void -+falcon_nic_evq_ack(struct efhw_nic *nic, -+ uint evq, /* evq id */ -+ uint rptr, /* new read pointer update */ -+ bool wakeup /* request a wakeup event if ptr's != */ -+ ) -+{ -+ uint val; -+ ulong offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4); -+ -+ __DWCHCK(EVQ_RPTR_LBN, EVQ_RPTR_WIDTH); -+ __RANGECHCK(rptr, EVQ_RPTR_WIDTH); -+ -+ val = (rptr << EVQ_RPTR_LBN); -+ -+ EFHW_ASSERT(evq < nic->num_evqs); -+ -+ if (evq < FALCON_EVQ_CHAR) { -+ offset = EVQ_RPTR_REG_KER_OFST; -+ offset += evq * FALCON_REGISTER128; -+ -+ EFHW_ASSERT(!wakeup); /* don't try this at home */ -+ } else { -+ offset = EVQ_RPTR_REG_OFST + (FALCON_EVQ_CHAR * -+ FALCON_REGISTER128); -+ offset += (evq - FALCON_EVQ_CHAR) * FALCON_REGISTER128; -+ -+ /* nothing to do for interruptless event queues which do -+ * not want a wakeup */ -+ if (evq != FALCON_EVQ_CHAR && !wakeup) -+ return; -+ } -+ -+ EFHW_TRACE("%s: %x %x %x->%x", __func__, evq, rptr, wakeup, val); -+ -+ writel(val, efhw_kva + offset); -+ mmiowb(); -+} -+ -+/*---------------------------------------------------------------------------*/ -+ -+static inline void -+falcon_drv_ev(struct efhw_nic *nic, uint64_t data, uint qid) -+{ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ /* send an event from one driver to the other */ -+ EFHW_BUILD_ASSERT(DRV_EV_REG_KER_OFST 
== DRV_EV_REG_OFST); -+ EFHW_BUILD_ASSERT(DRV_EV_DATA_LBN == 0); -+ EFHW_BUILD_ASSERT(DRV_EV_DATA_WIDTH == 64); -+ EFHW_BUILD_ASSERT(DRV_EV_QID_LBN == 64); -+ EFHW_BUILD_ASSERT(DRV_EV_QID_WIDTH == 12); -+ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + DRV_EV_REG_OFST, data, qid); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+_DEBUG_SYM_ void -+falcon_ab_timer_tbl_set(struct efhw_nic *nic, -+ uint evq, /* timer id */ -+ uint mode, /* mode bits */ -+ uint countdown /* counting value to set */) -+{ -+ FALCON_LOCK_DECL; -+ uint val; -+ ulong offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ EFHW_BUILD_ASSERT(TIMER_VAL_LBN == 0); -+ -+ __DWCHCK(TIMER_MODE_LBN, TIMER_MODE_WIDTH); -+ __DWCHCK(TIMER_VAL_LBN, TIMER_VAL_WIDTH); -+ -+ __RANGECHCK(mode, TIMER_MODE_WIDTH); -+ __RANGECHCK(countdown, TIMER_VAL_WIDTH); -+ -+ val = ((mode << TIMER_MODE_LBN) | (countdown << TIMER_VAL_LBN)); -+ -+ if (evq < FALCON_EVQ_CHAR) { -+ offset = TIMER_CMD_REG_KER_OFST; -+ offset += evq * EFHW_8K; /* PAGE mapped register */ -+ } else { -+ offset = TIMER_TBL_OFST; -+ offset += evq * FALCON_REGISTER128; -+ } -+ EFHW_ASSERT(evq < nic->num_evqs); -+ -+ EFHW_TRACE("%s: evq %u mode %x (%s) time %x -> %08x", -+ __func__, evq, mode, -+ mode == 0 ? "DISABLE" : -+ mode == 1 ? "IMMED" : -+ mode == 2 ? (evq < 5 ? "HOLDOFF" : "RX_TRIG") : -+ "", countdown, val); -+ -+ /* Falcon requires 128 bit atomic access for this register when -+ * accessed from the driver. User access to timers is paged mapped -+ */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + offset, val, FALCON_ATOMIC_TIMER_CMD_REG); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+ return; -+} -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * Rate pacing - Low level interface -+ * -+ *--------------------------------------------------------------------*/ -+void falcon_nic_pace(struct efhw_nic *nic, uint dmaq, uint pace) -+{ -+ /* Pace specified in 2^(units of microseconds). 
This is the minimum -+ additional delay imposed over and above the IPG. -+ -+ Pacing only available on the virtual interfaces -+ */ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ ulong offset; -+ -+ if (pace > 20) -+ pace = 20; /* maxm supported value */ -+ -+ __DWCHCK(TX_PACE_LBN, TX_PACE_WIDTH); -+ __RANGECHCK(pace, TX_PACE_WIDTH); -+ -+ switch (nic->devtype.variant) { -+ case 'A': -+ EFHW_ASSERT(dmaq >= TX_PACE_TBL_FIRST_QUEUE_A1); -+ offset = TX_PACE_TBL_A1_OFST; -+ offset += (dmaq - TX_PACE_TBL_FIRST_QUEUE_A1) * 16; -+ break; -+ case 'B': -+ /* Would be nice to assert this, but as dmaq is unsigned and -+ * TX_PACE_TBL_FIRST_QUEUE_B0 is 0, it makes no sense -+ * EFHW_ASSERT(dmaq >= TX_PACE_TBL_FIRST_QUEUE_B0); -+ */ -+ offset = TX_PACE_TBL_B0_OFST; -+ offset += (dmaq - TX_PACE_TBL_FIRST_QUEUE_B0) * 16; -+ break; -+ default: -+ EFHW_ASSERT(0); -+ offset = 0; -+ break; -+ } -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + offset, pace, FALCON_ATOMIC_PACE_REG); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+ -+ EFHW_TRACE("%s: txq %d offset=%lx pace=2^%x", -+ __func__, dmaq, offset, pace); -+} -+ -+/*-------------------------------------------------------------------- -+ * -+ * Interrupt - Low level interface -+ * -+ *--------------------------------------------------------------------*/ -+ -+static void falcon_nic_handle_fatal_int(struct efhw_nic *nic) -+{ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ uint64_t val; -+ -+ offset = (efhw_kva + FATAL_INTR_REG_OFST); -+ -+ /* Falcon requires 32 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ val = readl(offset); -+ FALCON_LOCK_UNLOCK(nic); -+ -+ /* ?? BUG3249 - need to disable illegal address interrupt */ -+ /* ?? 
BUG3114 - need to backport interrupt storm protection code */ -+ EFHW_ERR("fatal interrupt: %s%s%s%s%s%s%s%s%s%s%s%s[%" PRIx64 "]", -+ val & (1 << PCI_BUSERR_INT_CHAR_LBN) ? "PCI-bus-error " : "", -+ val & (1 << SRAM_OOB_INT_CHAR_LBN) ? "SRAM-oob " : "", -+ val & (1 << BUFID_OOB_INT_CHAR_LBN) ? "bufid-oob " : "", -+ val & (1 << MEM_PERR_INT_CHAR_LBN) ? "int-parity " : "", -+ val & (1 << RBUF_OWN_INT_CHAR_LBN) ? "rx-bufid-own " : "", -+ val & (1 << TBUF_OWN_INT_CHAR_LBN) ? "tx-bufid-own " : "", -+ val & (1 << RDESCQ_OWN_INT_CHAR_LBN) ? "rx-desc-own " : "", -+ val & (1 << TDESCQ_OWN_INT_CHAR_LBN) ? "tx-desc-own " : "", -+ val & (1 << EVQ_OWN_INT_CHAR_LBN) ? "evq-own " : "", -+ val & (1 << EVFF_OFLO_INT_CHAR_LBN) ? "evq-fifo " : "", -+ val & (1 << ILL_ADR_INT_CHAR_LBN) ? "ill-addr " : "", -+ val & (1 << SRM_PERR_INT_CHAR_LBN) ? "sram-parity " : "", val); -+} -+ -+static void falcon_nic_interrupt_hw_enable(struct efhw_nic *nic) -+{ -+ FALCON_LOCK_DECL; -+ uint val; -+ volatile char __iomem *offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ EFHW_BUILD_ASSERT(DRV_INT_EN_CHAR_WIDTH == 1); -+ -+ if (nic->flags & NIC_FLAG_NO_INTERRUPT) -+ return; -+ -+ offset = (efhw_kva + INT_EN_REG_CHAR_OFST); -+ val = 1 << DRV_INT_EN_CHAR_LBN; -+ -+ EFHW_NOTICE("%s: %x -> %x", __func__, (int)(offset - efhw_kva), -+ val); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(offset, val, FALCON_ATOMIC_INT_EN_REG); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+static void falcon_nic_interrupt_hw_disable(struct efhw_nic *nic) -+{ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ EFHW_BUILD_ASSERT(SRAM_PERR_INT_KER_WIDTH == 1); -+ EFHW_BUILD_ASSERT(DRV_INT_EN_KER_LBN == 0); -+ EFHW_BUILD_ASSERT(SRAM_PERR_INT_CHAR_WIDTH == 1); -+ EFHW_BUILD_ASSERT(DRV_INT_EN_CHAR_LBN == 0); -+ EFHW_BUILD_ASSERT(SRAM_PERR_INT_KER_LBN == SRAM_PERR_INT_CHAR_LBN); -+ 
EFHW_BUILD_ASSERT(DRV_INT_EN_KER_LBN == DRV_INT_EN_CHAR_LBN); -+ -+ if (nic->flags & NIC_FLAG_NO_INTERRUPT) -+ return; -+ -+ offset = (efhw_kva + INT_EN_REG_CHAR_OFST); -+ -+ EFHW_NOTICE("%s: %x -> 0", __func__, (int)(offset - efhw_kva)); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(offset, 0, FALCON_ATOMIC_INT_EN_REG); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+static void falcon_nic_irq_addr_set(struct efhw_nic *nic, dma_addr_t dma_addr) -+{ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *offset; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ -+ offset = (efhw_kva + INT_ADR_REG_CHAR_OFST); -+ -+ EFHW_NOTICE("%s: %x -> " DMA_ADDR_T_FMT, __func__, -+ (int)(offset - efhw_kva), dma_addr); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(offset, dma_addr, FALCON_ATOMIC_INT_ADR_REG); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * RXDP - low level interface -+ * -+ *--------------------------------------------------------------------*/ -+ -+void -+falcon_nic_set_rx_usr_buf_size(struct efhw_nic *nic, int usr_buf_bytes) -+{ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ uint64_t val, val2, usr_buf_size = usr_buf_bytes / 32; -+ int rubs_lbn, rubs_width, roec_lbn; -+ -+ EFHW_BUILD_ASSERT(RX_CFG_REG_OFST == RX_CFG_REG_KER_OFST); -+ -+ switch (nic->devtype.variant) { -+ default: -+ EFHW_ASSERT(0); -+ /* Fall-through to avoid compiler warnings. 
*/ -+ case 'A': -+ rubs_lbn = RX_USR_BUF_SIZE_A1_LBN; -+ rubs_width = RX_USR_BUF_SIZE_A1_WIDTH; -+ roec_lbn = RX_OWNERR_CTL_A1_LBN; -+ break; -+ case 'B': -+ rubs_lbn = RX_USR_BUF_SIZE_B0_LBN; -+ rubs_width = RX_USR_BUF_SIZE_B0_WIDTH; -+ roec_lbn = RX_OWNERR_CTL_B0_LBN; -+ break; -+ } -+ -+ __DWCHCK(rubs_lbn, rubs_width); -+ __QWCHCK(roec_lbn, 1); -+ __RANGECHCK(usr_buf_size, rubs_width); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_read_qq(efhw_kva + RX_CFG_REG_OFST, &val, &val2); -+ -+ val &= ~((__FALCON_MASK64(rubs_width)) << rubs_lbn); -+ val |= (usr_buf_size << rubs_lbn); -+ -+ /* shouldn't be needed for a production driver */ -+ val |= ((uint64_t) 1 << roec_lbn); -+ -+ falcon_write_qq(efhw_kva + RX_CFG_REG_OFST, val, val2); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+} -+EXPORT_SYMBOL(falcon_nic_set_rx_usr_buf_size); -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * TXDP - low level interface -+ * -+ *--------------------------------------------------------------------*/ -+ -+_DEBUG_SYM_ void falcon_nic_tx_cfg(struct efhw_nic *nic, int unlocked) -+{ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ uint64_t val1, val2; -+ -+ EFHW_BUILD_ASSERT(TX_CFG_REG_OFST == TX_CFG_REG_KER_OFST); -+ __DWCHCK(TX_OWNERR_CTL_LBN, TX_OWNERR_CTL_WIDTH); -+ __DWCHCK(TX_NON_IP_DROP_DIS_LBN, TX_NON_IP_DROP_DIS_WIDTH); -+ -+ FALCON_LOCK_LOCK(nic); -+ falcon_read_qq(efhw_kva + TX_CFG_REG_OFST, &val1, &val2); -+ -+ /* Will flag fatal interrupts on owner id errors. 
This should not be -+ on for production code because there is otherwise a denial of -+ serivce attack possible */ -+ val1 |= (1 << TX_OWNERR_CTL_LBN); -+ -+ /* Setup user queue TCP/UDP only packet security */ -+ if (unlocked) -+ val1 |= (1 << TX_NON_IP_DROP_DIS_LBN); -+ else -+ val1 &= ~(1 << TX_NON_IP_DROP_DIS_LBN); -+ -+ falcon_write_qq(efhw_kva + TX_CFG_REG_OFST, val1, val2); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+/*-------------------------------------------------------------------- -+ * -+ * Random thresholds - Low level interface (Would like these to be op -+ * defaults wherever possible) -+ * -+ *--------------------------------------------------------------------*/ -+ -+void falcon_nic_pace_cfg(struct efhw_nic *nic, int fb_base, int bin_thresh) -+{ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ unsigned offset = 0; -+ uint64_t val; -+ -+ __DWCHCK(TX_PACE_FB_BASE_LBN, TX_PACE_FB_BASE_WIDTH); -+ __DWCHCK(TX_PACE_BIN_TH_LBN, TX_PACE_BIN_TH_WIDTH); -+ -+ switch (nic->devtype.variant) { -+ case 'A': offset = TX_PACE_REG_A1_OFST; break; -+ case 'B': offset = TX_PACE_REG_B0_OFST; break; -+ default: EFHW_ASSERT(0); break; -+ } -+ -+ val = (0x15 << TX_PACE_SB_NOTAF_LBN); -+ val |= (0xb << TX_PACE_SB_AF_LBN); -+ -+ val |= ((fb_base & __FALCON_MASK64(TX_PACE_FB_BASE_WIDTH)) << -+ TX_PACE_FB_BASE_LBN); -+ val |= ((bin_thresh & __FALCON_MASK64(TX_PACE_BIN_TH_WIDTH)) << -+ TX_PACE_BIN_TH_LBN); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + offset, val, 0); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+ -+/********************************************************************** -+ * Implementation of the HAL. 
******************************************** -+ **********************************************************************/ -+ -+/*---------------------------------------------------------------------------- -+ * -+ * Initialisation and configuration discovery -+ * -+ *---------------------------------------------------------------------------*/ -+ -+static int falcon_nic_init_irq_channel(struct efhw_nic *nic, int enable) -+{ -+ /* create a buffer for the irq channel */ -+ int rc; -+ -+ if (enable) { -+ rc = efhw_iopage_alloc(nic, &nic->irq_iobuff); -+ if (rc < 0) -+ return rc; -+ -+ falcon_nic_irq_addr_set(nic, -+ efhw_iopage_dma_addr(&nic->irq_iobuff)); -+ } else { -+ if (efhw_iopage_is_valid(&nic->irq_iobuff)) -+ efhw_iopage_free(nic, &nic->irq_iobuff); -+ -+ efhw_iopage_mark_invalid(&nic->irq_iobuff); -+ falcon_nic_irq_addr_set(nic, 0); -+ } -+ -+ EFHW_TRACE("%s: %lx %sable", __func__, -+ (unsigned long) efhw_iopage_dma_addr(&nic->irq_iobuff), -+ enable ? "en" : "dis"); -+ -+ return 0; -+} -+ -+static void falcon_nic_close_hardware(struct efhw_nic *nic) -+{ -+ /* check we are in possession of some hardware */ -+ if (!efhw_nic_have_hw(nic)) -+ return; -+ -+ falcon_nic_init_irq_channel(nic, 0); -+ falcon_nic_filter_dtor(nic); -+ -+ EFHW_NOTICE("%s:", __func__); -+} -+ -+static int -+falcon_nic_init_hardware(struct efhw_nic *nic, -+ struct efhw_ev_handler *ev_handlers, -+ const uint8_t *mac_addr, int non_irq_evq) -+{ -+ int rc; -+ -+ /* header sanity checks */ -+ FALCON_ASSERT_VALID(); -+ -+ /* Initialise supporting modules */ -+ rc = falcon_nic_filter_ctor(nic); -+ if (rc < 0) -+ return rc; -+ -+#if FALCON_USE_SHADOW_BUFFER_TABLE -+ CI_ZERO_ARRAY(_falcon_buffer_table, FALCON_BUFFER_TBL_NUM); -+#endif -+ -+ /* Initialise the top level hardware blocks */ -+ memcpy(nic->mac_addr, mac_addr, ETH_ALEN); -+ -+ EFHW_TRACE("%s:", __func__); -+ -+ /* nic.c:efhw_nic_init marks all the interrupt units as unused. -+ -+ ?? 
TODO we should be able to request the non-interrupting event -+ queue and the net driver's (for a net driver that is using libefhw) -+ additional RSS queues here. -+ -+ Result would be that that net driver could call -+ nic.c:efhw_nic_allocate_common_hardware_resources() and that the -+ IFDEF FALCON's can be removed from -+ nic.c:efhw_nic_allocate_common_hardware_resources() -+ */ -+ nic->irq_unit = INT_EN_REG_CHAR_OFST; -+ -+ /***************************************************************** -+ * The rest of this function deals with initialization of the NICs -+ * hardware (as opposed to the initialization of the -+ * struct efhw_nic data structure */ -+ -+ /* char driver grabs SRM events onto the non interrupting -+ * event queue */ -+ falcon_nic_srm_upd_evq(nic, non_irq_evq); -+ -+ /* RXDP tweaks */ -+ -+ /* ?? bug2396 rx_cfg should be ok so long as the net driver -+ * always pushes buffers big enough for the link MTU */ -+ -+ /* set the RX buffer cutoff size to be the same as PAGE_SIZE. -+ * Use this value when we think that there will be a lot of -+ * jumbo frames. -+ * -+ * The default value 1600 is useful when packets are small, -+ * but would means that jumbo frame RX queues would need more -+ * descriptors pushing */ -+ falcon_nic_set_rx_usr_buf_size(nic, FALCON_RX_USR_BUF_SIZE); -+ -+ /* TXDP tweaks */ -+ /* ?? bug2396 looks ok */ -+ falcon_nic_tx_cfg(nic, /*unlocked(for non-UDP/TCP)= */ 0); -+ falcon_nic_pace_cfg(nic, 4, 2); -+ -+ /* ?? 
bug2396 -+ * netdriver must load first or else must RMW this register */ -+ falcon_nic_rx_filter_ctl_set(nic, RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL, -+ RX_FILTER_CTL_SRCH_LIMIT_TCP_WILD, -+ RX_FILTER_CTL_SRCH_LIMIT_UDP_FULL, -+ RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD); -+ -+ if (!(nic->flags & NIC_FLAG_NO_INTERRUPT)) { -+ rc = efhw_keventq_ctor(nic, FALCON_EVQ_CHAR, -+ &nic->interrupting_evq, ev_handlers); -+ if (rc < 0) { -+ EFHW_ERR("%s: efhw_keventq_ctor() failed (%d) evq=%d", -+ __func__, rc, FALCON_EVQ_CHAR); -+ return rc; -+ } -+ } -+ rc = efhw_keventq_ctor(nic, non_irq_evq, -+ &nic->non_interrupting_evq, NULL); -+ if (rc < 0) { -+ EFHW_ERR("%s: efhw_keventq_ctor() failed (%d) evq=%d", -+ __func__, rc, non_irq_evq); -+ return rc; -+ } -+ -+ /* allocate IRQ channel */ -+ rc = falcon_nic_init_irq_channel(nic, 1); -+ /* ignore failure at user-level for eftest */ -+ if ((rc < 0) && !(nic->options & NIC_OPT_EFTEST)) -+ return rc; -+ -+ return 0; -+} -+ -+/*-------------------------------------------------------------------- -+ * -+ * Interrupt -+ * -+ *--------------------------------------------------------------------*/ -+ -+static void -+falcon_nic_interrupt_enable(struct efhw_nic *nic) -+{ -+ struct efhw_keventq *q; -+ unsigned rdptr; -+ -+ if (nic->flags & NIC_FLAG_NO_INTERRUPT) -+ return; -+ -+ /* Enable driver interrupts */ -+ EFHW_NOTICE("%s: enable master interrupt", __func__); -+ falcon_nic_interrupt_hw_enable(nic); -+ -+ /* An interrupting eventq must start of day ack its read pointer */ -+ q = &nic->interrupting_evq; -+ rdptr = EFHW_EVENT_OFFSET(q, q, 1) / sizeof(efhw_event_t); -+ falcon_nic_evq_ack(nic, FALCON_EVQ_CHAR, rdptr, false); -+ EFHW_NOTICE("%s: ACK evq[%d]:%x", __func__, -+ FALCON_EVQ_CHAR, rdptr); -+} -+ -+static void falcon_nic_interrupt_disable(struct efhw_nic *nic) -+{ -+ /* NB. No need to check for NIC_FLAG_NO_INTERRUPT, as -+ ** falcon_nic_interrupt_hw_disable() will do it. 
*/ -+ falcon_nic_interrupt_hw_disable(nic); -+} -+ -+static void -+falcon_nic_set_interrupt_moderation(struct efhw_nic *nic, int evq, -+ uint32_t val) -+{ -+ if (evq < 0) -+ evq = FALCON_EVQ_CHAR; -+ -+ falcon_ab_timer_tbl_set(nic, evq, TIMER_MODE_INT_HLDOFF, val / 5); -+} -+ -+static inline void legacy_irq_ack(struct efhw_nic *nic) -+{ -+ EFHW_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT)); -+ -+ if (!(nic->flags & NIC_FLAG_MSI)) { -+ writel(1, EFHW_KVA(nic) + INT_ACK_REG_CHAR_A1_OFST); -+ mmiowb(); -+ /* ?? FIXME: We should be doing a read here to ensure IRQ is -+ * thoroughly acked before we return from ISR. */ -+ } -+} -+ -+static int falcon_nic_interrupt(struct efhw_nic *nic) -+{ -+ uint32_t *syserr_ptr = -+ (uint32_t *) efhw_iopage_ptr(&nic->irq_iobuff); -+ int handled = 0; -+ int done_ack = 0; -+ -+ EFHW_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT)); -+ EFHW_ASSERT(syserr_ptr); -+ -+ /* FIFO fill level interrupt - just log it. */ -+ if (unlikely(*(syserr_ptr + (DW0_OFST / 4)))) { -+ EFHW_WARN("%s: *** FIFO *** %x", __func__, -+ *(syserr_ptr + (DW0_OFST / 4))); -+ *(syserr_ptr + (DW0_OFST / 4)) = 0; -+ handled++; -+ } -+ -+ /* Fatal interrupts. */ -+ if (unlikely(*(syserr_ptr + (DW2_OFST / 4)))) { -+ *(syserr_ptr + (DW2_OFST / 4)) = 0; -+ falcon_nic_handle_fatal_int(nic); -+ handled++; -+ } -+ -+ /* Event queue interrupt. For legacy interrupts we have to check -+ * that the interrupt is for us, because it could be shared. */ -+ if (*(syserr_ptr + (DW1_OFST / 4))) { -+ *(syserr_ptr + (DW1_OFST / 4)) = 0; -+ /* ACK must come before callback to handler fn. */ -+ legacy_irq_ack(nic); -+ done_ack = 1; -+ handled++; -+ if (nic->irq_handler) -+ nic->irq_handler(nic, 0); -+ } -+ -+ if (unlikely(!done_ack)) { -+ if (!handled) -+ /* Shared interrupt line (hopefully). 
*/ -+ return 0; -+ legacy_irq_ack(nic); -+ } -+ -+ EFHW_TRACE("%s: handled %d", __func__, handled); -+ return 1; -+} -+ -+/*-------------------------------------------------------------------- -+ * -+ * Event Management - and SW event posting -+ * -+ *--------------------------------------------------------------------*/ -+ -+static void -+falcon_nic_event_queue_enable(struct efhw_nic *nic, uint evq, uint evq_size, -+ dma_addr_t q_base_addr, /* not used */ -+ uint buf_base_id, int interrupting) -+{ -+ EFHW_ASSERT(nic); -+ -+ /* Whether or not queue has an interrupt depends on -+ * instance number and h/w variant, so [interrupting] is -+ * ignored. -+ */ -+ falcon_ab_timer_tbl_set(nic, evq, 0/*disable*/, 0); -+ -+ falcon_nic_evq_ptr_tbl(nic, evq, 1, buf_base_id, evq_size); -+ EFHW_TRACE("%s: enable evq %u size %u", __func__, evq, evq_size); -+} -+ -+static void -+falcon_nic_event_queue_disable(struct efhw_nic *nic, uint evq, int timer_only) -+{ -+ EFHW_ASSERT(nic); -+ -+ falcon_ab_timer_tbl_set(nic, evq, 0 /* disable */ , 0); -+ -+ if (!timer_only) -+ falcon_nic_evq_ptr_tbl(nic, evq, 0, 0, 0); -+ EFHW_TRACE("%s: disenable evq %u", __func__, evq); -+} -+ -+static void -+falcon_nic_wakeup_request(struct efhw_nic *nic, dma_addr_t q_base_addr, -+ int next_i, int evq) -+{ -+ EFHW_ASSERT(evq > FALCON_EVQ_CHAR); -+ falcon_nic_evq_ack(nic, evq, next_i, true); -+ EFHW_TRACE("%s: evq %d next_i %d", __func__, evq, next_i); -+} -+ -+static void falcon_nic_sw_event(struct efhw_nic *nic, int data, int evq) -+{ -+ uint64_t ev_data = data; -+ -+ ev_data &= ~FALCON_EVENT_CODE_MASK; -+ ev_data |= FALCON_EVENT_CODE_SW; -+ -+ falcon_drv_ev(nic, ev_data, evq); -+ EFHW_NOTICE("%s: evq[%d]->%x", __func__, evq, data); -+} -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * Buffer table - helpers -+ * -+ *--------------------------------------------------------------------*/ -+ -+#define FALCON_LAZY_COMMIT_HWM (FALCON_BUFFER_UPD_MAX - 16) -+ -+/* Note 
re.: -+ * falcon_nic_buffer_table_lazy_commit(struct efhw_nic *nic) -+ * falcon_nic_buffer_table_update_poll(struct efhw_nic *nic) -+ * falcon_nic_buffer_table_confirm(struct efhw_nic *nic) -+ * -- these are no-ops in the user-level driver because it would need to -+ * coordinate with the real driver on the number of outstanding commits. -+ * -+ * An exception is made for eftest apps, which manage the hardware without -+ * using the char driver. -+ */ -+ -+static inline void falcon_nic_buffer_table_lazy_commit(struct efhw_nic *nic) -+{ -+ /* Do nothing if operating in synchronous mode. */ -+ if (!nic->irq_handler) -+ return; -+} -+ -+static inline void falcon_nic_buffer_table_update_poll(struct efhw_nic *nic) -+{ -+ FALCON_LOCK_DECL; -+ int count = 0, rc = 0; -+ -+ /* We can be called here early days */ -+ if (!nic->irq_handler) -+ return; -+ -+ /* If we need to gather buffer update events then poll the -+ non-interrupting event queue */ -+ -+ /* For each _buffer_table_commit there will be an update done -+ event. 
We don't keep track of how many buffers each commit has -+ committed, just make sure that all the expected events have been -+ gathered */ -+ FALCON_LOCK_LOCK(nic); -+ -+ EFHW_TRACE("%s: %d", __func__, nic->buf_commit_outstanding); -+ -+ while (nic->buf_commit_outstanding > 0) { -+ /* we're not expecting to handle any events that require -+ * upcalls into the core driver */ -+ struct efhw_ev_handler handler; -+ memset(&handler, 0, sizeof(handler)); -+ nic->non_interrupting_evq.ev_handlers = &handler; -+ rc = efhw_keventq_poll(nic, &nic->non_interrupting_evq); -+ nic->non_interrupting_evq.ev_handlers = NULL; -+ -+ if (rc < 0) { -+ EFHW_ERR("%s: poll ERROR (%d:%d) ***** ", -+ __func__, rc, -+ nic->buf_commit_outstanding); -+ goto out; -+ } -+ -+ FALCON_LOCK_UNLOCK(nic); -+ -+ if (count++) -+ udelay(1); -+ -+ if (count > 1000) { -+ EFHW_WARN("%s: poll Timeout ***** (%d)", __func__, -+ nic->buf_commit_outstanding); -+ nic->buf_commit_outstanding = 0; -+ return; -+ } -+ FALCON_LOCK_LOCK(nic); -+ } -+ -+out: -+ FALCON_LOCK_UNLOCK(nic); -+ return; -+} -+ -+void falcon_nic_buffer_table_confirm(struct efhw_nic *nic) -+{ -+ /* confirm buffer table updates - should be used for items where -+ loss of data would be unacceptable. E.g for the buffers that back -+ an event or DMA queue */ -+ FALCON_LOCK_DECL; -+ -+ /* Do nothing if operating in synchronous mode. 
*/ -+ if (!nic->irq_handler) -+ return; -+ -+ FALCON_LOCK_LOCK(nic); -+ -+ _falcon_nic_buffer_table_commit(nic); -+ -+ FALCON_LOCK_UNLOCK(nic); -+ -+ falcon_nic_buffer_table_update_poll(nic); -+} -+ -+/*-------------------------------------------------------------------- -+ * -+ * Buffer table - API -+ * -+ *--------------------------------------------------------------------*/ -+ -+static void -+falcon_nic_buffer_table_clear(struct efhw_nic *nic, int buffer_id, int num) -+{ -+ FALCON_LOCK_DECL; -+ FALCON_LOCK_LOCK(nic); -+ _falcon_nic_buffer_table_clear(nic, buffer_id, num); -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+static void -+falcon_nic_buffer_table_set(struct efhw_nic *nic, dma_addr_t dma_addr, -+ uint bufsz, uint region, -+ int own_id, int buffer_id) -+{ -+ FALCON_LOCK_DECL; -+ -+ EFHW_ASSERT(region < FALCON_REGION_NUM); -+ -+ EFHW_ASSERT((bufsz == EFHW_4K) || -+ (bufsz == EFHW_8K && FALCON_BUFFER_TABLE_FULL_MODE)); -+ -+ falcon_nic_buffer_table_update_poll(nic); -+ -+ FALCON_LOCK_LOCK(nic); -+ -+ _falcon_nic_buffer_table_set(nic, dma_addr, bufsz, region, own_id, -+ buffer_id); -+ -+ falcon_nic_buffer_table_lazy_commit(nic); -+ -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+void -+falcon_nic_buffer_table_set_n(struct efhw_nic *nic, int buffer_id, -+ dma_addr_t dma_addr, uint bufsz, uint region, -+ int n_pages, int own_id) -+{ -+ /* used to set up a contiguous range of buffers */ -+ FALCON_LOCK_DECL; -+ -+ EFHW_ASSERT(region < FALCON_REGION_NUM); -+ -+ EFHW_ASSERT((bufsz == EFHW_4K) || -+ (bufsz == EFHW_8K && FALCON_BUFFER_TABLE_FULL_MODE)); -+ -+ while (n_pages--) { -+ -+ falcon_nic_buffer_table_update_poll(nic); -+ -+ FALCON_LOCK_LOCK(nic); -+ -+ _falcon_nic_buffer_table_set(nic, dma_addr, bufsz, region, -+ own_id, buffer_id++); -+ -+ falcon_nic_buffer_table_lazy_commit(nic); -+ -+ FALCON_LOCK_UNLOCK(nic); -+ -+ dma_addr += bufsz; -+ } -+} -+ -+/*-------------------------------------------------------------------- -+ * -+ * DMA Queues - mid level API -+ * -+ 
*--------------------------------------------------------------------*/ -+ -+#if BUG5302_WORKAROUND -+ -+/* Tx queues can get stuck if the software write pointer is set to an index -+ * beyond the configured size of the queue, such that they will not flush. -+ * This code can be run before attempting a flush; it will detect the bogus -+ * value and reset it. This fixes most instances of this problem, although -+ * sometimes it does not work, or we may not detect it in the first place, -+ * if the out-of-range value was replaced by an in-range value earlier. -+ * (In those cases we have to apply a bigger hammer later, if we see that -+ * the queue is still not flushing.) -+ */ -+static void -+falcon_check_for_bogus_tx_dma_wptr(struct efhw_nic *nic, uint dmaq) -+{ -+ FALCON_LOCK_DECL; -+ uint64_t val_low64, val_high64; -+ uint64_t size, hwptr, swptr, val; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ ulong offset = falcon_dma_tx_q_offset(nic, dmaq); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_read_qq(efhw_kva + offset, &val_low64, &val_high64); -+ FALCON_LOCK_UNLOCK(nic); -+ -+ size = (val_low64 >> TX_DESCQ_SIZE_LBN) -+ & __FALCON_MASK64(TX_DESCQ_SIZE_WIDTH); -+ size = (1 << size) * 512; -+ hwptr = (val_high64 >> __DW3(TX_DESCQ_HW_RPTR_LBN)) -+ & __FALCON_MASK64(TX_DESCQ_HW_RPTR_WIDTH); -+ swptr = (val_low64 >> TX_DESCQ_SW_WPTR_LBN) -+ & __FALCON_MASK64(__LW2(TX_DESCQ_SW_WPTR_LBN)); -+ val = (val_high64) -+ & -+ __FALCON_MASK64(__DW3 -+ (TX_DESCQ_SW_WPTR_LBN + TX_DESCQ_SW_WPTR_WIDTH)); -+ val = val << __LW2(TX_DESCQ_SW_WPTR_LBN); -+ swptr = swptr | val; -+ -+ if (swptr >= size) { -+ EFHW_WARN("Resetting bad write pointer for TXQ[%d]", dmaq); -+ writel((uint32_t) ((hwptr + 0) & (size - 1)), -+ efhw_kva + falcon_tx_dma_page_addr(dmaq) + 12); -+ mmiowb(); -+ } -+} -+ -+/* Here's that "bigger hammer": we reset all the pointers (hardware read, -+ * hardware descriptor cache read, software write) to zero. 
-+ */ -+void falcon_clobber_tx_dma_ptrs(struct efhw_nic *nic, uint dmaq) -+{ -+ FALCON_LOCK_DECL; -+ uint64_t val_low64, val_high64; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ ulong offset = falcon_dma_tx_q_offset(nic, dmaq); -+ -+ EFHW_WARN("Recovering stuck TXQ[%d]", dmaq); -+ FALCON_LOCK_LOCK(nic); -+ falcon_read_qq(efhw_kva + offset, &val_low64, &val_high64); -+ val_high64 &= ~(__FALCON_MASK64(TX_DESCQ_HW_RPTR_WIDTH) -+ << __DW3(TX_DESCQ_HW_RPTR_LBN)); -+ val_high64 &= ~(__FALCON_MASK64(TX_DC_HW_RPTR_WIDTH) -+ << __DW3(TX_DC_HW_RPTR_LBN)); -+ falcon_write_qq(efhw_kva + offset, val_low64, val_high64); -+ mmiowb(); -+ writel(0, efhw_kva + falcon_tx_dma_page_addr(dmaq) + 12); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+#endif -+ -+static inline int -+__falcon_really_flush_tx_dma_channel(struct efhw_nic *nic, uint dmaq) -+{ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ uint val; -+ -+ EFHW_BUILD_ASSERT(TX_FLUSH_DESCQ_REG_KER_OFST == -+ TX_FLUSH_DESCQ_REG_OFST); -+ -+ __DWCHCK(TX_FLUSH_DESCQ_CMD_LBN, TX_FLUSH_DESCQ_CMD_WIDTH); -+ __DWCHCK(TX_FLUSH_DESCQ_LBN, TX_FLUSH_DESCQ_WIDTH); -+ __RANGECHCK(dmaq, TX_FLUSH_DESCQ_WIDTH); -+ -+ val = ((1 << TX_FLUSH_DESCQ_CMD_LBN) | (dmaq << TX_FLUSH_DESCQ_LBN)); -+ -+ EFHW_TRACE("TX DMA flush[%d]", dmaq); -+ -+#if BUG5302_WORKAROUND -+ falcon_check_for_bogus_tx_dma_wptr(nic, dmaq); -+#endif -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + TX_FLUSH_DESCQ_REG_OFST, -+ val, FALCON_ATOMIC_TX_FLUSH_DESCQ); -+ -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+ return 0; -+} -+ -+static inline int -+__falcon_is_tx_dma_channel_flushed(struct efhw_nic *nic, uint dmaq) -+{ -+ FALCON_LOCK_DECL; -+ uint64_t val_low64, val_high64; -+ uint64_t enable, flush_pending; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ ulong offset = falcon_dma_tx_q_offset(nic, dmaq); -+ -+ /* Falcon requires 128 bit atomic access for this 
register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_read_qq(efhw_kva + offset, &val_low64, &val_high64); -+ FALCON_LOCK_UNLOCK(nic); -+ -+ /* should see one of three values for these 2 bits -+ * 1, queue enabled no flush pending -+ * - i.e. first flush request -+ * 2, queue enabled, flush pending -+ * - i.e. request to reflush before flush finished -+ * 3, queue disabled (no flush pending) -+ * - flush complete -+ */ -+ __DWCHCK(TX_DESCQ_FLUSH_LBN, TX_DESCQ_FLUSH_WIDTH); -+ __DW3CHCK(TX_DESCQ_EN_LBN, TX_DESCQ_EN_WIDTH); -+ enable = val_high64 & (1 << __DW3(TX_DESCQ_EN_LBN)); -+ flush_pending = val_low64 & (1 << TX_DESCQ_FLUSH_LBN); -+ -+ if (enable && !flush_pending) -+ return 0; -+ -+ EFHW_TRACE("%d, %s: %s, %sflush pending", dmaq, __func__, -+ enable ? "enabled" : "disabled", -+ flush_pending ? "" : "NO "); -+ /* still in progress */ -+ if (enable && flush_pending) -+ return -EALREADY; -+ -+ return -EAGAIN; -+} -+ -+static int falcon_flush_tx_dma_channel(struct efhw_nic *nic, uint dmaq) -+{ -+ int rc; -+ rc = __falcon_is_tx_dma_channel_flushed(nic, dmaq); -+ if (rc < 0) { -+ EFHW_WARN("%s: failed %d", __func__, rc); -+ return rc; -+ } -+ return __falcon_really_flush_tx_dma_channel(nic, dmaq); -+} -+ -+static int -+__falcon_really_flush_rx_dma_channel(struct efhw_nic *nic, uint dmaq) -+{ -+ FALCON_LOCK_DECL; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ uint val; -+ -+ EFHW_BUILD_ASSERT(RX_FLUSH_DESCQ_REG_KER_OFST == -+ RX_FLUSH_DESCQ_REG_OFST); -+ -+ __DWCHCK(RX_FLUSH_DESCQ_CMD_LBN, RX_FLUSH_DESCQ_CMD_WIDTH); -+ __DWCHCK(RX_FLUSH_DESCQ_LBN, RX_FLUSH_DESCQ_WIDTH); -+ __RANGECHCK(dmaq, RX_FLUSH_DESCQ_WIDTH); -+ -+ val = ((1 << RX_FLUSH_DESCQ_CMD_LBN) | (dmaq << RX_FLUSH_DESCQ_LBN)); -+ -+ EFHW_TRACE("RX DMA flush[%d]", dmaq); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_write_qq(efhw_kva + RX_FLUSH_DESCQ_REG_OFST, val, -+ FALCON_ATOMIC_RX_FLUSH_DESCQ); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+ return 
0; -+} -+ -+static inline int -+__falcon_is_rx_dma_channel_flushed(struct efhw_nic *nic, uint dmaq) -+{ -+ FALCON_LOCK_DECL; -+ uint64_t val; -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ ulong offset = falcon_dma_rx_q_offset(nic, dmaq); -+ -+ /* Falcon requires 128 bit atomic access for this register */ -+ FALCON_LOCK_LOCK(nic); -+ falcon_read_q(efhw_kva + offset, &val); -+ FALCON_LOCK_UNLOCK(nic); -+ -+ __DWCHCK(RX_DESCQ_EN_LBN, RX_DESCQ_EN_WIDTH); -+ -+ /* is it enabled? */ -+ return (val & (1 << RX_DESCQ_EN_LBN)) -+ ? 0 : -EAGAIN; -+} -+ -+static int falcon_flush_rx_dma_channel(struct efhw_nic *nic, uint dmaq) -+{ -+ int rc; -+ rc = __falcon_is_rx_dma_channel_flushed(nic, dmaq); -+ if (rc < 0) { -+ EFHW_ERR("%s: failed %d", __func__, rc); -+ return rc; -+ } -+ return __falcon_really_flush_rx_dma_channel(nic, dmaq); -+} -+ -+/*-------------------------------------------------------------------- -+ * -+ * Falcon specific event callbacks -+ * -+ *--------------------------------------------------------------------*/ -+ -+int -+falcon_handle_char_event(struct efhw_nic *nic, struct efhw_ev_handler *h, -+ efhw_event_t *ev) -+{ -+ EFHW_TRACE("DRIVER EVENT: "FALCON_EVENT_FMT, -+ FALCON_EVENT_PRI_ARG(*ev)); -+ -+ switch (FALCON_EVENT_DRIVER_SUBCODE(ev)) { -+ -+ case TX_DESCQ_FLS_DONE_EV_DECODE: -+ EFHW_TRACE("TX[%d] flushed", -+ (int)FALCON_EVENT_TX_FLUSH_Q_ID(ev)); -+ efhw_handle_txdmaq_flushed(nic, h, ev); -+ break; -+ -+ case RX_DESCQ_FLS_DONE_EV_DECODE: -+ EFHW_TRACE("RX[%d] flushed", -+ (int)FALCON_EVENT_TX_FLUSH_Q_ID(ev)); -+ efhw_handle_rxdmaq_flushed(nic, h, ev); -+ break; -+ -+ case SRM_UPD_DONE_EV_DECODE: -+ nic->buf_commit_outstanding = -+ max(0, nic->buf_commit_outstanding - 1); -+ EFHW_TRACE("COMMIT DONE %d", nic->buf_commit_outstanding); -+ break; -+ -+ case EVQ_INIT_DONE_EV_DECODE: -+ EFHW_TRACE("%sEVQ INIT", ""); -+ break; -+ -+ case WAKE_UP_EV_DECODE: -+ EFHW_TRACE("%sWAKE UP", ""); -+ efhw_handle_wakeup_event(nic, h, ev); -+ break; -+ -+ case 
TIMER_EV_DECODE: -+ EFHW_TRACE("%sTIMER", ""); -+ efhw_handle_timeout_event(nic, h, ev); -+ break; -+ -+ case RX_DESCQ_FLSFF_OVFL_EV_DECODE: -+ /* This shouldn't happen. */ -+ EFHW_ERR("%s: RX flush fifo overflowed", __func__); -+ return -EINVAL; -+ -+ default: -+ EFHW_TRACE("UNKOWN DRIVER EVENT: " FALCON_EVENT_FMT, -+ FALCON_EVENT_PRI_ARG(*ev)); -+ break; -+ } -+ return 0; -+} -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * Filter search depth control -+ * -+ *--------------------------------------------------------------------*/ -+ -+ -+#define Q0_READ(q0, name) \ -+ ((unsigned)(((q0) >> name##_LBN) & (__FALCON_MASK64(name##_WIDTH)))) -+#define Q0_MASK(name) \ -+ ((__FALCON_MASK64(name##_WIDTH)) << name##_LBN) -+#define Q0_VALUE(name, value) \ -+ (((uint64_t)(value)) << name##_LBN) -+ -+#define Q1_READ(q1, name) \ -+ ((unsigned)(((q1) >> (name##_LBN - 64)) & \ -+ (__FALCON_MASK64(name##_WIDTH)))) -+#define Q1_MASK(name) \ -+ ((__FALCON_MASK64(name##_WIDTH)) << (name##_LBN - 64)) -+#define Q1_VALUE(name, value) \ -+ (((uint64_t)(value)) << (name##_LBN - 64)) -+ -+ -+void -+falcon_nic_get_rx_filter_search_limits(struct efhw_nic *nic, -+ struct efhw_filter_search_limits *lim, -+ int use_raw_values) -+{ -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ FALCON_LOCK_DECL; -+ uint64_t q0, q1; -+ unsigned ff = (use_raw_values ? 0 : RX_FILTER_CTL_SRCH_FUDGE_FULL); -+ unsigned wf = (use_raw_values ? 
0 : RX_FILTER_CTL_SRCH_FUDGE_WILD); -+ -+ FALCON_LOCK_LOCK(nic); -+ falcon_read_qq(efhw_kva + RX_FILTER_CTL_REG_OFST, &q0, &q1); -+ FALCON_LOCK_UNLOCK(nic); -+ -+ lim->tcp_full = Q0_READ(q0, TCP_FULL_SRCH_LIMIT) - ff; -+ lim->tcp_wild = Q0_READ(q0, TCP_WILD_SRCH_LIMIT) - wf; -+ lim->udp_full = Q0_READ(q0, UDP_FULL_SRCH_LIMIT) - ff; -+ lim->udp_wild = Q0_READ(q0, UDP_WILD_SRCH_LIMIT) - wf; -+} -+EXPORT_SYMBOL(falcon_nic_get_rx_filter_search_limits); -+ -+ -+void -+falcon_nic_set_rx_filter_search_limits(struct efhw_nic *nic, -+ struct efhw_filter_search_limits *lim, -+ int use_raw_values) -+{ -+ volatile char __iomem *efhw_kva = EFHW_KVA(nic); -+ FALCON_LOCK_DECL; -+ uint64_t q0, q1; -+ unsigned ff = (use_raw_values ? 0 : RX_FILTER_CTL_SRCH_FUDGE_FULL); -+ unsigned wf = (use_raw_values ? 0 : RX_FILTER_CTL_SRCH_FUDGE_WILD); -+ -+ FALCON_LOCK_LOCK(nic); -+ falcon_read_qq(efhw_kva + RX_FILTER_CTL_REG_OFST, &q0, &q1); -+ -+ q0 &= ~Q0_MASK(TCP_FULL_SRCH_LIMIT); -+ q0 &= ~Q0_MASK(TCP_WILD_SRCH_LIMIT); -+ q0 &= ~Q0_MASK(UDP_FULL_SRCH_LIMIT); -+ q0 &= ~Q0_MASK(UDP_WILD_SRCH_LIMIT); -+ q0 |= Q0_VALUE(TCP_FULL_SRCH_LIMIT, lim->tcp_full + ff); -+ q0 |= Q0_VALUE(TCP_WILD_SRCH_LIMIT, lim->tcp_wild + wf); -+ q0 |= Q0_VALUE(UDP_FULL_SRCH_LIMIT, lim->udp_full + ff); -+ q0 |= Q0_VALUE(UDP_WILD_SRCH_LIMIT, lim->udp_wild + wf); -+ nic->tcp_full_srch.max = lim->tcp_full + ff -+ - RX_FILTER_CTL_SRCH_FUDGE_FULL; -+ nic->tcp_wild_srch.max = lim->tcp_wild + wf -+ - RX_FILTER_CTL_SRCH_FUDGE_WILD; -+ nic->udp_full_srch.max = lim->udp_full + ff -+ - RX_FILTER_CTL_SRCH_FUDGE_FULL; -+ nic->udp_wild_srch.max = lim->udp_wild + wf -+ - RX_FILTER_CTL_SRCH_FUDGE_WILD; -+ -+ falcon_write_qq(efhw_kva + RX_FILTER_CTL_REG_OFST, q0, q1); -+ mmiowb(); -+ FALCON_LOCK_UNLOCK(nic); -+} -+EXPORT_SYMBOL(falcon_nic_set_rx_filter_search_limits); -+ -+ -+#undef READ_Q0 -+#undef Q0_MASK -+#undef Q0_VALUE -+#undef READ_Q1 -+#undef Q1_MASK -+#undef Q1_VALUE -+ -+ 
-+/*-------------------------------------------------------------------- -+ * -+ * New unified filter API -+ * -+ *--------------------------------------------------------------------*/ -+ -+ -+#if FALCON_FULL_FILTER_CACHE -+static inline struct efhw_filter_spec * -+filter_spec_cache_entry(struct efhw_nic *nic, int filter_idx) -+{ -+ EFHW_ASSERT(nic->filter_spec_cache); -+ return &nic->filter_spec_cache[filter_idx]; -+} -+#endif -+ -+ -+static int filter_is_active(struct efhw_nic *nic, int filter_idx) -+{ -+ return nic->filter_in_use[filter_idx]; -+} -+ -+ -+static void set_filter_cache_entry(struct efhw_nic *nic, -+ struct efhw_filter_spec *spec, -+ int filter_idx) -+{ -+ nic->filter_in_use[filter_idx] = 1; -+#if FALCON_FULL_FILTER_CACHE -+ memcpy(filter_spec_cache_entry(nic, filter_idx), spec, -+ sizeof(struct efhw_filter_spec)); -+#endif -+} -+ -+ -+static void clear_filter_cache_entry(struct efhw_nic *nic, -+ int filter_idx) -+{ -+ nic->filter_in_use[filter_idx] = 0; -+#if FALCON_FULL_FILTER_CACHE -+ memset(filter_spec_cache_entry(nic, filter_idx), 0, -+ sizeof(struct efhw_filter_spec)); -+#endif -+} -+ -+ -+#if FALCON_FULL_FILTER_CACHE -+static int filter_is_duplicate(struct efhw_nic *nic, -+ struct efhw_filter_spec *spec, int filter_idx) -+{ -+ struct efhw_filter_spec *cmp; -+ -+ cmp = filter_spec_cache_entry(nic, filter_idx); -+ -+ EFHW_ASSERT(filter_is_active(nic, filter_idx)); -+ -+ return (spec->saddr_le32 == cmp->saddr_le32) && -+ (spec->daddr_le32 == cmp->daddr_le32) && -+ (spec->sport_le16 == cmp->sport_le16) && -+ (spec->dport_le16 == cmp->dport_le16) && -+ (spec->tcp == cmp->tcp) && -+ (spec->full == cmp->full); -+} -+#endif -+ -+ -+static void common_build_ip_filter(struct efhw_nic *nic, int tcp, int full, -+ int rss, int scatter, uint dmaq_id, -+ unsigned saddr_le32, unsigned sport_le16, -+ unsigned daddr_le32, unsigned dport_le16, -+ uint64_t *q0, uint64_t *q1) -+{ -+ uint64_t v1, v2, v3, v4; -+ unsigned tmp_port_le16; -+ -+ if (!full) { -+ 
saddr_le32 = 0; -+ sport_le16 = 0; -+ if (!tcp) { -+ tmp_port_le16 = sport_le16; -+ sport_le16 = dport_le16; -+ dport_le16 = tmp_port_le16; -+ } -+ } -+ -+ v4 = (((!tcp) << __DW4(TCP_UDP_0_LBN)) | -+ (dmaq_id << __DW4(RXQ_ID_0_LBN))); -+ -+ switch (nic->devtype.variant) { -+ case 'A': -+ EFHW_ASSERT(!rss); -+ break; -+ case 'B': -+ v4 |= scatter << __DW4(SCATTER_EN_0_B0_LBN); -+ v4 |= rss << __DW4(RSS_EN_0_B0_LBN); -+ break; -+ default: -+ EFHW_ASSERT(0); -+ break; -+ } -+ -+ v3 = daddr_le32; -+ v2 = ((dport_le16 << __DW2(DEST_PORT_TCP_0_LBN)) | -+ (__HIGH(saddr_le32, SRC_IP_0_LBN, SRC_IP_0_WIDTH))); -+ v1 = ((__LOW(saddr_le32, SRC_IP_0_LBN, SRC_IP_0_WIDTH)) | -+ (sport_le16 << SRC_TCP_DEST_UDP_0_LBN)); -+ -+ *q0 = (v2 << 32) | v1; -+ *q1 = (v4 << 32) | v3; -+} -+ -+ -+static void build_filter(struct efhw_nic *nic, struct efhw_filter_spec *spec, -+ unsigned *key, unsigned *tbl_size, -+ struct efhw_filter_depth **depth, -+ uint64_t *q0, uint64_t *q1) -+{ -+ *key = falcon_hash_get_ip_key(spec->saddr_le32, -+ spec->sport_le16, -+ spec->daddr_le32, -+ spec->dport_le16, -+ spec->tcp, -+ spec->full); -+ *tbl_size = nic->ip_filter_tbl_size; -+ if (spec->tcp && spec->full) -+ *depth = &nic->tcp_full_srch; -+ else if (spec->tcp && !spec->full) -+ *depth = &nic->tcp_wild_srch; -+ else if (!spec->tcp && spec->full) -+ *depth = &nic->udp_full_srch; -+ else -+ *depth = &nic->udp_wild_srch; -+ common_build_ip_filter(nic, spec->tcp, spec->full, -+ spec->rss, spec->scatter, -+ spec->dmaq_id, -+ spec->saddr_le32, -+ spec->sport_le16, -+ spec->daddr_le32, -+ spec->dport_le16, -+ q0, q1); -+} -+ -+ -+#if FALCON_VERIFY_FILTERS -+static void verify_filters(struct efhw_nic *nic) -+{ -+ unsigned table_offset, table_stride; -+ unsigned i, dummy_key, dummy_tbl_size; -+ struct efhw_filter_depth *dummy_depth; -+ unsigned filter_tbl_size; -+ struct efhw_filter_spec *spec; -+ uint64_t q0_expect, q1_expect, q0_got, q1_got; -+ -+ filter_tbl_size = nic->ip_filter_tbl_size; -+ table_offset = 
RX_FILTER_TBL0_OFST; -+ table_stride = 2 * FALCON_REGISTER128; -+ -+ for (i = 0; i < filter_tbl_size; i++) { -+ if (!filter_is_active(nic, type, i)) -+ continue; -+ -+ spec = filter_spec_cache_entry(nic, type, i); -+ -+ build_filter(nic, spec, &dummy_key, &dummy_tbl_size, -+ &dummy_depth, &q0_expect, &q1_expect); -+ -+ falcon_read_qq(EFHW_KVA(nic) + table_offset + i * table_stride, -+ &q0_got, &q1_got); -+ -+ if ((q0_got != q0_expect) || (q1_got != q1_expect)) { -+ falcon_write_qq(EFHW_KVA(nic) + 0x300, -+ q0_got, q1_got); -+ EFHW_ERR("ERROR: RX-filter[%d][%d] was " -+ "%"PRIx64":%" PRIx64" expected " -+ "%"PRIx64":%"PRIx64, -+ nic->index, i, q0_got, q1_got, -+ q0_expect, q1_expect); -+ } -+ } -+} -+#endif -+ -+ -+static void write_filter_table_entry(struct efhw_nic *nic, -+ unsigned filter_idx, -+ uint64_t q0, uint64_t q1) -+{ -+ unsigned table_offset, table_stride, offset; -+ -+ EFHW_ASSERT(filter_idx < nic->ip_filter_tbl_size); -+ table_offset = RX_FILTER_TBL0_OFST; -+ table_stride = 2 * FALCON_REGISTER128; -+ -+ offset = table_offset + filter_idx * table_stride; -+ falcon_write_qq(EFHW_KVA(nic) + offset, q0, q1); -+ mmiowb(); -+ -+#if FALCON_VERIFY_FILTERS -+ { -+ uint64_t q0read, q1read; -+ -+ /* Read a different entry first - ensure BIU flushed shadow */ -+ falcon_read_qq(EFHW_KVA(nic) + offset + 0x10, &q0read, &q1read); -+ falcon_read_qq(EFHW_KVA(nic) + offset, &q0read, &q1read); -+ EFHW_ASSERT(q0read == q0); -+ EFHW_ASSERT(q1read == q1); -+ -+ verify_filters(nic, type); -+ } -+#endif -+} -+ -+ -+static int falcon_nic_filter_set(struct efhw_nic *nic, -+ struct efhw_filter_spec *spec, -+ int *filter_idx_out) -+{ -+ FALCON_LOCK_DECL; -+ unsigned key = 0, tbl_size = 0, hash1, hash2, k; -+ struct efhw_filter_depth *depth = NULL; -+ int filter_idx = -1; -+ int rc = 0; -+ uint64_t q0, q1; -+ -+ build_filter(nic, spec, &key, &tbl_size, &depth, &q0, &q1); -+ -+ if (tbl_size == 0) -+ return -EINVAL; -+ -+ EFHW_TRACE("%s: depth->max=%d", __func__, depth->max); -+ -+ 
hash1 = falcon_hash_function1(key, tbl_size); -+ hash2 = falcon_hash_function2(key, tbl_size); -+ -+ FALCON_LOCK_LOCK(nic); -+ -+ for (k = 0; k < depth->max; k++) { -+ filter_idx = falcon_hash_iterator(hash1, hash2, k, tbl_size); -+ if (!filter_is_active(nic, filter_idx)) -+ break; -+#if FALCON_FULL_FILTER_CACHE -+ if (filter_is_duplicate(nic, spec, filter_idx)) { -+ EFHW_WARN("%s: ERROR: duplicate filter (disabling " -+ "interrupts)", __func__); -+ falcon_nic_interrupt_hw_disable(nic); -+ rc = -EINVAL; -+ goto fail1; -+ } -+#endif -+ } -+ if (k == depth->max) { -+ rc = -EADDRINUSE; -+ filter_idx = -1; -+ goto fail1; -+ } else if (depth->needed < (k + 1)) { -+ depth->needed = k + 1; -+ } -+ -+ EFHW_ASSERT(filter_idx < (int)tbl_size); -+ -+ set_filter_cache_entry(nic, spec, filter_idx); -+ write_filter_table_entry(nic, filter_idx, q0, q1); -+ -+ ++nic->ip_filter_tbl_used; -+ -+ *filter_idx_out = filter_idx; -+ -+ EFHW_TRACE("%s: filter index %d rxq %u set in %u", -+ __func__, filter_idx, spec->dmaq_id, k); -+ -+fail1: -+ FALCON_LOCK_UNLOCK(nic); -+ return rc; -+} -+ -+ -+static void falcon_nic_filter_clear(struct efhw_nic *nic, -+ int filter_idx) -+{ -+ FALCON_LOCK_DECL; -+ -+ if (filter_idx < 0) -+ return; -+ -+ FALCON_LOCK_LOCK(nic); -+ if (filter_is_active(nic, filter_idx)) { -+ if (--nic->ip_filter_tbl_used == 0) { -+ nic->tcp_full_srch.needed = 0; -+ nic->tcp_wild_srch.needed = 0; -+ nic->udp_full_srch.needed = 0; -+ nic->udp_wild_srch.needed = 0; -+ } -+ } -+ clear_filter_cache_entry(nic, filter_idx); -+ write_filter_table_entry(nic, filter_idx, 0, 0); -+ FALCON_LOCK_UNLOCK(nic); -+} -+ -+ -+int -+falcon_nic_filter_ctor(struct efhw_nic *nic) -+{ -+ nic->ip_filter_tbl_size = 8 * 1024; -+ nic->ip_filter_tbl_used = 0; -+ -+ nic->tcp_full_srch.needed = 0; -+ nic->tcp_full_srch.max = RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL -+ - RX_FILTER_CTL_SRCH_FUDGE_FULL; -+ nic->tcp_wild_srch.needed = 0; -+ nic->tcp_wild_srch.max = RX_FILTER_CTL_SRCH_LIMIT_TCP_WILD -+ - 
RX_FILTER_CTL_SRCH_FUDGE_WILD; -+ nic->udp_full_srch.needed = 0; -+ nic->udp_full_srch.max = RX_FILTER_CTL_SRCH_LIMIT_UDP_FULL -+ - RX_FILTER_CTL_SRCH_FUDGE_FULL; -+ nic->udp_wild_srch.needed = 0; -+ nic->udp_wild_srch.max = RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD -+ - RX_FILTER_CTL_SRCH_FUDGE_WILD; -+ -+ nic->filter_in_use = vmalloc(FALCON_FILTER_TBL_NUM); -+ if (nic->filter_in_use == NULL) -+ return -ENOMEM; -+ memset(nic->filter_in_use, 0, FALCON_FILTER_TBL_NUM); -+#if FALCON_FULL_FILTER_CACHE -+ nic->filter_spec_cache = vmalloc(FALCON_FILTER_TBL_NUM -+ * sizeof(struct efhw_filter_spec)); -+ if (nic->filter_spec_cache == NULL) -+ return -ENOMEM; -+ memset(nic->filter_spec_cache, 0, FALCON_FILTER_TBL_NUM -+ * sizeof(struct efhw_filter_spec)); -+#endif -+ -+ return 0; -+} -+ -+ -+void -+falcon_nic_filter_dtor(struct efhw_nic *nic) -+{ -+#if FALCON_FULL_FILTER_CACHE -+ if (nic->filter_spec_cache) -+ vfree(nic->filter_spec_cache); -+#endif -+ if (nic->filter_in_use) -+ vfree(nic->filter_in_use); -+} -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * Compatibility with old filter API -+ * -+ *--------------------------------------------------------------------*/ -+ -+void -+falcon_nic_rx_filter_ctl_get(struct efhw_nic *nic, uint32_t *tcp_full, -+ uint32_t *tcp_wild, -+ uint32_t *udp_full, uint32_t *udp_wild) -+{ -+ struct efhw_filter_search_limits lim; -+ -+ falcon_nic_get_rx_filter_search_limits(nic, &lim, 0); -+ *tcp_full = (uint32_t)lim.tcp_full; -+ *tcp_wild = (uint32_t)lim.tcp_wild; -+ *udp_full = (uint32_t)lim.udp_full; -+ *udp_wild = (uint32_t)lim.udp_wild; -+} -+EXPORT_SYMBOL(falcon_nic_rx_filter_ctl_get); -+ -+ -+void -+falcon_nic_rx_filter_ctl_set(struct efhw_nic *nic, uint32_t tcp_full, -+ uint32_t tcp_wild, -+ uint32_t udp_full, uint32_t udp_wild) -+{ -+ struct efhw_filter_search_limits lim; -+ -+ lim.tcp_full = (unsigned)tcp_full; -+ lim.tcp_wild = (unsigned)tcp_wild; -+ lim.udp_full = (unsigned)udp_full; -+ lim.udp_wild = 
(unsigned)udp_wild; -+ falcon_nic_set_rx_filter_search_limits(nic, &lim, 0); -+} -+EXPORT_SYMBOL(falcon_nic_rx_filter_ctl_set); -+ -+ -+static int -+falcon_nic_ipfilter_set(struct efhw_nic *nic, int type, int *_filter_idx, -+ int dmaq, -+ unsigned saddr_be32, unsigned sport_be16, -+ unsigned daddr_be32, unsigned dport_be16) -+{ -+ struct efhw_filter_spec spec; -+ -+ spec.dmaq_id = dmaq; -+ spec.saddr_le32 = ntohl(saddr_be32); -+ spec.daddr_le32 = ntohl(daddr_be32); -+ spec.sport_le16 = ntohs((unsigned short) sport_be16); -+ spec.dport_le16 = ntohs((unsigned short) dport_be16); -+ spec.tcp = ((type & EFHW_IP_FILTER_TYPE_TCP_MASK) != 0); -+ spec.full = ((type & EFHW_IP_FILTER_TYPE_FULL_MASK) != 0); -+ spec.rss = ((type & EFHW_IP_FILTER_TYPE_RSS_B0_MASK) != 0); -+ spec.scatter = ((type & EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK) == 0); -+ return falcon_nic_filter_set(nic, &spec, _filter_idx); -+} -+ -+static void falcon_nic_ipfilter_clear(struct efhw_nic *nic, int filter_idx) -+{ -+ falcon_nic_filter_clear(nic, filter_idx); -+} -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * Abstraction Layer Hooks -+ * -+ *--------------------------------------------------------------------*/ -+ -+struct efhw_func_ops falcon_char_functional_units = { -+ falcon_nic_close_hardware, -+ falcon_nic_init_hardware, -+ falcon_nic_interrupt, -+ falcon_nic_interrupt_enable, -+ falcon_nic_interrupt_disable, -+ falcon_nic_set_interrupt_moderation, -+ falcon_nic_event_queue_enable, -+ falcon_nic_event_queue_disable, -+ falcon_nic_wakeup_request, -+ falcon_nic_sw_event, -+ falcon_nic_ipfilter_set, -+ falcon_nic_ipfilter_clear, -+ falcon_dmaq_tx_q_init, -+ falcon_dmaq_rx_q_init, -+ falcon_dmaq_tx_q_disable, -+ falcon_dmaq_rx_q_disable, -+ falcon_flush_tx_dma_channel, -+ falcon_flush_rx_dma_channel, -+ falcon_nic_buffer_table_set, -+ falcon_nic_buffer_table_set_n, -+ falcon_nic_buffer_table_clear, -+ falcon_nic_buffer_table_commit, -+ falcon_nic_filter_set, -+ 
falcon_nic_filter_clear, -+}; -+ -+ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/falcon_hash.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,159 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains EtherFabric NIC hash algorithms implementation. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+ -+ -+static unsigned int -+common_get_ip_key(unsigned int src_ip, unsigned int src_port, -+ unsigned int dest_ip, unsigned int dest_port, -+ int tcp, int full, int tx, unsigned int masked_q_id) -+{ -+ -+ unsigned int tmp_port, result; -+ -+ EFHW_ASSERT(tcp == 0 || tcp == 1); -+ EFHW_ASSERT(full == 0 || full == 1); -+ EFHW_ASSERT(masked_q_id < (1 << 10)); -+ -+ /* m=masked_q_id(TX)/0(RX) u=UDP S,D=src/dest addr s,d=src/dest port -+ * -+ * Wildcard filters have src(TX)/dest(RX) addr and port = 0; -+ * and UDP wildcard filters have the src and dest port fields swapped. -+ * -+ * Addr/port fields are little-endian. -+ * -+ * 3322222222221111111111 -+ * 10987654321098765432109876543210 -+ * -+ * 000000000000000000000mmmmmmmmmmu ^ -+ * DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD ^ -+ * ddddddddddddddddSSSSSSSSSSSSSSSS ^ -+ * SSSSSSSSSSSSSSSSssssssssssssssss -+ */ -+ -+ if (!tx) -+ masked_q_id = 0; -+ -+ if (!full) { -+ if (tx) { -+ dest_ip = 0; -+ dest_port = 0; -+ } else { -+ src_ip = 0; -+ src_port = 0; -+ } -+ if (!tcp) { -+ tmp_port = src_port; -+ src_port = dest_port; -+ dest_port = tmp_port; -+ } -+ } -+ -+ result = ((masked_q_id << 1) | (!tcp)) ^ -+ (dest_ip) ^ -+ (((dest_port & 0xffff) << 16) | ((src_ip >> 16) & 0xffff)) ^ -+ (((src_ip & 0xffff) << 16) | (src_port & 0xffff)); -+ -+ EFHW_TRACE("%s: IP %s %s %x", __func__, tcp ? "TCP" : "UDP", -+ full ? 
"Full" : "Wildcard", result); -+ -+ return result; -+} -+ -+ -+unsigned int -+falcon_hash_get_ip_key(unsigned int src_ip, unsigned int src_port, -+ unsigned int dest_ip, unsigned int dest_port, -+ int tcp, int full) -+{ -+ return common_get_ip_key(src_ip, src_port, dest_ip, dest_port, tcp, -+ full, 0, 0); -+} -+ -+ -+/* This function generates the First Hash key */ -+unsigned int falcon_hash_function1(unsigned int key, unsigned int nfilters) -+{ -+ -+ unsigned short int lfsr_reg; -+ unsigned int tmp_key; -+ int index; -+ -+ unsigned short int lfsr_input; -+ unsigned short int single_bit_key; -+ unsigned short int bit16_lfsr; -+ unsigned short int bit3_lfsr; -+ -+ lfsr_reg = 0xFFFF; -+ tmp_key = key; -+ -+ /* For Polynomial equation X^16+X^3+1 */ -+ for (index = 0; index < 32; index++) { -+ /* Get the bit from key and shift the key */ -+ single_bit_key = (tmp_key & 0x80000000) >> 31; -+ tmp_key = tmp_key << 1; -+ -+ /* get the Tap bits to XOR operation */ -+ bit16_lfsr = (lfsr_reg & 0x8000) >> 15; -+ bit3_lfsr = (lfsr_reg & 0x0004) >> 2; -+ -+ /* Get the Input value to the LFSR */ -+ lfsr_input = ((bit16_lfsr ^ bit3_lfsr) ^ single_bit_key); -+ -+ /* Shift and store out of the two TAPs */ -+ lfsr_reg = lfsr_reg << 1; -+ lfsr_reg = lfsr_reg | (lfsr_input & 0x0001); -+ -+ } -+ -+ lfsr_reg = lfsr_reg & (nfilters - 1); -+ -+ return lfsr_reg; -+} -+ -+/* This function generates the Second Hash */ -+unsigned int -+falcon_hash_function2(unsigned int key, unsigned int nfilters) -+{ -+ return (unsigned int)(((unsigned long long)key * 2 - 1) & -+ (nfilters - 1)); -+} -+ -+/* This function iterates through the hash table */ -+unsigned int -+falcon_hash_iterator(unsigned int hash1, unsigned int hash2, -+ unsigned int n_search, unsigned int nfilters) -+{ -+ return (hash1 + (n_search * hash2)) & (nfilters - 1); -+} -+ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/filter_resource.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 
+1,250 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains filters support. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "efrm_internal.h" -+ -+ -+struct filter_resource_manager { -+ struct efrm_resource_manager rm; -+ struct kfifo *free_ids; -+}; -+ -+static struct filter_resource_manager *efrm_filter_manager; -+ -+ -+void efrm_filter_resource_free(struct filter_resource *frs) -+{ -+ struct efhw_nic *nic = frs->rs.rs_client->nic; -+ int id; -+ -+ EFRM_RESOURCE_ASSERT_VALID(&frs->rs, 1); -+ -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __func__, -+ EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle)); -+ -+ efhw_nic_ipfilter_clear(nic, frs->filter_idx); -+ frs->filter_idx = -1; -+ efrm_vi_resource_release(frs->pt); -+ -+ /* Free this filter. 
*/ -+ id = EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle); -+ EFRM_VERIFY_EQ(kfifo_put(efrm_filter_manager->free_ids, -+ (unsigned char *)&id, sizeof(id)), -+ sizeof(id)); -+ -+ efrm_client_put(frs->rs.rs_client); -+ EFRM_DO_DEBUG(memset(frs, 0, sizeof(*frs))); -+ kfree(frs); -+} -+EXPORT_SYMBOL(efrm_filter_resource_free); -+ -+ -+void efrm_filter_resource_release(struct filter_resource *frs) -+{ -+ if (__efrm_resource_release(&frs->rs)) -+ efrm_filter_resource_free(frs); -+} -+EXPORT_SYMBOL(efrm_filter_resource_release); -+ -+ -+static void filter_rm_dtor(struct efrm_resource_manager *rm) -+{ -+ EFRM_TRACE("%s:", __func__); -+ -+ EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm); -+ EFRM_ASSERT(&efrm_filter_manager->rm == rm); -+ -+ kfifo_vfree(efrm_filter_manager->free_ids); -+ EFRM_TRACE("%s: done", __func__); -+} -+ -+/**********************************************************************/ -+/**********************************************************************/ -+/**********************************************************************/ -+ -+int efrm_create_filter_resource_manager(struct efrm_resource_manager **rm_out) -+{ -+ int rc; -+ -+ EFRM_ASSERT(rm_out); -+ -+ efrm_filter_manager = -+ kmalloc(sizeof(struct filter_resource_manager), GFP_KERNEL); -+ if (efrm_filter_manager == 0) -+ return -ENOMEM; -+ memset(efrm_filter_manager, 0, sizeof(*efrm_filter_manager)); -+ -+ rc = efrm_resource_manager_ctor(&efrm_filter_manager->rm, -+ filter_rm_dtor, "FILTER", -+ EFRM_RESOURCE_FILTER); -+ if (rc < 0) -+ goto fail1; -+ -+ /* Create a pool of free instances */ -+ rc = efrm_kfifo_id_ctor(&efrm_filter_manager->free_ids, -+ 0, EFHW_IP_FILTER_NUM, -+ &efrm_filter_manager->rm.rm_lock); -+ if (rc != 0) -+ goto fail2; -+ -+ *rm_out = &efrm_filter_manager->rm; -+ EFRM_TRACE("%s: filter resources created - %d IDs", -+ __func__, kfifo_len(efrm_filter_manager->free_ids)); -+ return 0; -+ -+fail2: -+ efrm_resource_manager_dtor(&efrm_filter_manager->rm); -+fail1: -+ 
memset(efrm_filter_manager, 0, sizeof(*efrm_filter_manager)); -+ kfree(efrm_filter_manager); -+ return rc; -+ -+} -+ -+ -+int efrm_filter_resource_clear(struct filter_resource *frs) -+{ -+ struct efhw_nic *nic = frs->rs.rs_client->nic; -+ -+ efhw_nic_ipfilter_clear(nic, frs->filter_idx); -+ frs->filter_idx = -1; -+ return 0; -+} -+EXPORT_SYMBOL(efrm_filter_resource_clear); -+ -+ -+int -+__efrm_filter_resource_set(struct filter_resource *frs, int type, -+ unsigned saddr, uint16_t sport, -+ unsigned daddr, uint16_t dport) -+{ -+ struct efhw_nic *nic = frs->rs.rs_client->nic; -+ int vi_instance; -+ -+ EFRM_ASSERT(frs); -+ -+ if (efrm_nic_tablep->a_nic->devtype.variant >= 'B' && -+ (frs->pt->flags & EFHW_VI_JUMBO_EN) == 0) -+ type |= EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK; -+ vi_instance = EFRM_RESOURCE_INSTANCE(frs->pt->rs.rs_handle); -+ -+ return efhw_nic_ipfilter_set(nic, type, &frs->filter_idx, -+ vi_instance, saddr, sport, daddr, dport); -+} -+EXPORT_SYMBOL(__efrm_filter_resource_set);; -+ -+ -+int -+efrm_filter_resource_alloc(struct vi_resource *vi_parent, -+ struct filter_resource **frs_out) -+{ -+ struct filter_resource *frs; -+ int rc, instance; -+ -+ EFRM_ASSERT(frs_out); -+ EFRM_ASSERT(efrm_filter_manager); -+ EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm); -+ EFRM_ASSERT(vi_parent != NULL); -+ EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_parent->rs.rs_handle) == -+ EFRM_RESOURCE_VI); -+ -+ /* Allocate resource data structure. */ -+ frs = kmalloc(sizeof(struct filter_resource), GFP_KERNEL); -+ if (!frs) -+ return -ENOMEM; -+ -+ /* Allocate an instance. */ -+ rc = kfifo_get(efrm_filter_manager->free_ids, -+ (unsigned char *)&instance, sizeof(instance)); -+ if (rc != sizeof(instance)) { -+ EFRM_TRACE("%s: out of instances", __func__); -+ EFRM_ASSERT(rc == 0); -+ rc = -EBUSY; -+ goto fail1; -+ } -+ -+ /* Initialise the resource DS. 
*/ -+ efrm_resource_init(&frs->rs, EFRM_RESOURCE_FILTER, instance); -+ frs->pt = vi_parent; -+ efrm_resource_ref(&frs->pt->rs); -+ frs->filter_idx = -1; -+ -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " VI %d", __func__, -+ EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle), -+ EFRM_RESOURCE_INSTANCE(vi_parent->rs.rs_handle)); -+ -+ efrm_client_add_resource(vi_parent->rs.rs_client, &frs->rs); -+ *frs_out = frs; -+ return 0; -+ -+fail1: -+ memset(frs, 0, sizeof(*frs)); -+ kfree(frs); -+ return rc; -+} -+EXPORT_SYMBOL(efrm_filter_resource_alloc); -+ -+ -+int efrm_filter_resource_instance(struct filter_resource *frs) -+{ -+ return EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle); -+} -+EXPORT_SYMBOL(efrm_filter_resource_instance); -+ -+ -+struct efrm_resource * -+efrm_filter_resource_to_resource(struct filter_resource *frs) -+{ -+ return &frs->rs; -+} -+EXPORT_SYMBOL(efrm_filter_resource_to_resource); -+ -+ -+struct filter_resource * -+efrm_filter_resource_from_resource(struct efrm_resource *rs) -+{ -+ return filter_resource(rs); -+} -+EXPORT_SYMBOL(efrm_filter_resource_from_resource); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/iobufset_resource.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,404 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains non-contiguous I/O buffers support. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "efrm_internal.h" -+ -+ -+#define EFRM_IOBUFSET_MAX_NUM_INSTANCES 0x00010000 -+ -+struct iobufset_resource_manager { -+ struct efrm_resource_manager rm; -+ struct kfifo *free_ids; -+}; -+ -+struct iobufset_resource_manager *efrm_iobufset_manager; -+ -+#define iobsrs(rs1) iobufset_resource(rs1) -+ -+/* Returns size of iobufset resource data structure. 
*/ -+static inline size_t iobsrs_size(int n_pages) -+{ -+ return offsetof(struct iobufset_resource, bufs) + -+ n_pages * sizeof(struct efhw_iopage); -+} -+ -+void efrm_iobufset_resource_free(struct iobufset_resource *rs) -+{ -+ unsigned int i; -+ int id; -+ -+ EFRM_RESOURCE_ASSERT_VALID(&rs->rs, 1); -+ -+ if (!rs->linked && rs->buf_tbl_alloc.base != (unsigned) -1) -+ efrm_buffer_table_free(&rs->buf_tbl_alloc); -+ -+ /* see comment on call to efhw_iopage_alloc in the alloc routine above -+ for discussion on use of efrm_nic_tablep->a_nic here */ -+ EFRM_ASSERT(efrm_nic_tablep->a_nic); -+ if (rs->linked) { -+ /* Nothing to do. */ -+ } else if (rs->chunk_order == 0) { -+ for (i = 0; i < rs->n_bufs; ++i) -+ efhw_iopage_free(efrm_nic_tablep->a_nic, &rs->bufs[i]); -+ } else { -+ /* it is important that this is executed in increasing page -+ * order because some implementations of -+ * efhw_iopages_init_from_iopage() assume this */ -+ for (i = 0; i < rs->n_bufs; -+ i += rs->pages_per_contiguous_chunk) { -+ struct efhw_iopages iopages; -+ efhw_iopages_init_from_iopage(&iopages, &rs->bufs[i], -+ rs->chunk_order); -+ efhw_iopages_free(efrm_nic_tablep->a_nic, &iopages); -+ } -+ } -+ -+ /* free the instance number */ -+ id = EFRM_RESOURCE_INSTANCE(rs->rs.rs_handle); -+ EFRM_VERIFY_EQ(kfifo_put(efrm_iobufset_manager->free_ids, -+ (unsigned char *)&id, sizeof(id)), sizeof(id)); -+ -+ efrm_vi_resource_release(rs->evq); -+ if (rs->linked) -+ efrm_iobufset_resource_release(rs->linked); -+ -+ efrm_client_put(rs->rs.rs_client); -+ if (iobsrs_size(rs->n_bufs) < PAGE_SIZE) { -+ EFRM_DO_DEBUG(memset(rs, 0, sizeof(*rs))); -+ kfree(rs); -+ } else { -+ EFRM_DO_DEBUG(memset(rs, 0, sizeof(*rs))); -+ vfree(rs); -+ } -+} -+EXPORT_SYMBOL(efrm_iobufset_resource_free); -+ -+ -+void efrm_iobufset_resource_release(struct iobufset_resource *iobrs) -+{ -+ if (__efrm_resource_release(&iobrs->rs)) -+ efrm_iobufset_resource_free(iobrs); -+} -+EXPORT_SYMBOL(efrm_iobufset_resource_release); -+ -+ -+ -+int 
-+efrm_iobufset_resource_alloc(int32_t n_pages, -+ int32_t pages_per_contiguous_chunk, -+ struct vi_resource *vi_evq, -+ struct iobufset_resource *linked, -+ bool phys_addr_mode, -+ struct iobufset_resource **iobrs_out) -+{ -+ struct iobufset_resource *iobrs; -+ int rc, instance, object_size; -+ unsigned int i; -+ -+ EFRM_ASSERT(iobrs_out); -+ EFRM_ASSERT(efrm_iobufset_manager); -+ EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_iobufset_manager->rm); -+ EFRM_RESOURCE_ASSERT_VALID(&vi_evq->rs, 0); -+ EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_evq->rs.rs_handle) == -+ EFRM_RESOURCE_VI); -+ EFRM_ASSERT(efrm_nic_tablep->a_nic); -+ -+ if (linked) { -+ /* This resource will share properties and memory with -+ * another. Only difference is that we'll program it into -+ * the buffer table of another nic. -+ */ -+ n_pages = linked->n_bufs; -+ pages_per_contiguous_chunk = linked->pages_per_contiguous_chunk; -+ phys_addr_mode = linked->buf_tbl_alloc.base == (unsigned) -1; -+ } -+ -+ /* allocate the resource data structure. */ -+ object_size = iobsrs_size(n_pages); -+ if (object_size < PAGE_SIZE) { -+ /* this should be OK from a tasklet */ -+ /* Necessary to do atomic alloc() as this -+ can be called from a weird-ass iSCSI context that is -+ !in_interrupt but is in_atomic - See BUG3163 */ -+ iobrs = kmalloc(object_size, GFP_ATOMIC); -+ } else { /* can't do this within a tasklet */ -+#ifndef NDEBUG -+ if (in_interrupt() || in_atomic()) { -+ EFRM_ERR("%s(): alloc->u.iobufset.in_n_pages=%d", -+ __func__, n_pages); -+ EFRM_ASSERT(!in_interrupt()); -+ EFRM_ASSERT(!in_atomic()); -+ } -+#endif -+ iobrs = (struct iobufset_resource *) vmalloc(object_size); -+ } -+ if (iobrs == NULL) { -+ EFRM_WARN("%s: failed to allocate container", __func__); -+ rc = -ENOMEM; -+ goto fail1; -+ } -+ -+ /* Allocate an instance number. 
*/ -+ rc = kfifo_get(efrm_iobufset_manager->free_ids, -+ (unsigned char *)&instance, sizeof(instance)); -+ if (rc != sizeof(instance)) { -+ EFRM_WARN("%s: out of instances", __func__); -+ EFRM_ASSERT(rc == 0); -+ rc = -EBUSY; -+ goto fail3; -+ } -+ -+ efrm_resource_init(&iobrs->rs, EFRM_RESOURCE_IOBUFSET, instance); -+ -+ iobrs->evq = vi_evq; -+ iobrs->linked = linked; -+ iobrs->n_bufs = n_pages; -+ iobrs->pages_per_contiguous_chunk = pages_per_contiguous_chunk; -+ iobrs->chunk_order = fls(iobrs->pages_per_contiguous_chunk - 1); -+ iobrs->buf_tbl_alloc.base = (unsigned) -1; -+ -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " %u pages", __func__, -+ EFRM_RESOURCE_PRI_ARG(iobrs->rs.rs_handle), iobrs->n_bufs); -+ -+ /* Allocate the iobuffers. */ -+ if (linked) { -+ memcpy(iobrs->bufs, linked->bufs, -+ iobrs->n_bufs * sizeof(iobrs->bufs[0])); -+ } else if (iobrs->chunk_order == 0) { -+ memset(iobrs->bufs, 0, iobrs->n_bufs * sizeof(iobrs->bufs[0])); -+ for (i = 0; i < iobrs->n_bufs; ++i) { -+ /* due to bug2426 we have to specifiy a NIC when -+ * allocating a DMAable page, which is a bit messy. -+ * For now we assume that if the page is suitable -+ * (e.g. DMAable) by one nic (efrm_nic_tablep->a_nic), -+ * it is suitable for all NICs. -+ * XXX I bet that breaks in Solaris. 
-+ */ -+ rc = efhw_iopage_alloc(efrm_nic_tablep->a_nic, -+ &iobrs->bufs[i]); -+ if (rc < 0) { -+ EFRM_WARN("%s: failed (rc %d) to allocate " -+ "page (i=%u)", __func__, rc, i); -+ goto fail4; -+ } -+ } -+ } else { -+ struct efhw_iopages iopages; -+ unsigned j; -+ -+ memset(iobrs->bufs, 0, iobrs->n_bufs * sizeof(iobrs->bufs[0])); -+ for (i = 0; i < iobrs->n_bufs; -+ i += iobrs->pages_per_contiguous_chunk) { -+ rc = efhw_iopages_alloc(efrm_nic_tablep->a_nic, -+ &iopages, iobrs->chunk_order); -+ if (rc < 0) { -+ EFRM_WARN("%s: failed (rc %d) to allocate " -+ "pages (i=%u order %d)", -+ __func__, rc, i, -+ iobrs->chunk_order); -+ goto fail4; -+ } -+ for (j = 0; j < iobrs->pages_per_contiguous_chunk; -+ j++) { -+ /* some implementation of -+ * efhw_iopage_init_from_iopages() rely on -+ * this function being called for -+ * _all_ pages in the chunk */ -+ efhw_iopage_init_from_iopages( -+ &iobrs->bufs[i + j], -+ &iopages, j); -+ } -+ } -+ } -+ -+ if (!phys_addr_mode) { -+ unsigned owner_id = EFAB_VI_RESOURCE_INSTANCE(iobrs->evq); -+ -+ if (!linked) { -+ /* Allocate space in the NIC's buffer table. */ -+ rc = efrm_buffer_table_alloc(fls(iobrs->n_bufs - 1), -+ &iobrs->buf_tbl_alloc); -+ if (rc < 0) { -+ EFRM_WARN("%s: failed (%d) to alloc %d buffer " -+ "table entries", __func__, rc, -+ iobrs->n_bufs); -+ goto fail5; -+ } -+ EFRM_ASSERT(((unsigned)1 << iobrs->buf_tbl_alloc.order) -+ >= (unsigned) iobrs->n_bufs); -+ } else { -+ iobrs->buf_tbl_alloc = linked->buf_tbl_alloc; -+ } -+ -+ /* Initialise the buffer table entries. */ -+ for (i = 0; i < iobrs->n_bufs; ++i) { -+ /*\ ?? \TODO burst them! 
*/ -+ efrm_buffer_table_set(&iobrs->buf_tbl_alloc, -+ vi_evq->rs.rs_client->nic, -+ i, -+ efhw_iopage_dma_addr(&iobrs-> -+ bufs[i]), -+ owner_id); -+ } -+ efrm_buffer_table_commit(); -+ } -+ -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " %d pages @ " -+ EFHW_BUFFER_ADDR_FMT, __func__, -+ EFRM_RESOURCE_PRI_ARG(iobrs->rs.rs_handle), -+ iobrs->n_bufs, EFHW_BUFFER_ADDR(iobrs->buf_tbl_alloc.base, -+ 0)); -+ efrm_resource_ref(&iobrs->evq->rs); -+ if (linked != NULL) -+ efrm_resource_ref(&linked->rs); -+ efrm_client_add_resource(vi_evq->rs.rs_client, &iobrs->rs); -+ *iobrs_out = iobrs; -+ return 0; -+ -+fail5: -+ i = iobrs->n_bufs; -+fail4: -+ /* see comment on call to efhw_iopage_alloc above for a discussion -+ * on use of efrm_nic_tablep->a_nic here */ -+ if (linked) { -+ /* Nothing to do. */ -+ } else if (iobrs->chunk_order == 0) { -+ while (i--) { -+ struct efhw_iopage *page = &iobrs->bufs[i]; -+ efhw_iopage_free(efrm_nic_tablep->a_nic, page); -+ } -+ } else { -+ unsigned int j; -+ for (j = 0; j < i; j += iobrs->pages_per_contiguous_chunk) { -+ struct efhw_iopages iopages; -+ -+ EFRM_ASSERT(j % iobrs->pages_per_contiguous_chunk -+ == 0); -+ /* it is important that this is executed in increasing -+ * page order because some implementations of -+ * efhw_iopages_init_from_iopage() assume this */ -+ efhw_iopages_init_from_iopage(&iopages, -+ &iobrs->bufs[j], -+ iobrs->chunk_order); -+ efhw_iopages_free(efrm_nic_tablep->a_nic, &iopages); -+ } -+ } -+fail3: -+ if (object_size < PAGE_SIZE) -+ kfree(iobrs); -+ else -+ vfree(iobrs); -+fail1: -+ return rc; -+} -+EXPORT_SYMBOL(efrm_iobufset_resource_alloc); -+ -+static void iobufset_rm_dtor(struct efrm_resource_manager *rm) -+{ -+ EFRM_ASSERT(&efrm_iobufset_manager->rm == rm); -+ kfifo_vfree(efrm_iobufset_manager->free_ids); -+} -+ -+int -+efrm_create_iobufset_resource_manager(struct efrm_resource_manager **rm_out) -+{ -+ int rc, max; -+ -+ EFRM_ASSERT(rm_out); -+ -+ efrm_iobufset_manager = -+ kmalloc(sizeof(*efrm_iobufset_manager), 
GFP_KERNEL); -+ if (efrm_iobufset_manager == 0) -+ return -ENOMEM; -+ memset(efrm_iobufset_manager, 0, sizeof(*efrm_iobufset_manager)); -+ -+ /* -+ * Bug 1145, 1370: We need to set initial size of both the resource -+ * table and instance id table so they never need to grow as we -+ * want to be allocate new iobufset at tasklet time. Lets make -+ * a pessimistic guess at maximum number of iobufsets possible. -+ * Could be less because -+ * - jumbo frames have same no of packets per iobufset BUT more -+ * pages per buffer -+ * - buffer table entries used independently of iobufsets by -+ * sendfile -+ * -+ * Based on TCP/IP stack setting of PKTS_PER_SET_S=5 ... -+ * - can't use this define here as it breaks the layering. -+ */ -+#define MIN_PAGES_PER_IOBUFSET (1 << 4) -+ -+ max = efrm_buffer_table_size() / MIN_PAGES_PER_IOBUFSET; -+ max = min_t(int, max, EFRM_IOBUFSET_MAX_NUM_INSTANCES); -+ -+ /* HACK: There currently exists an option to allocate buffers that -+ * are not programmed into the buffer table, so the max number is -+ * not limited by the buffer table size. I'm hoping this usage -+ * will go away eventually. 
-+ */ -+ max = 32768; -+ -+ rc = efrm_kfifo_id_ctor(&efrm_iobufset_manager->free_ids, -+ 0, max, &efrm_iobufset_manager->rm.rm_lock); -+ if (rc != 0) -+ goto fail1; -+ -+ rc = efrm_resource_manager_ctor(&efrm_iobufset_manager->rm, -+ iobufset_rm_dtor, "IOBUFSET", -+ EFRM_RESOURCE_IOBUFSET); -+ if (rc < 0) -+ goto fail2; -+ -+ *rm_out = &efrm_iobufset_manager->rm; -+ return 0; -+ -+fail2: -+ kfifo_vfree(efrm_iobufset_manager->free_ids); -+fail1: -+ EFRM_DO_DEBUG(memset(efrm_iobufset_manager, 0, -+ sizeof(*efrm_iobufset_manager))); -+ kfree(efrm_iobufset_manager); -+ return rc; -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/iopage.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,103 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides Linux-specific implementation for iopage API used -+ * from efhw library. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include "kernel_compat.h" -+#include /* for dma_addr_t */ -+ -+int efhw_iopage_alloc(struct efhw_nic *nic, struct efhw_iopage *p) -+{ -+ struct linux_efhw_nic *lnic = linux_efhw_nic(nic); -+ dma_addr_t handle; -+ void *kva; -+ -+ kva = efrm_pci_alloc_consistent(lnic->pci_dev, PAGE_SIZE, -+ &handle); -+ if (kva == 0) -+ return -ENOMEM; -+ -+ EFHW_ASSERT((handle & ~PAGE_MASK) == 0); -+ -+ memset((void *)kva, 0, PAGE_SIZE); -+ efhw_page_init_from_va(&p->p, kva); -+ -+ p->dma_addr = handle; -+ -+ return 0; -+} -+ -+void efhw_iopage_free(struct efhw_nic *nic, struct efhw_iopage *p) -+{ -+ struct linux_efhw_nic *lnic = linux_efhw_nic(nic); -+ EFHW_ASSERT(efhw_page_is_valid(&p->p)); -+ -+ efrm_pci_free_consistent(lnic->pci_dev, PAGE_SIZE, -+ efhw_iopage_ptr(p), p->dma_addr); -+} -+ -+int -+efhw_iopages_alloc(struct efhw_nic *nic, struct efhw_iopages *p, -+ unsigned order) -+{ -+ unsigned bytes = 1u << (order + PAGE_SHIFT); -+ struct linux_efhw_nic *lnic = linux_efhw_nic(nic); -+ dma_addr_t handle; -+ caddr_t addr; -+ int gfp_flag; -+ -+ /* Set __GFP_COMP if available to make reference counting work. -+ * This is recommended here: -+ * http://www.forbiddenweb.org/viewtopic.php?id=83167&page=4#348331 -+ */ -+ gfp_flag = ((in_atomic() ? 
GFP_ATOMIC : GFP_KERNEL) | __GFP_COMP); -+ addr = efrm_dma_alloc_coherent(&lnic->pci_dev->dev, bytes, &handle, -+ gfp_flag); -+ if (addr == NULL) -+ return -ENOMEM; -+ -+ EFHW_ASSERT((handle & ~PAGE_MASK) == 0); -+ -+ p->order = order; -+ p->dma_addr = handle; -+ p->kva = addr; -+ -+ return 0; -+} -+ -+void efhw_iopages_free(struct efhw_nic *nic, struct efhw_iopages *p) -+{ -+ unsigned bytes = 1u << (p->order + PAGE_SHIFT); -+ struct linux_efhw_nic *lnic = linux_efhw_nic(nic); -+ -+ efrm_dma_free_coherent(&lnic->pci_dev->dev, bytes, -+ (void *)p->kva, p->dma_addr); -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/kernel_compat.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,118 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file provides compatibility layer for various Linux kernel versions -+ * (starting from 2.6.9 RHEL kernel). -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#define IN_KERNEL_COMPAT_C -+#include -+#include -+#include "kernel_compat.h" -+ -+/* Set this to 1 to enable very basic counting of iopage(s) allocations, then -+ * call dump_iopage_counts() to show the number of current allocations of -+ * orders 0-7. -+ */ -+#define EFRM_IOPAGE_COUNTS_ENABLED 0 -+ -+ -+/**************************************************************************** -+ * -+ * allocate a buffer suitable for DMA to/from the NIC -+ * -+ ****************************************************************************/ -+ -+#if EFRM_IOPAGE_COUNTS_ENABLED -+ -+static int iopage_counts[8]; -+ -+void dump_iopage_counts(void) -+{ -+ EFRM_NOTICE("iopage counts: %d %d %d %d %d %d %d %d", iopage_counts[0], -+ iopage_counts[1], iopage_counts[2], iopage_counts[3], -+ iopage_counts[4], iopage_counts[5], iopage_counts[6], -+ iopage_counts[7]); -+} -+ -+#endif -+ -+ -+ -+/*********** pci_alloc_consistent / pci_free_consistent ***********/ -+ -+void *efrm_dma_alloc_coherent(struct device *dev, size_t size, -+ dma_addr_t *dma_addr, int flag) -+{ -+ void *ptr; -+ unsigned order; -+ -+ order = __ffs(size/PAGE_SIZE); -+ EFRM_ASSERT(size == (PAGE_SIZE< -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H -+#define DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H -+ -+#include -+#include -+#include -+#include -+ -+/********* pci_map_*() ********************/ -+ -+extern void *efrm_dma_alloc_coherent(struct device *dev, size_t size, -+ dma_addr_t *dma_addr, int flag); -+ -+extern void efrm_dma_free_coherent(struct device *dev, size_t size, -+ void *ptr, dma_addr_t dma_addr); -+ -+static inline void *efrm_pci_alloc_consistent(struct pci_dev *hwdev, -+ size_t size, -+ dma_addr_t *dma_addr) -+{ -+ return efrm_dma_alloc_coherent(&hwdev->dev, size, dma_addr, -+ GFP_ATOMIC); -+} -+ -+static inline void efrm_pci_free_consistent(struct pci_dev *hwdev, size_t size, -+ void *ptr, dma_addr_t dma_addr) -+{ -+ efrm_dma_free_coherent(&hwdev->dev, size, ptr, dma_addr); -+} -+ -+ -+#endif /* DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/kernel_proc.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,109 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains /proc/driver/sfc_resource/ implementation. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+ -+/** Top level directory for sfc specific stats **/ -+static struct proc_dir_entry *efrm_proc_root; /* = NULL */ -+ -+static int -+efrm_resource_read_proc(char *buf, char **start, off_t offset, int count, -+ int *eof, void *data); -+ -+int efrm_install_proc_entries(void) -+{ -+ /* create the top-level directory for etherfabric specific stuff */ -+ efrm_proc_root = proc_mkdir("driver/sfc_resource", NULL); -+ if (!efrm_proc_root) -+ return -ENOMEM; -+ -+ if (create_proc_read_entry("resources", 0, efrm_proc_root, -+ efrm_resource_read_proc, 0) == NULL) { -+ EFRM_WARN("%s: Unable to create /proc/drivers/sfc_resource/" -+ "resources", __func__); -+ } -+ return 0; -+} -+ -+void efrm_uninstall_proc_entries(void) -+{ -+ EFRM_ASSERT(efrm_proc_root); -+ 
remove_proc_entry("resources", efrm_proc_root); -+ remove_proc_entry(efrm_proc_root->name, efrm_proc_root->parent); -+ efrm_proc_root = NULL; -+} -+ -+/**************************************************************************** -+ * -+ * /proc/drivers/sfc/resources -+ * -+ ****************************************************************************/ -+ -+#define EFRM_PROC_PRINTF(buf, len, fmt, ...) \ -+ do { \ -+ if (count - len > 0) \ -+ len += snprintf(buf+len, count-len, (fmt), \ -+ __VA_ARGS__); \ -+ } while (0) -+ -+static int -+efrm_resource_read_proc(char *buf, char **start, off_t offset, int count, -+ int *eof, void *data) -+{ -+ irq_flags_t lock_flags; -+ int len = 0; -+ int type; -+ struct efrm_resource_manager *rm; -+ -+ for (type = 0; type < EFRM_RESOURCE_NUM; type++) { -+ rm = efrm_rm_table[type]; -+ if (rm == NULL) -+ continue; -+ -+ EFRM_PROC_PRINTF(buf, len, "*** %s ***\n", rm->rm_name); -+ -+ spin_lock_irqsave(&rm->rm_lock, lock_flags); -+ EFRM_PROC_PRINTF(buf, len, "current = %u\n", rm->rm_resources); -+ EFRM_PROC_PRINTF(buf, len, " max = %u\n\n", -+ rm->rm_resources_hiwat); -+ spin_unlock_irqrestore(&rm->rm_lock, lock_flags); -+ } -+ -+ return count ? strlen(buf) : 0; -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/kfifo.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,208 @@ -+/* -+ * A simple kernel FIFO implementation. -+ * -+ * Copyright (C) 2004 Stelian Pop -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ * -+ */ -+ -+/* -+ * This file is stolen from the Linux kernel sources -+ * (linux-2.6.22/kernel/kfifo.c) into sfc_resource driver. -+ * It should be used for old kernels without kfifo implementation. -+ * Most part of linux/kfifo.h is incorporated into -+ * ci/efrm/sysdep_linux.h. -+ */ -+#include -+#ifdef HAS_NO_KFIFO -+ -+#include -+#include -+#include -+#include -+/*#include */ -+ -+/** -+ * kfifo_init - allocates a new FIFO using a preallocated buffer -+ * @buffer: the preallocated buffer to be used. -+ * @size: the size of the internal buffer, this have to be a power of 2. -+ * @gfp_mask: get_free_pages mask, passed to kmalloc() -+ * @lock: the lock to be used to protect the fifo buffer -+ * -+ * Do NOT pass the kfifo to kfifo_free() after use! Simply free the -+ * &struct kfifo with kfree(). -+ */ -+struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, -+ gfp_t gfp_mask, spinlock_t *lock) -+{ -+ struct kfifo *fifo; -+ -+ /* size must be a power of 2 */ -+ BUG_ON(size & (size - 1)); -+ -+ fifo = kmalloc(sizeof(struct kfifo), gfp_mask); -+ if (!fifo) -+ return ERR_PTR(-ENOMEM); -+ -+ fifo->buffer = buffer; -+ fifo->size = size; -+ fifo->in = fifo->out = 0; -+ fifo->lock = lock; -+ -+ return fifo; -+} -+EXPORT_SYMBOL(kfifo_init); -+ -+/** -+ * kfifo_alloc - allocates a new FIFO and its internal buffer -+ * @size: the size of the internal buffer to be allocated. -+ * @gfp_mask: get_free_pages mask, passed to kmalloc() -+ * @lock: the lock to be used to protect the fifo buffer -+ * -+ * The size will be rounded-up to a power of 2. 
-+ */ -+struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock) -+{ -+ unsigned char *buffer; -+ struct kfifo *ret; -+ -+ /* -+ * round up to the next power of 2, since our 'let the indices -+ * wrap' tachnique works only in this case. -+ */ -+ if (size & (size - 1)) { -+ BUG_ON(size > 0x80000000); -+ size = roundup_pow_of_two(size); -+ } -+ -+ buffer = kmalloc(size, gfp_mask); -+ if (!buffer) -+ return ERR_PTR(-ENOMEM); -+ -+ ret = kfifo_init(buffer, size, gfp_mask, lock); -+ -+ if (IS_ERR(ret)) -+ kfree(buffer); -+ -+ return ret; -+} -+EXPORT_SYMBOL(kfifo_alloc); -+ -+/** -+ * kfifo_free - frees the FIFO -+ * @fifo: the fifo to be freed. -+ */ -+void kfifo_free(struct kfifo *fifo) -+{ -+ kfree(fifo->buffer); -+ kfree(fifo); -+} -+EXPORT_SYMBOL(kfifo_free); -+ -+/** -+ * __kfifo_put - puts some data into the FIFO, no locking version -+ * @fifo: the fifo to be used. -+ * @buffer: the data to be added. -+ * @len: the length of the data to be added. -+ * -+ * This function copies at most @len bytes from the @buffer into -+ * the FIFO depending on the free space, and returns the number of -+ * bytes copied. -+ * -+ * Note that with only one concurrent reader and one concurrent -+ * writer, you don't need extra locking to use these functions. -+ */ -+unsigned int -+__kfifo_put(struct kfifo *fifo, unsigned char *buffer, unsigned int len) -+{ -+ unsigned int l; -+ -+ len = min(len, fifo->size - fifo->in + fifo->out); -+ -+ /* -+ * Ensure that we sample the fifo->out index -before- we -+ * start putting bytes into the kfifo. 
-+ */ -+ -+ smp_mb(); -+ -+ /* first put the data starting from fifo->in to buffer end */ -+ l = min(len, fifo->size - (fifo->in & (fifo->size - 1))); -+ memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l); -+ -+ /* then put the rest (if any) at the beginning of the buffer */ -+ memcpy(fifo->buffer, buffer + l, len - l); -+ -+ /* -+ * Ensure that we add the bytes to the kfifo -before- -+ * we update the fifo->in index. -+ */ -+ -+ smp_wmb(); -+ -+ fifo->in += len; -+ -+ return len; -+} -+EXPORT_SYMBOL(__kfifo_put); -+ -+/** -+ * __kfifo_get - gets some data from the FIFO, no locking version -+ * @fifo: the fifo to be used. -+ * @buffer: where the data must be copied. -+ * @len: the size of the destination buffer. -+ * -+ * This function copies at most @len bytes from the FIFO into the -+ * @buffer and returns the number of copied bytes. -+ * -+ * Note that with only one concurrent reader and one concurrent -+ * writer, you don't need extra locking to use these functions. -+ */ -+unsigned int -+__kfifo_get(struct kfifo *fifo, unsigned char *buffer, unsigned int len) -+{ -+ unsigned int l; -+ -+ len = min(len, fifo->in - fifo->out); -+ -+ /* -+ * Ensure that we sample the fifo->in index -before- we -+ * start removing bytes from the kfifo. -+ */ -+ -+ smp_rmb(); -+ -+ /* first get the data from fifo->out until the end of the buffer */ -+ l = min(len, fifo->size - (fifo->out & (fifo->size - 1))); -+ memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l); -+ -+ /* then get the rest (if any) from the beginning of the buffer */ -+ memcpy(buffer + l, fifo->buffer, len - l); -+ -+ /* -+ * Ensure that we remove the bytes from the kfifo -before- -+ * we update the fifo->out index. 
-+ */ -+ -+ smp_mb(); -+ -+ fifo->out += len; -+ -+ return len; -+} -+EXPORT_SYMBOL(__kfifo_get); -+ -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/linux_resource_internal.h 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,76 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains Linux-specific API internal for the resource driver. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#ifndef __LINUX_RESOURCE_INTERNAL__ -+#define __LINUX_RESOURCE_INTERNAL__ -+ -+#include -+#include -+#include -+#include -+ -+ -+/*! 
Linux specific EtherFabric initialisation */ -+extern int -+linux_efrm_nic_ctor(struct linux_efhw_nic *, struct pci_dev *, -+ spinlock_t *reg_lock, -+ unsigned nic_flags, unsigned nic_options); -+ -+/*! Linux specific EtherFabric initialisation */ -+extern void linux_efrm_nic_dtor(struct linux_efhw_nic *); -+ -+/*! Linux specific EtherFabric initialisation -- interrupt registration */ -+extern int linux_efrm_irq_ctor(struct linux_efhw_nic *); -+ -+/*! Linux specific EtherFabric initialisation -- interrupt deregistration */ -+extern void linux_efrm_irq_dtor(struct linux_efhw_nic *); -+ -+extern int efrm_driverlink_register(void); -+extern void efrm_driverlink_unregister(void); -+ -+extern int -+efrm_nic_add(struct pci_dev *dev, unsigned int opts, const uint8_t *mac_addr, -+ struct linux_efhw_nic **lnic_out, spinlock_t *reg_lock, -+ int bt_min, int bt_max, int non_irq_evq, -+ const struct vi_resource_dimensions *); -+extern void efrm_nic_del(struct linux_efhw_nic *); -+ -+ -+extern int efrm_install_proc_entries(void); -+extern void efrm_uninstall_proc_entries(void); -+ -+#endif /* __LINUX_RESOURCE_INTERNAL__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/nic.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,174 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains EtherFabric Generic NIC instance (init, interrupts, -+ * etc) -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free 
Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+ -+int efhw_device_type_init(struct efhw_device_type *dt, -+ int vendor_id, int device_id, -+ int class_revision) -+{ -+ if (vendor_id != 0x1924) -+ return 0; -+ -+ switch (device_id) { -+ case 0x0703: -+ case 0x6703: -+ dt->variant = 'A'; -+ switch (class_revision) { -+ case 0: -+ dt->revision = 0; -+ break; -+ case 1: -+ dt->revision = 1; -+ break; -+ default: -+ return 0; -+ } -+ break; -+ case 0x0710: -+ dt->variant = 'B'; -+ switch (class_revision) { -+ case 2: -+ dt->revision = 0; -+ break; -+ default: -+ return 0; -+ } -+ break; -+ default: -+ return 0; -+ } -+ -+ return 1; -+} -+ -+ -+/*-------------------------------------------------------------------- -+ * -+ * NIC Initialisation -+ * -+ *--------------------------------------------------------------------*/ -+ -+/* make this separate from initialising data structure -+** to allow this to be called at a later time once we can access PCI -+** config space to find out what hardware we have -+*/ -+void efhw_nic_init(struct efhw_nic *nic, unsigned flags, unsigned options, -+ struct efhw_device_type dev_type) -+{ -+ nic->devtype = dev_type; -+ nic->flags = flags; -+ nic->options = options; -+ nic->bar_ioaddr = 0; -+ spin_lock_init(&nic->the_reg_lock); -+ nic->reg_lock = &nic->the_reg_lock; -+ nic->mtu = 1500 + ETH_HLEN; -+ -+ nic->irq_unit = 
EFHW_IRQ_UNIT_UNUSED; -+ -+ nic->evq_sizes = 512 | 1024 | 2048 | 4096 | 8192 | -+ 16384 | 32768; -+ nic->txq_sizes = 512 | 1024 | 2048 | 4096; -+ nic->rxq_sizes = 512 | 1024 | 2048 | 4096; -+ nic->efhw_func = &falcon_char_functional_units; -+ nic->ctr_ap_bytes = EFHW_64M; -+ switch (nic->devtype.variant) { -+ case 'A': -+ nic->ctr_ap_bar = FALCON_S_CTR_AP_BAR; -+ nic->num_evqs = 4096; -+ nic->num_dmaqs = 4096; -+ nic->num_timers = 4096; -+ break; -+ case 'B': -+ nic->flags |= NIC_FLAG_NO_INTERRUPT; -+ nic->ctr_ap_bar = FALCON_P_CTR_AP_BAR; -+ nic->num_evqs = 4096; -+ nic->num_dmaqs = 4096; -+ nic->num_timers = 4096; -+ break; -+ default: -+ EFHW_ASSERT(0); -+ break; -+ } -+} -+ -+ -+void efhw_nic_close_interrupts(struct efhw_nic *nic) -+{ -+ EFHW_ASSERT(nic); -+ if (!efhw_nic_have_hw(nic)) -+ return; -+ -+ EFHW_ASSERT(efhw_nic_have_hw(nic)); -+ -+ if (nic->irq_unit != EFHW_IRQ_UNIT_UNUSED) -+ efhw_nic_interrupt_disable(nic); -+} -+ -+void efhw_nic_dtor(struct efhw_nic *nic) -+{ -+ EFHW_ASSERT(nic); -+ -+ /* Check that we have functional units because the software only -+ * driver doesn't initialise anything hardware related any more */ -+ -+ /* close interrupts is called first because the act of deregistering -+ the driver could cause this driver to change from master to slave -+ and hence the implicit interrupt mappings would be wrong */ -+ -+ EFHW_TRACE("%s: functional units ... ", __func__); -+ -+ if (efhw_nic_have_functional_units(nic)) { -+ efhw_nic_close_interrupts(nic); -+ efhw_nic_close_hardware(nic); -+ } -+ EFHW_TRACE("%s: functional units ... done", __func__); -+ -+ /* destroy event queues */ -+ EFHW_TRACE("%s: event queues ... ", __func__); -+ -+ if (nic->interrupting_evq.evq_mask) -+ efhw_keventq_dtor(nic, &nic->interrupting_evq); -+ if (nic->non_interrupting_evq.evq_mask) -+ efhw_keventq_dtor(nic, &nic->non_interrupting_evq); -+ -+ EFHW_TRACE("%s: event queues ... 
done", __func__); -+ -+ spin_lock_destroy(&nic->the_reg_lock); -+ -+ EFHW_TRACE("%s: DONE", __func__); -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/resource_driver.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,600 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains main driver entry points. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include "linux_resource_internal.h" -+#include "kernel_compat.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_AUTHOR("Solarflare Communications"); -+MODULE_LICENSE("GPL"); -+ -+static struct efhw_ev_handler ev_handler = { -+ .wakeup_fn = efrm_handle_wakeup_event, -+ .timeout_fn = efrm_handle_timeout_event, -+ .dmaq_flushed_fn = efrm_handle_dmaq_flushed, -+}; -+ -+const int max_hardware_init_repeats = 10; -+ -+/*-------------------------------------------------------------------- -+ * -+ * Module load time variables -+ * -+ *--------------------------------------------------------------------*/ -+/* See docs/notes/pci_alloc_consistent */ -+static int do_irq = 1; /* enable interrupts */ -+ -+#if defined(CONFIG_X86_XEN) -+static int irq_moderation = 60; /* interrupt moderation (60 usec) */ -+#else -+static int irq_moderation = 20; /* interrupt moderation (20 usec) */ -+#endif -+static int nic_options = NIC_OPT_DEFAULT; -+int efx_vi_eventq_size = EFX_VI_EVENTQ_SIZE_DEFAULT; -+ -+module_param(do_irq, int, S_IRUGO); -+MODULE_PARM_DESC(do_irq, "Enable interrupts. 
" -+ "Do not turn it off unless you know what are you doing."); -+module_param(irq_moderation, int, S_IRUGO); -+MODULE_PARM_DESC(irq_moderation, "IRQ moderation in usec"); -+module_param(nic_options, int, S_IRUGO); -+MODULE_PARM_DESC(nic_options, "Nic options -- see efhw_types.h"); -+module_param(efx_vi_eventq_size, int, S_IRUGO); -+MODULE_PARM_DESC(efx_vi_eventq_size, -+ "Size of event queue allocated by efx_vi library"); -+ -+/*-------------------------------------------------------------------- -+ * -+ * Linux specific NIC initialisation -+ * -+ *--------------------------------------------------------------------*/ -+ -+static inline irqreturn_t -+linux_efrm_interrupt(int irr, void *dev_id) -+{ -+ return efhw_nic_interrupt((struct efhw_nic *)dev_id); -+} -+ -+int linux_efrm_irq_ctor(struct linux_efhw_nic *lnic) -+{ -+ struct efhw_nic *nic = &lnic->efrm_nic.efhw_nic; -+ -+ nic->flags &= ~NIC_FLAG_MSI; -+ if (nic->flags & NIC_FLAG_TRY_MSI) { -+ int rc = pci_enable_msi(lnic->pci_dev); -+ if (rc < 0) { -+ EFRM_WARN("%s: Could not enable MSI (%d)", -+ __func__, rc); -+ EFRM_WARN("%s: Continuing with legacy interrupt mode", -+ __func__); -+ } else { -+ EFRM_NOTICE("%s: MSI enabled", __func__); -+ nic->flags |= NIC_FLAG_MSI; -+ } -+ } -+ -+ if (request_irq(lnic->pci_dev->irq, linux_efrm_interrupt, -+ IRQF_SHARED, "sfc_resource", nic)) { -+ EFRM_ERR("Request for interrupt #%d failed", -+ lnic->pci_dev->irq); -+ nic->flags &= ~NIC_FLAG_OS_IRQ_EN; -+ return -EBUSY; -+ } -+ nic->flags |= NIC_FLAG_OS_IRQ_EN; -+ -+ return 0; -+} -+ -+void linux_efrm_irq_dtor(struct linux_efhw_nic *lnic) -+{ -+ EFRM_TRACE("%s: start", __func__); -+ -+ if (lnic->efrm_nic.efhw_nic.flags & NIC_FLAG_OS_IRQ_EN) { -+ free_irq(lnic->pci_dev->irq, &lnic->efrm_nic.efhw_nic); -+ lnic->efrm_nic.efhw_nic.flags &= ~NIC_FLAG_OS_IRQ_EN; -+ } -+ -+ if (lnic->efrm_nic.efhw_nic.flags & NIC_FLAG_MSI) { -+ pci_disable_msi(lnic->pci_dev); -+ lnic->efrm_nic.efhw_nic.flags &= ~NIC_FLAG_MSI; -+ } -+ -+ 
EFRM_TRACE("%s: done", __func__); -+} -+ -+/* Allocate buffer table entries for a particular NIC. -+ */ -+static int efrm_nic_buffer_table_alloc(struct efhw_nic *nic) -+{ -+ int capacity; -+ int page_order; -+ int rc; -+ -+ /* Choose queue size. */ -+ for (capacity = 8192; capacity <= nic->evq_sizes; capacity <<= 1) { -+ if (capacity > nic->evq_sizes) { -+ EFRM_ERR -+ ("%s: Unable to choose EVQ size (supported=%x)", -+ __func__, nic->evq_sizes); -+ return -E2BIG; -+ } else if (capacity & nic->evq_sizes) -+ break; -+ } -+ -+ nic->interrupting_evq.hw.capacity = capacity; -+ nic->interrupting_evq.hw.buf_tbl_alloc.base = (unsigned)-1; -+ -+ nic->non_interrupting_evq.hw.capacity = capacity; -+ nic->non_interrupting_evq.hw.buf_tbl_alloc.base = (unsigned)-1; -+ -+ /* allocate buffer table entries to map onto the iobuffer */ -+ page_order = get_order(capacity * sizeof(efhw_event_t)); -+ if (!(nic->flags & NIC_FLAG_NO_INTERRUPT)) { -+ rc = efrm_buffer_table_alloc(page_order, -+ &nic->interrupting_evq -+ .hw.buf_tbl_alloc); -+ if (rc < 0) { -+ EFRM_WARN -+ ("%s: failed (%d) to alloc %d buffer table entries", -+ __func__, rc, page_order); -+ return rc; -+ } -+ } -+ rc = efrm_buffer_table_alloc(page_order, -+ &nic->non_interrupting_evq.hw. -+ buf_tbl_alloc); -+ if (rc < 0) { -+ EFRM_WARN -+ ("%s: failed (%d) to alloc %d buffer table entries", -+ __func__, rc, page_order); -+ return rc; -+ } -+ -+ return 0; -+} -+ -+/* Free buffer table entries allocated for a particular NIC. 
-+ */ -+static void efrm_nic_buffer_table_free(struct efhw_nic *nic) -+{ -+ if (nic->interrupting_evq.hw.buf_tbl_alloc.base != (unsigned)-1) -+ efrm_buffer_table_free(&nic->interrupting_evq.hw -+ .buf_tbl_alloc); -+ if (nic->non_interrupting_evq.hw.buf_tbl_alloc.base != (unsigned)-1) -+ efrm_buffer_table_free(&nic->non_interrupting_evq -+ .hw.buf_tbl_alloc); -+} -+ -+static int iomap_bar(struct linux_efhw_nic *lnic, size_t len) -+{ -+ volatile char __iomem *ioaddr; -+ -+ ioaddr = ioremap_nocache(lnic->ctr_ap_pci_addr, len); -+ if (ioaddr == 0) -+ return -ENOMEM; -+ -+ lnic->efrm_nic.efhw_nic.bar_ioaddr = ioaddr; -+ return 0; -+} -+ -+static int linux_efhw_nic_map_ctr_ap(struct linux_efhw_nic *lnic) -+{ -+ struct efhw_nic *nic = &lnic->efrm_nic.efhw_nic; -+ int rc; -+ -+ rc = iomap_bar(lnic, nic->ctr_ap_bytes); -+ -+ /* Bug 5195: workaround for now. */ -+ if (rc != 0 && nic->ctr_ap_bytes > 16 * 1024 * 1024) { -+ /* Try half the size for now. */ -+ nic->ctr_ap_bytes /= 2; -+ EFRM_WARN("Bug 5195 WORKAROUND: retrying iomap of %d bytes", -+ nic->ctr_ap_bytes); -+ rc = iomap_bar(lnic, nic->ctr_ap_bytes); -+ } -+ -+ if (rc < 0) { -+ EFRM_ERR("Failed (%d) to map bar (%d bytes)", -+ rc, nic->ctr_ap_bytes); -+ return rc; -+ } -+ -+ return rc; -+} -+ -+int -+linux_efrm_nic_ctor(struct linux_efhw_nic *lnic, struct pci_dev *dev, -+ spinlock_t *reg_lock, -+ unsigned nic_flags, unsigned nic_options) -+{ -+ struct efhw_device_type dev_type; -+ struct efhw_nic *nic = &lnic->efrm_nic.efhw_nic; -+ u8 class_revision; -+ int rc; -+ -+ rc = pci_read_config_byte(dev, PCI_CLASS_REVISION, &class_revision); -+ if (rc != 0) { -+ EFRM_ERR("%s: pci_read_config_byte failed (%d)", -+ __func__, rc); -+ return rc; -+ } -+ -+ if (!efhw_device_type_init(&dev_type, dev->vendor, dev->device, -+ class_revision)) { -+ EFRM_ERR("%s: efhw_device_type_init failed %04x:%04x(%d)", -+ __func__, (unsigned) dev->vendor, -+ (unsigned) dev->device, (int) class_revision); -+ return -ENODEV; -+ } -+ -+ 
EFRM_NOTICE("attaching device type %04x:%04x %d:%c%d", -+ (unsigned) dev->vendor, (unsigned) dev->device, -+ dev_type.arch, dev_type.variant, dev_type.revision); -+ -+ /* Initialise the adapter-structure. */ -+ efhw_nic_init(nic, nic_flags, nic_options, dev_type); -+ lnic->pci_dev = dev; -+ -+ rc = pci_enable_device(dev); -+ if (rc < 0) { -+ EFRM_ERR("%s: pci_enable_device failed (%d)", -+ __func__, rc); -+ return rc; -+ } -+ -+ lnic->ctr_ap_pci_addr = pci_resource_start(dev, nic->ctr_ap_bar); -+ -+ if (!pci_dma_supported(dev, (dma_addr_t)EFHW_DMA_ADDRMASK)) { -+ EFRM_ERR("%s: pci_dma_supported(%lx) failed", __func__, -+ (unsigned long)EFHW_DMA_ADDRMASK); -+ return -ENODEV; -+ } -+ -+ if (pci_set_dma_mask(dev, (dma_addr_t)EFHW_DMA_ADDRMASK)) { -+ EFRM_ERR("%s: pci_set_dma_mask(%lx) failed", __func__, -+ (unsigned long)EFHW_DMA_ADDRMASK); -+ return -ENODEV; -+ } -+ -+ if (pci_set_consistent_dma_mask(dev, (dma_addr_t)EFHW_DMA_ADDRMASK)) { -+ EFRM_ERR("%s: pci_set_consistent_dma_mask(%lx) failed", -+ __func__, (unsigned long)EFHW_DMA_ADDRMASK); -+ return -ENODEV; -+ } -+ -+ rc = linux_efhw_nic_map_ctr_ap(lnic); -+ if (rc < 0) -+ return rc; -+ -+ /* By default struct efhw_nic contains its own lock for protecting -+ * access to nic registers. We override it with a pointer to the -+ * lock in the net driver. This is needed when resource and net -+ * drivers share a single PCI function (falcon B series). -+ */ -+ nic->reg_lock = reg_lock; -+ return 0; -+} -+ -+void linux_efrm_nic_dtor(struct linux_efhw_nic *lnic) -+{ -+ struct efhw_nic *nic = &lnic->efrm_nic.efhw_nic; -+ volatile char __iomem *bar_ioaddr = nic->bar_ioaddr; -+ -+ efhw_nic_dtor(nic); -+ -+ /* Unmap the bar. 
*/ -+ EFRM_ASSERT(bar_ioaddr); -+ iounmap(bar_ioaddr); -+ nic->bar_ioaddr = 0; -+} -+ -+/**************************************************************************** -+ * -+ * efrm_tasklet - used to poll the eventq which may result in further callbacks -+ * -+ ****************************************************************************/ -+ -+static void efrm_tasklet(unsigned long pdev) -+{ -+ struct efhw_nic *nic = (struct efhw_nic *)pdev; -+ -+ EFRM_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT)); -+ -+ efhw_keventq_poll(nic, &nic->interrupting_evq); -+ EFRM_TRACE("%s: complete", __func__); -+} -+ -+/**************************************************************************** -+ * -+ * char driver specific interrupt callbacks -- run at hard IRQL -+ * -+ ****************************************************************************/ -+static void efrm_handle_eventq_irq(struct efhw_nic *nic, int evq) -+{ -+ /* NB. The interrupt must have already been acked (for legacy mode). */ -+ -+ EFRM_TRACE("%s: starting tasklet", __func__); -+ EFRM_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT)); -+ -+ tasklet_schedule(&linux_efhw_nic(nic)->tasklet); -+} -+ -+/* A count of how many NICs this driver knows about. */ -+static int n_nics_probed; -+ -+/**************************************************************************** -+ * -+ * efrm_nic_add: add the NIC to the resource driver -+ * -+ * NOTE: the flow of control through this routine is quite subtle -+ * because of the number of operations that can fail. We therefore -+ * take the apporaching of keeping the return code (rc) variable -+ * accurate, and only do operations while it is non-negative. Tear down -+ * is done at the end if rc is negative, depending on what has been set up -+ * by that point. -+ * -+ * So basically just make sure that any code you add checks rc>=0 before -+ * doing any work and you'll be fine. 
-+ * -+ ****************************************************************************/ -+int -+efrm_nic_add(struct pci_dev *dev, unsigned flags, const uint8_t *mac_addr, -+ struct linux_efhw_nic **lnic_out, spinlock_t *reg_lock, -+ int bt_min, int bt_lim, int non_irq_evq, -+ const struct vi_resource_dimensions *res_dim) -+{ -+ struct linux_efhw_nic *lnic = NULL; -+ struct efhw_nic *nic = NULL; -+ int count = 0, rc = 0, resources_init = 0; -+ int constructed = 0; -+ int registered_nic = 0; -+ int buffers_allocated = 0; -+ static unsigned nic_index; /* = 0; */ -+ -+ EFRM_TRACE("%s: device detected (Slot '%s', IRQ %d)", __func__, -+ pci_name(dev) ? pci_name(dev) : "?", dev->irq); -+ -+ /* Ensure that we have room for the new adapter-structure. */ -+ if (efrm_nic_tablep->nic_count == EFHW_MAX_NR_DEVS) { -+ EFRM_WARN("%s: WARNING: too many devices", __func__); -+ rc = -ENOMEM; -+ goto failed; -+ } -+ -+ if (n_nics_probed == 0) { -+ rc = efrm_resources_init(res_dim, bt_min, bt_lim); -+ if (rc != 0) -+ goto failed; -+ resources_init = 1; -+ } -+ -+ /* Allocate memory for the new adapter-structure. */ -+ lnic = kmalloc(sizeof(*lnic), GFP_KERNEL); -+ if (lnic == NULL) { -+ EFRM_ERR("%s: ERROR: failed to allocate memory", __func__); -+ rc = -ENOMEM; -+ goto failed; -+ } -+ memset(lnic, 0, sizeof(*lnic)); -+ nic = &lnic->efrm_nic.efhw_nic; -+ -+ lnic->ev_handlers = &ev_handler; -+ -+ /* OS specific hardware mappings */ -+ rc = linux_efrm_nic_ctor(lnic, dev, reg_lock, flags, nic_options); -+ if (rc < 0) { -+ EFRM_ERR("%s: ERROR: initialisation failed", __func__); -+ goto failed; -+ } -+ -+ constructed = 1; -+ -+ /* Tell the driver about the NIC - this needs to be done before the -+ resources managers get created below. Note we haven't initialised -+ the hardware yet, and I don't like doing this before the perhaps -+ unreliable hardware initialisation. However, there's quite a lot -+ of code to review if we wanted to hardware init before bringing -+ up the resource managers. 
*/ -+ rc = efrm_driver_register_nic(&lnic->efrm_nic, nic_index, -+ /* TODO: ifindex */ nic_index); -+ if (rc < 0) { -+ EFRM_ERR("%s: cannot register nic %d with nic error code %d", -+ __func__, efrm_nic_tablep->nic_count, rc); -+ goto failed; -+ } -+ ++nic_index; -+ registered_nic = 1; -+ -+ rc = efrm_nic_buffer_table_alloc(nic); -+ if (rc < 0) -+ goto failed; -+ buffers_allocated = 1; -+ -+ /****************************************************/ -+ /* hardware bringup */ -+ /****************************************************/ -+ /* Detecting hardware can be a slightly unreliable process; -+ we want to make sure that we maximise our chances, so we -+ loop a few times until all is good. */ -+ for (count = 0; count < max_hardware_init_repeats; count++) { -+ rc = efhw_nic_init_hardware(nic, &ev_handler, mac_addr, -+ non_irq_evq); -+ if (rc >= 0) -+ break; -+ -+ /* pain */ -+ EFRM_ERR -+ ("error - hardware initialisation failed code %d, " -+ "attempt %d of %d", rc, count + 1, -+ max_hardware_init_repeats); -+ } -+ if (rc < 0) -+ goto failed; -+ -+ tasklet_init(&lnic->tasklet, efrm_tasklet, (ulong)nic); -+ -+ /* set up interrupt handlers (hard-irq) */ -+ nic->irq_handler = &efrm_handle_eventq_irq; -+ -+ /* this device can now take management interrupts */ -+ if (do_irq && !(nic->flags & NIC_FLAG_NO_INTERRUPT)) { -+ rc = linux_efrm_irq_ctor(lnic); -+ if (rc < 0) { -+ EFRM_ERR("Interrupt initialisation failed (%d)", rc); -+ goto failed; -+ } -+ efhw_nic_set_interrupt_moderation(nic, -1, irq_moderation); -+ efhw_nic_interrupt_enable(nic); -+ } -+ EFRM_TRACE("interrupts are %sregistered", do_irq ? 
"" : "not "); -+ -+ *lnic_out = lnic; -+ EFRM_ASSERT(rc == 0); -+ ++n_nics_probed; -+ return 0; -+ -+failed: -+ if (buffers_allocated) -+ efrm_nic_buffer_table_free(nic); -+ if (registered_nic) -+ efrm_driver_unregister_nic(&lnic->efrm_nic); -+ if (constructed) -+ linux_efrm_nic_dtor(lnic); -+ kfree(lnic); /* safe in any case */ -+ if (resources_init) -+ efrm_resources_fini(); -+ return rc; -+} -+ -+/**************************************************************************** -+ * -+ * efrm_nic_del: Remove the nic from the resource driver structures -+ * -+ ****************************************************************************/ -+void efrm_nic_del(struct linux_efhw_nic *lnic) -+{ -+ struct efhw_nic *nic = &lnic->efrm_nic.efhw_nic; -+ -+ EFRM_TRACE("%s:", __func__); -+ EFRM_ASSERT(nic); -+ -+ efrm_nic_buffer_table_free(nic); -+ -+ efrm_driver_unregister_nic(&lnic->efrm_nic); -+ -+ /* -+ * Synchronise here with any running ISR. -+ * Remove the OS handler. There should be no IRQs being generated -+ * by our NIC at this point. -+ */ -+ if (efhw_nic_have_functional_units(nic)) { -+ efhw_nic_close_interrupts(nic); -+ linux_efrm_irq_dtor(lnic); -+ tasklet_kill(&lnic->tasklet); -+ } -+ -+ /* Close down hardware and free resources. */ -+ linux_efrm_nic_dtor(lnic); -+ kfree(lnic); -+ -+ if (--n_nics_probed == 0) -+ efrm_resources_fini(); -+ -+ EFRM_TRACE("%s: done", __func__); -+} -+ -+/**************************************************************************** -+ * -+ * init_module: register as a PCI driver. -+ * -+ ****************************************************************************/ -+static int init_sfc_resource(void) -+{ -+ int rc = 0; -+ -+ EFRM_TRACE("%s: RESOURCE driver starting", __func__); -+ -+ efrm_driver_ctor(); -+ -+ /* Register the driver so that our 'probe' function is called for -+ * each EtherFabric device in the system. 
-+ */ -+ rc = efrm_driverlink_register(); -+ if (rc == -ENODEV) -+ EFRM_ERR("%s: no devices found", __func__); -+ if (rc < 0) -+ goto failed_driverlink; -+ -+ if (efrm_install_proc_entries() != 0) { -+ /* Do not fail, but print a warning */ -+ EFRM_WARN("%s: WARNING: failed to install /proc entries", -+ __func__); -+ } -+ -+ return 0; -+ -+failed_driverlink: -+ efrm_driver_dtor(); -+ return rc; -+} -+ -+/**************************************************************************** -+ * -+ * cleanup_module: module-removal entry-point -+ * -+ ****************************************************************************/ -+static void cleanup_sfc_resource(void) -+{ -+ efrm_uninstall_proc_entries(); -+ -+ efrm_driverlink_unregister(); -+ -+ /* Clean up char-driver specific initialisation. -+ - driver dtor can use both work queue and buffer table entries */ -+ efrm_driver_dtor(); -+ -+ EFRM_TRACE("%s: unloaded", __func__); -+} -+ -+module_init(init_sfc_resource); -+module_exit(cleanup_sfc_resource); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/resource_manager.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,145 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains generic code for resources and resource managers. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include "efrm_internal.h" -+ -+/********************************************************************** -+ * struct efrm_resource_manager -+ */ -+ -+void efrm_resource_manager_dtor(struct efrm_resource_manager *rm) -+{ -+ EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm); -+ -+ /* call destructor */ -+ EFRM_DO_DEBUG(if (rm->rm_resources) -+ EFRM_ERR("%s: %s leaked %d resources", -+ __func__, rm->rm_name, rm->rm_resources)); -+ EFRM_ASSERT(rm->rm_resources == 0); -+ EFRM_ASSERT(list_empty(&rm->rm_resources_list)); -+ -+ rm->rm_dtor(rm); -+ -+ /* clear out things built by efrm_resource_manager_ctor */ -+ spin_lock_destroy(&rm->rm_lock); -+ -+ /* and the free the memory */ -+ EFRM_DO_DEBUG(memset(rm, 0, sizeof(*rm))); -+ kfree(rm); -+} -+ -+/* Construct a 
resource manager. Resource managers are singletons. */ -+int -+efrm_resource_manager_ctor(struct efrm_resource_manager *rm, -+ void (*dtor)(struct efrm_resource_manager *), -+ const char *name, unsigned type) -+{ -+ EFRM_ASSERT(rm); -+ EFRM_ASSERT(dtor); -+ -+ rm->rm_name = name; -+ EFRM_DO_DEBUG(rm->rm_type = type); -+ rm->rm_dtor = dtor; -+ spin_lock_init(&rm->rm_lock); -+ rm->rm_resources = 0; -+ rm->rm_resources_hiwat = 0; -+ INIT_LIST_HEAD(&rm->rm_resources_list); -+ EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm); -+ return 0; -+} -+ -+ -+void efrm_client_add_resource(struct efrm_client *client, -+ struct efrm_resource *rs) -+{ -+ struct efrm_resource_manager *rm; -+ irq_flags_t lock_flags; -+ -+ EFRM_ASSERT(client != NULL); -+ EFRM_ASSERT(rs != NULL); -+ -+ spin_lock_irqsave(&efrm_nic_tablep->lock, lock_flags); -+ rm = efrm_rm_table[EFRM_RESOURCE_TYPE(rs->rs_handle)]; -+ ++rm->rm_resources; -+ list_add(&rs->rs_manager_link, &rm->rm_resources_list); -+ if (rm->rm_resources > rm->rm_resources_hiwat) -+ rm->rm_resources_hiwat = rm->rm_resources; -+ rs->rs_client = client; -+ ++client->ref_count; -+ list_add(&rs->rs_client_link, &client->resources); -+ spin_unlock_irqrestore(&efrm_nic_tablep->lock, lock_flags); -+} -+ -+ -+void efrm_resource_ref(struct efrm_resource *rs) -+{ -+ irq_flags_t lock_flags; -+ spin_lock_irqsave(&efrm_nic_tablep->lock, lock_flags); -+ ++rs->rs_ref_count; -+ spin_unlock_irqrestore(&efrm_nic_tablep->lock, lock_flags); -+} -+EXPORT_SYMBOL(efrm_resource_ref); -+ -+ -+int __efrm_resource_release(struct efrm_resource *rs) -+{ -+ struct efrm_resource_manager *rm; -+ irq_flags_t lock_flags; -+ int free_rs; -+ -+ spin_lock_irqsave(&efrm_nic_tablep->lock, lock_flags); -+ free_rs = --rs->rs_ref_count == 0; -+ if (free_rs) { -+ rm = efrm_rm_table[EFRM_RESOURCE_TYPE(rs->rs_handle)]; -+ EFRM_ASSERT(rm->rm_resources > 0); -+ --rm->rm_resources; -+ list_del(&rs->rs_manager_link); -+ list_del(&rs->rs_client_link); -+ } -+ 
spin_unlock_irqrestore(&efrm_nic_tablep->lock, lock_flags); -+ return free_rs; -+} -+EXPORT_SYMBOL(__efrm_resource_release); -+ -+/* -+ * vi: sw=8:ai:aw -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/resources.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,94 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains resource managers initialisation functions. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+ -+int -+efrm_resources_init(const struct vi_resource_dimensions *vi_res_dim, -+ int buffer_table_min, int buffer_table_lim) -+{ -+ int i, rc; -+ -+ rc = efrm_buffer_table_ctor(buffer_table_min, buffer_table_lim); -+ if (rc != 0) -+ return rc; -+ -+ /* Create resources in the correct order */ -+ for (i = 0; i < EFRM_RESOURCE_NUM; ++i) { -+ struct efrm_resource_manager **rmp = &efrm_rm_table[i]; -+ -+ EFRM_ASSERT(*rmp == NULL); -+ switch (i) { -+ case EFRM_RESOURCE_VI: -+ rc = efrm_create_vi_resource_manager(rmp, -+ vi_res_dim); -+ break; -+ case EFRM_RESOURCE_FILTER: -+ rc = efrm_create_filter_resource_manager(rmp); -+ break; -+ case EFRM_RESOURCE_IOBUFSET: -+ rc = efrm_create_iobufset_resource_manager(rmp); -+ break; -+ default: -+ rc = 0; -+ break; -+ } -+ -+ if (rc < 0) { -+ EFRM_ERR("%s: failed type=%d (%d)", -+ __func__, i, rc); -+ efrm_buffer_table_dtor(); -+ return rc; -+ } -+ } -+ -+ return 0; -+} -+ -+void efrm_resources_fini(void) -+{ -+ int i; -+ -+ for (i = EFRM_RESOURCE_NUM - 1; i >= 0; --i) -+ if (efrm_rm_table[i]) { -+ efrm_resource_manager_dtor(efrm_rm_table[i]); -+ efrm_rm_table[i] = NULL; -+ } -+ -+ efrm_buffer_table_dtor(); -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/vi_resource_alloc.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,820 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains allocation of VI resources. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "efrm_internal.h" -+ -+ -+/*** Data definitions ****************************************************/ -+ -+static const char *dmaq_names[] = { "TX", "RX" }; -+ -+struct vi_resource_manager *efrm_vi_manager; -+ -+/*** Forward references **************************************************/ -+ -+static int -+efrm_vi_resource_alloc_or_free(struct efrm_client *client, -+ int alloc, struct vi_resource *evq_virs, -+ uint16_t vi_flags, int32_t evq_capacity, -+ int32_t txq_capacity, int32_t rxq_capacity, -+ uint8_t tx_q_tag, uint8_t rx_q_tag, -+ struct vi_resource **virs_in_out); -+ -+/*** Reference count handling ********************************************/ -+ -+static inline void 
efrm_vi_rm_get_ref(struct vi_resource *virs) -+{ -+ atomic_inc(&virs->evq_refs); -+} -+ -+static inline void efrm_vi_rm_drop_ref(struct vi_resource *virs) -+{ -+ EFRM_ASSERT(atomic_read(&virs->evq_refs) != 0); -+ if (atomic_dec_and_test(&virs->evq_refs)) -+ efrm_vi_resource_alloc_or_free(virs->rs.rs_client, false, NULL, -+ 0, 0, 0, 0, 0, 0, &virs); -+} -+ -+/*** Instance numbers ****************************************************/ -+ -+static inline int efrm_vi_rm_alloc_id(uint16_t vi_flags, int32_t evq_capacity) -+{ -+ irq_flags_t lock_flags; -+ int instance; -+ int rc; -+ -+ if (efrm_nic_tablep->a_nic == NULL) /* ?? FIXME: surely not right */ -+ return -ENODEV; -+ -+ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ -+ /* Falcon A1 RX phys addr wierdness. */ -+ if (efrm_nic_tablep->a_nic->devtype.variant == 'A' && -+ (vi_flags & EFHW_VI_RX_PHYS_ADDR_EN)) { -+ if (vi_flags & EFHW_VI_JUMBO_EN) { -+ /* Falcon-A cannot do phys + scatter. */ -+ EFRM_WARN -+ ("%s: falcon-A does not support phys+scatter mode", -+ __func__); -+ instance = -1; -+ } else if (efrm_vi_manager->iscsi_dmaq_instance_is_free -+ && evq_capacity == 0) { -+ /* Falcon-A has a single RXQ that gives the correct -+ * semantics for physical addressing. However, it -+ * happens to have the same instance number as the -+ * 'char' event queue, so we cannot also hand out -+ * the event queue. */ -+ efrm_vi_manager->iscsi_dmaq_instance_is_free = false; -+ instance = FALCON_A1_ISCSI_DMAQ; -+ } else { -+ EFRM_WARN("%s: iSCSI receive queue not free", -+ __func__); -+ instance = -1; -+ } -+ goto unlock_out; -+ } -+ -+ if (vi_flags & EFHW_VI_RM_WITH_INTERRUPT) { -+ rc = __kfifo_get(efrm_vi_manager->instances_with_interrupt, -+ (unsigned char *)&instance, sizeof(instance)); -+ if (rc != sizeof(instance)) { -+ EFRM_ASSERT(rc == 0); -+ instance = -1; -+ } -+ goto unlock_out; -+ } -+ -+ /* Otherwise a normal run-of-the-mill VI. 
*/ -+ rc = __kfifo_get(efrm_vi_manager->instances_with_timer, -+ (unsigned char *)&instance, sizeof(instance)); -+ if (rc != sizeof(instance)) { -+ EFRM_ASSERT(rc == 0); -+ instance = -1; -+ } -+ -+unlock_out: -+ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ return instance; -+} -+ -+static void efrm_vi_rm_free_id(int instance) -+{ -+ irq_flags_t lock_flags; -+ struct kfifo *instances; -+ -+ if (efrm_nic_tablep->a_nic == NULL) /* ?? FIXME: surely not right */ -+ return; -+ -+ if (efrm_nic_tablep->a_nic->devtype.variant == 'A' && -+ instance == FALCON_A1_ISCSI_DMAQ) { -+ EFRM_ASSERT(efrm_vi_manager->iscsi_dmaq_instance_is_free == -+ false); -+ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ efrm_vi_manager->iscsi_dmaq_instance_is_free = true; -+ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, -+ lock_flags); -+ } else { -+ if (instance >= efrm_vi_manager->with_timer_base && -+ instance < efrm_vi_manager->with_timer_limit) { -+ instances = efrm_vi_manager->instances_with_timer; -+ } else { -+ EFRM_ASSERT(instance >= -+ efrm_vi_manager->with_interrupt_base); -+ EFRM_ASSERT(instance < -+ efrm_vi_manager->with_interrupt_limit); -+ instances = efrm_vi_manager->instances_with_interrupt; -+ } -+ -+ EFRM_VERIFY_EQ(kfifo_put(instances, (unsigned char *)&instance, -+ sizeof(instance)), sizeof(instance)); -+ } -+} -+ -+/*** Queue sizes *********************************************************/ -+ -+/* NB. This should really take a nic as an argument, but that makes -+ * the buffer table allocation difficult. */ -+uint32_t efrm_vi_rm_evq_bytes(struct vi_resource *virs -+ /*,struct efhw_nic *nic */) -+{ -+ return virs->evq_capacity * sizeof(efhw_event_t); -+} -+EXPORT_SYMBOL(efrm_vi_rm_evq_bytes); -+ -+/* NB. This should really take a nic as an argument, but that makes -+ * the buffer table allocation difficult. 
*/ -+uint32_t efrm_vi_rm_txq_bytes(struct vi_resource *virs -+ /*,struct efhw_nic *nic */) -+{ -+ return virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] * -+ FALCON_DMA_TX_DESC_BYTES; -+} -+EXPORT_SYMBOL(efrm_vi_rm_txq_bytes); -+ -+/* NB. This should really take a nic as an argument, but that makes -+ * the buffer table allocation difficult. */ -+uint32_t efrm_vi_rm_rxq_bytes(struct vi_resource *virs -+ /*,struct efhw_nic *nic */) -+{ -+ uint32_t bytes_per_desc = ((virs->flags & EFHW_VI_RX_PHYS_ADDR_EN) -+ ? FALCON_DMA_RX_PHYS_DESC_BYTES -+ : FALCON_DMA_RX_BUF_DESC_BYTES); -+ return virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] * bytes_per_desc; -+} -+EXPORT_SYMBOL(efrm_vi_rm_rxq_bytes); -+ -+static int choose_size(int size_rq, unsigned sizes) -+{ -+ int size; -+ -+ /* size_rq < 0 means default, but we interpret this as 'minimum'. */ -+ -+ for (size = 256;; size <<= 1) -+ if ((size & sizes) && size >= size_rq) -+ return size; -+ else if ((sizes & ~((size - 1) | size)) == 0) -+ return -1; -+} -+ -+static int -+efrm_vi_rm_adjust_alloc_request(struct vi_resource *virs, struct efhw_nic *nic) -+{ -+ int capacity; -+ -+ EFRM_ASSERT(nic->efhw_func); -+ -+ if (virs->evq_capacity) { -+ capacity = choose_size(virs->evq_capacity, nic->evq_sizes); -+ if (capacity < 0) { -+ EFRM_ERR("vi_resource: bad evq size %d (supported=%x)", -+ virs->evq_capacity, nic->evq_sizes); -+ return -E2BIG; -+ } -+ virs->evq_capacity = capacity; -+ } -+ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) { -+ capacity = -+ choose_size(virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX], -+ nic->txq_sizes); -+ if (capacity < 0) { -+ EFRM_ERR("vi_resource: bad txq size %d (supported=%x)", -+ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX], -+ nic->txq_sizes); -+ return -E2BIG; -+ } -+ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] = capacity; -+ } -+ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) { -+ capacity = -+ choose_size(virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX], -+ nic->rxq_sizes); -+ if (capacity < 0) { 
-+ EFRM_ERR("vi_resource: bad rxq size %d (supported=%x)", -+ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX], -+ nic->rxq_sizes); -+ return -E2BIG; -+ } -+ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] = capacity; -+ } -+ -+ return 0; -+} -+ -+/* remove the reference to the event queue in this VI resource and decrement -+ the event queue's use count */ -+static inline void efrm_vi_rm_detach_evq(struct vi_resource *virs) -+{ -+ struct vi_resource *evq_virs; -+ -+ EFRM_ASSERT(virs != NULL); -+ -+ evq_virs = virs->evq_virs; -+ -+ if (evq_virs != NULL) { -+ virs->evq_virs = NULL; -+ if (evq_virs == virs) { -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT -+ " had internal event queue ", __func__, -+ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); -+ } else { -+ efrm_vi_rm_drop_ref(evq_virs); -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " had event queue " -+ EFRM_RESOURCE_FMT, __func__, -+ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle), -+ EFRM_RESOURCE_PRI_ARG(evq_virs->rs. -+ rs_handle)); -+ } -+ } else { -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT -+ " had no event queue (nothing to do)", -+ __func__, -+ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); -+ } -+} -+ -+/*** Buffer Table allocations ********************************************/ -+ -+static int -+efrm_vi_rm_alloc_or_free_buffer_table(struct vi_resource *virs, bool is_alloc) -+{ -+ uint32_t bytes; -+ int page_order; -+ int rc; -+ -+ if (!is_alloc) -+ goto destroy; -+ -+ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) { -+ bytes = efrm_vi_rm_txq_bytes(virs); -+ page_order = get_order(bytes); -+ rc = efrm_buffer_table_alloc(page_order, -+ (virs->dmaq_buf_tbl_alloc + -+ EFRM_VI_RM_DMA_QUEUE_TX)); -+ if (rc != 0) { -+ EFRM_TRACE -+ ("%s: Error %d allocating TX buffer table entry", -+ __func__, rc); -+ goto fail_txq_alloc; -+ } -+ } -+ -+ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) { -+ bytes = efrm_vi_rm_rxq_bytes(virs); -+ page_order = get_order(bytes); -+ rc = efrm_buffer_table_alloc(page_order, -+ (virs->dmaq_buf_tbl_alloc + -+ 
EFRM_VI_RM_DMA_QUEUE_RX)); -+ if (rc != 0) { -+ EFRM_TRACE -+ ("%s: Error %d allocating RX buffer table entry", -+ __func__, rc); -+ goto fail_rxq_alloc; -+ } -+ } -+ return 0; -+ -+destroy: -+ rc = 0; -+ -+ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) { -+ efrm_buffer_table_free(&virs-> -+ dmaq_buf_tbl_alloc -+ [EFRM_VI_RM_DMA_QUEUE_RX]); -+ } -+fail_rxq_alloc: -+ -+ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) { -+ efrm_buffer_table_free(&virs-> -+ dmaq_buf_tbl_alloc -+ [EFRM_VI_RM_DMA_QUEUE_TX]); -+ } -+fail_txq_alloc: -+ -+ return rc; -+} -+ -+/*** Per-NIC allocations *************************************************/ -+ -+static inline int -+efrm_vi_rm_init_evq(struct vi_resource *virs, struct efhw_nic *nic) -+{ -+ int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ struct eventq_resource_hardware *evq_hw = -+ &virs->nic_info.evq_pages; -+ uint32_t buf_bytes = efrm_vi_rm_evq_bytes(virs); -+ int rc; -+ -+ if (virs->evq_capacity == 0) -+ return 0; -+ evq_hw->capacity = virs->evq_capacity; -+ -+ /* Allocate buffer table entries to map onto the iobuffer. This -+ * currently allocates its own buffer table entries on Falcon which is -+ * a bit wasteful on a multi-NIC system. */ -+ evq_hw->buf_tbl_alloc.base = (unsigned)-1; -+ rc = efrm_buffer_table_alloc(get_order(buf_bytes), -+ &evq_hw->buf_tbl_alloc); -+ if (rc < 0) { -+ EFHW_WARN("%s: failed (%d) to alloc %d buffer table entries", -+ __func__, rc, get_order(buf_bytes)); -+ return rc; -+ } -+ -+ /* Allocate the event queue memory. 
*/ -+ rc = efhw_nic_event_queue_alloc_iobuffer(nic, evq_hw, instance, -+ buf_bytes); -+ if (rc != 0) { -+ EFRM_ERR("%s: Error allocating iobuffer: %d", __func__, rc); -+ efrm_buffer_table_free(&evq_hw->buf_tbl_alloc); -+ return rc; -+ } -+ -+ /* Initialise the event queue hardware */ -+ efhw_nic_event_queue_enable(nic, instance, virs->evq_capacity, -+ efhw_iopages_dma_addr(&evq_hw->iobuff) + -+ evq_hw->iobuff_off, -+ evq_hw->buf_tbl_alloc.base, -+ instance < 64); -+ -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " capacity=%u", __func__, -+ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle), -+ virs->evq_capacity); -+ -+#if defined(__ia64__) -+ /* Page size may be large, so for now just increase the -+ * size of the requested evq up to a round number of -+ * pages -+ */ -+ buf_bytes = CI_ROUNDUP(buf_bytes, PAGE_SIZE); -+#endif -+ EFRM_ASSERT(buf_bytes % PAGE_SIZE == 0); -+ -+ virs->mem_mmap_bytes += buf_bytes; -+ -+ return 0; -+} -+ -+static inline void -+efrm_vi_rm_fini_evq(struct vi_resource *virs, struct efhw_nic *nic) -+{ -+ int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ struct vi_resource_nic_info *nic_info = &virs->nic_info; -+ -+ if (virs->evq_capacity == 0) -+ return; -+ -+ /* Zero the timer-value for this queue. -+ And Tell NIC to stop using this event queue. */ -+ efhw_nic_event_queue_disable(nic, instance, 0); -+ -+ if (nic_info->evq_pages.buf_tbl_alloc.base != (unsigned)-1) -+ efrm_buffer_table_free(&nic_info->evq_pages.buf_tbl_alloc); -+ -+ efhw_iopages_free(nic, &nic_info->evq_pages.iobuff); -+} -+ -+/*! FIXME: we should make sure this number is never zero (=> unprotected) */ -+/*! FIXME: put this definition in a relevant header (e.g. 
as (evqid)+1) */ -+#define EFAB_EVQ_OWNER_ID(evqid) ((evqid)) -+ -+void -+efrm_vi_rm_init_dmaq(struct vi_resource *virs, int queue_type, -+ struct efhw_nic *nic) -+{ -+ int instance; -+ int evq_instance; -+ efhw_buffer_addr_t buf_addr; -+ -+ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ evq_instance = EFRM_RESOURCE_INSTANCE(virs->evq_virs->rs.rs_handle); -+ -+ buf_addr = virs->dmaq_buf_tbl_alloc[queue_type].base; -+ -+ if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX) { -+ efhw_nic_dmaq_tx_q_init(nic, -+ instance, /* dmaq */ -+ evq_instance, /* evq */ -+ EFAB_EVQ_OWNER_ID(evq_instance), /* owner */ -+ virs->dmaq_tag[queue_type], /* tag */ -+ virs->dmaq_capacity[queue_type], /* size of queue */ -+ buf_addr, /* buffer index */ -+ virs->flags); /* user specified Q attrs */ -+ } else { -+ efhw_nic_dmaq_rx_q_init(nic, -+ instance, /* dmaq */ -+ evq_instance, /* evq */ -+ EFAB_EVQ_OWNER_ID(evq_instance), /* owner */ -+ virs->dmaq_tag[queue_type], /* tag */ -+ virs->dmaq_capacity[queue_type], /* size of queue */ -+ buf_addr, /* buffer index */ -+ virs->flags); /* user specified Q attrs */ -+ } -+} -+ -+static int -+efrm_vi_rm_init_or_fini_dmaq(struct vi_resource *virs, -+ int queue_type, int init, -+ struct efhw_nic *nic) -+{ -+ int rc; -+ int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ uint32_t buf_bytes; -+ struct vi_resource_nic_info *nic_info = &virs->nic_info; -+ int page_order; -+ uint32_t num_pages; -+ struct efhw_iopages *iobuff; -+ -+ if (!init) -+ goto destroy; -+ -+ /* Ignore disabled queues. */ -+ if (virs->dmaq_capacity[queue_type] == 0) { -+ if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX) -+ efhw_nic_dmaq_tx_q_disable(nic, instance); -+ else -+ efhw_nic_dmaq_rx_q_disable(nic, instance); -+ return 0; -+ } -+ -+ buf_bytes = (queue_type == EFRM_VI_RM_DMA_QUEUE_TX -+ ? 
efrm_vi_rm_txq_bytes(virs) -+ : efrm_vi_rm_rxq_bytes(virs)); -+ -+ page_order = get_order(buf_bytes); -+ -+ rc = efhw_iopages_alloc(nic, &nic_info->dmaq_pages[queue_type], -+ page_order); -+ if (rc != 0) { -+ EFRM_ERR("%s: Failed to allocate %s DMA buffer.", __func__, -+ dmaq_names[queue_type]); -+ goto fail_iopages; -+ } -+ -+ num_pages = 1 << page_order; -+ iobuff = &nic_info->dmaq_pages[queue_type]; -+ efhw_nic_buffer_table_set_n(nic, -+ virs->dmaq_buf_tbl_alloc[queue_type].base, -+ efhw_iopages_dma_addr(iobuff), -+ EFHW_NIC_PAGE_SIZE, 0, num_pages, 0); -+ -+ falcon_nic_buffer_table_confirm(nic); -+ -+ virs->mem_mmap_bytes += roundup(buf_bytes, PAGE_SIZE); -+ -+ /* Make sure there is an event queue. */ -+ if (virs->evq_virs->evq_capacity <= 0) { -+ EFRM_ERR("%s: Cannot use empty event queue for %s DMA", -+ __func__, dmaq_names[queue_type]); -+ rc = -EINVAL; -+ goto fail_evq; -+ } -+ -+ efrm_vi_rm_init_dmaq(virs, queue_type, nic); -+ -+ return 0; -+ -+destroy: -+ rc = 0; -+ -+ /* Ignore disabled queues. */ -+ if (virs->dmaq_capacity[queue_type] == 0) -+ return 0; -+ -+ /* Ensure TX pacing turned off -- queue flush doesn't reset this. */ -+ if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX) -+ falcon_nic_pace(nic, instance, 0); -+ -+ /* No need to disable the queue here. Nobody is using it anyway. 
*/ -+ -+fail_evq: -+ efhw_iopages_free(nic, &nic_info->dmaq_pages[queue_type]); -+fail_iopages: -+ -+ return rc; -+} -+ -+static int -+efrm_vi_rm_init_or_fini_nic(struct vi_resource *virs, int init, -+ struct efhw_nic *nic) -+{ -+ int rc; -+#ifndef NDEBUG -+ int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+#endif -+ -+ if (!init) -+ goto destroy; -+ -+ rc = efrm_vi_rm_init_evq(virs, nic); -+ if (rc != 0) -+ goto fail_evq; -+ -+ rc = efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX, -+ init, nic); -+ if (rc != 0) -+ goto fail_txq; -+ -+ rc = efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX, -+ init, nic); -+ if (rc != 0) -+ goto fail_rxq; -+ -+ /* Allocate space for the control page. */ -+ EFRM_ASSERT(falcon_tx_dma_page_offset(instance) < PAGE_SIZE); -+ EFRM_ASSERT(falcon_rx_dma_page_offset(instance) < PAGE_SIZE); -+ EFRM_ASSERT(falcon_timer_page_offset(instance) < PAGE_SIZE); -+ virs->bar_mmap_bytes += PAGE_SIZE; -+ -+ return 0; -+ -+destroy: -+ rc = 0; -+ -+ efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX, -+ false, nic); -+fail_rxq: -+ -+ efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX, -+ false, nic); -+fail_txq: -+ -+ efrm_vi_rm_fini_evq(virs, nic); -+fail_evq: -+ -+ EFRM_ASSERT(rc != 0 || !init); -+ return rc; -+} -+ -+static int -+efrm_vi_resource_alloc_or_free(struct efrm_client *client, -+ int alloc, struct vi_resource *evq_virs, -+ uint16_t vi_flags, int32_t evq_capacity, -+ int32_t txq_capacity, int32_t rxq_capacity, -+ uint8_t tx_q_tag, uint8_t rx_q_tag, -+ struct vi_resource **virs_in_out) -+{ -+ struct efhw_nic *nic = client->nic; -+ struct vi_resource *virs; -+ int rc; -+ int instance; -+ -+ EFRM_ASSERT(virs_in_out); -+ EFRM_ASSERT(efrm_vi_manager); -+ EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_vi_manager->rm); -+ -+ if (!alloc) -+ goto destroy; -+ -+ rx_q_tag &= (1 << TX_DESCQ_LABEL_WIDTH) - 1; -+ tx_q_tag &= (1 << RX_DESCQ_LABEL_WIDTH) - 1; -+ -+ virs = kmalloc(sizeof(*virs), GFP_KERNEL); -+ if 
(virs == NULL) { -+ EFRM_ERR("%s: Error allocating VI resource object", -+ __func__); -+ rc = -ENOMEM; -+ goto fail_alloc; -+ } -+ memset(virs, 0, sizeof(*virs)); -+ -+ /* Some macros make the assumption that the struct efrm_resource is -+ * the first member of a struct vi_resource. */ -+ EFRM_ASSERT(&virs->rs == (struct efrm_resource *) (virs)); -+ -+ instance = efrm_vi_rm_alloc_id(vi_flags, evq_capacity); -+ if (instance < 0) { -+ /* Clear out the close list... */ -+ efrm_vi_rm_salvage_flushed_vis(); -+ instance = efrm_vi_rm_alloc_id(vi_flags, evq_capacity); -+ if (instance >= 0) -+ EFRM_TRACE("%s: Salvaged a closed VI.", __func__); -+ } -+ -+ if (instance < 0) { -+ /* Could flush resources and try again here. */ -+ EFRM_ERR("%s: Out of appropriate VI resources", __func__); -+ rc = -EBUSY; -+ goto fail_alloc_id; -+ } -+ -+ EFRM_TRACE("%s: new VI ID %d", __func__, instance); -+ efrm_resource_init(&virs->rs, EFRM_RESOURCE_VI, instance); -+ -+ /* Start with one reference. Any external VIs using the EVQ of this -+ * resource will increment this reference rather than the resource -+ * reference to avoid DMAQ flushes from waiting for other DMAQ -+ * flushes to complete. When the resource reference goes to zero, -+ * the DMAQ flush happens. When the flush completes, this reference -+ * is decremented. When this reference reaches zero, the instance -+ * is freed. */ -+ atomic_set(&virs->evq_refs, 1); -+ -+ virs->bar_mmap_bytes = 0; -+ virs->mem_mmap_bytes = 0; -+ virs->evq_capacity = evq_capacity; -+ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] = txq_capacity; -+ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] = rxq_capacity; -+ virs->dmaq_tag[EFRM_VI_RM_DMA_QUEUE_TX] = tx_q_tag; -+ virs->dmaq_tag[EFRM_VI_RM_DMA_QUEUE_RX] = rx_q_tag; -+ virs->flags = vi_flags; -+ INIT_LIST_HEAD(&virs->tx_flush_link); -+ INIT_LIST_HEAD(&virs->rx_flush_link); -+ virs->tx_flushing = 0; -+ virs->rx_flushing = 0; -+ -+ /* Adjust the queue sizes. 
*/ -+ rc = efrm_vi_rm_adjust_alloc_request(virs, nic); -+ if (rc != 0) -+ goto fail_adjust_request; -+ -+ /* Attach the EVQ early so that we can ensure that the NIC sets -+ * match. */ -+ if (evq_virs == NULL) { -+ evq_virs = virs; -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT -+ " has no external event queue", __func__, -+ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); -+ } else { -+ /* Make sure the resource managers are the same. */ -+ if (EFRM_RESOURCE_TYPE(evq_virs->rs.rs_handle) != -+ EFRM_RESOURCE_VI) { -+ EFRM_ERR("%s: Mismatched owner for event queue VI " -+ EFRM_RESOURCE_FMT, __func__, -+ EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle)); -+ return -EINVAL; -+ } -+ EFRM_ASSERT(atomic_read(&evq_virs->evq_refs) != 0); -+ efrm_vi_rm_get_ref(evq_virs); -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " uses event queue " -+ EFRM_RESOURCE_FMT, -+ __func__, -+ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle), -+ EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle)); -+ } -+ virs->evq_virs = evq_virs; -+ -+ rc = efrm_vi_rm_alloc_or_free_buffer_table(virs, true); -+ if (rc != 0) -+ goto fail_buffer_table; -+ -+ rc = efrm_vi_rm_init_or_fini_nic(virs, true, nic); -+ if (rc != 0) -+ goto fail_init_nic; -+ -+ efrm_client_add_resource(client, &virs->rs); -+ *virs_in_out = virs; -+ EFRM_TRACE("%s: Allocated " EFRM_RESOURCE_FMT, __func__, -+ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); -+ return 0; -+ -+destroy: -+ virs = *virs_in_out; -+ EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 1); -+ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ -+ EFRM_TRACE("%s: Freeing %d", __func__, -+ EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)); -+ -+ /* Destroying the VI. The reference count must be zero. */ -+ EFRM_ASSERT(atomic_read(&virs->evq_refs) == 0); -+ -+ /* The EVQ should have gone (and DMA disabled) so that this -+ * function can't be re-entered to destroy the EVQ VI. 
*/ -+ EFRM_ASSERT(virs->evq_virs == NULL); -+ rc = 0; -+ -+fail_init_nic: -+ efrm_vi_rm_init_or_fini_nic(virs, false, nic); -+ -+ efrm_vi_rm_alloc_or_free_buffer_table(virs, false); -+fail_buffer_table: -+ -+ efrm_vi_rm_detach_evq(virs); -+ -+fail_adjust_request: -+ -+ EFRM_ASSERT(virs->evq_callback_fn == NULL); -+ EFRM_TRACE("%s: delete VI ID %d", __func__, instance); -+ efrm_vi_rm_free_id(instance); -+fail_alloc_id: -+ if (!alloc) -+ efrm_client_put(virs->rs.rs_client); -+ EFRM_DO_DEBUG(memset(virs, 0, sizeof(*virs))); -+ kfree(virs); -+fail_alloc: -+ *virs_in_out = NULL; -+ -+ return rc; -+} -+ -+/*** Resource object ****************************************************/ -+ -+int -+efrm_vi_resource_alloc(struct efrm_client *client, -+ struct vi_resource *evq_virs, -+ uint16_t vi_flags, int32_t evq_capacity, -+ int32_t txq_capacity, int32_t rxq_capacity, -+ uint8_t tx_q_tag, uint8_t rx_q_tag, -+ struct vi_resource **virs_out, -+ uint32_t *out_io_mmap_bytes, -+ uint32_t *out_mem_mmap_bytes, -+ uint32_t *out_txq_capacity, uint32_t *out_rxq_capacity) -+{ -+ int rc; -+ EFRM_ASSERT(client != NULL); -+ rc = efrm_vi_resource_alloc_or_free(client, true, evq_virs, vi_flags, -+ evq_capacity, txq_capacity, -+ rxq_capacity, tx_q_tag, rx_q_tag, -+ virs_out); -+ if (rc == 0) { -+ if (out_io_mmap_bytes != NULL) -+ *out_io_mmap_bytes = (*virs_out)->bar_mmap_bytes; -+ if (out_mem_mmap_bytes != NULL) -+ *out_mem_mmap_bytes = (*virs_out)->mem_mmap_bytes; -+ if (out_txq_capacity != NULL) -+ *out_txq_capacity = -+ (*virs_out)->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]; -+ if (out_rxq_capacity != NULL) -+ *out_rxq_capacity = -+ (*virs_out)->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]; -+ } -+ -+ return rc; -+} -+EXPORT_SYMBOL(efrm_vi_resource_alloc); -+ -+void efrm_vi_rm_free_flushed_resource(struct vi_resource *virs) -+{ -+ EFRM_ASSERT(virs != NULL); -+ EFRM_ASSERT(virs->rs.rs_ref_count == 0); -+ -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __func__, -+ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); 
-+ /* release the associated event queue then drop our own reference -+ * count */ -+ efrm_vi_rm_detach_evq(virs); -+ efrm_vi_rm_drop_ref(virs); -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/vi_resource_event.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,250 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains event handling for VI resource. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include "efrm_internal.h" -+ -+ -+static inline int -+efrm_eventq_bytes(struct vi_resource *virs) -+{ -+ return efrm_vi_rm_evq_bytes(virs); -+} -+ -+ -+static inline efhw_event_t * -+efrm_eventq_base(struct vi_resource *virs) -+{ -+ struct eventq_resource_hardware *hw; -+ hw = &(virs->nic_info.evq_pages); -+ return (efhw_event_t *) (efhw_iopages_ptr(&(hw->iobuff)) + -+ hw->iobuff_off); -+} -+ -+ -+void -+efrm_eventq_request_wakeup(struct vi_resource *virs, unsigned current_ptr) -+{ -+ struct efhw_nic *nic = virs->rs.rs_client->nic; -+ int next_i; -+ next_i = ((current_ptr / sizeof(efhw_event_t)) & -+ (virs->evq_capacity - 1)); -+ -+ efhw_nic_wakeup_request(nic, efrm_eventq_dma_addr(virs), next_i, -+ EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)); -+} -+EXPORT_SYMBOL(efrm_eventq_request_wakeup); -+ -+void efrm_eventq_reset(struct vi_resource *virs) -+{ -+ struct efhw_nic *nic = virs->rs.rs_client->nic; -+ int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ -+ EFRM_ASSERT(virs->evq_capacity != 0); -+ -+ /* FIXME: Protect against concurrent resets. */ -+ -+ efhw_nic_event_queue_disable(nic, instance, 0); -+ -+ memset(efrm_eventq_base(virs), EFHW_CLEAR_EVENT_VALUE, -+ efrm_eventq_bytes(virs)); -+ efhw_nic_event_queue_enable(nic, instance, virs->evq_capacity, -+ efrm_eventq_dma_addr(virs), -+ virs->nic_info.evq_pages. 
-+ buf_tbl_alloc.base, -+ instance < 64); -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __func__, -+ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); -+} -+EXPORT_SYMBOL(efrm_eventq_reset); -+ -+int -+efrm_eventq_register_callback(struct vi_resource *virs, -+ void (*handler) (void *, int, -+ struct efhw_nic *nic), -+ void *arg) -+{ -+ struct efrm_nic_per_vi *cb_info; -+ int instance; -+ int bit; -+ -+ EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0); -+ EFRM_ASSERT(virs->evq_capacity != 0); -+ EFRM_ASSERT(handler != NULL); -+ -+ /* ?? TODO: Get rid of this test when client is compulsory. */ -+ if (virs->rs.rs_client == NULL) { -+ EFRM_ERR("%s: no client", __func__); -+ return -EINVAL; -+ } -+ -+ virs->evq_callback_arg = arg; -+ virs->evq_callback_fn = handler; -+ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ cb_info = &efrm_nic(virs->rs.rs_client->nic)->vis[instance]; -+ -+ /* The handler can be set only once. */ -+ bit = test_and_set_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED, -+ &cb_info->state); -+ if (bit) -+ return -EBUSY; -+ cb_info->vi = virs; -+ -+ return 0; -+} -+EXPORT_SYMBOL(efrm_eventq_register_callback); -+ -+void efrm_eventq_kill_callback(struct vi_resource *virs) -+{ -+ struct efrm_nic_per_vi *cb_info; -+ int32_t evq_state; -+ int instance; -+ int bit; -+ -+ EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0); -+ EFRM_ASSERT(virs->evq_capacity != 0); -+ EFRM_ASSERT(virs->rs.rs_client != NULL); -+ -+ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ cb_info = &efrm_nic(virs->rs.rs_client->nic)->vis[instance]; -+ cb_info->vi = NULL; -+ -+ /* Disable the timer. */ -+ efhw_nic_event_queue_disable(virs->rs.rs_client->nic, -+ instance, /*timer_only */ 1); -+ -+ /* Disable the callback. */ -+ bit = test_and_clear_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED, -+ &cb_info->state); -+ EFRM_ASSERT(bit); /* do not call me twice! */ -+ -+ /* Spin until the callback is complete. 
*/ -+ do { -+ rmb(); -+ -+ udelay(1); -+ evq_state = cb_info->state; -+ } while ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY))); -+ -+ virs->evq_callback_fn = NULL; -+} -+EXPORT_SYMBOL(efrm_eventq_kill_callback); -+ -+static void -+efrm_eventq_do_callback(struct efhw_nic *nic, unsigned instance, -+ bool is_timeout) -+{ -+ struct efrm_nic *rnic = efrm_nic(nic); -+ void (*handler) (void *, int is_timeout, struct efhw_nic *nic); -+ void *arg; -+ struct efrm_nic_per_vi *cb_info; -+ int32_t evq_state; -+ int32_t new_evq_state; -+ struct vi_resource *virs; -+ int bit; -+ -+ EFRM_ASSERT(efrm_vi_manager); -+ -+ cb_info = &rnic->vis[instance]; -+ -+ /* Set the BUSY bit and clear WAKEUP_PENDING. Do this -+ * before waking up the sleeper to avoid races. */ -+ while (1) { -+ evq_state = cb_info->state; -+ new_evq_state = evq_state; -+ -+ if ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY)) != 0) { -+ EFRM_ERR("%s:%d: evq_state[%d] corrupted!", -+ __func__, __LINE__, instance); -+ return; -+ } -+ -+ if (!is_timeout) -+ new_evq_state &= ~VI_RESOURCE_EVQ_STATE(WAKEUP_PENDING); -+ -+ if (evq_state & VI_RESOURCE_EVQ_STATE(CALLBACK_REGISTERED)) { -+ new_evq_state |= VI_RESOURCE_EVQ_STATE(BUSY); -+ virs = cb_info->vi; -+ if (cmpxchg(&cb_info->state, evq_state, -+ new_evq_state) == evq_state) -+ break; -+ } else { -+ /* Just update the state if necessary. */ -+ if (new_evq_state == evq_state || -+ cmpxchg(&cb_info->state, evq_state, -+ new_evq_state) == evq_state) -+ return; -+ } -+ } -+ -+ if (virs) { -+ handler = virs->evq_callback_fn; -+ arg = virs->evq_callback_arg; -+ EFRM_ASSERT(handler != NULL); -+ handler(arg, is_timeout, nic); -+ } -+ -+ /* Clear the BUSY bit. 
*/ -+ bit = -+ test_and_clear_bit(VI_RESOURCE_EVQ_STATE_BUSY, -+ &cb_info->state); -+ if (!bit) { -+ EFRM_ERR("%s:%d: evq_state corrupted!", -+ __func__, __LINE__); -+ } -+} -+ -+void efrm_handle_wakeup_event(struct efhw_nic *nic, unsigned instance) -+{ -+ efrm_eventq_do_callback(nic, instance, false); -+} -+ -+void efrm_handle_timeout_event(struct efhw_nic *nic, unsigned instance) -+{ -+ efrm_eventq_do_callback(nic, instance, true); -+} -+ -+void efrm_handle_sram_event(struct efhw_nic *nic) -+{ -+ if (nic->buf_commit_outstanding > 0) -+ nic->buf_commit_outstanding--; -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/vi_resource_flush.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,483 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains DMA queue flushing of VI resources. -+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "efrm_internal.h" -+ -+ -+/* can fail as workitem can already be scheuled -- ignore failure */ -+#define EFRM_VI_RM_DELAYED_FREE(manager) \ -+ queue_work(manager->workqueue, &manager->work_item) -+ -+static const int flush_fifo_hwm = 8 /* TODO should be a HW specific const */ ; -+ -+static void -+efrm_vi_resource_rx_flush_done(struct vi_resource *virs, bool *completed) -+{ -+ /* We should only get a flush event if there is a flush -+ * outstanding. */ -+ EFRM_ASSERT(virs->rx_flush_outstanding); -+ -+ virs->rx_flush_outstanding = 0; -+ virs->rx_flushing = 0; -+ -+ list_del(&virs->rx_flush_link); -+ efrm_vi_manager->rx_flush_outstanding_count--; -+ -+ if (virs->tx_flushing == 0) { -+ list_add_tail(&virs->rx_flush_link, -+ &efrm_vi_manager->close_pending); -+ *completed = 1; -+ } -+} -+ -+static void -+efrm_vi_resource_tx_flush_done(struct vi_resource *virs, bool *completed) -+{ -+ /* We should only get a flush event if there is a flush -+ * outstanding. 
*/ -+ EFRM_ASSERT(virs->tx_flushing); -+ -+ virs->tx_flushing = 0; -+ -+ list_del(&virs->tx_flush_link); -+ -+ if (virs->rx_flushing == 0) { -+ list_add_tail(&virs->rx_flush_link, -+ &efrm_vi_manager->close_pending); -+ *completed = 1; -+ } -+} -+ -+static void -+efrm_vi_resource_issue_rx_flush(struct vi_resource *virs, bool *completed) -+{ -+ struct efhw_nic *nic = virs->rs.rs_client->nic; -+ int instance; -+ int rc; -+ -+ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ -+ list_add_tail(&virs->rx_flush_link, -+ &efrm_vi_manager->rx_flush_outstanding_list); -+ virs->rx_flush_outstanding = virs->rx_flushing; -+ efrm_vi_manager->rx_flush_outstanding_count++; -+ -+ EFRM_TRACE("%s: rx queue %d flush requested for nic %d", -+ __func__, instance, nic->index); -+ rc = efhw_nic_flush_rx_dma_channel(nic, instance); -+ if (rc == -EAGAIN) -+ efrm_vi_resource_rx_flush_done(virs, completed); -+} -+ -+static void -+efrm_vi_resource_issue_tx_flush(struct vi_resource *virs, bool *completed) -+{ -+ struct efhw_nic *nic = virs->rs.rs_client->nic; -+ int instance; -+ int rc; -+ -+ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ -+ list_add_tail(&virs->tx_flush_link, -+ &efrm_vi_manager->tx_flush_outstanding_list); -+ -+ EFRM_TRACE("%s: tx queue %d flush requested for nic %d", -+ __func__, instance, nic->index); -+ rc = efhw_nic_flush_tx_dma_channel(nic, instance); -+ if (rc == -EAGAIN) -+ efrm_vi_resource_tx_flush_done(virs, completed); -+} -+ -+static void efrm_vi_resource_process_waiting_flushes(bool *completed) -+{ -+ struct vi_resource *virs; -+ -+ while (efrm_vi_manager->rx_flush_outstanding_count < flush_fifo_hwm && -+ !list_empty(&efrm_vi_manager->rx_flush_waiting_list)) { -+ virs = -+ list_entry(list_pop -+ (&efrm_vi_manager->rx_flush_waiting_list), -+ struct vi_resource, rx_flush_link); -+ efrm_vi_resource_issue_rx_flush(virs, completed); -+ } -+} -+ -+#if BUG7916_WORKAROUND || BUG5302_WORKAROUND -+static void -+efrm_vi_resource_flush_retry_vi(struct 
vi_resource *virs, -+ int64_t time_now, bool *completed) -+{ -+ struct efhw_nic *nic; -+ int instance; -+ -+ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ -+ virs->flush_count++; -+ virs->flush_time = time_now; -+ nic = virs->rs.rs_client->nic; -+ -+#if BUG7916_WORKAROUND -+ if (virs->rx_flush_outstanding) { -+ EFRM_TRACE("%s: Retrying RX flush on instance %d", -+ __func__, instance); -+ -+ list_del(&virs->rx_flush_link); -+ efrm_vi_manager->rx_flush_outstanding_count--; -+ efrm_vi_resource_issue_rx_flush(virs, completed); -+ efrm_vi_resource_process_waiting_flushes(completed); -+ } -+#endif -+ -+#if BUG5302_WORKAROUND -+ if (virs->tx_flushing) { -+ if (virs->flush_count > 5) { -+ EFRM_TRACE("%s: VI resource stuck flush pending " -+ "(instance=%d, count=%d)", -+ __func__, instance, virs->flush_count); -+ falcon_clobber_tx_dma_ptrs(nic, instance); -+ } else { -+ EFRM_TRACE("%s: Retrying TX flush on instance %d", -+ __func__, instance); -+ } -+ -+ list_del(&virs->tx_flush_link); -+ efrm_vi_resource_issue_tx_flush(virs, completed); -+ } -+#endif -+} -+#endif -+ -+int efrm_vi_resource_flush_retry(struct vi_resource *virs) -+{ -+#if BUG7916_WORKAROUND || BUG5302_WORKAROUND -+ irq_flags_t lock_flags; -+ bool completed = false; -+ -+ if (virs->rx_flushing == 0 && virs->tx_flushing == 0) -+ return -EALREADY; -+ -+ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ efrm_vi_resource_flush_retry_vi(virs, get_jiffies_64(), &completed); -+ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ -+ if (completed) -+ EFRM_VI_RM_DELAYED_FREE(efrm_vi_manager); -+#endif -+ -+ return 0; -+} -+EXPORT_SYMBOL(efrm_vi_resource_flush_retry); -+ -+#if BUG7916_WORKAROUND || BUG5302_WORKAROUND -+/* resource manager lock should be taken before this call */ -+static void efrm_vi_handle_flush_loss(bool *completed) -+{ -+ struct list_head *pos, *temp; -+ struct vi_resource *virs; -+ int64_t time_now, time_pending; -+ -+ /* It's possible we miss flushes - the 
list is sorted in order we -+ * generate flushes, see if any are very old. It's also possible -+ * that we decide an endpoint is flushed even though we've not -+ * received all the flush events. We *should * mark as -+ * completed, reclaim and loop again. ?? -+ * THIS NEEDS BACKPORTING FROM THE FALCON branch -+ */ -+ time_now = get_jiffies_64(); -+ -+#if BUG7916_WORKAROUND -+ list_for_each_safe(pos, temp, -+ &efrm_vi_manager->rx_flush_outstanding_list) { -+ virs = container_of(pos, struct vi_resource, rx_flush_link); -+ -+ time_pending = time_now - virs->flush_time; -+ -+ /* List entries are held in reverse chronological order. Only -+ * process the old ones. */ -+ if (time_pending <= 0x100000000LL) -+ break; -+ -+ efrm_vi_resource_flush_retry_vi(virs, time_now, completed); -+ } -+#endif -+ -+#if BUG5302_WORKAROUND -+ list_for_each_safe(pos, temp, -+ &efrm_vi_manager->tx_flush_outstanding_list) { -+ virs = container_of(pos, struct vi_resource, tx_flush_link); -+ -+ time_pending = time_now - virs->flush_time; -+ -+ /* List entries are held in reverse chronological order. -+ * Only process the old ones. 
*/ -+ if (time_pending <= 0x100000000LL) -+ break; -+ -+ efrm_vi_resource_flush_retry_vi(virs, time_now, completed); -+ } -+#endif -+} -+#endif -+ -+void -+efrm_vi_register_flush_callback(struct vi_resource *virs, -+ void (*handler)(void *), void *arg) -+{ -+ if (handler == NULL) { -+ virs->flush_callback_fn = handler; -+ wmb(); -+ virs->flush_callback_arg = arg; -+ } else { -+ virs->flush_callback_arg = arg; -+ wmb(); -+ virs->flush_callback_fn = handler; -+ } -+} -+EXPORT_SYMBOL(efrm_vi_register_flush_callback); -+ -+int efrm_pt_flush(struct vi_resource *virs) -+{ -+ int instance; -+ irq_flags_t lock_flags; -+ bool completed = false; -+ -+ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ -+ EFRM_ASSERT(virs->rx_flushing == 0); -+ EFRM_ASSERT(virs->rx_flush_outstanding == 0); -+ EFRM_ASSERT(virs->tx_flushing == 0); -+ -+ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " EVQ=%d TXQ=%d RXQ=%d", -+ __func__, EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle), -+ virs->evq_capacity, -+ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX], -+ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]); -+ -+ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ -+ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] != 0) -+ virs->rx_flushing = 1; -+ -+ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] != 0) -+ virs->tx_flushing = 1; -+ -+ /* Clean up immediately if there are no flushes. */ -+ if (virs->rx_flushing == 0 && virs->tx_flushing == 0) { -+ list_add_tail(&virs->rx_flush_link, -+ &efrm_vi_manager->close_pending); -+ completed = true; -+ } -+ -+ /* Issue the RX flush if possible or queue it for later. 
*/ -+ if (virs->rx_flushing) { -+#if BUG7916_WORKAROUND || BUG5302_WORKAROUND -+ if (efrm_vi_manager->rx_flush_outstanding_count >= -+ flush_fifo_hwm) -+ efrm_vi_handle_flush_loss(&completed); -+#endif -+ if (efrm_vi_manager->rx_flush_outstanding_count >= -+ flush_fifo_hwm) { -+ list_add_tail(&virs->rx_flush_link, -+ &efrm_vi_manager->rx_flush_waiting_list); -+ } else { -+ efrm_vi_resource_issue_rx_flush(virs, &completed); -+ } -+ } -+ -+ /* Issue the TX flush. There's no limit to the number of -+ * outstanding TX flushes. */ -+ if (virs->tx_flushing) -+ efrm_vi_resource_issue_tx_flush(virs, &completed); -+ -+ virs->flush_time = get_jiffies_64(); -+ -+ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ -+ if (completed) -+ EFRM_VI_RM_DELAYED_FREE(efrm_vi_manager); -+ -+ return 0; -+} -+EXPORT_SYMBOL(efrm_pt_flush); -+ -+static void -+efrm_handle_rx_dmaq_flushed(struct efhw_nic *flush_nic, int instance, -+ bool *completed) -+{ -+ struct list_head *pos, *temp; -+ struct vi_resource *virs; -+ -+ list_for_each_safe(pos, temp, -+ &efrm_vi_manager->rx_flush_outstanding_list) { -+ virs = container_of(pos, struct vi_resource, rx_flush_link); -+ -+ if (instance == EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)) { -+ efrm_vi_resource_rx_flush_done(virs, completed); -+ efrm_vi_resource_process_waiting_flushes(completed); -+ return; -+ } -+ } -+ EFRM_TRACE("%s: Unhandled rx flush event, nic %d, instance %d", -+ __func__, flush_nic->index, instance); -+} -+ -+static void -+efrm_handle_tx_dmaq_flushed(struct efhw_nic *flush_nic, int instance, -+ bool *completed) -+{ -+ struct list_head *pos, *temp; -+ struct vi_resource *virs; -+ -+ list_for_each_safe(pos, temp, -+ &efrm_vi_manager->tx_flush_outstanding_list) { -+ virs = container_of(pos, struct vi_resource, tx_flush_link); -+ -+ if (instance == EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)) { -+ efrm_vi_resource_tx_flush_done(virs, completed); -+ return; -+ } -+ } -+ EFRM_TRACE("%s: Unhandled tx flush event, nic %d, 
instance %d", -+ __func__, flush_nic->index, instance); -+} -+ -+void -+efrm_handle_dmaq_flushed(struct efhw_nic *flush_nic, unsigned instance, -+ int rx_flush) -+{ -+ irq_flags_t lock_flags; -+ bool completed = false; -+ -+ EFRM_TRACE("%s: nic_i=%d instance=%d rx_flush=%d", __func__, -+ flush_nic->index, instance, rx_flush); -+ -+ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ -+ if (rx_flush) -+ efrm_handle_rx_dmaq_flushed(flush_nic, instance, &completed); -+ else -+ efrm_handle_tx_dmaq_flushed(flush_nic, instance, &completed); -+ -+#if BUG7916_WORKAROUND || BUG5302_WORKAROUND -+ efrm_vi_handle_flush_loss(&completed); -+#endif -+ -+ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ -+ if (completed) -+ EFRM_VI_RM_DELAYED_FREE(efrm_vi_manager); -+} -+ -+static void -+efrm_vi_rm_reinit_dmaqs(struct vi_resource *virs) -+{ -+ struct efhw_nic *nic = virs->rs.rs_client->nic; -+ -+ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] != 0) -+ efrm_vi_rm_init_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX, nic); -+ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) -+ efrm_vi_rm_init_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX, nic); -+} -+ -+/* free any PT endpoints whose flush has now complete */ -+void efrm_vi_rm_delayed_free(struct work_struct *data) -+{ -+ irq_flags_t lock_flags; -+ struct list_head close_pending; -+ struct vi_resource *virs; -+ -+ EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_vi_manager->rm); -+ -+ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ list_replace_init(&efrm_vi_manager->close_pending, &close_pending); -+ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ -+ EFRM_TRACE("%s: %p", __func__, efrm_vi_manager); -+ while (!list_empty(&close_pending)) { -+ virs = -+ list_entry(list_pop(&close_pending), struct vi_resource, -+ rx_flush_link); -+ EFRM_TRACE("%s: flushed VI instance=%d", __func__, -+ EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)); -+ -+ if (virs->flush_callback_fn != NULL) { -+ 
efrm_vi_rm_reinit_dmaqs(virs); -+ virs->flush_callback_fn(virs->flush_callback_arg); -+ } else -+ efrm_vi_rm_free_flushed_resource(virs); -+ } -+} -+ -+void efrm_vi_rm_salvage_flushed_vis(void) -+{ -+#if BUG7916_WORKAROUND || BUG5302_WORKAROUND -+ irq_flags_t lock_flags; -+ bool completed; -+ -+ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ efrm_vi_handle_flush_loss(&completed); -+ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); -+#endif -+ -+ efrm_vi_rm_delayed_free(&efrm_vi_manager->work_item); -+} -+ -+void efrm_vi_resource_free(struct vi_resource *virs) -+{ -+ efrm_vi_register_flush_callback(virs, NULL, NULL); -+ efrm_pt_flush(virs); -+} -+EXPORT_SYMBOL(efrm_vi_resource_free); -+ -+ -+void efrm_vi_resource_release(struct vi_resource *virs) -+{ -+ if (__efrm_resource_release(&virs->rs)) -+ efrm_vi_resource_free(virs); -+} -+EXPORT_SYMBOL(efrm_vi_resource_release); -+ -+/* -+ * vi: sw=8:ai:aw -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2009-04-21/drivers/net/sfc/sfc_resource/vi_resource_manager.c 2008-07-17 16:18:07.000000000 +0200 -@@ -0,0 +1,231 @@ -+/**************************************************************************** -+ * Driver for Solarflare network controllers - -+ * resource management for Xen backend, OpenOnload, etc -+ * (including support for SFE4001 10GBT NIC) -+ * -+ * This file contains the VI resource manager. 
-+ * -+ * Copyright 2005-2007: Solarflare Communications Inc, -+ * 9501 Jeronimo Road, Suite 250, -+ * Irvine, CA 92618, USA -+ * -+ * Developed and maintained by Solarflare Communications: -+ * -+ * -+ * -+ * Certain parts of the driver were implemented by -+ * Alexandra Kossovsky -+ * OKTET Labs Ltd, Russia, -+ * http://oktetlabs.ru, -+ * by request of Solarflare Communications -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation, incorporated herein by reference. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ **************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include "efrm_internal.h" -+ -+ -+int efrm_pt_pace(struct vi_resource *virs, unsigned int val) -+{ -+ struct efhw_nic *nic = virs->rs.rs_client->nic; -+ int instance; -+ -+ EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0); -+ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); -+ falcon_nic_pace(nic, instance, val); -+ EFRM_TRACE("%s[%d]=%d DONE", __func__, instance, val); -+ return 0; -+} -+EXPORT_SYMBOL(efrm_pt_pace); -+ -+/*** Resource manager creation/destruction *******************************/ -+ -+static void efrm_vi_rm_dtor(struct efrm_resource_manager *rm); -+ -+static int -+efrm_create_or_destroy_vi_resource_manager( -+ struct efrm_resource_manager **rm_in_out, -+ const struct vi_resource_dimensions *dims, -+ bool destroy) -+{ -+ struct 
vi_resource *virs; -+ struct list_head *pos, *temp; -+ struct list_head flush_pending; -+ irq_flags_t lock_flags; -+ int rc; -+ unsigned dmaq_min, dmaq_lim; -+ -+ EFRM_ASSERT(rm_in_out); -+ -+ if (destroy) -+ goto destroy; -+ -+ EFRM_ASSERT(dims); -+ EFRM_NOTICE("vi_resource_manager: evq_int=%u-%u evq_timer=%u-%u", -+ dims->evq_int_min, dims->evq_int_lim, -+ dims->evq_timer_min, dims->evq_timer_lim); -+ EFRM_NOTICE("vi_resource_manager: rxq=%u-%u txq=%u-%u", -+ dims->rxq_min, dims->rxq_lim, -+ dims->txq_min, dims->txq_lim); -+ -+ efrm_vi_manager = kmalloc(sizeof(*efrm_vi_manager), GFP_KERNEL); -+ if (efrm_vi_manager == NULL) { -+ rc = -ENOMEM; -+ goto fail_alloc; -+ } -+ -+ memset(efrm_vi_manager, 0, sizeof(*efrm_vi_manager)); -+ -+ efrm_vi_manager->iscsi_dmaq_instance_is_free = true; -+ -+ dmaq_min = max(dims->rxq_min, dims->txq_min); -+ dmaq_lim = min(dims->rxq_lim, dims->txq_lim); -+ -+ efrm_vi_manager->with_timer_base = -+ max(dmaq_min, dims->evq_timer_min); -+ efrm_vi_manager->with_timer_limit = -+ min(dmaq_lim, dims->evq_timer_lim); -+ rc = efrm_kfifo_id_ctor(&efrm_vi_manager->instances_with_timer, -+ efrm_vi_manager->with_timer_base, -+ efrm_vi_manager->with_timer_limit, -+ &efrm_vi_manager->rm.rm_lock); -+ if (rc < 0) -+ goto fail_with_timer_id_pool; -+ -+ efrm_vi_manager->with_interrupt_base = -+ max(dmaq_min, dims->evq_int_min); -+ efrm_vi_manager->with_interrupt_limit = -+ min(dmaq_lim, dims->evq_int_lim); -+ efrm_vi_manager->with_interrupt_limit = -+ max(efrm_vi_manager->with_interrupt_limit, -+ efrm_vi_manager->with_interrupt_base); -+ rc = efrm_kfifo_id_ctor(&efrm_vi_manager->instances_with_interrupt, -+ efrm_vi_manager->with_interrupt_base, -+ efrm_vi_manager->with_interrupt_limit, -+ &efrm_vi_manager->rm.rm_lock); -+ if (rc < 0) -+ goto fail_with_int_id_pool; -+ -+ INIT_LIST_HEAD(&efrm_vi_manager->rx_flush_waiting_list); -+ INIT_LIST_HEAD(&efrm_vi_manager->rx_flush_outstanding_list); -+ INIT_LIST_HEAD(&efrm_vi_manager->tx_flush_outstanding_list); -+ 
efrm_vi_manager->rx_flush_outstanding_count = 0; -+ -+ INIT_LIST_HEAD(&efrm_vi_manager->close_pending); -+ efrm_vi_manager->workqueue = create_workqueue("sfc_vi"); -+ if (efrm_vi_manager->workqueue == NULL) -+ goto fail_create_workqueue; -+ INIT_WORK(&efrm_vi_manager->work_item, efrm_vi_rm_delayed_free); -+ -+ /* NB. This must be the last step to avoid things getting tangled. -+ * efrm_resource_manager_dtor calls the vi_rm_dtor which ends up in -+ * this function. */ -+ rc = efrm_resource_manager_ctor(&efrm_vi_manager->rm, efrm_vi_rm_dtor, -+ "VI", EFRM_RESOURCE_VI); -+ if (rc < 0) -+ goto fail_rm_ctor; -+ -+ *rm_in_out = &efrm_vi_manager->rm; -+ return 0; -+ -+destroy: -+ rc = 0; -+ EFRM_RESOURCE_MANAGER_ASSERT_VALID(*rm_in_out); -+ -+ /* Abort outstanding flushes. Note, a VI resource can be on more -+ * than one of these lists. We handle this by starting with the TX -+ * list and then append VIs to this list if they aren't on the TX -+ * list already. A VI is on the TX flush list if tx_flushing -+ * is not empty. 
*/ -+ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ -+ list_replace_init(&efrm_vi_manager->tx_flush_outstanding_list, -+ &flush_pending); -+ -+ list_for_each_safe(pos, temp, -+ &efrm_vi_manager->rx_flush_waiting_list) { -+ virs = container_of(pos, struct vi_resource, rx_flush_link); -+ -+ list_del(&virs->rx_flush_link); -+ if (virs->tx_flushing == 0) -+ list_add_tail(&virs->tx_flush_link, &flush_pending); -+ } -+ -+ list_for_each_safe(pos, temp, -+ &efrm_vi_manager->rx_flush_outstanding_list) { -+ virs = container_of(pos, struct vi_resource, rx_flush_link); -+ -+ list_del(&virs->rx_flush_link); -+ if (virs->tx_flushing == 0) -+ list_add_tail(&virs->tx_flush_link, &flush_pending); -+ } -+ -+ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); -+ -+ while (!list_empty(&flush_pending)) { -+ virs = -+ list_entry(list_pop(&flush_pending), struct vi_resource, -+ tx_flush_link); -+ EFRM_TRACE("%s: found PT endpoint " EFRM_RESOURCE_FMT -+ " with flush pending [Tx=0x%x, Rx=0x%x, RxO=0x%x]", -+ __func__, -+ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle), -+ virs->tx_flushing, -+ virs->rx_flushing, -+ virs->rx_flush_outstanding); -+ efrm_vi_rm_free_flushed_resource(virs); -+ } -+ -+fail_rm_ctor: -+ -+ /* Complete outstanding closes. 
*/ -+ destroy_workqueue(efrm_vi_manager->workqueue); -+fail_create_workqueue: -+ EFRM_ASSERT(list_empty(&efrm_vi_manager->close_pending)); -+ kfifo_vfree(efrm_vi_manager->instances_with_interrupt); -+fail_with_int_id_pool: -+ -+ kfifo_vfree(efrm_vi_manager->instances_with_timer); -+fail_with_timer_id_pool: -+ -+ if (destroy) -+ return 0; -+ -+ EFRM_DO_DEBUG(memset(efrm_vi_manager, 0, sizeof(*efrm_vi_manager))); -+ kfree(efrm_vi_manager); -+fail_alloc: -+ -+ *rm_in_out = NULL; -+ EFRM_ERR("%s: failed rc=%d", __func__, rc); -+ return rc; -+} -+ -+int -+efrm_create_vi_resource_manager(struct efrm_resource_manager **rm_out, -+ const struct vi_resource_dimensions *dims) -+{ -+ return efrm_create_or_destroy_vi_resource_manager(rm_out, dims, false); -+} -+ -+static void efrm_vi_rm_dtor(struct efrm_resource_manager *rm) -+{ -+ efrm_create_or_destroy_vi_resource_manager(&rm, NULL, true); -+} diff --git a/patches.xen/sfc-set-arch b/patches.xen/sfc-set-arch deleted file mode 100644 index a1ef8da..0000000 --- a/patches.xen/sfc-set-arch +++ /dev/null @@ -1,38 +0,0 @@ -From: Kieran Mansley -Subject: set efhw_arch field of device type -References: bnc#489105 -Patch-mainline: n/a - -Acked-by: jbeulich@novell.com - ---- head-2009-04-07.orig/drivers/net/sfc/sfc_resource/ci/efhw/common.h 2009-04-07 14:39:57.000000000 +0200 -+++ head-2009-04-07/drivers/net/sfc/sfc_resource/ci/efhw/common.h 2009-04-07 15:02:05.000000000 +0200 -@@ -41,6 +41,10 @@ - - #include - -+enum efhw_arch { -+ EFHW_ARCH_FALCON, -+}; -+ - typedef uint32_t efhw_buffer_addr_t; - #define EFHW_BUFFER_ADDR_FMT "[ba:%"PRIx32"]" - ---- head-2009-04-07.orig/drivers/net/sfc/sfc_resource/nic.c 2009-04-07 14:39:57.000000000 +0200 -+++ head-2009-04-07/drivers/net/sfc/sfc_resource/nic.c 2009-04-07 15:02:05.000000000 +0200 -@@ -47,6 +47,7 @@ int efhw_device_type_init(struct efhw_de - switch (device_id) { - case 0x0703: - case 0x6703: -+ dt->arch = EFHW_ARCH_FALCON; - dt->variant = 'A'; - switch (class_revision) { - case 0: -@@ 
-60,6 +61,7 @@ int efhw_device_type_init(struct efhw_de - } - break; - case 0x0710: -+ dt->arch = EFHW_ARCH_FALCON; - dt->variant = 'B'; - switch (class_revision) { - case 2: diff --git a/patches.xen/tmem b/patches.xen/tmem deleted file mode 100644 index 9bf577f..0000000 --- a/patches.xen/tmem +++ /dev/null @@ -1,1410 +0,0 @@ -Subject: Transcendent memory ("tmem") for Linux -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 908:baeb818cd2dc) -Patch-mainline: n/a - -Tmem, when called from a tmem-capable (paravirtualized) guest, makes -use of otherwise unutilized ("fallow") memory to create and manage -pools of pages that can be accessed from the guest either as -"ephemeral" pages or as "persistent" pages. In either case, the pages -are not directly addressible by the guest, only copied to and fro via -the tmem interface. Ephemeral pages are a nice place for a guest to -put recently evicted clean pages that it might need again; these pages -can be reclaimed synchronously by Xen for other guests or other uses. -Persistent pages are a nice place for a guest to put "swap" pages to -avoid sending them to disk. These pages retain data as long as the -guest lives, but count against the guest memory allocation. - -This patch contains the Linux paravirtualization changes to -complement the tmem Xen patch (xen-unstable c/s 19646). It -implements "precache" (ext3 only as of now), "preswap", -and limited "shared precache" (ocfs2 only as of now) support. -CONFIG options are required to turn on -the support (but in this patch they default to "y"). If -the underlying Xen does not have tmem support or has it -turned off, this is sensed early to avoid nearly all -hypercalls. 
- -Lots of useful prose about tmem can be found at -http://oss.oracle.com/projects/tmem - -Signed-off-by: Dan Magenheimer -Acked-by: jbeulich@novell.com - ---- - Documentation/transcendent-memory.txt | 176 ++++++++++++++++++++++++++++++++ - fs/btrfs/extent_io.c | 9 + - fs/btrfs/super.c | 2 - fs/buffer.c | 6 + - fs/ext3/super.c | 2 - fs/ext4/super.c | 3 - fs/mpage.c | 8 + - fs/ocfs2/super.c | 2 - fs/super.c | 5 - include/linux/fs.h | 3 - include/linux/precache.h | 55 ++++++++++ - include/linux/swap.h | 53 +++++++++ - kernel/sysctl.c | 11 ++ - mm/Kconfig | 28 +++++ - mm/Makefile | 3 - mm/filemap.c | 11 ++ - mm/page_io.c | 13 ++ - mm/precache.c | 138 +++++++++++++++++++++++++ - mm/preswap.c | 182 ++++++++++++++++++++++++++++++++++ - mm/swapfile.c | 143 +++++++++++++++++++++++++- - mm/tmem.h | 84 +++++++++++++++ - mm/truncate.c | 10 + - 22 files changed, 943 insertions(+), 4 deletions(-) - ---- /dev/null -+++ b/Documentation/transcendent-memory.txt -@@ -0,0 +1,176 @@ -+Normal memory is directly addressable by the kernel, of a known -+normally-fixed size, synchronously accessible, and persistent (though -+not across a reboot). -+ -+What if there was a class of memory that is of unknown and dynamically -+variable size, is addressable only indirectly by the kernel, can be -+configured either as persistent or as "ephemeral" (meaning it will be -+around for awhile, but might disappear without warning), and is still -+fast enough to be synchronously accessible? -+ -+We call this latter class "transcendent memory" and it provides an -+interesting opportunity to more efficiently utilize RAM in a virtualized -+environment. However this "memory but not really memory" may also have -+applications in NON-virtualized environments, such as hotplug-memory -+deletion, SSDs, and page cache compression. Others have suggested ideas -+such as allowing use of highmem memory without a highmem kernel, or use -+of spare video memory. 
-+ -+Transcendent memory, or "tmem" for short, provides a well-defined API to -+access this unusual class of memory. (A summary of the API is provided -+below.) The basic operations are page-copy-based and use a flexible -+object-oriented addressing mechanism. Tmem assumes that some "privileged -+entity" is capable of executing tmem requests and storing pages of data; -+this entity is currently a hypervisor and operations are performed via -+hypercalls, but the entity could be a kernel policy, or perhaps a -+"memory node" in a cluster of blades connected by a high-speed -+interconnect such as hypertransport or QPI. -+ -+Since tmem is not directly accessible and because page copying is done -+to/from physical pageframes, it more suitable for in-kernel memory needs -+than for userland applications. However, there may be yet undiscovered -+userland possibilities. -+ -+With the tmem concept outlined vaguely and its broader potential hinted, -+we will overview two existing examples of how tmem can be used by the -+kernel. -+ -+"Cleancache" can be thought of as a page-granularity victim cache for clean -+pages that the kernel's pageframe replacement algorithm (PFRA) would like -+to keep around, but can't since there isn't enough memory. So when the -+PFRA "evicts" a page, it first puts it into the cleancache via a call to -+tmem. And any time a filesystem reads a page from disk, it first attempts -+to get the page from cleancache. If it's there, a disk access is eliminated. -+If not, the filesystem just goes to the disk like normal. Cleancache is -+"ephemeral" so whether a page is kept in cleancache (between the "put" and -+the "get") is dependent on a number of factors that are invisible to -+the kernel. -+ -+"Frontswap" is so named because it can be thought of as the opposite of -+a "backing store". Frontswap IS persistent, but for various reasons may not -+always be available for use, again due to factors that may not be visible to -+the kernel. 
(But, briefly, if the kernel is being "good" and has shared its -+resources nicely, then it will be able to use frontswap, else it will not.) -+Once a page is put, a get on the page will always succeed. So when the -+kernel finds itself in a situation where it needs to swap out a page, it -+first attempts to use frontswap. If the put works, a disk write and -+(usually) a disk read are avoided. If it doesn't, the page is written -+to swap as usual. Unlike cleancache, whether a page is stored in frontswap -+vs swap is recorded in kernel data structures, so when a page needs to -+be fetched, the kernel does a get if it is in frontswap and reads from -+swap if it is not in frontswap. -+ -+Both cleancache and frontswap may be optionally compressed, trading off 2x -+space reduction vs 10x performance for access. Cleancache also has a -+sharing feature, which allows different nodes in a "virtual cluster" -+to share a local page cache. -+ -+Tmem has some similarity to IBM's Collaborative Memory Management, but -+creates more of a partnership between the kernel and the "privileged -+entity" and is not very invasive. Tmem may be applicable for KVM and -+containers; there is some disagreement on the extent of its value. -+Tmem is highly complementary to ballooning (aka page granularity hot -+plug) and memory deduplication (aka transparent content-based page -+sharing) but still has value when neither are present. -+ -+Performance is difficult to quantify because some benchmarks respond -+very favorably to increases in memory and tmem may do quite well on -+those, depending on how much tmem is available which may vary widely -+and dynamically, depending on conditions completely outside of the -+system being measured. Ideas on how best to provide useful metrics -+would be appreciated. -+ -+Tmem is supported starting in Xen 4.0 and is in Xen's Linux 2.6.18-xen -+source tree. 
It is also released as a technology preview in Oracle's -+Xen-based virtualization product, Oracle VM 2.2. Again, Xen is not -+necessarily a requirement, but currently provides the only existing -+implementation of tmem. -+ -+Lots more information about tmem can be found at: -+ http://oss.oracle.com/projects/tmem -+and there was a talk about it on the first day of Linux Symposium in -+July 2009; an updated talk is planned at linux.conf.au in January 2010. -+Tmem is the result of a group effort, including Dan Magenheimer, -+Chris Mason, Dave McCracken, Kurt Hackel and Zhigang Wang, with helpful -+input from Jeremy Fitzhardinge, Keir Fraser, Ian Pratt, Sunil Mushran, -+Joel Becker, and Jan Beulich. -+ -+THE TRANSCENDENT MEMORY API -+ -+Transcendent memory is made up of a set of pools. Each pool is made -+up of a set of objects. And each object contains a set of pages. -+The combination of a 32-bit pool id, a 64-bit object id, and a 32-bit -+page id, uniquely identify a page of tmem data, and this tuple is called -+a "handle." Commonly, the three parts of a handle are used to address -+a filesystem, a file within that filesystem, and a page within that file; -+however an OS can use any values as long as they uniquely identify -+a page of data. -+ -+When a tmem pool is created, it is given certain attributes: It can -+be private or shared, and it can be persistent or ephemeral. Each -+combination of these attributes provides a different set of useful -+functionality and also defines a slightly different set of semantics -+for the various operations on the pool. Other pool attributes include -+the size of the page and a version number. -+ -+Once a pool is created, operations are performed on the pool. Pages -+are copied between the OS and tmem and are addressed using a handle. -+Pages and/or objects may also be flushed from the pool. When all -+operations are completed, a pool can be destroyed. 
-+ -+The specific tmem functions are called in Linux through a set of -+accessor functions: -+ -+int (*new_pool)(struct tmem_pool_uuid uuid, u32 flags); -+int (*destroy_pool)(u32 pool_id); -+int (*put_page)(u32 pool_id, u64 object, u32 index, unsigned long pfn); -+int (*get_page)(u32 pool_id, u64 object, u32 index, unsigned long pfn); -+int (*flush_page)(u32 pool_id, u64 object, u32 index); -+int (*flush_object)(u32 pool_id, u64 object); -+ -+The new_pool accessor creates a new pool and returns a pool id -+which is a non-negative 32-bit integer. If the flags parameter -+specifies that the pool is to be shared, the uuid is a 128-bit "shared -+secret" else it is ignored. The destroy_pool accessor destroys the pool. -+(Note: shared pools are not supported until security implications -+are better understood.) -+ -+The put_page accessor copies a page of data from the specified pageframe -+and associates it with the specified handle. -+ -+The get_page accessor looks up a page of data in tmem associated with -+the specified handle and, if found, copies it to the specified pageframe. -+ -+The flush_page accessor ensures that subsequent gets of a page with -+the specified handle will fail. The flush_object accessor ensures -+that subsequent gets of any page matching the pool id and object -+will fail. -+ -+There are many subtle but critical behaviors for get_page and put_page: -+- Any put_page (with one notable exception) may be rejected and the client -+ must be prepared to deal with that failure. A put_page copies, NOT moves, -+ data; that is the data exists in both places. Linux is responsible for -+ destroying or overwriting its own copy, or alternately managing any -+ coherency between the copies. -+- Every page successfully put to a persistent pool must be found by a -+ subsequent get_page that specifies the same handle. A page successfully -+ put to an ephemeral pool has an indeterminate lifetime and even an -+ immediately subsequent get_page may fail. 
-+- A get_page to a private pool is destructive, that is it behaves as if -+ the get_page were atomically followed by a flush_page. A get_page -+ to a shared pool is non-destructive. A flush_page behaves just like -+ a get_page to a private pool except the data is thrown away. -+- Put-put-get coherency is guaranteed. For example, after the sequence: -+ put_page(ABC,D1); -+ put_page(ABC,D2); -+ get_page(ABC,E) -+ E may never contain the data from D1. However, even for a persistent -+ pool, the get_page may fail if the second put_page indicates failure. -+- Get-get coherency is guaranteed. For example, in the sequence: -+ put_page(ABC,D); -+ get_page(ABC,E1); -+ get_page(ABC,E2) -+ if the first get_page fails, the second must also fail. -+- A tmem implementation provides no serialization guarantees (e.g. to -+ an SMP Linux). So if different Linux threads are putting and flushing -+ the same page, the results are indeterminate. ---- a/fs/btrfs/extent_io.c -+++ b/fs/btrfs/extent_io.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - #include "extent_io.h" - #include "extent_map.h" - #include "compat.h" -@@ -1990,6 +1991,13 @@ static int __extent_read_full_page(struc - - set_page_extent_mapped(page); - -+ if (!PageUptodate(page)) { -+ if (precache_get(page->mapping, page->index, page) == 1) { -+ BUG_ON(blocksize != PAGE_SIZE); -+ goto out; -+ } -+ } -+ - end = page_end; - while (1) { - lock_extent(tree, start, end, GFP_NOFS); -@@ -2117,6 +2125,7 @@ static int __extent_read_full_page(struc - cur = cur + iosize; - page_offset += iosize; - } -+out: - if (!nr) { - if (!PageError(page)) - SetPageUptodate(page); ---- a/fs/btrfs/super.c -+++ b/fs/btrfs/super.c -@@ -39,6 +39,7 @@ - #include - #include - #include -+#include - #include "compat.h" - #include "ctree.h" - #include "disk-io.h" -@@ -607,6 +608,7 @@ static int btrfs_fill_super(struct super - sb->s_root = root_dentry; - - save_mount_options(sb, data); -+ precache_init(sb); - return 0; - - fail_close: ---- 
a/fs/buffer.c -+++ b/fs/buffer.c -@@ -41,6 +41,7 @@ - #include - #include - #include -+#include - - static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); - -@@ -277,6 +278,11 @@ void invalidate_bdev(struct block_device - invalidate_bh_lrus(); - lru_add_drain_all(); /* make sure all lru add caches are flushed */ - invalidate_mapping_pages(mapping, 0, -1); -+ -+ /* 99% of the time, we don't need to flush the precache on the bdev. -+ * But, for the strange corners, lets be cautious -+ */ -+ precache_flush_inode(mapping); - } - EXPORT_SYMBOL(invalidate_bdev); - ---- a/fs/ext3/super.c -+++ b/fs/ext3/super.c -@@ -36,6 +36,7 @@ - #include - #include - #include -+#include - - #include - -@@ -1367,6 +1368,7 @@ static int ext3_setup_super(struct super - } else { - ext3_msg(sb, KERN_INFO, "using internal journal"); - } -+ precache_init(sb); - return res; - } - ---- a/fs/ext4/super.c -+++ b/fs/ext4/super.c -@@ -38,6 +38,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -1941,6 +1942,8 @@ static int ext4_setup_super(struct super - EXT4_INODES_PER_GROUP(sb), - sbi->s_mount_opt, sbi->s_mount_opt2); - -+ precache_init(sb); -+ - return res; - } - ---- a/fs/mpage.c -+++ b/fs/mpage.c -@@ -27,6 +27,7 @@ - #include - #include - #include -+#include - - /* - * I/O completion handler for multipage BIOs. -@@ -271,6 +272,13 @@ do_mpage_readpage(struct bio *bio, struc - SetPageMappedToDisk(page); - } - -+ if (fully_mapped && -+ blocks_per_page == 1 && !PageUptodate(page) && -+ precache_get(page->mapping, page->index, page) == 1) { -+ SetPageUptodate(page); -+ goto confused; -+ } -+ - /* - * This page will go to BIO. Do we need to send this BIO off first? 
- */ ---- a/fs/ocfs2/super.c -+++ b/fs/ocfs2/super.c -@@ -41,6 +41,7 @@ - #include - #include - #include -+#include - - #define MLOG_MASK_PREFIX ML_SUPER - #include -@@ -2385,6 +2386,7 @@ static int ocfs2_initialize_super(struct - mlog_errno(status); - goto bail; - } -+ shared_precache_init(sb, &di->id2.i_super.s_uuid[0]); - - bail: - mlog_exit(status); ---- a/fs/super.c -+++ b/fs/super.c -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - #include "internal.h" - - -@@ -112,6 +113,9 @@ static struct super_block *alloc_super(s - s->s_maxbytes = MAX_NON_LFS; - s->s_op = &default_op; - s->s_time_gran = 1000000000; -+#ifdef CONFIG_PRECACHE -+ s->precache_poolid = -1; -+#endif - } - out: - return s; -@@ -183,6 +187,7 @@ void deactivate_locked_super(struct supe - * inodes are flushed before we release the fs module. - */ - rcu_barrier(); -+ precache_flush_filesystem(s); - put_filesystem(fs); - put_super(s); - } else { ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -1426,6 +1426,9 @@ struct super_block { - /* Granularity of c/m/atime in ns. - Cannot be worse than a second */ - u32 s_time_gran; -+#ifdef CONFIG_PRECACHE -+ u32 precache_poolid; -+#endif - - /* - * The next field is for VFS *only*. 
No filesystems have any business ---- /dev/null -+++ b/include/linux/precache.h -@@ -0,0 +1,55 @@ -+#ifndef _LINUX_PRECACHE_H -+ -+#include -+#include -+ -+#ifdef CONFIG_PRECACHE -+extern void precache_init(struct super_block *sb); -+extern void shared_precache_init(struct super_block *sb, char *uuid); -+extern int precache_get(struct address_space *mapping, unsigned long index, -+ struct page *empty_page); -+extern int precache_put(struct address_space *mapping, unsigned long index, -+ struct page *page); -+extern int precache_flush(struct address_space *mapping, unsigned long index); -+extern int precache_flush_inode(struct address_space *mapping); -+extern int precache_flush_filesystem(struct super_block *s); -+#else -+static inline void precache_init(struct super_block *sb) -+{ -+} -+ -+static inline void shared_precache_init(struct super_block *sb, char *uuid) -+{ -+} -+ -+static inline int precache_get(struct address_space *mapping, -+ unsigned long index, struct page *empty_page) -+{ -+ return 0; -+} -+ -+static inline int precache_put(struct address_space *mapping, -+ unsigned long index, struct page *page) -+{ -+ return 0; -+} -+ -+static inline int precache_flush(struct address_space *mapping, -+ unsigned long index) -+{ -+ return 0; -+} -+ -+static inline int precache_flush_inode(struct address_space *mapping) -+{ -+ return 0; -+} -+ -+static inline int precache_flush_filesystem(struct super_block *s) -+{ -+ return 0; -+} -+#endif -+ -+#define _LINUX_PRECACHE_H -+#endif /* _LINUX_PRECACHE_H */ ---- a/include/linux/swap.h -+++ b/include/linux/swap.h -@@ -186,8 +186,61 @@ struct swap_info_struct { - struct block_device *bdev; /* swap device or bdev of swap file */ - struct file *swap_file; /* seldom referenced */ - unsigned int old_block_size; /* seldom referenced */ -+#ifdef CONFIG_PRESWAP -+ unsigned long *preswap_map; -+ unsigned int preswap_pages; -+#endif - }; - -+#ifdef CONFIG_PRESWAP -+ -+#include -+extern int preswap_sysctl_handler(struct ctl_table 
*, int, void __user *, -+ size_t *, loff_t *); -+extern const unsigned long preswap_zero, preswap_infinity; -+ -+extern struct swap_info_struct *get_swap_info_struct(unsigned int type); -+ -+extern void preswap_shrink(unsigned long); -+extern int preswap_test(struct swap_info_struct *, unsigned long); -+extern void preswap_init(unsigned); -+extern int preswap_put(struct page *); -+extern int preswap_get(struct page *); -+extern void preswap_flush(unsigned, unsigned long); -+extern void preswap_flush_area(unsigned); -+#else -+static inline void preswap_shrink(unsigned long target_pages) -+{ -+} -+ -+static inline int preswap_test(struct swap_info_struct *sis, unsigned long offset) -+{ -+ return 0; -+} -+ -+static inline void preswap_init(unsigned type) -+{ -+} -+ -+static inline int preswap_put(struct page *page) -+{ -+ return 0; -+} -+ -+static inline int preswap_get(struct page *get) -+{ -+ return 0; -+} -+ -+static inline void preswap_flush(unsigned type, unsigned long offset) -+{ -+} -+ -+static inline void preswap_flush_area(unsigned type) -+{ -+} -+#endif /* CONFIG_PRESWAP */ -+ - struct swap_list_t { - int head; /* head of priority-ordered swapfile list */ - int next; /* swapfile to be used next */ ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -1330,6 +1330,17 @@ static struct ctl_table vm_table[] = { - .mode = 0644, - .proc_handler = scan_unevictable_handler, - }, -+#ifdef CONFIG_PRESWAP -+ { -+ .procname = "preswap", -+ .data = NULL, -+ .maxlen = sizeof(unsigned long), -+ .mode = 0644, -+ .proc_handler = preswap_sysctl_handler, -+ .extra1 = (void *)&preswap_zero, -+ .extra2 = (void *)&preswap_infinity, -+ }, -+#endif - #ifdef CONFIG_MEMORY_FAILURE - { - .procname = "memory_failure_early_kill", ---- a/mm/Kconfig -+++ b/mm/Kconfig -@@ -347,3 +347,31 @@ config NEED_PER_CPU_KM - depends on !SMP - bool - default y -+ -+# -+# support for transcendent memory -+# -+config TMEM -+ bool -+ help -+ In a virtualized environment, allows unused and underutilized -+ 
system physical memory to be made accessible through a narrow -+ well-defined page-copy-based API. If unsure, say Y. -+ -+config PRECACHE -+ bool "Cache clean pages in transcendent memory" -+ depends on XEN -+ select TMEM -+ help -+ Allows the transcendent memory pool to be used to store clean -+ page-cache pages which, under some circumstances, will greatly -+ reduce paging and thus improve performance. If unsure, say Y. -+ -+config PRESWAP -+ bool "Swap pages to transcendent memory" -+ depends on XEN -+ select TMEM -+ help -+ Allows the transcendent memory pool to be used as a pseudo-swap -+ device which, under some circumstances, will greatly reduce -+ swapping and thus improve performance. If unsure, say Y. ---- a/mm/Makefile -+++ b/mm/Makefile -@@ -19,6 +19,9 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock. - - obj-$(CONFIG_BOUNCE) += bounce.o - obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o -+obj-$(CONFIG_TMEM) += tmem.o -+obj-$(CONFIG_PRESWAP) += preswap.o -+obj-$(CONFIG_PRECACHE) += precache.o - obj-$(CONFIG_HAS_DMA) += dmapool.o - obj-$(CONFIG_HUGETLBFS) += hugetlb.o - obj-$(CONFIG_NUMA) += mempolicy.o ---- a/mm/filemap.c -+++ b/mm/filemap.c -@@ -33,6 +33,7 @@ - #include - #include /* for BUG_ON(!in_atomic()) only */ - #include -+#include - #include /* for page_is_file_cache() */ - #include "internal.h" - -@@ -116,6 +117,16 @@ void __remove_from_page_cache(struct pag - { - struct address_space *mapping = page->mapping; - -+ /* -+ * if we're uptodate, flush out into the precache, otherwise -+ * invalidate any existing precache entries. 
We can't leave -+ * stale data around in the precache once our page is gone -+ */ -+ if (PageUptodate(page)) -+ precache_put(page->mapping, page->index, page); -+ else -+ precache_flush(page->mapping, page->index); -+ - radix_tree_delete(&mapping->page_tree, page->index); - page->mapping = NULL; - mapping->nrpages--; ---- a/mm/page_io.c -+++ b/mm/page_io.c -@@ -111,6 +111,13 @@ int swap_writepage(struct page *page, st - return ret; - } - -+ if (preswap_put(page) == 1) { -+ set_page_writeback(page); -+ unlock_page(page); -+ end_page_writeback(page); -+ goto out; -+ } -+ - bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); - if (bio == NULL) { - set_page_dirty(page); -@@ -179,6 +186,12 @@ int swap_readpage(struct page *page) - return ret; - } - -+ if (preswap_get(page) == 1) { -+ SetPageUptodate(page); -+ unlock_page(page); -+ goto out; -+ } -+ - bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); - if (bio == NULL) { - unlock_page(page); ---- /dev/null -+++ b/mm/precache.c -@@ -0,0 +1,138 @@ -+/* -+ * linux/mm/precache.c -+ * -+ * Implements "precache" for filesystems/pagecache on top of transcendent -+ * memory ("tmem") API. A filesystem creates an "ephemeral tmem pool" -+ * and retains the returned pool_id in its superblock. Clean pages evicted -+ * from pagecache may be "put" into the pool and associated with a "handle" -+ * consisting of the pool_id, an object (inode) id, and an index (page offset). -+ * Note that the page is copied to tmem; no kernel mappings are changed. -+ * If the page is later needed, the filesystem (or VFS) issues a "get", passing -+ * the same handle and an empty pageframe. If successful, the page is copied -+ * into the pageframe and a disk read is avoided. But since the tmem pool -+ * is of indeterminate size, a "put" page has indeterminate longevity -+ * ("ephemeral"), and the "get" may fail, in which case the filesystem must -+ * read the page from disk as before. 
Note that the filesystem/pagecache are -+ * responsible for maintaining coherency between the pagecache, precache, -+ * and the disk, for which "flush page" and "flush object" actions are -+ * provided. And when a filesystem is unmounted, it must "destroy" the pool. -+ * -+ * Two types of pools may be created for a precache: "private" or "shared". -+ * For a private pool, a successful "get" always flushes, implementing -+ * exclusive semantics; for a "shared" pool (which is intended for use by -+ * co-resident nodes of a cluster filesystem), the "flush" is not guaranteed. -+ * In either case, a failed "duplicate" put (overwrite) always guarantee -+ * the old data is flushed. -+ * -+ * Note also that multiple accesses to a tmem pool may be concurrent and any -+ * ordering must be guaranteed by the caller. -+ * -+ * Copyright (C) 2008,2009 Dan Magenheimer, Oracle Corp. -+ */ -+ -+#include -+#include -+#include "tmem.h" -+ -+static int precache_auto_allocate; /* set to 1 to auto_allocate */ -+ -+int precache_put(struct address_space *mapping, unsigned long index, -+ struct page *page) -+{ -+ u32 tmem_pool = mapping->host->i_sb->precache_poolid; -+ u64 obj = (unsigned long) mapping->host->i_ino; -+ u32 ind = (u32) index; -+ unsigned long mfn = pfn_to_mfn(page_to_pfn(page)); -+ int ret; -+ -+ if ((s32)tmem_pool < 0) { -+ if (!precache_auto_allocate) -+ return 0; -+ /* a put on a non-existent precache may auto-allocate one */ -+ ret = tmem_new_pool(0, 0, 0); -+ if (ret < 0) -+ return 0; -+ pr_info("Mapping superblock for s_id=%s to precache_id=%d\n", -+ mapping->host->i_sb->s_id, tmem_pool); -+ mapping->host->i_sb->precache_poolid = tmem_pool; -+ } -+ if (ind != index) -+ return 0; -+ mb(); /* ensure page is quiescent; tmem may address it with an alias */ -+ return tmem_put_page(tmem_pool, obj, ind, mfn); -+} -+ -+int precache_get(struct address_space *mapping, unsigned long index, -+ struct page *empty_page) -+{ -+ u32 tmem_pool = mapping->host->i_sb->precache_poolid; 
-+ u64 obj = (unsigned long) mapping->host->i_ino; -+ u32 ind = (u32) index; -+ unsigned long mfn = pfn_to_mfn(page_to_pfn(empty_page)); -+ -+ if ((s32)tmem_pool < 0) -+ return 0; -+ if (ind != index) -+ return 0; -+ -+ return tmem_get_page(tmem_pool, obj, ind, mfn); -+} -+EXPORT_SYMBOL(precache_get); -+ -+int precache_flush(struct address_space *mapping, unsigned long index) -+{ -+ u32 tmem_pool = mapping->host->i_sb->precache_poolid; -+ u64 obj = (unsigned long) mapping->host->i_ino; -+ u32 ind = (u32) index; -+ -+ if ((s32)tmem_pool < 0) -+ return 0; -+ if (ind != index) -+ return 0; -+ -+ return tmem_flush_page(tmem_pool, obj, ind); -+} -+EXPORT_SYMBOL(precache_flush); -+ -+int precache_flush_inode(struct address_space *mapping) -+{ -+ u32 tmem_pool = mapping->host->i_sb->precache_poolid; -+ u64 obj = (unsigned long) mapping->host->i_ino; -+ -+ if ((s32)tmem_pool < 0) -+ return 0; -+ -+ return tmem_flush_object(tmem_pool, obj); -+} -+EXPORT_SYMBOL(precache_flush_inode); -+ -+int precache_flush_filesystem(struct super_block *sb) -+{ -+ u32 tmem_pool = sb->precache_poolid; -+ int ret; -+ -+ if ((s32)tmem_pool < 0) -+ return 0; -+ ret = tmem_destroy_pool(tmem_pool); -+ if (!ret) -+ return 0; -+ pr_info("Unmapping superblock for s_id=%s from precache_id=%d\n", -+ sb->s_id, ret); -+ sb->precache_poolid = 0; -+ return 1; -+} -+EXPORT_SYMBOL(precache_flush_filesystem); -+ -+void precache_init(struct super_block *sb) -+{ -+ sb->precache_poolid = tmem_new_pool(0, 0, 0); -+} -+EXPORT_SYMBOL(precache_init); -+ -+void shared_precache_init(struct super_block *sb, char *uuid) -+{ -+ u64 uuid_lo = *(u64 *)uuid; -+ u64 uuid_hi = *(u64 *)(&uuid[8]); -+ sb->precache_poolid = tmem_new_pool(uuid_lo, uuid_hi, TMEM_POOL_SHARED); -+} -+EXPORT_SYMBOL(shared_precache_init); ---- /dev/null -+++ b/mm/preswap.c -@@ -0,0 +1,182 @@ -+/* -+ * linux/mm/preswap.c -+ * -+ * Implements a fast "preswap" on top of the transcendent memory ("tmem") API. 
-+ * When a swapdisk is enabled (with swapon), a "private persistent tmem pool" -+ * is created along with a bit-per-page preswap_map. When swapping occurs -+ * and a page is about to be written to disk, a "put" into the pool may first -+ * be attempted by passing the pageframe to be swapped, along with a "handle" -+ * consisting of a pool_id, an object id, and an index. Since the pool is of -+ * indeterminate size, the "put" may be rejected, in which case the page -+ * is swapped to disk as normal. If the "put" is successful, the page is -+ * copied to tmem and the preswap_map records the success. Later, when -+ * the page needs to be swapped in, the preswap_map is checked and, if set, -+ * the page may be obtained with a "get" operation. Note that the swap -+ * subsystem is responsible for: maintaining coherency between the swapcache, -+ * preswap, and the swapdisk; for evicting stale pages from preswap; and for -+ * emptying preswap when swapoff is performed. The "flush page" and "flush -+ * object" actions are provided for this. -+ * -+ * Note that if a "duplicate put" is performed to overwrite a page and -+ * the "put" operation fails, the page (and old data) is flushed and lost. -+ * Also note that multiple accesses to a tmem pool may be concurrent and -+ * any ordering must be guaranteed by the caller. -+ * -+ * Copyright (C) 2008,2009 Dan Magenheimer, Oracle Corp. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "tmem.h" -+ -+static u32 preswap_poolid = -1; /* if negative, preswap will never call tmem */ -+ -+const unsigned long preswap_zero = 0, preswap_infinity = ~0UL; /* for sysctl */ -+ -+/* -+ * Swizzling increases objects per swaptype, increasing tmem concurrency -+ * for heavy swaploads. 
Later, larger nr_cpus -> larger SWIZ_BITS -+ */ -+#define SWIZ_BITS 4 -+#define SWIZ_MASK ((1 << SWIZ_BITS) - 1) -+#define oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK)) -+#define iswiz(_ind) (_ind >> SWIZ_BITS) -+ -+/* -+ * preswap_map test/set/clear operations (must be atomic) -+ */ -+ -+int preswap_test(struct swap_info_struct *sis, unsigned long offset) -+{ -+ if (!sis->preswap_map) -+ return 0; -+ return test_bit(offset % BITS_PER_LONG, -+ &sis->preswap_map[offset/BITS_PER_LONG]); -+} -+ -+static inline void preswap_set(struct swap_info_struct *sis, -+ unsigned long offset) -+{ -+ if (!sis->preswap_map) -+ return; -+ set_bit(offset % BITS_PER_LONG, -+ &sis->preswap_map[offset/BITS_PER_LONG]); -+} -+ -+static inline void preswap_clear(struct swap_info_struct *sis, -+ unsigned long offset) -+{ -+ if (!sis->preswap_map) -+ return; -+ clear_bit(offset % BITS_PER_LONG, -+ &sis->preswap_map[offset/BITS_PER_LONG]); -+} -+ -+/* -+ * preswap tmem operations -+ */ -+ -+/* returns 1 if the page was successfully put into preswap, 0 if the page -+ * was declined, and -ERRNO for a specific error */ -+int preswap_put(struct page *page) -+{ -+ swp_entry_t entry = { .val = page_private(page), }; -+ unsigned type = swp_type(entry); -+ pgoff_t offset = swp_offset(entry); -+ u64 ind64 = (u64)offset; -+ u32 ind = (u32)offset; -+ unsigned long mfn = pfn_to_mfn(page_to_pfn(page)); -+ struct swap_info_struct *sis = get_swap_info_struct(type); -+ int dup = 0, ret; -+ -+ if ((s32)preswap_poolid < 0) -+ return 0; -+ if (ind64 != ind) -+ return 0; -+ if (preswap_test(sis, offset)) -+ dup = 1; -+ mb(); /* ensure page is quiescent; tmem may address it with an alias */ -+ ret = tmem_put_page(preswap_poolid, oswiz(type, ind), iswiz(ind), mfn); -+ if (ret == 1) { -+ preswap_set(sis, offset); -+ if (!dup) -+ sis->preswap_pages++; -+ } else if (dup) { -+ /* failed dup put always results in an automatic flush of -+ * the (older) page from preswap */ -+ preswap_clear(sis, offset); 
-+ sis->preswap_pages--; -+ } -+ return ret; -+} -+ -+/* returns 1 if the page was successfully gotten from preswap, 0 if the page -+ * was not present (should never happen!), and -ERRNO for a specific error */ -+int preswap_get(struct page *page) -+{ -+ swp_entry_t entry = { .val = page_private(page), }; -+ unsigned type = swp_type(entry); -+ pgoff_t offset = swp_offset(entry); -+ u64 ind64 = (u64)offset; -+ u32 ind = (u32)offset; -+ unsigned long mfn = pfn_to_mfn(page_to_pfn(page)); -+ struct swap_info_struct *sis = get_swap_info_struct(type); -+ int ret; -+ -+ if ((s32)preswap_poolid < 0) -+ return 0; -+ if (ind64 != ind) -+ return 0; -+ if (!preswap_test(sis, offset)) -+ return 0; -+ ret = tmem_get_page(preswap_poolid, oswiz(type, ind), iswiz(ind), mfn); -+ return ret; -+} -+ -+/* flush a single page from preswap */ -+void preswap_flush(unsigned type, unsigned long offset) -+{ -+ u64 ind64 = (u64)offset; -+ u32 ind = (u32)offset; -+ struct swap_info_struct *sis = get_swap_info_struct(type); -+ int ret = 1; -+ -+ if ((s32)preswap_poolid < 0) -+ return; -+ if (ind64 != ind) -+ return; -+ if (preswap_test(sis, offset)) { -+ ret = tmem_flush_page(preswap_poolid, -+ oswiz(type, ind), iswiz(ind)); -+ sis->preswap_pages--; -+ preswap_clear(sis, offset); -+ } -+} -+ -+/* flush all pages from the passed swaptype */ -+void preswap_flush_area(unsigned type) -+{ -+ struct swap_info_struct *sis = get_swap_info_struct(type); -+ int ind; -+ -+ if ((s32)preswap_poolid < 0) -+ return; -+ for (ind = SWIZ_MASK; ind >= 0; ind--) -+ (void)tmem_flush_object(preswap_poolid, oswiz(type, ind)); -+ sis->preswap_pages = 0; -+} -+ -+void preswap_init(unsigned type) -+{ -+ /* only need one tmem pool for all swap types */ -+ if ((s32)preswap_poolid >= 0) -+ return; -+ preswap_poolid = tmem_new_pool(0, 0, TMEM_POOL_PERSIST); -+} ---- a/mm/swapfile.c -+++ b/mm/swapfile.c -@@ -588,6 +588,7 @@ static unsigned char swap_entry_free(str - swap_list.next = p->type; - nr_swap_pages++; - 
p->inuse_pages--; -+ preswap_flush(p->type, offset); - if (p->flags & SWP_BLKDEV) { - struct gendisk *disk = p->bdev->bd_disk; - if (disk->fops->swap_slot_free_notify) -@@ -1055,7 +1056,7 @@ static int unuse_mm(struct mm_struct *mm - * Recycle to start on reaching the end, returning 0 when empty. - */ - static unsigned int find_next_to_unuse(struct swap_info_struct *si, -- unsigned int prev) -+ unsigned int prev, unsigned int preswap) - { - unsigned int max = si->max; - unsigned int i = prev; -@@ -1081,6 +1082,12 @@ static unsigned int find_next_to_unuse(s - prev = 0; - i = 1; - } -+ if (preswap) { -+ if (preswap_test(si, i)) -+ break; -+ else -+ continue; -+ } - count = si->swap_map[i]; - if (count && swap_count(count) != SWAP_MAP_BAD) - break; -@@ -1092,8 +1099,12 @@ static unsigned int find_next_to_unuse(s - * We completely avoid races by reading each swap page in advance, - * and then search for the process using it. All the necessary - * page table adjustments can then be made atomically. -+ * -+ * if the boolean preswap is true, only unuse pages_to_unuse pages; -+ * pages_to_unuse==0 means all pages - */ --static int try_to_unuse(unsigned int type) -+static int try_to_unuse(unsigned int type, unsigned int preswap, -+ unsigned long pages_to_unuse) - { - struct swap_info_struct *si = swap_info[type]; - struct mm_struct *start_mm; -@@ -1126,7 +1137,7 @@ static int try_to_unuse(unsigned int typ - * one pass through swap_map is enough, but not necessarily: - * there are races when an instance of an entry might be missed. - */ -- while ((i = find_next_to_unuse(si, i)) != 0) { -+ while ((i = find_next_to_unuse(si, i, preswap)) != 0) { - if (signal_pending(current)) { - retval = -EINTR; - break; -@@ -1293,6 +1304,8 @@ static int try_to_unuse(unsigned int typ - * interactive performance. 
- */ - cond_resched(); -+ if (preswap && pages_to_unuse && !--pages_to_unuse) -+ break; - } - - mmput(start_mm); -@@ -1637,7 +1650,7 @@ SYSCALL_DEFINE1(swapoff, const char __us - spin_unlock(&swap_lock); - - current->flags |= PF_OOM_ORIGIN; -- err = try_to_unuse(type); -+ err = try_to_unuse(type, 0, 0); - current->flags &= ~PF_OOM_ORIGIN; - - if (err) { -@@ -1689,9 +1702,14 @@ SYSCALL_DEFINE1(swapoff, const char __us - swap_map = p->swap_map; - p->swap_map = NULL; - p->flags = 0; -+ preswap_flush_area(type); - spin_unlock(&swap_lock); - mutex_unlock(&swapon_mutex); - vfree(swap_map); -+#ifdef CONFIG_PRESWAP -+ if (p->preswap_map) -+ vfree(p->preswap_map); -+#endif - /* Destroy swap account informatin */ - swap_cgroup_swapoff(type); - -@@ -1886,6 +1904,7 @@ SYSCALL_DEFINE2(swapon, const char __use - unsigned long maxpages; - unsigned long swapfilepages; - unsigned char *swap_map = NULL; -+ unsigned long *preswap_map = NULL; - struct page *page = NULL; - struct inode *inode = NULL; - int did_down = 0; -@@ -2088,6 +2107,12 @@ SYSCALL_DEFINE2(swapon, const char __use - } - } - -+#ifdef CONFIG_PRESWAP -+ preswap_map = vmalloc(maxpages / sizeof(long)); -+ if (preswap_map) -+ memset(preswap_map, 0, maxpages / sizeof(long)); -+#endif -+ - error = swap_cgroup_swapon(type, maxpages); - if (error) - goto bad_swap; -@@ -2126,6 +2151,9 @@ SYSCALL_DEFINE2(swapon, const char __use - else - p->prio = --least_priority; - p->swap_map = swap_map; -+#ifdef CONFIG_PRESWAP -+ p->preswap_map = preswap_map; -+#endif - p->flags |= SWP_WRITEOK; - nr_swap_pages += nr_good_pages; - total_swap_pages += nr_good_pages; -@@ -2149,6 +2177,7 @@ SYSCALL_DEFINE2(swapon, const char __use - swap_list.head = swap_list.next = type; - else - swap_info[prev]->next = type; -+ preswap_init(type); - spin_unlock(&swap_lock); - mutex_unlock(&swapon_mutex); - atomic_inc(&proc_poll_event); -@@ -2168,6 +2197,7 @@ bad_swap_2: - p->swap_file = NULL; - p->flags = 0; - spin_unlock(&swap_lock); -+ vfree(preswap_map); - 
vfree(swap_map); - if (swap_file) { - if (did_down) { -@@ -2373,6 +2403,10 @@ int valid_swaphandles(swp_entry_t entry, - base++; - - spin_lock(&swap_lock); -+ if (preswap_test(si, target)) { -+ spin_unlock(&swap_lock); -+ return 0; -+ } - if (end > si->max) /* don't go beyond end of map */ - end = si->max; - -@@ -2383,6 +2417,9 @@ int valid_swaphandles(swp_entry_t entry, - break; - if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD) - break; -+ /* Don't read in preswap pages */ -+ if (preswap_test(si, toff)) -+ break; - } - /* Count contiguous allocated slots below our target */ - for (toff = target; --toff >= base; nr_pages++) { -@@ -2391,6 +2428,9 @@ int valid_swaphandles(swp_entry_t entry, - break; - if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD) - break; -+ /* Don't read in preswap pages */ -+ if (preswap_test(si, toff)) -+ break; - } - spin_unlock(&swap_lock); - -@@ -2617,3 +2657,98 @@ static void free_swap_count_continuation - } - } - } -+ -+#ifdef CONFIG_PRESWAP -+/* -+ * preswap infrastructure functions -+ */ -+ -+struct swap_info_struct *get_swap_info_struct(unsigned int type) -+{ -+ BUG_ON(type > MAX_SWAPFILES); -+ return swap_info[type]; -+} -+ -+/* code structure leveraged from sys_swapoff */ -+void preswap_shrink(unsigned long target_pages) -+{ -+ struct swap_info_struct *si = NULL; -+ unsigned long total_pages = 0, total_pages_to_unuse; -+ unsigned long pages = 0, unuse_pages = 0; -+ int type; -+ int wrapped = 0; -+ -+ do { -+ /* -+ * we don't want to hold swap_lock while doing a very -+ * lengthy try_to_unuse, but swap_list may change -+ * so restart scan from swap_list.head each time -+ */ -+ spin_lock(&swap_lock); -+ total_pages = 0; -+ for (type = swap_list.head; type >= 0; type = si->next) { -+ si = swap_info[type]; -+ total_pages += si->preswap_pages; -+ } -+ if (total_pages <= target_pages) { -+ spin_unlock(&swap_lock); -+ return; -+ } -+ total_pages_to_unuse = total_pages - target_pages; -+ for (type = swap_list.head; type >= 0; type = 
si->next) { -+ si = swap_info[type]; -+ if (total_pages_to_unuse < si->preswap_pages) -+ pages = unuse_pages = total_pages_to_unuse; -+ else { -+ pages = si->preswap_pages; -+ unuse_pages = 0; /* unuse all */ -+ } -+ if (security_vm_enough_memory_kern(pages)) -+ continue; -+ vm_unacct_memory(pages); -+ break; -+ } -+ spin_unlock(&swap_lock); -+ if (type < 0) -+ return; -+ current->flags |= PF_OOM_ORIGIN; -+ (void)try_to_unuse(type, 1, unuse_pages); -+ current->flags &= ~PF_OOM_ORIGIN; -+ wrapped++; -+ } while (wrapped <= 3); -+} -+ -+ -+#ifdef CONFIG_SYSCTL -+/* cat /sys/proc/vm/preswap provides total number of pages in preswap -+ * across all swaptypes. echo N > /sys/proc/vm/preswap attempts to shrink -+ * preswap page usage to N (usually 0) */ -+int preswap_sysctl_handler(ctl_table *table, int write, -+ void __user *buffer, size_t *length, loff_t *ppos) -+{ -+ unsigned long npages; -+ int type; -+ unsigned long totalpages = 0; -+ struct swap_info_struct *si = NULL; -+ -+ /* modeled after hugetlb_sysctl_handler in mm/hugetlb.c */ -+ if (!write) { -+ spin_lock(&swap_lock); -+ for (type = swap_list.head; type >= 0; type = si->next) { -+ si = swap_info[type]; -+ totalpages += si->preswap_pages; -+ } -+ spin_unlock(&swap_lock); -+ npages = totalpages; -+ } -+ table->data = &npages; -+ table->maxlen = sizeof(unsigned long); -+ proc_doulongvec_minmax(table, write, buffer, length, ppos); -+ -+ if (write) -+ preswap_shrink(npages); -+ -+ return 0; -+} -+#endif -+#endif /* CONFIG_PRESWAP */ ---- /dev/null -+++ b/mm/tmem.h -@@ -0,0 +1,84 @@ -+/* -+ * linux/mm/tmem.h -+ * -+ * Interface to transcendent memory, used by mm/precache.c and mm/preswap.c -+ * Currently implemented on XEN, but may be implemented elsewhere in future. -+ * -+ * Copyright (C) 2008,2009 Dan Magenheimer, Oracle Corp. 
-+ */ -+ -+#ifdef CONFIG_XEN -+#include -+ -+/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ -+#define TMEM_POOL_MIN_PAGESHIFT 12 -+#define TMEM_POOL_PAGEORDER (PAGE_SHIFT - TMEM_POOL_MIN_PAGESHIFT) -+ -+extern int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, u64 object, u32 index, -+ unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len); -+extern int xen_tmem_new_pool(u32 tmem_cmd, u64 uuid_lo, u64 uuid_hi, u32 flags); -+ -+static inline int tmem_put_page(u32 pool_id, u64 object, u32 index, -+ unsigned long gmfn) -+{ -+ return xen_tmem_op(TMEM_PUT_PAGE, pool_id, object, index, -+ gmfn, 0, 0, 0); -+} -+ -+static inline int tmem_get_page(u32 pool_id, u64 object, u32 index, -+ unsigned long gmfn) -+{ -+ return xen_tmem_op(TMEM_GET_PAGE, pool_id, object, index, -+ gmfn, 0, 0, 0); -+} -+ -+static inline int tmem_flush_page(u32 pool_id, u64 object, u32 index) -+{ -+ return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, object, index, -+ 0, 0, 0, 0); -+} -+ -+static inline int tmem_flush_object(u32 pool_id, u64 object) -+{ -+ return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, object, 0, 0, 0, 0, 0); -+} -+ -+static inline int tmem_new_pool(u64 uuid_lo, u64 uuid_hi, u32 flags) -+{ -+ BUILD_BUG_ON((TMEM_POOL_PAGEORDER < 0) || -+ (TMEM_POOL_PAGEORDER >= TMEM_POOL_PAGESIZE_MASK)); -+ flags |= TMEM_POOL_PAGEORDER << TMEM_POOL_PAGESIZE_SHIFT; -+ return xen_tmem_new_pool(TMEM_NEW_POOL, uuid_lo, uuid_hi, flags); -+} -+ -+static inline int tmem_destroy_pool(u32 pool_id) -+{ -+ return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, 0, 0, 0, 0, 0, 0); -+} -+#else -+struct tmem_op { -+ u32 cmd; -+ s32 pool_id; /* private > 0; shared < 0; 0 is invalid */ -+ union { -+ struct { /* for cmd == TMEM_NEW_POOL */ -+ u64 uuid[2]; -+ u32 flags; -+ } new; -+ struct { /* for cmd == TMEM_CONTROL */ -+ u32 subop; -+ u32 cli_id; -+ u32 arg1; -+ u32 arg2; -+ void *buf; -+ } ctrl; -+ struct { -+ u64 object; -+ u32 index; -+ u32 tmem_offset; -+ u32 pfn_offset; -+ u32 len; -+ unsigned long pfn; /* page frame */ -+ } 
gen; -+ } u; -+}; -+#endif ---- a/mm/truncate.c -+++ b/mm/truncate.c -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - #include - #include /* grr. try_to_release_page, - do_invalidatepage */ -@@ -51,6 +52,7 @@ void do_invalidatepage(struct page *page - static inline void truncate_partial_page(struct page *page, unsigned partial) - { - zero_user_segment(page, partial, PAGE_CACHE_SIZE); -+ precache_flush(page->mapping, page->index); - if (page_has_private(page)) - do_invalidatepage(page, partial); - } -@@ -108,6 +110,10 @@ truncate_complete_page(struct address_sp - clear_page_mlock(page); - remove_from_page_cache(page); - ClearPageMappedToDisk(page); -+ /* this must be after the remove_from_page_cache which -+ * calls precache_put -+ */ -+ precache_flush(mapping, page->index); - page_cache_release(page); /* pagecache ref */ - return 0; - } -@@ -215,6 +221,7 @@ void truncate_inode_pages_range(struct a - pgoff_t next; - int i; - -+ precache_flush_inode(mapping); - if (mapping->nrpages == 0) - return; - -@@ -292,6 +299,7 @@ void truncate_inode_pages_range(struct a - pagevec_release(&pvec); - mem_cgroup_uncharge_end(); - } -+ precache_flush_inode(mapping); - } - EXPORT_SYMBOL(truncate_inode_pages_range); - -@@ -434,6 +442,7 @@ int invalidate_inode_pages2_range(struct - int did_range_unmap = 0; - int wrapped = 0; - -+ precache_flush_inode(mapping); - pagevec_init(&pvec, 0); - next = start; - while (next <= end && !wrapped && -@@ -492,6 +501,7 @@ int invalidate_inode_pages2_range(struct - mem_cgroup_uncharge_end(); - cond_resched(); - } -+ precache_flush_inode(mapping); - return ret; - } - EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range); diff --git a/patches.xen/xen-balloon-max-target b/patches.xen/xen-balloon-max-target deleted file mode 100644 index 79af8b5..0000000 --- a/patches.xen/xen-balloon-max-target +++ /dev/null @@ -1,78 +0,0 @@ -From: ccoffing@novell.com -Subject: Expose min/max limits of domain ballooning -Patch-mainline: obsolete -References: 
152667, 184727 - -jb: Also added this to the sysfs representation. - ---- head-2010-11-25.orig/drivers/xen/balloon/balloon.c 2010-11-25 11:55:54.000000000 +0100 -+++ head-2010-11-25/drivers/xen/balloon/balloon.c 2010-11-25 13:47:01.000000000 +0100 -@@ -236,7 +236,7 @@ static unsigned long current_target(void - return target; - } - --static unsigned long minimum_target(void) -+unsigned long balloon_minimum_target(void) - { - #ifndef CONFIG_XEN - #define max_pfn num_physpages -@@ -458,7 +458,7 @@ static void balloon_process(struct work_ - void balloon_set_new_target(unsigned long target) - { - /* No need for lock. Not read-modify-write updates. */ -- bs.target_pages = max(target, minimum_target()); -+ bs.target_pages = max(target, balloon_minimum_target()); - schedule_work(&balloon_worker); - } - -@@ -533,10 +533,13 @@ static int balloon_read(char *page, char - page, - "Current allocation: %8lu kB\n" - "Requested target: %8lu kB\n" -+ "Minimum target: %8lu kB\n" -+ "Maximum target: %8lu kB\n" - "Low-mem balloon: %8lu kB\n" - "High-mem balloon: %8lu kB\n" - "Driver pages: %8lu kB\n", - PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), -+ PAGES2KB(balloon_minimum_target()), PAGES2KB(num_physpages), - PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), - PAGES2KB(bs.driver_pages)); - ---- head-2010-11-25.orig/drivers/xen/balloon/common.h 2009-06-09 15:01:37.000000000 +0200 -+++ head-2010-11-25/drivers/xen/balloon/common.h 2009-08-19 10:36:49.000000000 +0200 -@@ -52,5 +52,6 @@ int balloon_sysfs_init(void); - void balloon_sysfs_exit(void); - - void balloon_set_new_target(unsigned long target); -+unsigned long balloon_minimum_target(void); - - #endif /* __XEN_BALLOON_COMMON_H__ */ ---- head-2010-11-25.orig/drivers/xen/balloon/sysfs.c 2010-11-22 13:06:57.000000000 +0100 -+++ head-2010-11-25/drivers/xen/balloon/sysfs.c 2009-08-19 10:36:47.000000000 +0200 -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -53,6 +54,8 @@ - 
static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) - - BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); -+BALLOON_SHOW(min_kb, "%lu\n", PAGES2KB(balloon_minimum_target())); -+BALLOON_SHOW(max_kb, "%lu\n", PAGES2KB(num_physpages)); - BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); - BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); - BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); -@@ -123,6 +126,8 @@ static struct sysdev_attribute *balloon_ - - static struct attribute *balloon_info_attrs[] = { - &attr_current_kb.attr, -+ &attr_min_kb.attr, -+ &attr_max_kb.attr, - &attr_low_kb.attr, - &attr_high_kb.attr, - &attr_driver_kb.attr, diff --git a/patches.xen/xen-blkback-bimodal-suse b/patches.xen/xen-blkback-bimodal-suse deleted file mode 100644 index 9f7c848..0000000 --- a/patches.xen/xen-blkback-bimodal-suse +++ /dev/null @@ -1,39 +0,0 @@ -Subject: backward compatibility -From: Gerd Hoffmann -Patch-mainline: obsolete - ---- - linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c | 6 ++++++ - linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c | 6 ++++++ - 2 files changed, 12 insertions(+) - ---- head-2010-11-25.orig/drivers/xen/blkback/xenbus.c 2010-11-22 13:10:22.000000000 +0100 -+++ head-2010-11-25/drivers/xen/blkback/xenbus.c 2010-11-25 10:57:03.000000000 +0100 -@@ -506,6 +506,12 @@ static int connect_ring(struct backend_i - be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; - else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64)) - be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; -+#if 1 /* maintain compatibility with early sles10-sp1 and paravirt netware betas */ -+ else if (0 == strcmp(protocol, "1")) -+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; -+ else if (0 == strcmp(protocol, "2")) -+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; -+#endif - else { - xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); - return -1; ---- head-2010-11-25.orig/drivers/xen/blktap/xenbus.c 2010-11-25 10:28:23.000000000 
+0100 -+++ head-2010-11-25/drivers/xen/blktap/xenbus.c 2010-11-25 10:57:00.000000000 +0100 -@@ -457,6 +457,12 @@ static int connect_ring(struct backend_i - be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; - else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64)) - be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; -+#if 1 /* maintain compatibility with early sles10-sp1 and paravirt netware betas */ -+ else if (0 == strcmp(protocol, "1")) -+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; -+ else if (0 == strcmp(protocol, "2")) -+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; -+#endif - else { - xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); - return -1; diff --git a/patches.xen/xen-blkback-cdrom b/patches.xen/xen-blkback-cdrom deleted file mode 100644 index 99277e3..0000000 --- a/patches.xen/xen-blkback-cdrom +++ /dev/null @@ -1,233 +0,0 @@ -Subject: CDROM removable media-present attribute plus handling code -From: plc@novell.com -Patch-mainline: obsolete -References: 159907 - ---- head-2011-01-30.orig/drivers/xen/blkback/Makefile 2009-06-09 15:01:37.000000000 +0200 -+++ head-2011-01-30/drivers/xen/blkback/Makefile 2010-03-25 14:38:02.000000000 +0100 -@@ -1,4 +1,4 @@ - obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o - obj-$(CONFIG_XEN_BLKBACK_PAGEMAP) += blkback-pagemap.o - --blkbk-y := blkback.o xenbus.o interface.o vbd.o -+blkbk-y := blkback.o xenbus.o interface.o vbd.o cdrom.o ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-01-30/drivers/xen/blkback/cdrom.c 2010-10-11 10:34:44.000000000 +0200 -@@ -0,0 +1,162 @@ -+/****************************************************************************** -+ * blkback/cdrom.c -+ * -+ * Routines for managing cdrom watch and media-present attribute of a -+ * cdrom type virtual block device (VBD). 
-+ * -+ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand -+ * Copyright (c) 2007 Pat Campbell -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License version 2 -+ * as published by the Free Software Foundation; or, when distributed -+ * separately from the Linux kernel or incorporated into other -+ * software packages, subject to the following license: -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this source file (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, copy, modify, -+ * merge, publish, distribute, sublicense, and/or sell copies of the Software, -+ * and to permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. -+ */ -+ -+#include "common.h" -+ -+#undef DPRINTK -+#define DPRINTK(_f, _a...) 
\ -+ printk(KERN_DEBUG "(%s() file=%s, line=%d) " _f "\n", \ -+ __func__, __FILE__ , __LINE__ , ##_a ) -+ -+ -+#define MEDIA_PRESENT "media-present" -+ -+static void cdrom_media_changed(struct xenbus_watch *, const char **, unsigned int); -+ -+/** -+ * Writes media-present=1 attribute for the given vbd device if not -+ * already there -+ */ -+static int cdrom_xenstore_write_media_present(struct backend_info *be) -+{ -+ struct xenbus_device *dev = be->dev; -+ struct xenbus_transaction xbt; -+ int err; -+ int media_present; -+ -+ err = xenbus_scanf(XBT_NIL, dev->nodename, MEDIA_PRESENT, "%d", -+ &media_present); -+ if (0 < err) { -+ DPRINTK("already written err%d", err); -+ return(0); -+ } -+ media_present = 1; -+ -+again: -+ err = xenbus_transaction_start(&xbt); -+ if (err) { -+ xenbus_dev_fatal(dev, err, "starting transaction"); -+ return(-1); -+ } -+ -+ err = xenbus_printf(xbt, dev->nodename, MEDIA_PRESENT, "%d", media_present ); -+ if (err) { -+ xenbus_dev_fatal(dev, err, "writing %s/%s", -+ dev->nodename, MEDIA_PRESENT); -+ goto abort; -+ } -+ err = xenbus_transaction_end(xbt, 0); -+ if (err == -EAGAIN) -+ goto again; -+ if (err) -+ xenbus_dev_fatal(dev, err, "ending transaction"); -+ return 0; -+ abort: -+ xenbus_transaction_end(xbt, 1); -+ return -1; -+} -+ -+/** -+ * -+ */ -+static int cdrom_is_type(struct backend_info *be) -+{ -+ DPRINTK("type:%x", be->blkif->vbd.type ); -+ return (be->blkif->vbd.type & VDISK_CDROM) -+ && (be->blkif->vbd.type & GENHD_FL_REMOVABLE); -+} -+ -+/** -+ * -+ */ -+void cdrom_add_media_watch(struct backend_info *be) -+{ -+ struct xenbus_device *dev = be->dev; -+ int err; -+ -+ DPRINTK("nodename:%s", dev->nodename); -+ if (cdrom_is_type(be)) { -+ DPRINTK("is a cdrom"); -+ if ( cdrom_xenstore_write_media_present(be) == 0 ) { -+ DPRINTK( "xenstore wrote OK"); -+ err = xenbus_watch_path2(dev, dev->nodename, MEDIA_PRESENT, -+ &be->cdrom_watch, -+ cdrom_media_changed); -+ if (err) -+ DPRINTK( "media_present watch add failed" ); -+ } -+ } 
-+} -+ -+/** -+ * Callback received when the "media_present" xenstore node is changed -+ */ -+static void cdrom_media_changed(struct xenbus_watch *watch, -+ const char **vec, unsigned int len) -+{ -+ int err; -+ unsigned media_present; -+ struct backend_info *be -+ = container_of(watch, struct backend_info, cdrom_watch); -+ struct xenbus_device *dev = be->dev; -+ -+ if (!cdrom_is_type(be)) { -+ DPRINTK("callback not for a cdrom" ); -+ return; -+ } -+ -+ err = xenbus_scanf(XBT_NIL, dev->nodename, MEDIA_PRESENT, "%d", -+ &media_present); -+ if (err == 0 || err == -ENOENT) { -+ DPRINTK("xenbus_read of cdrom media_present node error:%d",err); -+ return; -+ } -+ -+ if (media_present == 0) -+ vbd_free(&be->blkif->vbd); -+ else { -+ char *p = strrchr(dev->otherend, '/') + 1; -+ long handle = simple_strtoul(p, NULL, 0); -+ -+ if (!be->blkif->vbd.bdev) { -+ err = vbd_create(be->blkif, handle, be->major, be->minor, -+ !strchr(be->mode, 'w'), 1); -+ if (err) { -+ be->major = be->minor = 0; -+ xenbus_dev_fatal(dev, err, "creating vbd structure"); -+ return; -+ } -+ } -+ } -+} ---- head-2011-01-30.orig/drivers/xen/blkback/common.h 2011-01-31 17:49:31.000000000 +0100 -+++ head-2011-01-30/drivers/xen/blkback/common.h 2010-09-23 16:58:21.000000000 +0200 -@@ -103,6 +103,7 @@ struct backend_info - struct xenbus_device *dev; - blkif_t *blkif; - struct xenbus_watch backend_watch; -+ struct xenbus_watch cdrom_watch; - unsigned major; - unsigned minor; - char *mode; -@@ -149,4 +150,7 @@ int blkif_schedule(void *arg); - int blkback_barrier(struct xenbus_transaction xbt, - struct backend_info *be, int state); - -+/* cdrom media change */ -+void cdrom_add_media_watch(struct backend_info *be); -+ - #endif /* __BLKIF__BACKEND__COMMON_H__ */ ---- head-2011-01-30.orig/drivers/xen/blkback/vbd.c 2011-02-07 14:04:20.000000000 +0100 -+++ head-2011-01-30/drivers/xen/blkback/vbd.c 2011-02-07 14:17:36.000000000 +0100 -@@ -111,6 +111,9 @@ int vbd_translate(struct phys_req *req, - if ((operation != 
READ) && vbd->readonly) - goto out; - -+ if (vbd->bdev == NULL) -+ goto out; -+ - if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd))) - goto out; - ---- head-2011-01-30.orig/drivers/xen/blkback/xenbus.c 2010-11-25 10:57:11.000000000 +0100 -+++ head-2011-01-30/drivers/xen/blkback/xenbus.c 2010-03-25 14:38:02.000000000 +0100 -@@ -187,6 +187,12 @@ static int blkback_remove(struct xenbus_ - be->backend_watch.node = NULL; - } - -+ if (be->cdrom_watch.node) { -+ unregister_xenbus_watch(&be->cdrom_watch); -+ kfree(be->cdrom_watch.node); -+ be->cdrom_watch.node = NULL; -+ } -+ - if (be->blkif) { - blkif_disconnect(be->blkif); - vbd_free(&be->blkif->vbd); -@@ -343,6 +349,9 @@ static void backend_changed(struct xenbu - - /* We're potentially connected now */ - update_blkif_status(be->blkif); -+ -+ /* Add watch for cdrom media status if necessay */ -+ cdrom_add_media_watch(be); - } - } - diff --git a/patches.xen/xen-blkback-disable-barriers.patch b/patches.xen/xen-blkback-disable-barriers.patch deleted file mode 100644 index 1c9eab7..0000000 --- a/patches.xen/xen-blkback-disable-barriers.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- linux-2.6.38.3/drivers/xen/blkback/xenbus.c 2011-08-18 00:08:53.000000000 +0200 -+++ linux-2.6.38.3/drivers/xen/blkback/xenbus.c.new 2011-08-18 14:37:46.615344310 +0200 -@@ -441,7 +441,7 @@ - return; - } - -- err = blkback_barrier(xbt, be, 1); -+ err = blkback_barrier(xbt, be, 0); - if (err) - goto abort; - diff --git a/patches.xen/xen-blkfront-cdrom b/patches.xen/xen-blkfront-cdrom deleted file mode 100644 index a01fcf9..0000000 --- a/patches.xen/xen-blkfront-cdrom +++ /dev/null @@ -1,709 +0,0 @@ -From: plc@novell.com -Subject: implement forwarding of CD-ROM specific commands -Patch-mainline: obsolete -References: fate#300964 - ---- head-2010-12-06.orig/drivers/cdrom/Makefile 2010-12-06 14:17:48.000000000 +0100 -+++ head-2010-12-06/drivers/cdrom/Makefile 2010-11-23 15:06:54.000000000 +0100 -@@ -9,6 +9,7 @@ obj-$(CONFIG_BLK_DEV_IDECD) += 
- obj-$(CONFIG_BLK_DEV_SR) += cdrom.o - obj-$(CONFIG_PARIDE_PCD) += cdrom.o - obj-$(CONFIG_CDROM_PKTCDVD) += cdrom.o -+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += cdrom.o - - obj-$(CONFIG_VIOCD) += viocd.o cdrom.o - obj-$(CONFIG_GDROM) += gdrom.o cdrom.o ---- head-2010-12-06.orig/drivers/xen/blkfront/Makefile 2007-06-12 13:13:44.000000000 +0200 -+++ head-2010-12-06/drivers/xen/blkfront/Makefile 2010-11-23 15:06:54.000000000 +0100 -@@ -1,5 +1,5 @@ - - obj-$(CONFIG_XEN_BLKDEV_FRONTEND) := xenblk.o - --xenblk-objs := blkfront.o vbd.o -+xenblk-objs := blkfront.o vbd.o vcd.o - ---- head-2010-12-06.orig/drivers/xen/blkfront/blkfront.c 2010-12-06 15:01:01.000000000 +0100 -+++ head-2010-12-06/drivers/xen/blkfront/blkfront.c 2010-12-06 15:01:15.000000000 +0100 -@@ -412,6 +412,8 @@ static void connect(struct blkfront_info - add_disk(info->gd); - - info->is_ready = 1; -+ -+ register_vcd(info); - } - - /** -@@ -441,6 +443,8 @@ static void blkfront_closing(struct blkf - - xlvbd_sysfs_delif(info); - -+ unregister_vcd(info); -+ - xlvbd_del(info); - - out: ---- head-2010-12-06.orig/drivers/xen/blkfront/block.h 2010-12-06 14:37:35.000000000 +0100 -+++ head-2010-12-06/drivers/xen/blkfront/block.h 2010-11-23 15:06:54.000000000 +0100 -@@ -163,4 +163,8 @@ static inline void xlvbd_sysfs_delif(str - } - #endif - -+/* Virtual cdrom block-device */ -+extern void register_vcd(struct blkfront_info *info); -+extern void unregister_vcd(struct blkfront_info *info); -+ - #endif /* __XEN_DRIVERS_BLOCK_H__ */ ---- head-2010-12-06.orig/drivers/xen/blkfront/vbd.c 2010-11-23 16:11:19.000000000 +0100 -+++ head-2010-12-06/drivers/xen/blkfront/vbd.c 2010-11-23 15:06:54.000000000 +0100 -@@ -367,7 +367,8 @@ xlvbd_add(blkif_sector_t capacity, int v - goto out; - info->mi = mi; - -- if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0) -+ if (!(vdisk_info & VDISK_CDROM) && -+ (minor & ((1 << mi->type->partn_shift) - 1)) == 0) - nr_minors = 1 << mi->type->partn_shift; - - err = xlbd_reserve_minors(mi, minor, 
nr_minors); -@@ -381,7 +382,7 @@ xlvbd_add(blkif_sector_t capacity, int v - - offset = mi->index * mi->type->disks_per_major + - (minor >> mi->type->partn_shift); -- if (nr_minors > 1) { -+ if (nr_minors > 1 || (vdisk_info & VDISK_CDROM)) { - if (offset < 26) { - sprintf(gd->disk_name, "%s%c", - mi->type->diskname, 'a' + offset ); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-12-06/drivers/xen/blkfront/vcd.c 2010-11-23 15:06:54.000000000 +0100 -@@ -0,0 +1,507 @@ -+/******************************************************************************* -+ * vcd.c -+ * -+ * Implements CDROM cmd packet passing between frontend guest and backend driver. -+ * -+ * Copyright (c) 2008, Pat Campell plc@novell.com -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this source file (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, copy, modify, -+ * merge, publish, distribute, sublicense, and/or sell copies of the Software, -+ * and to permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. 
-+ */ -+ -+#define REVISION "$Revision: 1.0 $" -+ -+#include -+#include -+#include -+#include -+#include -+#include "block.h" -+ -+/* List of cdrom_device_info, can have as many as blkfront supports */ -+struct vcd_disk { -+ struct list_head vcd_entry; -+ struct cdrom_device_info vcd_cdrom_info; -+ spinlock_t vcd_cdrom_info_lock; -+}; -+static LIST_HEAD(vcd_disks); -+static DEFINE_SPINLOCK(vcd_disks_lock); -+ -+static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) -+{ -+ struct vcd_disk *ret_vcd = NULL; -+ struct vcd_disk *vcd; -+ -+ spin_lock(&vcd_disks_lock); -+ list_for_each_entry(vcd, &vcd_disks, vcd_entry) { -+ if (vcd->vcd_cdrom_info.disk == disk) { -+ spin_lock(&vcd->vcd_cdrom_info_lock); -+ ret_vcd = vcd; -+ break; -+ } -+ } -+ spin_unlock(&vcd_disks_lock); -+ return ret_vcd; -+} -+ -+static void submit_message(struct blkfront_info *info, void *sp) -+{ -+ struct request *req = NULL; -+ -+ req = blk_get_request(info->rq, READ, __GFP_WAIT); -+ if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) -+ goto out; -+ -+ req->rq_disk = info->gd; -+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) -+ req->cmd_type = REQ_TYPE_BLOCK_PC; -+ req->cmd_flags |= REQ_NOMERGE; -+#else -+ req->flags |= REQ_BLOCK_PC; -+#endif -+ req->__sector = 0; -+ req->__data_len = PAGE_SIZE; -+ req->timeout = 60*HZ; -+ -+ blk_execute_rq(req->q, info->gd, req, 1); -+ -+out: -+ blk_put_request(req); -+} -+ -+static int submit_cdrom_cmd(struct blkfront_info *info, -+ struct packet_command *cgc) -+{ -+ int ret = 0; -+ struct page *page; -+ size_t size; -+ union xen_block_packet *sp; -+ struct xen_cdrom_packet *xcp; -+ struct vcd_generic_command *vgc; -+ -+ if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { -+ pr_warning("%s() Packet buffer length is to large \n", __func__); -+ return -EIO; -+ } -+ -+ page = alloc_page(GFP_NOIO|__GFP_ZERO); -+ if (!page) { -+ pr_crit("%s() Unable to allocate page\n", __func__); -+ return -ENOMEM; -+ } -+ -+ size = PAGE_SIZE; -+ sp = 
page_address(page); -+ xcp = &(sp->xcp); -+ xcp->type = XEN_TYPE_CDROM_PACKET; -+ xcp->payload_offset = PACKET_PAYLOAD_OFFSET; -+ -+ vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); -+ memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); -+ vgc->stat = cgc->stat; -+ vgc->data_direction = cgc->data_direction; -+ vgc->quiet = cgc->quiet; -+ vgc->timeout = cgc->timeout; -+ if (cgc->sense) { -+ vgc->sense_offset = PACKET_SENSE_OFFSET; -+ memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); -+ } -+ if (cgc->buffer) { -+ vgc->buffer_offset = PACKET_BUFFER_OFFSET; -+ memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); -+ vgc->buflen = cgc->buflen; -+ } -+ -+ submit_message(info,sp); -+ -+ if (xcp->ret) -+ ret = xcp->err; -+ -+ if (cgc->sense) { -+ memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); -+ } -+ if (cgc->buffer && cgc->buflen) { -+ memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); -+ } -+ -+ __free_page(page); -+ return ret; -+} -+ -+ -+static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) -+{ -+ int ret = 0; -+ struct page *page; -+ struct blkfront_info *info; -+ union xen_block_packet *sp; -+ struct xen_cdrom_open *xco; -+ -+ info = cdi->disk->private_data; -+ -+ if (!info->xbdev) -+ return -ENODEV; -+ -+ if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { -+ return -EIO; -+ } -+ -+ page = alloc_page(GFP_NOIO|__GFP_ZERO); -+ if (!page) { -+ pr_crit("%s() Unable to allocate page\n", __func__); -+ return -ENOMEM; -+ } -+ -+ sp = page_address(page); -+ xco = &(sp->xco); -+ xco->type = XEN_TYPE_CDROM_OPEN; -+ xco->payload_offset = sizeof(struct xen_cdrom_open); -+ strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); -+ -+ submit_message(info,sp); -+ -+ if (xco->ret) { -+ ret = xco->err; -+ goto out; -+ } -+ -+ if (xco->media_present) -+ set_capacity(cdi->disk, xco->sectors); -+ -+out: -+ __free_page(page); -+ return ret; 
-+} -+ -+static void xencdrom_release(struct cdrom_device_info *cdi) -+{ -+} -+ -+static int xencdrom_media_changed(struct cdrom_device_info *cdi, int disc_nr) -+{ -+ int ret; -+ struct page *page; -+ struct blkfront_info *info; -+ union xen_block_packet *sp; -+ struct xen_cdrom_media_changed *xcmc; -+ -+ info = cdi->disk->private_data; -+ -+ page = alloc_page(GFP_NOIO|__GFP_ZERO); -+ if (!page) { -+ pr_crit("%s() Unable to allocate page\n", __func__); -+ return -ENOMEM; -+ } -+ -+ sp = page_address(page); -+ xcmc = &(sp->xcmc); -+ xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; -+ submit_message(info,sp); -+ ret = xcmc->media_changed; -+ -+ __free_page(page); -+ -+ return ret; -+} -+ -+static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) -+{ -+ int ret; -+ struct packet_command cgc; -+ struct blkfront_info *info; -+ -+ info = cdi->disk->private_data; -+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); -+ cgc.cmd[0] = GPCMD_START_STOP_UNIT; -+ if (position) -+ cgc.cmd[4] = 2; -+ else -+ cgc.cmd[4] = 3; -+ ret = submit_cdrom_cmd(info, &cgc); -+ return ret; -+} -+ -+static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) -+{ -+ int ret = 0; -+ struct blkfront_info *info; -+ struct packet_command cgc; -+ -+ info = cdi->disk->private_data; -+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); -+ cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; -+ cgc.cmd[4] = lock; -+ ret = submit_cdrom_cmd(info, &cgc); -+ return ret; -+} -+ -+static int xencdrom_packet(struct cdrom_device_info *cdi, -+ struct packet_command *cgc) -+{ -+ int ret = -EIO; -+ struct blkfront_info *info; -+ -+ info = cdi->disk->private_data; -+ ret = submit_cdrom_cmd(info, cgc); -+ cgc->stat = ret; -+ return ret; -+} -+ -+static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, -+ void *arg) -+{ -+ return -EINVAL; -+} -+ -+/* Query backend to see if CDROM packets are supported */ -+static int xencdrom_supported(struct blkfront_info *info) -+{ -+ 
struct page *page; -+ union xen_block_packet *sp; -+ struct xen_cdrom_support *xcs; -+ -+ page = alloc_page(GFP_NOIO|__GFP_ZERO); -+ if (!page) { -+ pr_crit("%s() Unable to allocate page\n", __func__); -+ return -ENOMEM; -+ } -+ -+ sp = page_address(page); -+ xcs = &(sp->xcs); -+ xcs->type = XEN_TYPE_CDROM_SUPPORT; -+ submit_message(info,sp); -+ return xcs->supported; -+} -+ -+static struct cdrom_device_ops xencdrom_dops = { -+ .open = xencdrom_open, -+ .release = xencdrom_release, -+ .media_changed = xencdrom_media_changed, -+ .tray_move = xencdrom_tray_move, -+ .lock_door = xencdrom_lock_door, -+ .generic_packet = xencdrom_packet, -+ .audio_ioctl = xencdrom_audio_ioctl, -+ .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ -+ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ -+ CDC_CD_R), -+ .n_minors = 1, -+}; -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) -+static int xencdrom_block_open(struct inode *inode, struct file *file) -+{ -+ struct block_device *bd = inode->i_bdev; -+#else -+static int xencdrom_block_open(struct block_device *bd, fmode_t mode) -+{ -+#endif -+ struct blkfront_info *info = bd->bd_disk->private_data; -+ struct vcd_disk *vcd; -+ int ret = 0; -+ -+ if (!info->xbdev) -+ return -ENODEV; -+ -+ if ((vcd = xencdrom_get_list_entry(info->gd))) { -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) -+ ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); -+#else -+ ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); -+#endif -+ info->users = vcd->vcd_cdrom_info.use_count; -+ spin_unlock(&vcd->vcd_cdrom_info_lock); -+ } -+ return ret; -+} -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) -+static int xencdrom_block_release(struct inode *inode, struct file *file) -+{ -+ struct gendisk *gd = inode->i_bdev->bd_disk; -+#else -+static int xencdrom_block_release(struct gendisk *gd, fmode_t mode) -+{ -+#endif -+ struct blkfront_info *info = gd->private_data; -+ struct vcd_disk *vcd; -+ int ret = 0; -+ -+ if ((vcd = 
xencdrom_get_list_entry(info->gd))) { -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) -+ ret = cdrom_release(&vcd->vcd_cdrom_info, file); -+#else -+ cdrom_release(&vcd->vcd_cdrom_info, mode); -+#endif -+ spin_unlock(&vcd->vcd_cdrom_info_lock); -+ if (vcd->vcd_cdrom_info.use_count == 0) { -+ info->users = 1; -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) -+ blkif_release(inode, file); -+#else -+ blkif_release(gd, mode); -+#endif -+ } -+ } -+ return ret; -+} -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) -+static int xencdrom_block_ioctl(struct inode *inode, struct file *file, -+ unsigned cmd, unsigned long arg) -+{ -+ struct block_device *bd = inode->i_bdev; -+#else -+static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, -+ unsigned cmd, unsigned long arg) -+{ -+#endif -+ struct blkfront_info *info = bd->bd_disk->private_data; -+ struct vcd_disk *vcd; -+ int ret = 0; -+ -+ if (!(vcd = xencdrom_get_list_entry(info->gd))) -+ goto out; -+ -+ switch (cmd) { -+ case 2285: /* SG_IO */ -+ ret = -ENOSYS; -+ break; -+ case CDROMEJECT: -+ ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); -+ break; -+ case CDROMCLOSETRAY: -+ ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); -+ break; -+ case CDROM_GET_CAPABILITY: -+ ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; -+ break; -+ case CDROM_SET_OPTIONS: -+ ret = vcd->vcd_cdrom_info.options; -+ break; -+ case CDROM_SEND_PACKET: -+ ret = submit_cdrom_cmd(info, (struct packet_command *)arg); -+ break; -+ default: -+out: -+ spin_unlock(&vcd->vcd_cdrom_info_lock); -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) -+ return blkif_ioctl(inode, file, cmd, arg); -+#else -+ return blkif_ioctl(bd, mode, cmd, arg); -+#endif -+ } -+ spin_unlock(&vcd->vcd_cdrom_info_lock); -+ return ret; -+} -+ -+/* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ -+static int xencdrom_block_media_changed(struct gendisk *disk) -+{ -+ struct vcd_disk *vcd; -+ struct vcd_disk *ret_vcd = NULL; 
-+ int ret = 0; -+ -+ spin_lock(&vcd_disks_lock); -+ list_for_each_entry(vcd, &vcd_disks, vcd_entry) { -+ if (vcd->vcd_cdrom_info.disk == disk) { -+ ret_vcd = vcd; -+ break; -+ } -+ } -+ spin_unlock(&vcd_disks_lock); -+ if (ret_vcd) { -+ ret = cdrom_media_changed(&ret_vcd->vcd_cdrom_info); -+ } -+ return ret; -+} -+ -+static const struct block_device_operations xencdrom_bdops = -+{ -+ .owner = THIS_MODULE, -+ .open = xencdrom_block_open, -+ .release = xencdrom_block_release, -+ .ioctl = xencdrom_block_ioctl, -+ .media_changed = xencdrom_block_media_changed, -+}; -+ -+void register_vcd(struct blkfront_info *info) -+{ -+ struct gendisk *gd = info->gd; -+ struct vcd_disk *vcd; -+ -+ /* Make sure this is for a CD device */ -+ if (!(gd->flags & GENHD_FL_CD)) -+ goto out; -+ -+ /* Make sure we have backend support */ -+ if (!xencdrom_supported(info)) { -+ goto out; -+ } -+ -+ /* Create new vcd_disk and fill in cdrom_info */ -+ vcd = (struct vcd_disk *)kzalloc(sizeof(struct vcd_disk), GFP_KERNEL); -+ if (!vcd) { -+ pr_info("%s(): Unable to allocate vcd struct!\n", __func__); -+ goto out; -+ } -+ spin_lock_init(&vcd->vcd_cdrom_info_lock); -+ -+ vcd->vcd_cdrom_info.ops = &xencdrom_dops; -+ vcd->vcd_cdrom_info.speed = 4; -+ vcd->vcd_cdrom_info.capacity = 1; -+ vcd->vcd_cdrom_info.options = 0; -+ strcpy(vcd->vcd_cdrom_info.name, gd->disk_name); -+ vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | -+ CDC_SELECT_DISC | CDC_SELECT_SPEED | -+ CDC_MRW | CDC_MRW_W | CDC_RAM); -+ -+ if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { -+ pr_warning("%s() Cannot register blkdev as a cdrom %d!\n", -+ __func__, gd->major); -+ goto err_out; -+ } -+ gd->fops = &xencdrom_bdops; -+ vcd->vcd_cdrom_info.disk = gd; -+ -+ spin_lock(&vcd_disks_lock); -+ list_add(&(vcd->vcd_entry), &vcd_disks); -+ spin_unlock(&vcd_disks_lock); -+out: -+ return; -+err_out: -+ kfree(vcd); -+} -+ -+void unregister_vcd(struct blkfront_info *info) { -+ struct gendisk *gd = info->gd; -+ struct vcd_disk 
*vcd; -+ -+ spin_lock(&vcd_disks_lock); -+ list_for_each_entry(vcd, &vcd_disks, vcd_entry) { -+ if (vcd->vcd_cdrom_info.disk == gd) { -+ spin_lock(&vcd->vcd_cdrom_info_lock); -+ unregister_cdrom(&vcd->vcd_cdrom_info); -+ list_del(&vcd->vcd_entry); -+ spin_unlock(&vcd->vcd_cdrom_info_lock); -+ kfree(vcd); -+ break; -+ } -+ } -+ spin_unlock(&vcd_disks_lock); -+} -+ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2010-12-06/include/xen/interface/io/cdromif.h 2010-11-23 15:06:54.000000000 +0100 -@@ -0,0 +1,120 @@ -+/****************************************************************************** -+ * cdromif.h -+ * -+ * Shared definitions between backend driver and Xen guest Virtual CDROM -+ * block device. -+ * -+ * Copyright (c) 2008, Pat Campell plc@novell.com -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this source file (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, copy, modify, -+ * merge, publish, distribute, sublicense, and/or sell copies of the Software, -+ * and to permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. 
-+ */ -+ -+#ifndef __XEN_PUBLIC_IO_CDROMIF_H__ -+#define __XEN_PUBLIC_IO_CDROMIF_H__ -+ -+/* -+ * Queries backend for CDROM support -+ */ -+#define XEN_TYPE_CDROM_SUPPORT _IO('c', 1) -+ -+struct xen_cdrom_support -+{ -+ uint32_t type; -+ int8_t ret; /* returned, 0 succeded, -1 error */ -+ int8_t err; /* returned, backend errno */ -+ int8_t supported; /* returned, 1 supported */ -+}; -+ -+/* -+ * Opens backend device, returns drive geometry or -+ * any encountered errors -+ */ -+#define XEN_TYPE_CDROM_OPEN _IO('c', 2) -+ -+struct xen_cdrom_open -+{ -+ uint32_t type; -+ int8_t ret; -+ int8_t err; -+ int8_t pad; -+ int8_t media_present; /* returned */ -+ uint32_t sectors; /* returned */ -+ uint32_t sector_size; /* returned */ -+ int32_t payload_offset; /* offset to backend node name payload */ -+}; -+ -+/* -+ * Queries backend for media changed status -+ */ -+#define XEN_TYPE_CDROM_MEDIA_CHANGED _IO('c', 3) -+ -+struct xen_cdrom_media_changed -+{ -+ uint32_t type; -+ int8_t ret; -+ int8_t err; -+ int8_t media_changed; /* returned */ -+}; -+ -+/* -+ * Sends vcd generic CDROM packet to backend, followed -+ * immediately by the vcd_generic_command payload -+ */ -+#define XEN_TYPE_CDROM_PACKET _IO('c', 4) -+ -+struct xen_cdrom_packet -+{ -+ uint32_t type; -+ int8_t ret; -+ int8_t err; -+ int8_t pad[2]; -+ int32_t payload_offset; /* offset to vcd_generic_command payload */ -+}; -+ -+/* CDROM_PACKET_COMMAND, payload for XEN_TYPE_CDROM_PACKET */ -+struct vcd_generic_command -+{ -+ uint8_t cmd[CDROM_PACKET_SIZE]; -+ uint8_t pad[4]; -+ uint32_t buffer_offset; -+ uint32_t buflen; -+ int32_t stat; -+ uint32_t sense_offset; -+ uint8_t data_direction; -+ uint8_t pad1[3]; -+ int32_t quiet; -+ int32_t timeout; -+}; -+ -+union xen_block_packet -+{ -+ uint32_t type; -+ struct xen_cdrom_support xcs; -+ struct xen_cdrom_open xco; -+ struct xen_cdrom_media_changed xcmc; -+ struct xen_cdrom_packet xcp; -+}; -+ -+#define PACKET_PAYLOAD_OFFSET (sizeof(struct xen_cdrom_packet)) -+#define 
PACKET_SENSE_OFFSET (PACKET_PAYLOAD_OFFSET + sizeof(struct vcd_generic_command)) -+#define PACKET_BUFFER_OFFSET (PACKET_SENSE_OFFSET + sizeof(struct request_sense)) -+#define MAX_PACKET_DATA (PAGE_SIZE - sizeof(struct xen_cdrom_packet) - \ -+ sizeof(struct vcd_generic_command) - sizeof(struct request_sense)) -+ -+#endif diff --git a/patches.xen/xen-blkif-protocol-fallback-hack b/patches.xen/xen-blkif-protocol-fallback-hack deleted file mode 100644 index 2fa8016..0000000 --- a/patches.xen/xen-blkif-protocol-fallback-hack +++ /dev/null @@ -1,225 +0,0 @@ -Subject: 32-on-64 blkif protocol negotiation fallback for old guests. -From: kraxel@suse.de -References: 244055 -Patch-mainline: never. - -See the comment below. Oh well. - ---- head-2011-02-17.orig/drivers/xen/Kconfig 2011-02-02 17:03:22.000000000 +0100 -+++ head-2011-02-17/drivers/xen/Kconfig 2011-02-24 15:23:15.000000000 +0100 -@@ -26,6 +26,9 @@ config XEN_PRIVCMD - def_bool y - depends on PROC_FS - -+config XEN_DOMCTL -+ tristate -+ - config XEN_XENBUS_DEV - def_bool y - depends on PROC_FS -@@ -45,6 +48,7 @@ config XEN_BLKDEV_BACKEND - tristate "Block-device backend driver" - depends on BLOCK && XEN_BACKEND - default XEN_BACKEND -+ select XEN_DOMCTL - help - The block-device backend driver allows the kernel to export its - block devices to other guests via a high-performance shared-memory -@@ -54,6 +58,7 @@ config XEN_BLKDEV_TAP - tristate "Block-device tap backend driver" - depends on BLOCK && XEN_BACKEND - default XEN_BACKEND -+ select XEN_DOMCTL - help - The block tap driver is an alternative to the block back driver - and allows VM block requests to be redirected to userspace through ---- head-2011-02-17.orig/drivers/xen/blkback/xenbus.c 2010-11-25 10:57:03.000000000 +0100 -+++ head-2011-02-17/drivers/xen/blkback/xenbus.c 2010-11-25 10:57:11.000000000 +0100 -@@ -21,6 +21,7 @@ - #include - #include - #include "common.h" -+#include "../core/domctl.h" - - #undef DPRINTK - #define DPRINTK(fmt, args...) 
\ -@@ -498,8 +499,10 @@ static int connect_ring(struct backend_i - be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; - err = xenbus_gather(XBT_NIL, dev->otherend, "protocol", - "%63s", protocol, NULL); -- if (err) -- strcpy(protocol, "unspecified, assuming native"); -+ if (err) { -+ strcpy(protocol, "unspecified"); -+ be->blkif->blk_protocol = xen_guest_blkif_protocol(be->blkif->domid); -+ } - else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) - be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; - else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32)) ---- head-2011-02-17.orig/drivers/xen/blktap/xenbus.c 2010-11-25 10:57:00.000000000 +0100 -+++ head-2011-02-17/drivers/xen/blktap/xenbus.c 2010-11-25 10:57:14.000000000 +0100 -@@ -39,6 +39,7 @@ - #include - #include - #include "common.h" -+#include "../core/domctl.h" - - - struct backend_info -@@ -449,8 +450,10 @@ static int connect_ring(struct backend_i - be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; - err = xenbus_gather(XBT_NIL, dev->otherend, "protocol", - "%63s", protocol, NULL); -- if (err) -- strcpy(protocol, "unspecified, assuming native"); -+ if (err) { -+ strcpy(protocol, "unspecified"); -+ be->blkif->blk_protocol = xen_guest_blkif_protocol(be->blkif->domid); -+ } - else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) - be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; - else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32)) ---- head-2011-02-17.orig/drivers/xen/core/Makefile 2011-02-01 14:44:12.000000000 +0100 -+++ head-2011-02-17/drivers/xen/core/Makefile 2010-04-19 14:55:02.000000000 +0200 -@@ -12,3 +12,6 @@ obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o - obj-$(CONFIG_XEN_SMPBOOT) += smpboot.o - obj-$(CONFIG_SMP) += spinlock.o - obj-$(CONFIG_KEXEC) += machine_kexec.o -+obj-$(CONFIG_XEN_DOMCTL) += domctl.o -+CFLAGS_domctl.o := -D__XEN_PUBLIC_XEN_H__ -D__XEN_PUBLIC_GRANT_TABLE_H__ -+CFLAGS_domctl.o += -D__XEN_TOOLS__ -imacros xen/interface/domctl.h ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 
-+++ head-2011-02-17/drivers/xen/core/domctl.c 2010-10-11 10:31:06.000000000 +0200 -@@ -0,0 +1,127 @@ -+/* -+ * !!! dirty hack alert !!! -+ * -+ * Problem: old guests kernels don't have a "protocol" node -+ * in the frontend xenstore directory, so mixing -+ * 32 and 64bit domains doesn't work. -+ * -+ * Upstream plans to solve this in the tools, by letting them -+ * create a protocol node. Which certainly makes sense. -+ * But it isn't trivial and isn't done yet. Too bad. -+ * -+ * So for the time being we use the get_address_size domctl -+ * hypercall for a pretty good guess. Not nice as the domctl -+ * hypercall isn't supposed to be used by the kernel. Because -+ * we don't want to have dependencies between dom0 kernel and -+ * xen kernel versions. Now we have one. Ouch. -+ */ -+#undef __XEN_PUBLIC_XEN_H__ -+#undef __XEN_PUBLIC_GRANT_TABLE_H__ -+#undef __XEN_TOOLS__ -+#include -+#include -+#include -+#include -+ -+#include "domctl.h" -+ -+/* stuff copied from xen/interface/domctl.h, which we can't -+ * include directly for the reasons outlined above .... 
*/ -+ -+typedef struct xen_domctl_address_size { -+ uint32_t size; -+} xen_domctl_address_size_t; -+ -+typedef __attribute__((aligned(8))) uint64_t uint64_aligned_t; -+ -+union xen_domctl { -+ /* v4: sle10 sp1: xen 3.0.4 + 32-on-64 patches */ -+ struct { -+ uint32_t cmd; -+ uint32_t interface_version; -+ domid_t domain; -+ union { -+ /* left out lots of other struct xen_domctl_foobar */ -+ struct xen_domctl_address_size address_size; -+ uint64_t dummy_align; -+ uint8_t dummy_pad[128]; -+ }; -+ } v4; -+ -+ /* -+ * v5: upstream: xen 3.1 -+ * v6: upstream: xen 4.0 -+ * v7: sle11 sp1: xen 4.0 + cpupools patches -+ */ -+ struct { -+ uint32_t cmd; -+ uint32_t interface_version; -+ domid_t domain; -+ union { -+ struct xen_domctl_address_size address_size; -+ uint64_aligned_t dummy_align; -+ uint8_t dummy_pad[128]; -+ }; -+ } v5, v6, v7; -+}; -+ -+/* The actual code comes here */ -+ -+static inline int hypervisor_domctl(void *domctl) -+{ -+ return _hypercall1(int, domctl, domctl); -+} -+ -+int xen_guest_address_size(int domid) -+{ -+ union xen_domctl domctl; -+ int low, ret; -+ -+#define guest_address_size(ver) do { \ -+ memset(&domctl, 0, sizeof(domctl)); \ -+ domctl.v##ver.cmd = XEN_DOMCTL_get_address_size; \ -+ domctl.v##ver.interface_version = low = ver; \ -+ domctl.v##ver.domain = domid; \ -+ ret = hypervisor_domctl(&domctl) ?: domctl.v##ver.address_size.size; \ -+ if (ret == 32 || ret == 64) { \ -+ pr_info("v" #ver " domctl worked ok: dom%d is %d-bit\n",\ -+ domid, ret); \ -+ return ret; \ -+ } \ -+} while (0) -+ -+ BUILD_BUG_ON(XEN_DOMCTL_INTERFACE_VERSION > 7); -+ guest_address_size(7); -+#if CONFIG_XEN_COMPAT < 0x040100 -+ guest_address_size(6); -+#endif -+#if CONFIG_XEN_COMPAT < 0x040000 -+ guest_address_size(5); -+#endif -+#if CONFIG_XEN_COMPAT < 0x030100 -+ guest_address_size(4); -+#endif -+ -+ ret = BITS_PER_LONG; -+ pr_warning("v%d...%d domctls failed, assuming dom%d is native: %d\n", -+ low, XEN_DOMCTL_INTERFACE_VERSION, domid, ret); -+ -+ return ret; -+} 
-+EXPORT_SYMBOL_GPL(xen_guest_address_size); -+ -+int xen_guest_blkif_protocol(int domid) -+{ -+ int address_size = xen_guest_address_size(domid); -+ -+ if (address_size == BITS_PER_LONG) -+ return BLKIF_PROTOCOL_NATIVE; -+ if (address_size == 32) -+ return BLKIF_PROTOCOL_X86_32; -+ if (address_size == 64) -+ return BLKIF_PROTOCOL_X86_64; -+ return BLKIF_PROTOCOL_NATIVE; -+} -+EXPORT_SYMBOL_GPL(xen_guest_blkif_protocol); -+ -+MODULE_LICENSE("GPL"); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-02-17/drivers/xen/core/domctl.h 2010-03-25 14:37:59.000000000 +0100 -@@ -0,0 +1,2 @@ -+int xen_guest_address_size(int domid); -+int xen_guest_blkif_protocol(int domid); diff --git a/patches.xen/xen-blktap-modular b/patches.xen/xen-blktap-modular deleted file mode 100644 index 7c39b0e..0000000 --- a/patches.xen/xen-blktap-modular +++ /dev/null @@ -1,47 +0,0 @@ -From: ccoffing@novell.com -Subject: Retain backwards-compatible module name with CONFIG_XEN_BLKDEV_TAP=m -Patch-mainline: obsolete - ---- head-2011-02-17.orig/drivers/xen/blktap/Makefile 2007-06-12 13:13:44.000000000 +0200 -+++ head-2011-02-17/drivers/xen/blktap/Makefile 2009-05-29 12:39:04.000000000 +0200 -@@ -1,5 +1,5 @@ - LINUXINCLUDE += -I../xen/include/public/io - --obj-$(CONFIG_XEN_BLKDEV_TAP) := xenblktap.o -+obj-$(CONFIG_XEN_BLKDEV_TAP) := blktap.o - --xenblktap-y := xenbus.o interface.o blktap.o -+blktap-y := xenbus.o interface.o blocktap.o ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-02-17/drivers/xen/blktap/blocktap.c 2009-05-29 12:39:04.000000000 +0200 -@@ -0,0 +1 @@ -+#include "blktap.c" ---- head-2011-02-17.orig/drivers/xen/blktap2/Makefile 2009-05-29 10:25:53.000000000 +0200 -+++ head-2011-02-17/drivers/xen/blktap2/Makefile 2009-05-29 12:39:04.000000000 +0200 -@@ -1,3 +1,4 @@ --obj-$(CONFIG_XEN_BLKDEV_TAP2) := blktap.o -+obj-$(CONFIG_XEN_BLKDEV_TAP2) := blktap2.o - --blktap-objs := control.o ring.o wait_queue.o device.o request.o sysfs.o -+blktap2-y := control.o 
ring.o wait_queue.o device.o request.o -+blktap2-$(CONFIG_SYSFS) += sysfs.o ---- head-2011-02-17.orig/drivers/xen/blktap2/blktap.h 2011-02-01 15:03:03.000000000 +0100 -+++ head-2011-02-17/drivers/xen/blktap2/blktap.h 2011-02-24 15:24:27.000000000 +0100 -@@ -216,10 +216,17 @@ int blktap_ring_pause(struct blktap *); - int blktap_ring_resume(struct blktap *); - void blktap_ring_kick_user(struct blktap *); - -+#ifdef CONFIG_SYSFS - int blktap_sysfs_init(void); - void blktap_sysfs_free(void); - int blktap_sysfs_create(struct blktap *); - int blktap_sysfs_destroy(struct blktap *); -+#else -+static inline int blktap_sysfs_init(void) { return 0; } -+static inline void blktap_sysfs_exit(void) {} -+static inline int blktap_sysfs_create(struct blktap *tapdev) { return 0; } -+static inline int blktap_sysfs_destroy(struct blktap *tapdev) { return 0; } -+#endif - - int blktap_device_init(int *); - void blktap_device_free(void); diff --git a/patches.xen/xen-blktap-write-barriers b/patches.xen/xen-blktap-write-barriers deleted file mode 100644 index 9f7a6fc..0000000 --- a/patches.xen/xen-blktap-write-barriers +++ /dev/null @@ -1,79 +0,0 @@ -From: kwolf@suse.de -Subject: blktap: Write Barriers -Patch-mainline: obsolete - ---- head-2011-02-17.orig/drivers/xen/blktap/blktap.c 2011-02-28 14:15:27.000000000 +0100 -+++ head-2011-02-17/drivers/xen/blktap/blktap.c 2011-02-28 14:21:49.000000000 +0100 -@@ -1374,6 +1374,9 @@ static int do_block_io_op(blkif_t *blkif - dispatch_rw_block_io(blkif, &req, pending_req); - break; - -+ case BLKIF_OP_WRITE_BARRIER: -+ /* TODO Some counter? */ -+ /* Fall through */ - case BLKIF_OP_WRITE: - blkif->st_wr_req++; - dispatch_rw_block_io(blkif, &req, pending_req); -@@ -1432,7 +1435,7 @@ static void dispatch_rw_block_io(blkif_t - - /* Check that number of segments is sane. 
*/ - nseg = req->nr_segments; -- if ( unlikely(nseg == 0) || -+ if (unlikely(nseg == 0 && req->operation != BLKIF_OP_WRITE_BARRIER) || - unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) { - WPRINTK("Bad number of segments in request (%d)\n", nseg); - goto fail_response; -@@ -1458,8 +1461,13 @@ static void dispatch_rw_block_io(blkif_t - pending_req->nr_pages = nseg; - - flags = GNTMAP_host_map; -- if (req->operation == BLKIF_OP_WRITE) -+ switch (req->operation) { -+ case BLKIF_OP_WRITE: -+ case BLKIF_OP_WRITE_BARRIER: - flags |= GNTMAP_readonly; -+ break; -+ } -+ - op = 0; - mm = info->mm; - if (!xen_feature(XENFEAT_auto_translated_physmap)) -@@ -1622,6 +1630,7 @@ static void dispatch_rw_block_io(blkif_t - blkif->st_rd_sect += nr_sects; - break; - case BLKIF_OP_WRITE: -+ case BLKIF_OP_WRITE_BARRIER: - blkif->st_wr_sect += nr_sects; - break; - } ---- head-2011-02-17.orig/drivers/xen/blktap/xenbus.c 2010-11-25 10:57:14.000000000 +0100 -+++ head-2011-02-17/drivers/xen/blktap/xenbus.c 2010-11-25 10:57:21.000000000 +0100 -@@ -418,7 +418,28 @@ static void connect(struct backend_info - int err; - - struct xenbus_device *dev = be->dev; -+ struct xenbus_transaction xbt; - -+ /* Write feature-barrier to xenstore */ -+again: -+ err = xenbus_transaction_start(&xbt); -+ if (err) { -+ xenbus_dev_fatal(dev, err, "starting transaction"); -+ return; -+ } -+ -+ err = xenbus_printf(xbt, dev->nodename, "feature-barrier", "1"); -+ if (err) { -+ xenbus_dev_fatal(dev, err, "writing feature-barrier"); -+ xenbus_transaction_end(xbt, 1); -+ return; -+ } -+ -+ err = xenbus_transaction_end(xbt, 0); -+ if (err == -EAGAIN) -+ goto again; -+ -+ /* Switch state */ - err = xenbus_switch_state(dev, XenbusStateConnected); - if (err) - xenbus_dev_fatal(dev, err, "switching to Connected state", diff --git a/patches.xen/xen-blktap2-use-after-free b/patches.xen/xen-blktap2-use-after-free deleted file mode 100644 index ed6d01c..0000000 --- a/patches.xen/xen-blktap2-use-after-free +++ /dev/null @@ -1,27 
+0,0 @@ -From: Dominic Curran -Subject: blktap: Fix reference to freed struct request -Patch-mainline: tbd - -The request will be freed by the call to __blktap_end_rq(), so rq->q -is invalid before spin_unlock_irq(). - -Signed-off-by: Dominic Curran -Acked-by: Daniel Stodden -Acked-by: jbeulich@novell.com - ---- head-2011-03-11.orig/drivers/xen/blktap2-new/device.c 2011-02-24 16:31:17.000000000 +0100 -+++ head-2011-03-11/drivers/xen/blktap2-new/device.c 2011-03-11 00:00:00.000000000 +0100 -@@ -135,9 +135,11 @@ __blktap_end_rq(struct request *rq, int - static inline void - blktap_end_rq(struct request *rq, int err) - { -- spin_lock_irq(rq->q->queue_lock); -+ struct request_queue *q = rq->q; -+ -+ spin_lock_irq(q->queue_lock); - __blktap_end_rq(rq, err); -- spin_unlock_irq(rq->q->queue_lock); -+ spin_unlock_irq(q->queue_lock); - } - - void diff --git a/patches.xen/xen-block-backends-cleanup b/patches.xen/xen-block-backends-cleanup deleted file mode 100644 index 8a29c7c..0000000 --- a/patches.xen/xen-block-backends-cleanup +++ /dev/null @@ -1,242 +0,0 @@ -From: jbeulich@novell.com -Subject: cleanup to blkback and blktap -Patch-mainline: n/a - -Remove unused/unneccessary fields of their pending_req_t structures, -and reduce the width of those structures' nr_pages field. - -Move loop-invariant grant table flags calculation out of loops (also -in scsiback). 
- ---- head-2011-02-17.orig/drivers/xen/blkback/blkback.c 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-02-17/drivers/xen/blkback/blkback.c 2011-02-28 14:15:32.000000000 +0100 -@@ -73,10 +73,9 @@ module_param(debug_lvl, int, 0644); - typedef struct { - blkif_t *blkif; - u64 id; -- int nr_pages; - atomic_t pendcnt; -+ unsigned short nr_pages; - unsigned short operation; -- int status; - struct list_head free_list; - } pending_req_t; - -@@ -255,22 +254,24 @@ int blkif_schedule(void *arg) - - static void __end_block_io_op(pending_req_t *pending_req, int error) - { -+ int status = BLKIF_RSP_OKAY; -+ - /* An error fails the entire request. */ - if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) && - (error == -EOPNOTSUPP)) { - DPRINTK("blkback: write barrier op failed, not supported\n"); - blkback_barrier(XBT_NIL, pending_req->blkif->be, 0); -- pending_req->status = BLKIF_RSP_EOPNOTSUPP; -+ status = BLKIF_RSP_EOPNOTSUPP; - } else if (error) { - DPRINTK("Buffer not up-to-date at end of operation, " - "error=%d\n", error); -- pending_req->status = BLKIF_RSP_ERROR; -+ status = BLKIF_RSP_ERROR; - } - - if (atomic_dec_and_test(&pending_req->pendcnt)) { - fast_flush_area(pending_req); - make_response(pending_req->blkif, pending_req->id, -- pending_req->operation, pending_req->status); -+ pending_req->operation, status); - blkif_put(pending_req->blkif); - free_req(pending_req); - } -@@ -387,7 +388,6 @@ static void dispatch_rw_block_io(blkif_t - blkif_request_t *req, - pending_req_t *pending_req) - { -- extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); - struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; - struct phys_req preq; - struct { -@@ -395,6 +395,7 @@ static void dispatch_rw_block_io(blkif_t - } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; - unsigned int nseg; - struct bio *bio = NULL; -+ uint32_t flags; - int ret, i; - int operation; - -@@ -428,12 +429,13 @@ static void dispatch_rw_block_io(blkif_t - pending_req->blkif = blkif; - 
pending_req->id = req->id; - pending_req->operation = req->operation; -- pending_req->status = BLKIF_RSP_OKAY; - pending_req->nr_pages = nseg; - -- for (i = 0; i < nseg; i++) { -- uint32_t flags; -+ flags = GNTMAP_host_map; -+ if (operation != READ) -+ flags |= GNTMAP_readonly; - -+ for (i = 0; i < nseg; i++) { - seg[i].nsec = req->seg[i].last_sect - - req->seg[i].first_sect + 1; - -@@ -442,9 +444,6 @@ static void dispatch_rw_block_io(blkif_t - goto fail_response; - preq.nr_sects += seg[i].nsec; - -- flags = GNTMAP_host_map; -- if (operation != READ) -- flags |= GNTMAP_readonly; - gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags, - req->seg[i].gref, blkif->domid); - } ---- head-2011-02-17.orig/drivers/xen/blktap/blktap.c 2011-02-17 10:19:26.000000000 +0100 -+++ head-2011-02-17/drivers/xen/blktap/blktap.c 2011-02-28 14:15:27.000000000 +0100 -@@ -134,20 +134,14 @@ module_param(debug_lvl, int, 0644); - - /* - * Each outstanding request that we've passed to the lower device layers has a -- * 'pending_req' allocated to it. Each buffer_head that completes decrements -- * the pendcnt towards zero. When it hits zero, the specified domain has a -- * response queued for it, with the saved 'id' passed back. -+ * 'pending_req' allocated to it. 
- */ - typedef struct { - blkif_t *blkif; - u64 id; - unsigned short mem_idx; -- int nr_pages; -- atomic_t pendcnt; -- unsigned short operation; -- int status; -+ unsigned short nr_pages; - struct list_head free_list; -- int inuse; - } pending_req_t; - - static pending_req_t *pending_reqs[MAX_PENDING_REQS]; -@@ -994,10 +988,8 @@ static pending_req_t* alloc_req(void) - list_del(&req->free_list); - } - -- if (req) { -- req->inuse = 1; -+ if (req) - alloc_pending_reqs++; -- } - spin_unlock_irqrestore(&pending_free_lock, flags); - - return req; -@@ -1011,7 +1003,6 @@ static void free_req(pending_req_t *req) - spin_lock_irqsave(&pending_free_lock, flags); - - alloc_pending_reqs--; -- req->inuse = 0; - if (mmap_lock && (req->mem_idx == mmap_alloc-1)) { - mmap_inuse--; - if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1); -@@ -1413,16 +1404,15 @@ static void dispatch_rw_block_io(blkif_t - blkif_request_t *req, - pending_req_t *pending_req) - { -- extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); -- int op, operation = (req->operation == BLKIF_OP_WRITE) ? 
WRITE : READ; - struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2]; - unsigned int nseg; -- int ret, i, nr_sects = 0; -+ int ret, i, op, nr_sects = 0; - tap_blkif_t *info; - blkif_request_t *target; - unsigned int mmap_idx = pending_req->mem_idx; - unsigned int pending_idx = RTN_PEND_IDX(pending_req, mmap_idx); - unsigned int usr_idx; -+ uint32_t flags; - struct mm_struct *mm; - struct vm_area_struct *vma = NULL; - -@@ -1465,9 +1455,11 @@ static void dispatch_rw_block_io(blkif_t - - pending_req->blkif = blkif; - pending_req->id = req->id; -- pending_req->operation = operation; -- pending_req->status = BLKIF_RSP_OKAY; - pending_req->nr_pages = nseg; -+ -+ flags = GNTMAP_host_map; -+ if (req->operation == BLKIF_OP_WRITE) -+ flags |= GNTMAP_readonly; - op = 0; - mm = info->mm; - if (!xen_feature(XENFEAT_auto_translated_physmap)) -@@ -1476,14 +1468,10 @@ static void dispatch_rw_block_io(blkif_t - unsigned long uvaddr; - unsigned long kvaddr; - uint64_t ptep; -- uint32_t flags; - - uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i); - kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i); - -- flags = GNTMAP_host_map; -- if (operation == WRITE) -- flags |= GNTMAP_readonly; - gnttab_set_map_op(&map[op], kvaddr, flags, - req->seg[i].gref, blkif->domid); - op++; -@@ -1497,11 +1485,9 @@ static void dispatch_rw_block_io(blkif_t - goto fail_flush; - } - -- flags = GNTMAP_host_map | GNTMAP_application_map -- | GNTMAP_contains_pte; -- if (operation == WRITE) -- flags |= GNTMAP_readonly; -- gnttab_set_map_op(&map[op], ptep, flags, -+ gnttab_set_map_op(&map[op], ptep, -+ flags | GNTMAP_application_map -+ | GNTMAP_contains_pte, - req->seg[i].gref, blkif->domid); - op++; - } -@@ -1631,10 +1617,14 @@ static void dispatch_rw_block_io(blkif_t - wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */ - info->ufe_ring.req_prod_pvt++; - -- if (operation == READ) -+ switch (req->operation) { -+ case BLKIF_OP_READ: - blkif->st_rd_sect += nr_sects; -- else if (operation == 
WRITE) -+ break; -+ case BLKIF_OP_WRITE: - blkif->st_wr_sect += nr_sects; -+ break; -+ } - - return; - ---- head-2011-02-17.orig/drivers/xen/scsiback/scsiback.c 2011-02-01 15:04:27.000000000 +0100 -+++ head-2011-02-17/drivers/xen/scsiback/scsiback.c 2011-02-28 14:51:23.000000000 +0100 -@@ -272,14 +272,14 @@ static int scsiback_gnttab_data_map(vscs - - sg_init_table(pending_req->sgl, nr_segments); - -- for (i = 0; i < nr_segments; i++) { -- flags = GNTMAP_host_map; -- if (write) -- flags |= GNTMAP_readonly; -+ flags = GNTMAP_host_map; -+ if (write) -+ flags |= GNTMAP_readonly; -+ -+ for (i = 0; i < nr_segments; i++) - gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags, - ring_req->seg[i].gref, - info->domid); -- } - - err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments); - BUG_ON(err); diff --git a/patches.xen/xen-clockevents b/patches.xen/xen-clockevents deleted file mode 100644 index 25652ae..0000000 --- a/patches.xen/xen-clockevents +++ /dev/null @@ -1,1015 +0,0 @@ -From: jbeulich@novell.com -Subject: replace Xen's custom time handling with such using GENERIC_CLOCKEVENTS infrastructure -Patch-mainline: n/a - -Once validated this could be merged into the 2.6.?? patch. 
- ---- head-2011-03-11.orig/arch/x86/Kconfig 2011-02-01 16:43:32.000000000 +0100 -+++ head-2011-03-11/arch/x86/Kconfig 2011-02-02 15:09:52.000000000 +0100 -@@ -90,7 +90,6 @@ config CLOCKSOURCE_WATCHDOG - - config GENERIC_CLOCKEVENTS - def_bool y -- depends on !XEN - - config GENERIC_CLOCKEVENTS_BROADCAST - def_bool y ---- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-01 15:03:10.000000000 +0100 -+++ head-2011-03-11/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-02 15:09:52.000000000 +0100 -@@ -74,7 +74,6 @@ extern start_info_t *xen_start_info; - #define init_hypervisor_platform() init_hypervisor(&boot_cpu_data) - - DECLARE_PER_CPU(struct vcpu_runstate_info, runstate); --struct vcpu_runstate_info *setup_runstate_area(unsigned int cpu); - #define vcpu_running(cpu) (per_cpu(runstate.state, cpu) == RUNSTATE_running) - - /* arch/xen/kernel/evtchn.c */ ---- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-03-11/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-02 15:09:52.000000000 +0100 -@@ -4,6 +4,8 @@ - #include - - #ifndef __ASSEMBLY__ -+#include -+#include - /* - * The use of 'barrier' in the following reflects their use as local-lock - * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following -@@ -43,10 +45,6 @@ do { \ - force_evtchn_callback(); \ - } while (0) - --void xen_safe_halt(void); -- --void xen_halt(void); -- - #define arch_local_save_flags() xen_save_fl() - - #define arch_local_irq_restore(flags) xen_restore_fl(flags) -@@ -59,19 +57,16 @@ void xen_halt(void); - * Used in the idle loop; sti takes one instruction cycle - * to complete: - */ --static inline void arch_safe_halt(void) --{ -- xen_safe_halt(); --} -+#define arch_safe_halt HYPERVISOR_block - - /* - * Used when interrupts are already enabled or to - * shutdown the processor: - */ --static inline void halt(void) --{ -- xen_halt(); --} -+#define halt() VOID(irqs_disabled() \ -+ ? 
HYPERVISOR_vcpu_op(VCPUOP_down, \ -+ smp_processor_id(), NULL) \ -+ : 0) - - /* - * For spinlocks, etc: ---- head-2011-03-11.orig/arch/x86/kernel/time-xen.c 2010-10-05 16:57:34.000000000 +0200 -+++ head-2011-03-11/arch/x86/kernel/time-xen.c 2011-02-02 15:09:52.000000000 +0100 -@@ -25,7 +25,7 @@ - #include - #include - --#include -+#include - #include - #include - -@@ -55,13 +55,7 @@ static DEFINE_PER_CPU(struct shadow_time - static struct timespec shadow_tv; - static u32 shadow_tv_version; - --/* Keep track of last time we did processing/updating of jiffies and xtime. */ --static u64 processed_system_time; /* System time (ns) at last processing. */ --static DEFINE_PER_CPU(u64, processed_system_time); -- --/* How much CPU time was spent blocked and how much was 'stolen'? */ --static DEFINE_PER_CPU(u64, processed_stolen_time); --static DEFINE_PER_CPU(u64, processed_blocked_time); -+static u64 jiffies_bias, system_time_bias; - - /* Current runstate of each CPU (updated automatically by the hypervisor). */ - DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); -@@ -69,16 +63,6 @@ DEFINE_PER_CPU(struct vcpu_runstate_info - /* Must be signed, as it's compared with s64 quantities which can be -ve. */ - #define NS_PER_TICK (1000000000LL/HZ) - --static struct vcpu_set_periodic_timer xen_set_periodic_tick = { -- .period_ns = NS_PER_TICK --}; -- --/* -- * GCC 4.3 can turn loops over an induction variable into division. We do -- * not support arbitrary 64-bit division, and so must break the induction. -- */ --#define clobber_induction_variable(v) asm ( "" : "+r" (v) ) -- - /* Does this guest OS track Xen time, or set its wall clock independently? 
*/ - static int independent_wallclock = 0; - static int __init __independent_wallclock(char *str) -@@ -185,6 +169,11 @@ static u64 get_nsec_offset(struct shadow - return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift); - } - -+static inline u64 processed_system_time(void) -+{ -+ return (jiffies_64 - jiffies_bias) * NS_PER_TICK + system_time_bias; -+} -+ - static void update_wallclock(void) - { - static DEFINE_MUTEX(uwc_mutex); -@@ -201,7 +190,7 @@ static void update_wallclock(void) - } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version)); - - if (!independent_wallclock) { -- u64 tmp = processed_system_time; -+ u64 tmp = processed_system_time(); - long nsec = do_div(tmp, NSEC_PER_SEC); - struct timespec tv; - -@@ -219,6 +208,13 @@ static void _update_wallclock(struct wor - } - static DECLARE_WORK(update_wallclock_work, _update_wallclock); - -+void xen_check_wallclock_update(void) -+{ -+ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version -+ && keventd_up()) -+ schedule_work(&update_wallclock_work); -+} -+ - /* - * Reads a consistent set of time-base values from Xen, into a shadow data - * area. 
-@@ -285,7 +281,7 @@ static void sync_xen_wallclock(unsigned - op.cmd = XENPF_settime; - op.u.settime.secs = now.tv_sec; - op.u.settime.nsecs = now.tv_nsec; -- op.u.settime.system_time = processed_system_time; -+ op.u.settime.system_time = processed_system_time(); - WARN_ON(HYPERVISOR_platform_op(&op)); - - update_wallclock(); -@@ -294,7 +290,7 @@ static void sync_xen_wallclock(unsigned - mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ); - } - --static unsigned long long local_clock(void) -+unsigned long long xen_local_clock(void) - { - unsigned int cpu = get_cpu(); - struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); -@@ -318,7 +314,7 @@ static unsigned long long local_clock(vo - /* - * Runstate accounting - */ --static void get_runstate_snapshot(struct vcpu_runstate_info *res) -+void get_runstate_snapshot(struct vcpu_runstate_info *res) - { - u64 state_time; - struct vcpu_runstate_info *state; -@@ -354,7 +350,7 @@ unsigned long long sched_clock(void) - */ - preempt_disable(); - -- now = local_clock(); -+ now = xen_local_clock(); - - get_runstate_snapshot(&runstate); - -@@ -397,138 +393,6 @@ unsigned long profile_pc(struct pt_regs - } - EXPORT_SYMBOL(profile_pc); - --/* -- * Default timer interrupt handler -- */ --static irqreturn_t timer_interrupt(int irq, void *dev_id) --{ -- s64 delta, delta_cpu, stolen, blocked; -- unsigned int i, cpu = smp_processor_id(); -- struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); -- struct vcpu_runstate_info runstate; -- -- /* Keep nmi watchdog up to date */ -- inc_irq_stat(irq0_irqs); -- -- /* -- * Here we are in the timer irq handler. We just have irqs locally -- * disabled but we don't know if the timer_bh is running on the other -- * CPU. We need to avoid to SMP race with it. NOTE: we don' t need -- * the irq version of write_lock because as just said we have irq -- * locally disabled. 
-arca -- */ -- write_seqlock(&xtime_lock); -- -- do { -- get_time_values_from_xen(cpu); -- -- /* Obtain a consistent snapshot of elapsed wallclock cycles. */ -- delta = delta_cpu = -- shadow->system_timestamp + get_nsec_offset(shadow); -- delta -= processed_system_time; -- delta_cpu -= per_cpu(processed_system_time, cpu); -- -- get_runstate_snapshot(&runstate); -- } while (!time_values_up_to_date(cpu)); -- -- if ((unlikely(delta < -(s64)permitted_clock_jitter) || -- unlikely(delta_cpu < -(s64)permitted_clock_jitter)) -- && printk_ratelimit()) { -- printk("Timer ISR/%u: Time went backwards: " -- "delta=%lld delta_cpu=%lld shadow=%lld " -- "off=%lld processed=%lld cpu_processed=%lld\n", -- cpu, delta, delta_cpu, shadow->system_timestamp, -- (s64)get_nsec_offset(shadow), -- processed_system_time, -- per_cpu(processed_system_time, cpu)); -- for (i = 0; i < num_online_cpus(); i++) -- printk(" %d: %lld\n", i, -- per_cpu(processed_system_time, i)); -- } -- -- /* System-wide jiffy work. */ -- if (delta >= NS_PER_TICK) { -- do_div(delta, NS_PER_TICK); -- processed_system_time += delta * NS_PER_TICK; -- while (delta > HZ) { -- clobber_induction_variable(delta); -- do_timer(HZ); -- delta -= HZ; -- } -- do_timer(delta); -- } -- -- write_sequnlock(&xtime_lock); -- -- if (shadow_tv_version != HYPERVISOR_shared_info->wc_version -- && keventd_up()) -- schedule_work(&update_wallclock_work); -- -- /* -- * Account stolen ticks. -- * ensures that the ticks are accounted as stolen. 
-- */ -- stolen = runstate.time[RUNSTATE_runnable] -- + runstate.time[RUNSTATE_offline] -- - per_cpu(processed_stolen_time, cpu); -- if ((stolen > 0) && (delta_cpu > 0)) { -- delta_cpu -= stolen; -- if (unlikely(delta_cpu < 0)) -- stolen += delta_cpu; /* clamp local-time progress */ -- do_div(stolen, NS_PER_TICK); -- per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK; -- per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK; -- account_steal_ticks(stolen); -- } -- -- /* -- * Account blocked ticks. -- * ensures that the ticks are accounted as idle/wait. -- */ -- blocked = runstate.time[RUNSTATE_blocked] -- - per_cpu(processed_blocked_time, cpu); -- if ((blocked > 0) && (delta_cpu > 0)) { -- delta_cpu -= blocked; -- if (unlikely(delta_cpu < 0)) -- blocked += delta_cpu; /* clamp local-time progress */ -- do_div(blocked, NS_PER_TICK); -- per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK; -- per_cpu(processed_system_time, cpu) += blocked * NS_PER_TICK; -- account_idle_ticks(blocked); -- } -- -- /* Account user/system ticks. */ -- if (delta_cpu > 0) { -- cputime_t ct; -- -- do_div(delta_cpu, NS_PER_TICK); -- per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK; -- ct = jiffies_to_cputime(delta_cpu); -- if (user_mode_vm(get_irq_regs())) -- account_user_time(current, ct, cputime_to_scaled(ct)); -- else if (current != idle_task(cpu) -- || irq_count() != HARDIRQ_OFFSET) -- account_system_time(current, HARDIRQ_OFFSET, -- ct, cputime_to_scaled(ct)); -- else -- account_idle_ticks(delta_cpu); -- } -- -- /* Offlined for more than a few seconds? Avoid lockup warnings. */ -- if (stolen > 5*HZ) -- touch_softlockup_watchdog(); -- -- /* Local timer processing (see update_process_times()). 
*/ -- run_local_timers(); -- rcu_check_callbacks(cpu, user_mode_vm(get_irq_regs())); -- printk_tick(); -- scheduler_tick(); -- run_posix_cpu_timers(current); -- profile_tick(CPU_PROFILING); -- -- return IRQ_HANDLED; --} -- - void mark_tsc_unstable(char *reason) - { - #ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */ -@@ -537,24 +401,13 @@ void mark_tsc_unstable(char *reason) - } - EXPORT_SYMBOL_GPL(mark_tsc_unstable); - --static void init_missing_ticks_accounting(unsigned int cpu) --{ -- struct vcpu_runstate_info *runstate = setup_runstate_area(cpu); -- -- per_cpu(processed_blocked_time, cpu) = -- runstate->time[RUNSTATE_blocked]; -- per_cpu(processed_stolen_time, cpu) = -- runstate->time[RUNSTATE_runnable] + -- runstate->time[RUNSTATE_offline]; --} -- - static cycle_t cs_last; - - static cycle_t xen_clocksource_read(struct clocksource *cs) - { - #ifdef CONFIG_SMP - cycle_t last = get64(&cs_last); -- cycle_t ret = local_clock(); -+ cycle_t ret = xen_local_clock(); - - if (unlikely((s64)(ret - last) < 0)) { - if (last - ret > permitted_clock_jitter -@@ -580,37 +433,28 @@ static cycle_t xen_clocksource_read(stru - last = cur; - } - #else -- return local_clock(); -+ return xen_local_clock(); - #endif - } - - /* No locking required. Interrupts are disabled on all CPUs. 
*/ - static void xen_clocksource_resume(struct clocksource *cs) - { -+ unsigned long seq; - unsigned int cpu; - - init_cpu_khz(); - -- for_each_online_cpu(cpu) { -- switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu, -- &xen_set_periodic_tick)) { -- case 0: --#if CONFIG_XEN_COMPAT <= 0x030004 -- case -ENOSYS: --#endif -- break; -- default: -- BUG(); -- } -+ for_each_online_cpu(cpu) - get_time_values_from_xen(cpu); -- per_cpu(processed_system_time, cpu) = -- per_cpu(shadow_time, 0).system_timestamp; -- init_missing_ticks_accounting(cpu); -- } - -- processed_system_time = per_cpu(shadow_time, 0).system_timestamp; -+ do { -+ seq = read_seqbegin(&xtime_lock); -+ jiffies_bias = jiffies_64; -+ } while (read_seqretry(&xtime_lock, seq)); -+ system_time_bias = per_cpu(shadow_time, 0).system_timestamp; - -- cs_last = local_clock(); -+ cs_last = xen_local_clock(); - } - - static struct clocksource clocksource_xen = { -@@ -655,7 +499,7 @@ void xen_read_persistent_clock(struct ti - rmb(); - } while ((s->wc_version & 1) | (version ^ s->wc_version)); - -- delta = local_clock() + (u64)sec * NSEC_PER_SEC + nsec; -+ delta = xen_local_clock() + (u64)sec * NSEC_PER_SEC + nsec; - do_div(delta, NSEC_PER_SEC); - - ts->tv_sec = delta; -@@ -670,24 +514,10 @@ int xen_update_persistent_clock(void) - return 0; - } - --/* Dynamically-mapped IRQ. 
*/ --static int __read_mostly timer_irq = -1; --static struct irqaction timer_action = { -- .handler = timer_interrupt, -- .flags = IRQF_DISABLED|IRQF_TIMER, -- .name = "timer" --}; -- --static void __init setup_cpu0_timer_irq(void) --{ -- timer_irq = bind_virq_to_irqaction(VIRQ_TIMER, 0, &timer_action); -- BUG_ON(timer_irq < 0); --} -- - static void __init _late_time_init(void) - { - update_wallclock(); -- setup_cpu0_timer_irq(); -+ xen_clockevents_init(); - } - - void __init time_init(void) -@@ -696,22 +526,11 @@ void __init time_init(void) - printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n", - cpu_khz / 1000, cpu_khz % 1000); - -- switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0, -- &xen_set_periodic_tick)) { -- case 0: --#if CONFIG_XEN_COMPAT <= 0x030004 -- case -ENOSYS: --#endif -- break; -- default: -- BUG(); -- } -- -+ setup_runstate_area(0); - get_time_values_from_xen(0); - -- processed_system_time = per_cpu(shadow_time, 0).system_timestamp; -- per_cpu(processed_system_time, 0) = processed_system_time; -- init_missing_ticks_accounting(0); -+ jiffies_bias = jiffies_64; -+ system_time_bias = per_cpu(shadow_time, 0).system_timestamp; - - clocksource_register(&clocksource_xen); - -@@ -737,13 +556,13 @@ u64 jiffies_to_st(unsigned long j) - if (delta < 1) { - /* Triggers in some wrap-around cases, but that's okay: - * we just end up with a shorter timeout. */ -- st = processed_system_time + NS_PER_TICK; -+ st = processed_system_time() + NS_PER_TICK; - } else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) { - /* Very long timeout means there is no pending timer. - * We indicate this to Xen by passing zero timeout. 
*/ - st = 0; - } else { -- st = processed_system_time + delta * (u64)NS_PER_TICK; -+ st = processed_system_time() + delta * (u64)NS_PER_TICK; - } - } while (read_seqretry(&xtime_lock, seq)); - -@@ -751,122 +570,6 @@ u64 jiffies_to_st(unsigned long j) - } - EXPORT_SYMBOL(jiffies_to_st); - --/* -- * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu -- * These functions are based on implementations from arch/s390/kernel/time.c -- */ --static void stop_hz_timer(void) --{ -- struct vcpu_set_singleshot_timer singleshot; -- unsigned int cpu = smp_processor_id(); -- unsigned long j; -- int rc; -- -- cpumask_set_cpu(cpu, nohz_cpu_mask); -- -- /* See matching smp_mb in rcu_start_batch in rcupdate.c. These mbs */ -- /* ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a */ -- /* value of rcp->cur that matches rdp->quiescbatch and allows us to */ -- /* stop the hz timer then the cpumasks created for subsequent values */ -- /* of cur in rcu_start_batch are guaranteed to pick up the updated */ -- /* nohz_cpu_mask and so will not depend on this cpu. */ -- -- smp_mb(); -- -- /* Leave ourselves in tick mode if rcu or softirq or timer pending. 
*/ -- if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || -- local_softirq_pending() || -- (j = get_next_timer_interrupt(jiffies), -- time_before_eq(j, jiffies))) { -- cpumask_clear_cpu(cpu, nohz_cpu_mask); -- j = jiffies + 1; -- } -- -- singleshot.timeout_abs_ns = jiffies_to_st(j); -- if (!singleshot.timeout_abs_ns) -- return; -- singleshot.timeout_abs_ns += NS_PER_TICK / 2; -- singleshot.flags = 0; -- rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &singleshot); --#if CONFIG_XEN_COMPAT <= 0x030004 -- if (rc) { -- BUG_ON(rc != -ENOSYS); -- rc = HYPERVISOR_set_timer_op(singleshot.timeout_abs_ns); -- } --#endif -- BUG_ON(rc); --} -- --static void start_hz_timer(void) --{ -- unsigned int cpu = smp_processor_id(); -- int rc = HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL); -- --#if CONFIG_XEN_COMPAT <= 0x030004 -- if (rc) { -- BUG_ON(rc != -ENOSYS); -- rc = HYPERVISOR_set_timer_op(0); -- } --#endif -- BUG_ON(rc); -- cpumask_clear_cpu(cpu, nohz_cpu_mask); --} -- --void xen_safe_halt(void) --{ -- stop_hz_timer(); -- /* Blocking includes an implicit local_irq_enable(). */ -- HYPERVISOR_block(); -- start_hz_timer(); --} -- --void xen_halt(void) --{ -- if (irqs_disabled()) -- VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL)); --} -- --#ifdef CONFIG_SMP --int __cpuinit local_setup_timer(unsigned int cpu) --{ -- int seq, irq; -- -- BUG_ON(cpu == 0); -- -- switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu, -- &xen_set_periodic_tick)) { -- case 0: --#if CONFIG_XEN_COMPAT <= 0x030004 -- case -ENOSYS: --#endif -- break; -- default: -- BUG(); -- } -- -- do { -- seq = read_seqbegin(&xtime_lock); -- /* Use cpu0 timestamp: cpu's shadow is not initialised yet. 
*/ -- per_cpu(processed_system_time, cpu) = -- per_cpu(shadow_time, 0).system_timestamp; -- init_missing_ticks_accounting(cpu); -- } while (read_seqretry(&xtime_lock, seq)); -- -- irq = bind_virq_to_irqaction(VIRQ_TIMER, cpu, &timer_action); -- if (irq < 0) -- return irq; -- BUG_ON(timer_irq != irq); -- -- return 0; --} -- --void __cpuinit local_teardown_timer(unsigned int cpu) --{ -- BUG_ON(cpu == 0); -- unbind_from_per_cpu_irq(timer_irq, cpu, &timer_action); --} --#endif -- - #ifdef CONFIG_CPU_FREQ - static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) ---- head-2011-03-11.orig/drivers/xen/Kconfig 2011-02-03 14:49:15.000000000 +0100 -+++ head-2011-03-11/drivers/xen/Kconfig 2011-02-17 10:32:19.000000000 +0100 -@@ -356,9 +356,6 @@ endmenu - config HAVE_IRQ_IGNORE_UNHANDLED - def_bool y - --config NO_IDLE_HZ -- def_bool y -- - config ARCH_HAS_WALK_MEMORY - def_bool y - depends on X86 ---- head-2011-03-11.orig/drivers/xen/core/Makefile 2010-04-19 14:55:02.000000000 +0200 -+++ head-2011-03-11/drivers/xen/core/Makefile 2011-02-02 15:09:52.000000000 +0100 -@@ -12,6 +12,7 @@ obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o - obj-$(CONFIG_XEN_SMPBOOT) += smpboot.o - obj-$(CONFIG_SMP) += spinlock.o - obj-$(CONFIG_KEXEC) += machine_kexec.o -+obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o - obj-$(CONFIG_XEN_DOMCTL) += domctl.o - CFLAGS_domctl.o := -D__XEN_PUBLIC_XEN_H__ -D__XEN_PUBLIC_GRANT_TABLE_H__ - CFLAGS_domctl.o += -D__XEN_TOOLS__ -imacros xen/interface/domctl.h ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/drivers/xen/core/clockevents.c 2011-02-02 15:09:52.000000000 +0100 -@@ -0,0 +1,298 @@ -+/* -+ * Xen clockevent functions -+ * -+ * See arch/x86/xen/time.c for copyright and credits for derived -+ * portions of this file. -+ * -+ * Xen clockevent implementation -+ * -+ * Xen has two clockevent implementations: -+ * -+ * The old timer_op one works with all released versions of Xen prior -+ * to version 3.0.4. 
This version of the hypervisor provides a -+ * single-shot timer with nanosecond resolution. However, sharing the -+ * same event channel is a 100Hz tick which is delivered while the -+ * vcpu is running. We don't care about or use this tick, but it will -+ * cause the core time code to think the timer fired too soon, and -+ * will end up resetting it each time. It could be filtered, but -+ * doing so has complications when the ktime clocksource is not yet -+ * the xen clocksource (ie, at boot time). -+ * -+ * The new vcpu_op-based timer interface allows the tick timer period -+ * to be changed or turned off. The tick timer is not useful as a -+ * periodic timer because events are only delivered to running vcpus. -+ * The one-shot timer can report when a timeout is in the past, so -+ * set_next_event is capable of returning -ETIME when appropriate. -+ * This interface is used when available. -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define XEN_SHIFT 22 -+ -+/* Xen may fire a timer up to this many ns early */ -+#define TIMER_SLOP 100000 -+#define NS_PER_TICK (1000000000LL / HZ) -+ -+/* -+ * Get a hypervisor absolute time. In theory we could maintain an -+ * offset between the kernel's time and the hypervisor's time, and -+ * apply that to a kernel's absolute timeout. Unfortunately the -+ * hypervisor and kernel times can drift even if the kernel is using -+ * the Xen clocksource, because ntp can warp the kernel's clocksource. 
-+ */ -+static u64 get_abs_timeout(unsigned long delta) -+{ -+ return xen_local_clock() + delta; -+} -+ -+#if CONFIG_XEN_COMPAT <= 0x030004 -+static void timerop_set_mode(enum clock_event_mode mode, -+ struct clock_event_device *evt) -+{ -+ switch (mode) { -+ case CLOCK_EVT_MODE_PERIODIC: -+ WARN_ON(1); /* unsupported */ -+ break; -+ -+ case CLOCK_EVT_MODE_ONESHOT: -+ case CLOCK_EVT_MODE_RESUME: -+ break; -+ -+ case CLOCK_EVT_MODE_UNUSED: -+ case CLOCK_EVT_MODE_SHUTDOWN: -+ if (HYPERVISOR_set_timer_op(0)) /* cancel timeout */ -+ BUG(); -+ break; -+ } -+} -+ -+static int timerop_set_next_event(unsigned long delta, -+ struct clock_event_device *evt) -+{ -+ WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT); -+ -+ if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0) -+ BUG(); -+ -+ /* -+ * We may have missed the deadline, but there's no real way of -+ * knowing for sure. If the event was in the past, then we'll -+ * get an immediate interrupt. -+ */ -+ -+ return 0; -+} -+#endif -+ -+static void vcpuop_set_mode(enum clock_event_mode mode, -+ struct clock_event_device *evt) -+{ -+ switch (mode) { -+ case CLOCK_EVT_MODE_PERIODIC: -+ WARN_ON(1); /* unsupported */ -+ break; -+ -+ case CLOCK_EVT_MODE_UNUSED: -+ case CLOCK_EVT_MODE_SHUTDOWN: -+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, -+ smp_processor_id(), NULL)) -+ BUG(); -+ /* fall through */ -+ case CLOCK_EVT_MODE_ONESHOT: -+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, -+ smp_processor_id(), NULL)) -+ BUG(); -+ break; -+ -+ case CLOCK_EVT_MODE_RESUME: -+ break; -+ } -+} -+ -+static int vcpuop_set_next_event(unsigned long delta, -+ struct clock_event_device *evt) -+{ -+ struct vcpu_set_singleshot_timer single; -+ int ret; -+ -+ WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT); -+ -+ single.timeout_abs_ns = get_abs_timeout(delta); -+ single.flags = VCPU_SSHOTTMR_future; -+ -+ ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, -+ smp_processor_id(), &single); -+ -+ BUG_ON(ret != 0 && ret != -ETIME); -+ -+ 
return ret; -+} -+ -+static DEFINE_PER_CPU(struct clock_event_device, xen_clock_event) = { -+ .name = "xen", -+ .features = CLOCK_EVT_FEAT_ONESHOT, -+ -+ .max_delta_ns = 0xffffffff, -+ .min_delta_ns = TIMER_SLOP, -+ -+ .mult = 1, -+ .shift = 0, -+ .rating = 500, -+ -+ .irq = -1, -+}; -+ -+/* snapshots of runstate info */ -+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot); -+ -+/* unused ns of stolen and blocked time */ -+static DEFINE_PER_CPU(unsigned int, xen_residual_stolen); -+static DEFINE_PER_CPU(unsigned int, xen_residual_blocked); -+ -+static void init_missing_ticks_accounting(unsigned int cpu) -+{ -+ per_cpu(xen_runstate_snapshot, cpu) = *setup_runstate_area(cpu); -+ if (cpu == smp_processor_id()) -+ get_runstate_snapshot(&__get_cpu_var(xen_runstate_snapshot)); -+ per_cpu(xen_residual_stolen, cpu) = 0; -+ per_cpu(xen_residual_blocked, cpu) = 0; -+} -+ -+static irqreturn_t timer_interrupt(int irq, void *dev_id) -+{ -+ struct clock_event_device *evt = &__get_cpu_var(xen_clock_event); -+ struct vcpu_runstate_info state, *snap; -+ s64 blocked, stolen; -+ irqreturn_t ret = IRQ_NONE; -+ -+ if (evt->event_handler) { -+ evt->event_handler(evt); -+ ret = IRQ_HANDLED; -+ } -+ -+ xen_check_wallclock_update(); -+ -+ get_runstate_snapshot(&state); -+ snap = &__get_cpu_var(xen_runstate_snapshot); -+ -+ stolen = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable] -+ + state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline] -+ + percpu_read(xen_residual_stolen); -+ -+ if (stolen >= NS_PER_TICK) -+ account_steal_ticks(div_u64_rem(stolen, NS_PER_TICK, -+ &__get_cpu_var(xen_residual_stolen))); -+ else -+ percpu_write(xen_residual_stolen, stolen > 0 ? 
stolen : 0); -+ -+ blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked] -+ + percpu_read(xen_residual_blocked); -+ -+ if (blocked >= NS_PER_TICK) -+ account_idle_ticks(div_u64_rem(blocked, NS_PER_TICK, -+ &__get_cpu_var(xen_residual_blocked))); -+ else -+ percpu_write(xen_residual_blocked, blocked > 0 ? blocked : 0); -+ -+ *snap = state; -+ -+ return ret; -+} -+ -+static struct irqaction timer_action = { -+ .handler = timer_interrupt, -+ .flags = IRQF_DISABLED|IRQF_TIMER, -+ .name = "timer" -+}; -+ -+void __cpuinit xen_setup_cpu_clockevents(void) -+{ -+ unsigned int cpu = smp_processor_id(); -+ struct clock_event_device *evt = &per_cpu(xen_clock_event, cpu); -+ -+ init_missing_ticks_accounting(cpu); -+ -+ evt->cpumask = cpumask_of(cpu); -+ clockevents_register_device(evt); -+} -+ -+#ifdef CONFIG_SMP -+int __cpuinit local_setup_timer(unsigned int cpu) -+{ -+ struct clock_event_device *evt = &per_cpu(xen_clock_event, cpu); -+ -+ BUG_ON(cpu == smp_processor_id()); -+ -+ evt->irq = bind_virq_to_irqaction(VIRQ_TIMER, cpu, &timer_action); -+ if (evt->irq < 0) -+ return evt->irq; -+ BUG_ON(per_cpu(xen_clock_event.irq, 0) != evt->irq); -+ -+ evt->set_mode = percpu_read(xen_clock_event.set_mode); -+ evt->set_next_event = percpu_read(xen_clock_event.set_next_event); -+ -+ return 0; -+} -+ -+void __cpuinit local_teardown_timer(unsigned int cpu) -+{ -+ struct clock_event_device *evt = &per_cpu(xen_clock_event, cpu); -+ -+ BUG_ON(cpu == 0); -+ unbind_from_per_cpu_irq(evt->irq, cpu, &timer_action); -+} -+#endif -+ -+void xen_clockevents_resume(void) -+{ -+ unsigned int cpu; -+ -+ if (percpu_read(xen_clock_event.set_mode) != vcpuop_set_mode) -+ return; -+ -+ for_each_online_cpu(cpu) { -+ init_missing_ticks_accounting(cpu); -+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) -+ BUG(); -+ } -+} -+ -+void __init xen_clockevents_init(void) -+{ -+ unsigned int cpu = smp_processor_id(); -+ struct clock_event_device *evt = &__get_cpu_var(xen_clock_event); 
-+ -+ switch (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, -+ cpu, NULL)) { -+ case 0: -+ /* -+ * Successfully turned off 100Hz tick, so we have the -+ * vcpuop-based timer interface -+ */ -+ evt->set_mode = vcpuop_set_mode; -+ evt->set_next_event = vcpuop_set_next_event; -+ break; -+#if CONFIG_XEN_COMPAT <= 0x030004 -+ case -ENOSYS: -+ printk(KERN_DEBUG "Xen: using timerop interface\n"); -+ evt->set_mode = timerop_set_mode; -+ evt->set_next_event = timerop_set_next_event; -+ break; -+#endif -+ default: -+ BUG(); -+ } -+ -+ evt->irq = bind_virq_to_irqaction(VIRQ_TIMER, cpu, &timer_action); -+ BUG_ON(evt->irq < 0); -+ -+ xen_setup_cpu_clockevents(); -+} ---- head-2011-03-11.orig/drivers/xen/core/evtchn.c 2011-02-16 08:29:06.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/evtchn.c 2011-02-02 15:09:52.000000000 +0100 -@@ -382,6 +382,7 @@ asmlinkage void __irq_entry evtchn_do_up - wmb(); - #endif - -+#ifndef CONFIG_NO_HZ - /* - * Handle timer interrupts before all others, so that all - * hardirq handlers see an up-to-date system time even if we -@@ -407,6 +408,7 @@ asmlinkage void __irq_entry evtchn_do_up - BUG(); - } - } -+#endif /* CONFIG_NO_HZ */ - - l1 = xchg(&vcpu_info->evtchn_pending_sel, 0); - ---- head-2011-03-11.orig/drivers/xen/core/machine_reboot.c 2011-01-13 16:21:42.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/machine_reboot.c 2011-02-02 15:09:52.000000000 +0100 -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -158,10 +159,12 @@ static int take_machine_down(void *_susp - } else - BUG_ON(suspend_cancelled > 0); - suspend->resume_notifier(suspend_cancelled); -- if (suspend_cancelled >= 0) { -+ if (suspend_cancelled >= 0) - post_suspend(suspend_cancelled); -+ if (!suspend_cancelled) -+ xen_clockevents_resume(); -+ if (suspend_cancelled >= 0) - sysdev_resume(); -- } - if (!suspend_cancelled) { - #ifdef __x86_64__ - /* ---- head-2011-03-11.orig/drivers/xen/core/smpboot.c 2011-03-03 
16:14:51.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/smpboot.c 2011-02-07 12:28:20.000000000 +0100 -@@ -18,6 +18,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -146,6 +147,7 @@ static void __cpuinit cpu_bringup(void) - identify_secondary_cpu(__this_cpu_ptr(&cpu_info)); - touch_softlockup_watchdog(); - preempt_disable(); -+ xen_setup_cpu_clockevents(); - local_irq_enable(); - } - ---- head-2011-03-11.orig/drivers/xen/core/spinlock.c 2011-03-15 16:18:17.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/spinlock.c 2011-03-15 16:18:37.000000000 +0100 -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - - struct spinning { ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/include/xen/clock.h 2011-02-02 15:09:52.000000000 +0100 -@@ -0,0 +1,19 @@ -+#ifndef __XEN_CPU_CLOCK_H__ -+#define __XEN_CPU_CLOCK_H__ -+ -+struct vcpu_runstate_info *setup_runstate_area(unsigned int cpu); -+void get_runstate_snapshot(struct vcpu_runstate_info *); -+ -+unsigned long long xen_local_clock(void); -+void xen_check_wallclock_update(void); -+ -+#ifdef CONFIG_GENERIC_CLOCKEVENTS -+void xen_clockevents_init(void); -+void xen_setup_cpu_clockevents(void); -+void xen_clockevents_resume(void); -+#else -+static inline void xen_setup_cpu_clockevents(void) {} -+static inline void xen_clockevents_resume(void) {} -+#endif -+ -+#endif /* __XEN_CPU_CLOCK_H__ */ diff --git a/patches.xen/xen-configurable-guest-devices b/patches.xen/xen-configurable-guest-devices deleted file mode 100644 index 65a2e81..0000000 --- a/patches.xen/xen-configurable-guest-devices +++ /dev/null @@ -1,88 +0,0 @@ -From: jbeulich@novell.com -Subject: allow number of guest devices to be configurable -Patch-mainline: obsolete - -... and derive NR_DYNIRQS from this (rather than having a hard-coded -value). -Similarly, allow the number of simultaneous transmits in netback to be -configurable. 
- ---- head-2011-02-08.orig/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-15 17:50:13.000000000 +0100 -+++ head-2011-02-08/arch/x86/include/mach-xen/asm/irq_vectors.h 2011-02-16 08:29:39.000000000 +0100 -@@ -97,9 +97,9 @@ extern int nr_pirqs; - - #define DYNIRQ_BASE (PIRQ_BASE + nr_pirqs) - #ifdef CONFIG_SPARSE_IRQ --#define NR_DYNIRQS CPU_VECTOR_LIMIT -+#define NR_DYNIRQS (CPU_VECTOR_LIMIT + CONFIG_XEN_NR_GUEST_DEVICES) - #else --#define NR_DYNIRQS 256 -+#define NR_DYNIRQS (64 + CONFIG_XEN_NR_GUEST_DEVICES) - #endif - - #define NR_IRQS (NR_PIRQS + NR_DYNIRQS) ---- head-2011-02-08.orig/drivers/xen/Kconfig 2011-02-03 14:49:25.000000000 +0100 -+++ head-2011-02-08/drivers/xen/Kconfig 2010-11-26 13:38:08.000000000 +0100 -@@ -94,6 +94,15 @@ config XEN_NETDEV_BACKEND - network devices to other guests via a high-performance shared-memory - interface. - -+config XEN_NETDEV_TX_SHIFT -+ int "Maximum simultaneous transmit requests (as a power of 2)" -+ depends on XEN_NETDEV_BACKEND -+ range 5 16 -+ default 8 -+ help -+ The maximum number transmits the driver can hold pending, expressed -+ as the exponent of a power of 2. -+ - config XEN_NETDEV_PIPELINED_TRANSMITTER - bool "Pipelined transmitter (DANGEROUS)" - depends on XEN_NETDEV_BACKEND -@@ -305,6 +314,16 @@ config XEN_SYSFS - help - Xen hypervisor attributes will show up under /sys/hypervisor/. - -+config XEN_NR_GUEST_DEVICES -+ int "Number of guest devices" -+ range 0 4032 if 64BIT -+ range 0 960 -+ default 256 if XEN_BACKEND -+ default 16 -+ help -+ Specify the total number of virtual devices (i.e. both frontend -+ and backend) that you want the kernel to be able to service. 
-+ - choice - prompt "Xen version compatibility" - default XEN_COMPAT_030002_AND_LATER ---- head-2011-02-08.orig/drivers/xen/core/evtchn.c 2011-02-02 15:09:52.000000000 +0100 -+++ head-2011-02-08/drivers/xen/core/evtchn.c 2011-02-16 08:29:29.000000000 +0100 -@@ -1766,7 +1766,7 @@ EXPORT_SYMBOL_GPL(nr_pirqs); - - int __init arch_probe_nr_irqs(void) - { -- int nr = 256, nr_irqs_gsi; -+ int nr = 64 + CONFIG_XEN_NR_GUEST_DEVICES, nr_irqs_gsi; - - if (is_initial_xendomain()) { - nr_irqs_gsi = NR_IRQS_LEGACY; ---- head-2011-02-08.orig/drivers/xen/netback/netback.c 2011-02-09 16:06:37.000000000 +0100 -+++ head-2011-02-08/drivers/xen/netback/netback.c 2011-01-03 13:29:58.000000000 +0100 -@@ -74,7 +74,7 @@ static DECLARE_TASKLET(net_rx_tasklet, n - static struct timer_list net_timer; - static struct timer_list netbk_tx_pending_timer; - --#define MAX_PENDING_REQS 256 -+#define MAX_PENDING_REQS (1U << CONFIG_XEN_NETDEV_TX_SHIFT) - - static struct sk_buff_head rx_queue; - -@@ -1263,6 +1263,7 @@ static void net_tx_action(unsigned long - net_tx_action_dealloc(); - - mop = tx_map_ops; -+ BUILD_BUG_ON(MAX_SKB_FRAGS >= MAX_PENDING_REQS); - while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && - !list_empty(&net_schedule_list)) { - /* Get a netif from the list with work to do. 
*/ diff --git a/patches.xen/xen-cpufreq-report b/patches.xen/xen-cpufreq-report deleted file mode 100644 index aca69d2..0000000 --- a/patches.xen/xen-cpufreq-report +++ /dev/null @@ -1,57 +0,0 @@ -From: jbeulich@novell.com -Subject: make /proc/cpuinfo track CPU speed -Patch-mainline: obsolete - ---- head-2010-10-05.orig/arch/x86/kernel/acpi/processor_extcntl_xen.c 2010-10-06 17:02:47.000000000 +0200 -+++ head-2010-10-05/arch/x86/kernel/acpi/processor_extcntl_xen.c 2010-10-06 17:04:43.000000000 +0200 -@@ -208,3 +208,14 @@ static int __init init_extcntl( - return 0; - } - arch_initcall(init_extcntl); -+ -+unsigned int cpufreq_quick_get(unsigned int cpu) -+{ -+ xen_platform_op_t op = { -+ .cmd = XENPF_get_cpu_freq, -+ .interface_version = XENPF_INTERFACE_VERSION, -+ .u.get_cpu_freq.vcpu = cpu -+ }; -+ -+ return HYPERVISOR_platform_op(&op) == 0 ? op.u.get_cpu_freq.freq : 0; -+} ---- head-2010-10-05.orig/include/linux/cpufreq.h 2010-10-06 16:54:08.000000000 +0200 -+++ head-2010-10-05/include/linux/cpufreq.h 2010-08-25 14:41:45.000000000 +0200 -@@ -328,7 +328,7 @@ static inline unsigned int cpufreq_get(u - #endif - - /* query the last known CPU freq (in kHz). 
If zero, cpufreq couldn't detect it */ --#ifdef CONFIG_CPU_FREQ -+#if defined(CONFIG_CPU_FREQ) || defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL) - unsigned int cpufreq_quick_get(unsigned int cpu); - #else - static inline unsigned int cpufreq_quick_get(unsigned int cpu) ---- head-2010-10-05.orig/include/xen/interface/platform.h 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-10-05/include/xen/interface/platform.h 2010-06-22 15:48:58.000000000 +0200 -@@ -355,6 +355,14 @@ struct xenpf_mem_hotadd - uint32_t flags; - }; - -+#define XENPF_get_cpu_freq ('N' << 24) -+struct xenpf_get_cpu_freq { -+ /* IN variables */ -+ uint32_t vcpu; -+ /* OUT variables */ -+ uint32_t freq; /* in kHz */ -+}; -+ - struct xen_platform_op { - uint32_t cmd; - uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ -@@ -374,6 +382,7 @@ struct xen_platform_op { - struct xenpf_cpu_ol cpu_ol; - struct xenpf_cpu_hotadd cpu_add; - struct xenpf_mem_hotadd mem_add; -+ struct xenpf_get_cpu_freq get_cpu_freq; - uint8_t pad[128]; - } u; - }; diff --git a/patches.xen/xen-cxgb3 b/patches.xen/xen-cxgb3 deleted file mode 100644 index 4f777f9..0000000 --- a/patches.xen/xen-cxgb3 +++ /dev/null @@ -1,151 +0,0 @@ -From: http://xenbits.xen.org/XCP/linux-2.6.32.pq.hg?rev/20e4634f7b7b -Subject: apply xen specific patch to the Chelsio ethernet drivers -as a result of their feedback from the Cowly Beta -Patch-mainline: n/a - -* Disable LRO by default. The kernel.org driver does enable it, but it -does not play very well with the bridging layer. (Please note that the -kernel.org driver does now implement GRO) - -* Allocate SKBs instead of pages for incoming data. Using pages causes -traffic to stall when the VMs use large MTUs. - -* Disable lazy completion to Tx buffers. cxgb3 completion mechanism -coalesces TX completion notifications, but this breaks the VM's -behavior: The VMs networking stacks rely on skb to be freed in the -hypervisor to open the Tx buffer. 
- -Acked-by: bphilips@suse.de - ---- head-2011-01-30.orig/drivers/net/cxgb3/cxgb3_main.c 2011-01-31 12:42:17.000000000 +0100 -+++ head-2011-01-30/drivers/net/cxgb3/cxgb3_main.c 2011-02-03 14:45:48.000000000 +0100 -@@ -1923,7 +1923,11 @@ static int set_rx_csum(struct net_device - } else { - int i; - -+#ifndef CONFIG_XEN - p->rx_offload &= ~(T3_RX_CSUM | T3_LRO); -+#else -+ p->rx_offload &= ~(T3_RX_CSUM); -+#endif - for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) - set_qset_lro(dev, i, 0); - } -@@ -3298,7 +3302,11 @@ static int __devinit init_one(struct pci - adapter->port[i] = netdev; - pi = netdev_priv(netdev); - pi->adapter = adapter; -+#ifndef CONFIG_XEN - pi->rx_offload = T3_RX_CSUM | T3_LRO; -+#else -+ pi->rx_offload = T3_RX_CSUM; -+#endif - pi->port_id = i; - netif_carrier_off(netdev); - netdev->irq = pdev->irq; ---- head-2011-01-30.orig/drivers/net/cxgb3/sge.c 2011-01-05 01:50:19.000000000 +0100 -+++ head-2011-01-30/drivers/net/cxgb3/sge.c 2011-02-03 14:45:48.000000000 +0100 -@@ -58,11 +58,24 @@ - * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs - * directly. - */ -+#ifndef CONFIG_XEN - #define FL0_PG_CHUNK_SIZE 2048 -+#else -+/* Use skbuffs for XEN kernels. LRO is already disabled */ -+#define FL0_PG_CHUNK_SIZE 0 -+#endif -+ - #define FL0_PG_ORDER 0 - #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER) -+ -+#ifndef CONFIG_XEN - #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192) - #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1) -+#else -+#define FL1_PG_CHUNK_SIZE 0 -+#define FL1_PG_ORDER 0 -+#endif -+ - #define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER) - - #define SGE_RX_DROP_THRES 16 -@@ -1267,7 +1280,27 @@ netdev_tx_t t3_eth_xmit(struct sk_buff * - - gen = q->gen; - q->unacked += ndesc; -+#ifdef CONFIG_XEN -+ /* -+ * Some Guest OS clients get terrible performance when they have bad -+ * message size / socket send buffer space parameters. 
For instance, -+ * if an application selects an 8KB message size and an 8KB send -+ * socket buffer size. This forces the application into a single -+ * packet stop-and-go mode where it's only willing to have a single -+ * message outstanding. The next message is only sent when the -+ * previous message is noted as having been sent. Until we issue a -+ * kfree_skb() against the TX skb, the skb is charged against the -+ * application's send buffer space. We only free up TX skbs when we -+ * get a TX credit return from the hardware / firmware which is fairly -+ * lazy about this. So we request a TX WR Completion Notification on -+ * every TX descriptor in order to accellerate TX credit returns. See -+ * also the change in handle_rsp_cntrl_info() to free up TX skb's when -+ * we receive the TX WR Completion Notifications ... -+ */ -+ compl = F_WR_COMPL; -+#else - compl = (q->unacked & 8) << (S_WR_COMPL - 3); -+#endif - q->unacked &= 7; - pidx = q->pidx; - q->pidx += ndesc; -@@ -2176,8 +2209,35 @@ static inline void handle_rsp_cntrl_info - #endif - - credits = G_RSPD_TXQ0_CR(flags); -- if (credits) -+ if (credits) { - qs->txq[TXQ_ETH].processed += credits; -+#ifdef CONFIG_XEN -+ /* -+ * In the normal Linux driver t3_eth_xmit() routine, we call -+ * skb_orphan() on unshared TX skb. This results in a call to -+ * the destructor for the skb which frees up the send buffer -+ * space it was holding down. This, in turn, allows the -+ * application to make forward progress generating more data -+ * which is important at 10Gb/s. For Virtual Machine Guest -+ * Operating Systems this doesn't work since the send buffer -+ * space is being held down in the Virtual Machine. Thus we -+ * need to get the TX skb's freed up as soon as possible in -+ * order to prevent applications from stalling. -+ * -+ * This code is largely copied from the corresponding code in -+ * sge_timer_tx() and should probably be kept in sync with any -+ * changes there. 
-+ */ -+ if (__netif_tx_trylock(qs->tx_q)) { -+ struct port_info *pi = netdev_priv(qs->netdev); -+ struct adapter *adap = pi->adapter; -+ -+ reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], -+ TX_RECLAIM_CHUNK); -+ __netif_tx_unlock(qs->tx_q); -+ } -+#endif -+ } - - credits = G_RSPD_TXQ2_CR(flags); - if (credits) ---- head-2011-01-30.orig/drivers/net/cxgb3/version.h 2010-10-20 22:30:22.000000000 +0200 -+++ head-2011-01-30/drivers/net/cxgb3/version.h 2011-02-03 14:45:48.000000000 +0100 -@@ -35,7 +35,11 @@ - #define DRV_DESC "Chelsio T3 Network Driver" - #define DRV_NAME "cxgb3" - /* Driver version */ -+#ifndef CONFIG_XEN - #define DRV_VERSION "1.1.4-ko" -+#else -+#define DRV_VERSION "1.1.4-xen-ko" -+#endif - - /* Firmware version */ - #define FW_VERSION_MAJOR 7 diff --git a/patches.xen/xen-dcdbas b/patches.xen/xen-dcdbas deleted file mode 100644 index ea526ca..0000000 --- a/patches.xen/xen-dcdbas +++ /dev/null @@ -1,295 +0,0 @@ -From: jbeulich@novell.com -Subject: force proper address translation in DCDBAS -Patch-mainline: n/a - -The only caveat is that this doesn't work when Dom0 has its vCPU-s pinned. 
- ---- - drivers/firmware/Kconfig | 1 - drivers/firmware/dcdbas.c | 28 ++++++++- - drivers/xen/core/domctl.c | 141 ++++++++++++++++++++++++++++++++++++++++++++++ - drivers/xen/core/domctl.h | 1 - 4 files changed, 169 insertions(+), 2 deletions(-) - ---- a/drivers/firmware/Kconfig -+++ b/drivers/firmware/Kconfig -@@ -91,6 +91,7 @@ config DELL_RBU - config DCDBAS - tristate "Dell Systems Management Base Driver" - depends on X86 -+ select XEN_DOMCTL if XEN - help - The Dell Systems Management Base Driver provides a sysfs interface - for systems management software to perform System Management ---- a/drivers/firmware/dcdbas.c -+++ b/drivers/firmware/dcdbas.c -@@ -37,6 +37,10 @@ - #include - #include - -+#ifdef CONFIG_XEN -+#include "../xen/core/domctl.h" -+#endif -+ - #include "dcdbas.h" - - #define DRIVER_NAME "dcdbas" -@@ -107,7 +111,7 @@ static int smi_data_buf_realloc(unsigned - /* set up new buffer for use */ - smi_data_buf = buf; - smi_data_buf_handle = handle; -- smi_data_buf_phys_addr = (u32) virt_to_phys(buf); -+ smi_data_buf_phys_addr = (u32) handle; - smi_data_buf_size = size; - - dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", -@@ -245,7 +249,9 @@ static ssize_t host_control_on_shutdown_ - */ - int dcdbas_smi_request(struct smi_cmd *smi_cmd) - { -+#ifndef CONFIG_XEN - cpumask_var_t old_mask; -+#endif - int ret = 0; - - if (smi_cmd->magic != SMI_CMD_MAGIC) { -@@ -255,6 +261,7 @@ int dcdbas_smi_request(struct smi_cmd *s - } - - /* SMI requires CPU 0 */ -+#ifndef CONFIG_XEN - if (!alloc_cpumask_var(&old_mask, GFP_KERNEL)) - return -ENOMEM; - -@@ -266,6 +273,14 @@ int dcdbas_smi_request(struct smi_cmd *s - ret = -EBUSY; - goto out; - } -+#else -+ ret = xen_set_physical_cpu_affinity(0); -+ if (ret) { -+ dev_dbg(&dcdbas_pdev->dev, "%s: failed (%d) to get CPU 0\n", -+ __func__, ret); -+ return ret; -+ } -+#endif - - /* generate SMI */ - /* inb to force posted write through and make SMI happen now */ -@@ -280,9 +295,13 @@ int dcdbas_smi_request(struct 
smi_cmd *s - : "memory" - ); - -+#ifndef CONFIG_XEN - out: - set_cpus_allowed_ptr(current, old_mask); - free_cpumask_var(old_mask); -+#else -+ xen_set_physical_cpu_affinity(-1); -+#endif - return ret; - } - -@@ -322,7 +341,7 @@ static ssize_t smi_request_store(struct - break; - case 1: - /* Calling Interface SMI */ -- smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer); -+ smi_cmd->ebx = (u32) virt_to_bus(smi_cmd->command_buffer); - ret = dcdbas_smi_request(smi_cmd); - if (!ret) - ret = count; -@@ -603,6 +622,11 @@ static int __init dcdbas_init(void) - { - int error; - -+#ifdef CONFIG_XEN -+ if (!is_initial_xendomain()) -+ return -ENODEV; -+#endif -+ - error = platform_driver_register(&dcdbas_driver); - if (error) - return error; ---- a/drivers/xen/core/domctl.c -+++ b/drivers/xen/core/domctl.c -@@ -20,6 +20,8 @@ - #undef __XEN_TOOLS__ - #include - #include -+#include -+#include - #include - #include - -@@ -34,6 +36,29 @@ typedef struct xen_domctl_address_size { - - typedef __attribute__((aligned(8))) uint64_t uint64_aligned_t; - -+struct xenctl_cpumap_v4 { -+ XEN_GUEST_HANDLE(uint8) bitmap; -+ uint32_t nr_cpus; -+}; -+ -+struct xenctl_cpumap_v5 { -+ union { -+ XEN_GUEST_HANDLE(uint8) bitmap; -+ uint64_aligned_t _align; -+ }; -+ uint32_t nr_cpus; -+}; -+ -+struct xen_domctl_vcpuaffinity_v4 { -+ uint32_t vcpu; -+ struct xenctl_cpumap_v4 cpumap; -+}; -+ -+struct xen_domctl_vcpuaffinity_v5 { -+ uint32_t vcpu; -+ struct xenctl_cpumap_v5 cpumap; -+}; -+ - union xen_domctl { - /* v4: sle10 sp1: xen 3.0.4 + 32-on-64 patches */ - struct { -@@ -43,6 +68,7 @@ union xen_domctl { - union { - /* left out lots of other struct xen_domctl_foobar */ - struct xen_domctl_address_size address_size; -+ struct xen_domctl_vcpuaffinity_v4 vcpu_affinity; - uint64_t dummy_align; - uint8_t dummy_pad[128]; - }; -@@ -59,6 +85,7 @@ union xen_domctl { - domid_t domain; - union { - struct xen_domctl_address_size address_size; -+ struct xen_domctl_vcpuaffinity_v5 vcpu_affinity; - 
uint64_aligned_t dummy_align; - uint8_t dummy_pad[128]; - }; -@@ -124,4 +151,118 @@ int xen_guest_blkif_protocol(int domid) - } - EXPORT_SYMBOL_GPL(xen_guest_blkif_protocol); - -+#ifdef CONFIG_X86 -+ -+#define vcpuaffinity(what, ver) ({ \ -+ memset(&domctl, 0, sizeof(domctl)); \ -+ domctl.v##ver.cmd = XEN_DOMCTL_##what##vcpuaffinity; \ -+ domctl.v##ver.interface_version = ver; \ -+ /* domctl.v##ver.domain = 0; */ \ -+ domctl.v##ver.vcpu_affinity.vcpu = smp_processor_id(); \ -+ domctl.v##ver.vcpu_affinity.cpumap.nr_cpus = nr; \ -+ set_xen_guest_handle(domctl.v##ver.vcpu_affinity.cpumap.bitmap, \ -+ mask); \ -+ hypervisor_domctl(&domctl); \ -+}) -+ -+static inline int get_vcpuaffinity(unsigned int nr, void *mask) -+{ -+ union xen_domctl domctl; -+ int rc; -+ -+ BUILD_BUG_ON(XEN_DOMCTL_INTERFACE_VERSION > 7); -+ rc = vcpuaffinity(get, 7); -+#if CONFIG_XEN_COMPAT < 0x040100 -+ if (rc) -+ rc = vcpuaffinity(get, 6); -+#endif -+#if CONFIG_XEN_COMPAT < 0x040000 -+ if (rc) -+ rc = vcpuaffinity(get, 5); -+#endif -+#if CONFIG_XEN_COMPAT < 0x030100 -+ if (rc) -+ rc = vcpuaffinity(get, 4); -+#endif -+ return rc; -+} -+ -+static inline int set_vcpuaffinity(unsigned int nr, void *mask) -+{ -+ union xen_domctl domctl; -+ int rc; -+ -+ BUILD_BUG_ON(XEN_DOMCTL_INTERFACE_VERSION > 7); -+ rc = vcpuaffinity(set, 7); -+#if CONFIG_XEN_COMPAT < 0x040100 -+ if (rc) -+ rc = vcpuaffinity(set, 6); -+#endif -+#if CONFIG_XEN_COMPAT < 0x040000 -+ if (rc) -+ rc = vcpuaffinity(set, 5); -+#endif -+#if CONFIG_XEN_COMPAT < 0x030100 -+ if (rc) -+ rc = vcpuaffinity(set, 4); -+#endif -+ return rc; -+} -+ -+static DEFINE_PER_CPU(void *, saved_pcpu_affinity); -+ -+#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_LONG / sizeof(long)) -+ -+int xen_set_physical_cpu_affinity(int pcpu) -+{ -+ int rc; -+ -+ if (!is_initial_xendomain()) -+ return -EPERM; -+ -+ if (pcpu >= 0) { -+ void *oldmap; -+ -+ if (pcpu > BITS_PER_PAGE) -+ return -ERANGE; -+ -+ if (percpu_read(saved_pcpu_affinity)) -+ return -EBUSY; -+ -+ oldmap 
= (void *)get_zeroed_page(GFP_KERNEL); -+ if (!oldmap) -+ return -ENOMEM; -+ -+ rc = get_vcpuaffinity(BITS_PER_PAGE, oldmap); -+ if (!rc) { -+ void *newmap = kzalloc(BITS_TO_LONGS(pcpu + 1) -+ * sizeof(long), GFP_KERNEL); -+ -+ if (newmap) { -+ __set_bit(pcpu, newmap); -+ rc = set_vcpuaffinity(pcpu + 1, newmap); -+ kfree(newmap); -+ } else -+ rc = -ENOMEM; -+ } -+ -+ if (!rc) -+ percpu_write(saved_pcpu_affinity, oldmap); -+ else -+ free_page((unsigned long)oldmap); -+ } else { -+ if (!percpu_read(saved_pcpu_affinity)) -+ return 0; -+ rc = set_vcpuaffinity(BITS_PER_PAGE, -+ percpu_read(saved_pcpu_affinity)); -+ free_page((unsigned long)percpu_read(saved_pcpu_affinity)); -+ percpu_write(saved_pcpu_affinity, NULL); -+ } -+ -+ return rc; -+} -+EXPORT_SYMBOL_GPL(xen_set_physical_cpu_affinity); -+ -+#endif /* CONFIG_X86 */ -+ - MODULE_LICENSE("GPL"); ---- a/drivers/xen/core/domctl.h -+++ b/drivers/xen/core/domctl.h -@@ -1,2 +1,3 @@ - int xen_guest_address_size(int domid); - int xen_guest_blkif_protocol(int domid); -+int xen_set_physical_cpu_affinity(int pcpu); diff --git a/patches.xen/xen-floppy b/patches.xen/xen-floppy deleted file mode 100644 index b840f16..0000000 --- a/patches.xen/xen-floppy +++ /dev/null @@ -1,28 +0,0 @@ -From: jbeulich@novell.com -Subject: Xen: improve floppy behavior -Patch-mainline: n/a -References: bnc#584216 - -Timing is significantly different from native both because Xen traps -I/O port accesses and using DMA not being possible (without intrusive -changes). Due to the overhead of trapped port accesses, I/O is already -slow enough (and Xen doesn't run on very old hardware anyway), so the -situation can easily be improved by not enforcing REALLY_SLOW_IO. - -This doesn't completely address the issue - Xen just cannot guarantee -scheduling of a particular vCPU with a maximum latency of about 80us -(needed for the default FIFO threshold value of 10). The only complete -solution would require making ISA DMA usable on Xen. 
- ---- head-2010-03-24.orig/drivers/block/floppy.c 2010-03-24 13:43:18.000000000 +0100 -+++ head-2010-03-24/drivers/block/floppy.c 2010-03-25 14:39:44.000000000 +0100 -@@ -146,7 +146,9 @@ - - #undef FLOPPY_SILENT_DCL_CLEAR - -+#ifndef CONFIG_XEN - #define REALLY_SLOW_IO -+#endif - - #define DEBUGT 2 - diff --git a/patches.xen/xen-ipi-per-cpu-irq b/patches.xen/xen-ipi-per-cpu-irq deleted file mode 100644 index f521183..0000000 --- a/patches.xen/xen-ipi-per-cpu-irq +++ /dev/null @@ -1,901 +0,0 @@ -From: jbeulich@novell.com -Subject: fold IPIs onto a single IRQ each -Patch-mainline: n/a - ---- head-2011-02-17.orig/arch/x86/include/asm/hw_irq.h 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-02-17/arch/x86/include/asm/hw_irq.h 2011-02-02 15:09:42.000000000 +0100 -@@ -132,7 +132,6 @@ extern void smp_error_interrupt(struct p - extern asmlinkage void smp_irq_move_cleanup_interrupt(void); - #endif - #ifdef CONFIG_SMP --#ifndef CONFIG_XEN - extern void smp_reschedule_interrupt(struct pt_regs *); - extern void smp_call_function_interrupt(struct pt_regs *); - extern void smp_call_function_single_interrupt(struct pt_regs *); -@@ -141,13 +140,9 @@ extern void smp_invalidate_interrupt(str - #else - extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *); - #endif --#else --#include --extern irqreturn_t smp_reschedule_interrupt(int, void *); --extern irqreturn_t smp_call_function_interrupt(int, void *); --extern irqreturn_t smp_call_function_single_interrupt(int, void *); --extern irqreturn_t smp_reboot_interrupt(int, void *); --extern irqreturn_t smp_irq_work_interrupt(int, void *); -+extern void smp_irq_work_interrupt(struct pt_regs *); -+#ifdef CONFIG_XEN -+extern void smp_reboot_interrupt(struct pt_regs *); - #endif - #endif - ---- head-2011-02-17.orig/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:57:40.000000000 +0100 -+++ head-2011-02-17/arch/x86/kernel/apic/ipi-xen.c 2011-02-21 13:58:00.000000000 +0100 -@@ -6,25 +6,6 @@ - - #include - --DECLARE_PER_CPU(int, 
ipi_to_irq[NR_IPIS]); -- --static inline void __send_IPI_one(unsigned int cpu, int vector) --{ -- int irq = per_cpu(ipi_to_irq, cpu)[vector]; -- -- if (vector == NMI_VECTOR) { -- static int __read_mostly printed; -- int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL); -- -- if (rc && !printed) -- pr_warning("Unable (%d) to send NMI to CPU#%u\n", -- printed = rc, cpu); -- return; -- } -- BUG_ON(irq < 0); -- notify_remote_via_irq(irq); --} -- - void xen_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) - { - unsigned int cpu, this_cpu = smp_processor_id(); -@@ -32,7 +13,7 @@ void xen_send_IPI_mask_allbutself(const - WARN_ON(!cpumask_subset(cpumask, cpu_online_mask)); - for_each_cpu_and(cpu, cpumask, cpu_online_mask) - if (cpu != this_cpu) -- __send_IPI_one(cpu, vector); -+ notify_remote_via_ipi(vector, cpu); - } - - void xen_send_IPI_mask(const struct cpumask *cpumask, int vector) -@@ -41,7 +22,7 @@ void xen_send_IPI_mask(const struct cpum - - WARN_ON(!cpumask_subset(cpumask, cpu_online_mask)); - for_each_cpu_and(cpu, cpumask, cpu_online_mask) -- __send_IPI_one(cpu, vector); -+ notify_remote_via_ipi(vector, cpu); - } - - void xen_send_IPI_allbutself(int vector) -@@ -56,5 +37,5 @@ void xen_send_IPI_all(int vector) - - void xen_send_IPI_self(int vector) - { -- __send_IPI_one(smp_processor_id(), vector); -+ notify_remote_via_ipi(vector, smp_processor_id()); - } ---- head-2011-02-17.orig/arch/x86/kernel/irq-xen.c 2011-02-18 15:17:23.000000000 +0100 -+++ head-2011-02-17/arch/x86/kernel/irq-xen.c 2011-02-02 15:09:43.000000000 +0100 -@@ -331,6 +331,7 @@ void fixup_irqs(void) - data = &desc->irq_data; - affinity = data->affinity; - if (!irq_has_action(irq) || -+ (desc->status & IRQ_PER_CPU) || - cpumask_subset(affinity, cpu_online_mask)) { - raw_spin_unlock(&desc->lock); - continue; ---- head-2011-02-17.orig/arch/x86/kernel/irq_work-xen.c 2011-02-03 11:19:35.000000000 +0100 -+++ head-2011-02-17/arch/x86/kernel/irq_work-xen.c 2011-02-03 
13:56:43.000000000 +0100 -@@ -8,12 +8,10 @@ - #include - - #ifdef CONFIG_SMP --irqreturn_t smp_irq_work_interrupt(int irq, void *dev_id) -+void smp_irq_work_interrupt(struct pt_regs *regs) - { - inc_irq_stat(apic_irq_work_irqs); - irq_work_run(); -- -- return IRQ_HANDLED; - } - - void arch_irq_work_raise(void) ---- head-2011-02-17.orig/arch/x86/kernel/smp-xen.c 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-02-17/arch/x86/kernel/smp-xen.c 2011-02-02 15:09:43.000000000 +0100 -@@ -136,11 +136,9 @@ void xen_send_call_func_ipi(const struct - * this function calls the 'stop' function on all other CPUs in the system. - */ - --irqreturn_t smp_reboot_interrupt(int irq, void *dev_id) -+void smp_reboot_interrupt(struct pt_regs *regs) - { - stop_this_cpu(NULL); -- -- return IRQ_HANDLED; - } - - void xen_stop_other_cpus(int wait) -@@ -179,24 +177,19 @@ void xen_stop_other_cpus(int wait) - * all the work is done automatically when - * we return from the interrupt. - */ --irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) -+void smp_reschedule_interrupt(struct pt_regs *regs) - { - inc_irq_stat(irq_resched_count); -- return IRQ_HANDLED; - } - --irqreturn_t smp_call_function_interrupt(int irq, void *dev_id) -+void smp_call_function_interrupt(struct pt_regs *regs) - { - generic_smp_call_function_interrupt(); - inc_irq_stat(irq_call_count); -- -- return IRQ_HANDLED; - } - --irqreturn_t smp_call_function_single_interrupt(int irq, void *dev_id) -+void smp_call_function_single_interrupt(struct pt_regs *regs) - { - generic_smp_call_function_single_interrupt(); - inc_irq_stat(irq_call_count); -- -- return IRQ_HANDLED; - } ---- head-2011-02-17.orig/drivers/xen/Kconfig 2011-02-03 14:48:57.000000000 +0100 -+++ head-2011-02-17/drivers/xen/Kconfig 2011-02-03 14:49:15.000000000 +0100 -@@ -4,6 +4,7 @@ - - config XEN - bool -+ select IRQ_PER_CPU if SMP - - if XEN - config XEN_INTERFACE_VERSION ---- head-2011-02-17.orig/drivers/xen/core/evtchn.c 2011-02-10 16:18:00.000000000 +0100 
-+++ head-2011-02-17/drivers/xen/core/evtchn.c 2011-02-15 17:52:39.000000000 +0100 -@@ -59,6 +59,20 @@ static DEFINE_SPINLOCK(irq_mapping_updat - static int evtchn_to_irq[NR_EVENT_CHANNELS] = { - [0 ... NR_EVENT_CHANNELS-1] = -1 }; - -+/* IRQ <-> IPI mapping. */ -+#if defined(CONFIG_SMP) && defined(CONFIG_X86) -+static int __read_mostly ipi_irq = -1; -+DEFINE_PER_CPU(DECLARE_BITMAP(, NR_IPIS), ipi_pending); -+static DEFINE_PER_CPU_READ_MOSTLY(evtchn_port_t, ipi_evtchn); -+#else -+#define PER_CPU_IPI_IRQ -+#endif -+#if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ) -+#define BUG_IF_IPI(irq_cfg) BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_IPI) -+#else -+#define BUG_IF_IPI(irq_cfg) ((void)0) -+#endif -+ - /* Binding types. */ - enum { - IRQT_UNBOUND, -@@ -108,7 +122,9 @@ static inline u32 mk_irq_info(u32 type, - - BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS)); - BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS)); -+#if defined(PER_CPU_IPI_IRQ) && defined(NR_IPIS) - BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS)); -+#endif - BUG_ON(index >> _INDEX_BITS); - - BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS)); -@@ -120,25 +136,6 @@ static inline u32 mk_irq_info(u32 type, - * Accessors for packed IRQ information. - */ - --static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg) --{ -- return cfg->info & ((1U << _EVTCHN_BITS) - 1); --} -- --static inline unsigned int evtchn_from_irq_data(struct irq_data *data) --{ -- const struct irq_cfg *cfg = irq_data_cfg(data); -- -- return cfg ? evtchn_from_irq_cfg(cfg) : 0; --} -- --static inline unsigned int evtchn_from_irq(int irq) --{ -- struct irq_data *data = irq_get_irq_data(irq); -- -- return data ? evtchn_from_irq_data(data) : 0; --} -- - static inline unsigned int index_from_irq_cfg(const struct irq_cfg *cfg) - { - return (cfg->info >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1); -@@ -163,6 +160,38 @@ static inline unsigned int type_from_irq - return cfg ? 
type_from_irq_cfg(cfg) : IRQT_UNBOUND; - } - -+#ifndef PER_CPU_IPI_IRQ -+static inline unsigned int evtchn_from_per_cpu_irq(const struct irq_cfg *cfg, -+ unsigned int cpu) -+{ -+ BUG_ON(type_from_irq_cfg(cfg) != IRQT_IPI); -+ return per_cpu(ipi_evtchn, cpu); -+} -+#endif -+ -+static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg) -+{ -+#ifndef PER_CPU_IPI_IRQ -+ if (type_from_irq_cfg(cfg) == IRQT_IPI) -+ return evtchn_from_per_cpu_irq(cfg, smp_processor_id()); -+#endif -+ return cfg->info & ((1U << _EVTCHN_BITS) - 1); -+} -+ -+static inline unsigned int evtchn_from_irq_data(struct irq_data *data) -+{ -+ const struct irq_cfg *cfg = irq_data_cfg(data); -+ -+ return cfg ? evtchn_from_irq_cfg(cfg) : 0; -+} -+ -+static inline unsigned int evtchn_from_irq(int irq) -+{ -+ struct irq_data *data = irq_get_irq_data(irq); -+ -+ return data ? evtchn_from_irq_data(data) : 0; -+} -+ - unsigned int irq_from_evtchn(unsigned int port) - { - return evtchn_to_irq[port]; -@@ -172,11 +201,13 @@ EXPORT_SYMBOL_GPL(irq_from_evtchn); - /* IRQ <-> VIRQ mapping. */ - DEFINE_PER_CPU(int[NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; - -+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ) - /* IRQ <-> IPI mapping. */ - #ifndef NR_IPIS - #define NR_IPIS 1 - #endif - DEFINE_PER_CPU(int[NR_IPIS], ipi_to_irq) = {[0 ... 
NR_IPIS-1] = -1}; -+#endif - - #ifdef CONFIG_SMP - -@@ -204,8 +235,14 @@ static void bind_evtchn_to_cpu(unsigned - - BUG_ON(!test_bit(chn, s->evtchn_mask)); - -- if (irq != -1) -- cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu)); -+ if (irq != -1) { -+ struct irq_desc *desc = irq_to_desc(irq); -+ -+ if (!(desc->status & IRQ_PER_CPU)) -+ cpumask_copy(desc->irq_data.affinity, cpumask_of(cpu)); -+ else -+ cpumask_set_cpu(cpu, desc->irq_data.affinity); -+ } - - clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_evtchn[chn])); - set_bit(chn, per_cpu(cpu_evtchn_mask, cpu)); -@@ -370,7 +407,10 @@ asmlinkage void __irq_entry evtchn_do_up - port = (l1i * BITS_PER_LONG) + l2i; - mask_evtchn(port); - if ((irq = evtchn_to_irq[port]) != -1) { -- clear_evtchn(port); -+#ifndef PER_CPU_IPI_IRQ -+ if (port != percpu_read(ipi_evtchn)) -+#endif -+ clear_evtchn(port); - handled = handle_irq(irq, regs); - } - if (!handled && printk_ratelimit()) -@@ -404,7 +444,7 @@ asmlinkage void __irq_entry evtchn_do_up - } - - static int find_unbound_irq(unsigned int node, struct irq_cfg **pcfg, -- struct irq_chip *chip) -+ struct irq_chip *chip, bool percpu) - { - static int warned; - int irq; -@@ -420,11 +460,20 @@ static int find_unbound_irq(unsigned int - continue; - - if (!cfg->bindcount) { -+ irq_flow_handler_t handle; -+ const char *name; -+ - *pcfg = cfg; - desc->status |= IRQ_NOPROBE; -+ if (!percpu) { -+ handle = handle_fasteoi_irq; -+ name = "fasteoi"; -+ } else { -+ handle = handle_percpu_irq; -+ name = "percpu"; -+ } - set_irq_chip_and_handler_name(irq, chip, -- handle_fasteoi_irq, -- "fasteoi"); -+ handle, name); - return irq; - } - } -@@ -449,7 +498,7 @@ static int bind_caller_port_to_irq(unsig - - if ((irq = evtchn_to_irq[caller_port]) == -1) { - if ((irq = find_unbound_irq(numa_node_id(), &cfg, -- &dynirq_chip)) < 0) -+ &dynirq_chip, false)) < 0) - goto out; - - evtchn_to_irq[caller_port] = irq; -@@ -473,7 +522,8 @@ static int bind_local_port_to_irq(unsign - - 
BUG_ON(evtchn_to_irq[local_port] != -1); - -- if ((irq = find_unbound_irq(numa_node_id(), &cfg, &dynirq_chip)) < 0) { -+ if ((irq = find_unbound_irq(numa_node_id(), &cfg, &dynirq_chip, -+ false)) < 0) { - if (close_evtchn(local_port)) - BUG(); - goto out; -@@ -527,7 +577,7 @@ static int bind_virq_to_irq(unsigned int - - if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) { - if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg, -- &dynirq_chip)) < 0) -+ &dynirq_chip, false)) < 0) - goto out; - - bind_virq.virq = virq; -@@ -553,6 +603,7 @@ static int bind_virq_to_irq(unsigned int - return irq; - } - -+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ) - static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) - { - struct evtchn_bind_ipi bind_ipi; -@@ -563,7 +614,7 @@ static int bind_ipi_to_irq(unsigned int - - if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) { - if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg, -- &dynirq_chip)) < 0) -+ &dynirq_chip, false)) < 0) - goto out; - - bind_ipi.vcpu = cpu; -@@ -587,6 +638,7 @@ static int bind_ipi_to_irq(unsigned int - spin_unlock(&irq_mapping_update_lock); - return irq; - } -+#endif - - static void unbind_from_irq(unsigned int irq) - { -@@ -594,6 +646,7 @@ static void unbind_from_irq(unsigned int - struct irq_cfg *cfg = irq_cfg(irq); - int evtchn = evtchn_from_irq_cfg(cfg); - -+ BUG_IF_IPI(cfg); - spin_lock(&irq_mapping_update_lock); - - if (!--cfg->bindcount && VALID_EVTCHN(evtchn)) { -@@ -606,10 +659,12 @@ static void unbind_from_irq(unsigned int - per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) - [index_from_irq_cfg(cfg)] = -1; - break; -+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ) - case IRQT_IPI: - per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) - [index_from_irq_cfg(cfg)] = -1; - break; -+#endif - default: - break; - } -@@ -636,6 +691,46 @@ static void unbind_from_irq(unsigned int - spin_unlock(&irq_mapping_update_lock); - } - -+#ifndef PER_CPU_IPI_IRQ -+void unbind_from_per_cpu_irq(unsigned int irq, 
unsigned int cpu) -+{ -+ struct evtchn_close close; -+ struct irq_data *data = irq_get_irq_data(irq); -+ struct irq_cfg *cfg = irq_data_cfg(data); -+ int evtchn = evtchn_from_per_cpu_irq(cfg, cpu); -+ -+ spin_lock(&irq_mapping_update_lock); -+ -+ if (VALID_EVTCHN(evtchn)) { -+ mask_evtchn(evtchn); -+ -+ BUG_ON(cfg->bindcount <= 1); -+ cfg->bindcount--; -+ cpumask_clear_cpu(cpu, data->affinity); -+ -+ close.port = evtchn; -+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) -+ BUG(); -+ -+ switch (type_from_irq_cfg(cfg)) { -+ case IRQT_IPI: -+ per_cpu(ipi_evtchn, cpu) = 0; -+ break; -+ default: -+ BUG(); -+ break; -+ } -+ -+ /* Closed ports are implicitly re-bound to VCPU0. */ -+ bind_evtchn_to_cpu(evtchn, 0); -+ -+ evtchn_to_irq[evtchn] = -1; -+ } -+ -+ spin_unlock(&irq_mapping_update_lock); -+} -+#endif /* !PER_CPU_IPI_IRQ */ -+ - int bind_caller_port_to_irqhandler( - unsigned int caller_port, - irq_handler_t handler, -@@ -730,6 +825,8 @@ int bind_virq_to_irqhandler( - } - EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); - -+#ifdef CONFIG_SMP -+#ifdef PER_CPU_IPI_IRQ - int bind_ipi_to_irqhandler( - unsigned int ipi, - unsigned int cpu, -@@ -753,7 +850,71 @@ int bind_ipi_to_irqhandler( - - return irq; - } --EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler); -+#else -+int __cpuinit bind_ipi_to_irqaction( -+ unsigned int cpu, -+ struct irqaction *action) -+{ -+ struct evtchn_bind_ipi bind_ipi; -+ struct irq_cfg *cfg; -+ int evtchn, retval = 0; -+ -+ spin_lock(&irq_mapping_update_lock); -+ -+ if (VALID_EVTCHN(per_cpu(ipi_evtchn, cpu))) { -+ spin_unlock(&irq_mapping_update_lock); -+ return -EBUSY; -+ } -+ -+ if (ipi_irq < 0) { -+ if ((ipi_irq = find_unbound_irq(cpu_to_node(cpu), &cfg, -+ &dynirq_chip, true)) < 0) { -+ spin_unlock(&irq_mapping_update_lock); -+ return ipi_irq; -+ } -+ -+ /* Extra reference so count will never drop to zero. 
*/ -+ cfg->bindcount++; -+ -+ cfg->info = mk_irq_info(IRQT_IPI, 0, 0); -+ retval = 1; -+ } else -+ cfg = irq_cfg(ipi_irq); -+ -+ bind_ipi.vcpu = cpu; -+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi)) -+ BUG(); -+ -+ evtchn = bind_ipi.port; -+ evtchn_to_irq[evtchn] = ipi_irq; -+ per_cpu(ipi_evtchn, cpu) = evtchn; -+ -+ bind_evtchn_to_cpu(evtchn, cpu); -+ -+ cfg->bindcount++; -+ -+ spin_unlock(&irq_mapping_update_lock); -+ -+ if (retval == 0) { -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ unmask_evtchn(evtchn); -+ local_irq_restore(flags); -+ } else { -+ action->flags |= IRQF_PERCPU | IRQF_NO_SUSPEND; -+ retval = setup_irq(ipi_irq, action); -+ if (retval) { -+ unbind_from_per_cpu_irq(ipi_irq, cpu); -+ BUG_ON(retval > 0); -+ ipi_irq = retval; -+ } -+ } -+ -+ return ipi_irq; -+} -+#endif /* PER_CPU_IPI_IRQ */ -+#endif /* CONFIG_SMP */ - - void unbind_from_irqhandler(unsigned int irq, void *dev_id) - { -@@ -777,8 +938,10 @@ void rebind_evtchn_to_cpu(int port, unsi - - static void rebind_irq_to_cpu(struct irq_data *data, unsigned int tcpu) - { -- int evtchn = evtchn_from_irq_data(data); -+ const struct irq_cfg *cfg = irq_data_cfg(data); -+ int evtchn = evtchn_from_irq_cfg(cfg); - -+ BUG_IF_IPI(cfg); - if (VALID_EVTCHN(evtchn)) - rebind_evtchn_to_cpu(evtchn, tcpu); - } -@@ -1031,10 +1194,47 @@ int irq_ignore_unhandled(unsigned int ir - return !!(irq_status.flags & XENIRQSTAT_shared); - } - -+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ) -+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu) -+{ -+ int evtchn = per_cpu(ipi_evtchn, cpu); -+ -+#ifdef NMI_VECTOR -+ if (ipi == NMI_VECTOR) { -+ static int __read_mostly printed; -+ int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL); -+ -+ if (rc && !printed) -+ pr_warning("Unable (%d) to send NMI to CPU#%u\n", -+ printed = rc, cpu); -+ return; -+ } -+#endif -+ -+ if (VALID_EVTCHN(evtchn) -+ && !test_and_set_bit(ipi, per_cpu(ipi_pending, cpu)) -+ && !test_evtchn(evtchn)) -+ 
notify_remote_via_evtchn(evtchn); -+} -+ -+void clear_ipi_evtchn(void) -+{ -+ int evtchn = percpu_read(ipi_evtchn); -+ -+ BUG_ON(!VALID_EVTCHN(evtchn)); -+ clear_evtchn(evtchn); -+} -+#endif -+ - void notify_remote_via_irq(int irq) - { -- int evtchn = evtchn_from_irq(irq); -+ const struct irq_cfg *cfg = irq_cfg(irq); -+ int evtchn; - -+ if (WARN_ON_ONCE(!cfg)) -+ return; -+ BUG_IF_IPI(cfg); -+ evtchn = evtchn_from_irq_cfg(cfg); - if (VALID_EVTCHN(evtchn)) - notify_remote_via_evtchn(evtchn); - } -@@ -1042,7 +1242,12 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq) - - int irq_to_evtchn_port(int irq) - { -- return evtchn_from_irq(irq); -+ const struct irq_cfg *cfg = irq_cfg(irq); -+ -+ if (!cfg) -+ return 0; -+ BUG_IF_IPI(cfg); -+ return evtchn_from_irq_cfg(cfg); - } - EXPORT_SYMBOL_GPL(irq_to_evtchn_port); - -@@ -1130,12 +1335,22 @@ static void restore_cpu_virqs(unsigned i - - static void restore_cpu_ipis(unsigned int cpu) - { -+#ifdef CONFIG_SMP - struct evtchn_bind_ipi bind_ipi; -- int ipi, irq, evtchn; -+ int evtchn; -+#ifdef PER_CPU_IPI_IRQ -+ int ipi, irq; - - for (ipi = 0; ipi < NR_IPIS; ipi++) { - if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) - continue; -+#else -+#define ipi 0 -+#define irq ipi_irq -+ if (irq == -1 -+ || !VALID_EVTCHN(per_cpu(ipi_evtchn, cpu))) -+ return; -+#endif - - BUG_ON(irq_cfg(irq)->info != mk_irq_info(IRQT_IPI, ipi, 0)); - -@@ -1148,13 +1363,23 @@ static void restore_cpu_ipis(unsigned in - - /* Record the new mapping. */ - evtchn_to_irq[evtchn] = irq; -+#ifdef PER_CPU_IPI_IRQ - irq_cfg(irq)->info = mk_irq_info(IRQT_IPI, ipi, evtchn); -+#else -+ per_cpu(ipi_evtchn, cpu) = evtchn; -+#endif - bind_evtchn_to_cpu(evtchn, cpu); - - /* Ready for use. 
*/ - if (!(irq_to_desc(irq)->status & IRQ_DISABLED)) - unmask_evtchn(evtchn); -+#ifdef PER_CPU_IPI_IRQ - } -+#else -+#undef irq -+#undef ipi -+#endif -+#endif /* CONFIG_SMP */ - } - - static int evtchn_resume(struct sys_device *dev) -@@ -1358,7 +1583,8 @@ int evtchn_map_pirq(int irq, int xen_pir - struct irq_cfg *cfg; - - spin_lock(&irq_mapping_update_lock); -- irq = find_unbound_irq(numa_node_id(), &cfg, &pirq_chip); -+ irq = find_unbound_irq(numa_node_id(), &cfg, &pirq_chip, -+ false); - if (irq >= 0) { - BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND); - cfg->bindcount++; ---- head-2011-02-17.orig/drivers/xen/core/smpboot.c 2011-03-03 16:13:04.000000000 +0100 -+++ head-2011-02-17/drivers/xen/core/smpboot.c 2011-03-03 16:14:20.000000000 +0100 -@@ -36,19 +36,7 @@ cpumask_var_t vcpu_initialized_mask; - DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); - EXPORT_PER_CPU_SYMBOL(cpu_info); - --static DEFINE_PER_CPU(int, resched_irq); --static DEFINE_PER_CPU(int, callfunc_irq); --static DEFINE_PER_CPU(int, call1func_irq); --static DEFINE_PER_CPU(int, reboot_irq); --static char resched_name[NR_CPUS][15]; --static char callfunc_name[NR_CPUS][15]; --static char call1func_name[NR_CPUS][15]; --static char reboot_name[NR_CPUS][15]; -- --#ifdef CONFIG_IRQ_WORK --static DEFINE_PER_CPU(int, irq_work_irq); --static char irq_work_name[NR_CPUS][15]; --#endif -+static int __read_mostly ipi_irq = -1; - - void __init prefill_possible_map(void) - { -@@ -75,76 +63,59 @@ void __init prefill_possible_map(void) - ++total_cpus; - } - --static int __cpuinit xen_smp_intr_init(unsigned int cpu) -+static irqreturn_t ipi_interrupt(int irq, void *dev_id) - { -- int rc; -- -- per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -+ static void (*const handlers[])(struct pt_regs *) = { -+ [RESCHEDULE_VECTOR] = smp_reschedule_interrupt, -+ [CALL_FUNCTION_VECTOR] = smp_call_function_interrupt, -+ [CALL_FUNC_SINGLE_VECTOR] = smp_call_function_single_interrupt, -+ [REBOOT_VECTOR] = 
smp_reboot_interrupt, - #ifdef CONFIG_IRQ_WORK -- per_cpu(irq_work_irq, cpu) = -+ [IRQ_WORK_VECTOR] = smp_irq_work_interrupt, - #endif -- per_cpu(call1func_irq, cpu) = per_cpu(reboot_irq, cpu) = -1; -- -- sprintf(resched_name[cpu], "resched%u", cpu); -- rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, -- cpu, -- smp_reschedule_interrupt, -- IRQF_DISABLED|IRQF_NOBALANCING, -- resched_name[cpu], -- NULL); -- if (rc < 0) -- goto fail; -- per_cpu(resched_irq, cpu) = rc; -- -- sprintf(callfunc_name[cpu], "callfunc%u", cpu); -- rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR, -- cpu, -- smp_call_function_interrupt, -- IRQF_DISABLED|IRQF_NOBALANCING, -- callfunc_name[cpu], -- NULL); -- if (rc < 0) -- goto fail; -- per_cpu(callfunc_irq, cpu) = rc; -- -- sprintf(call1func_name[cpu], "call1func%u", cpu); -- rc = bind_ipi_to_irqhandler(CALL_FUNC_SINGLE_VECTOR, -- cpu, -- smp_call_function_single_interrupt, -- IRQF_DISABLED|IRQF_NOBALANCING, -- call1func_name[cpu], -- NULL); -- if (rc < 0) -- goto fail; -- per_cpu(call1func_irq, cpu) = rc; -+ }; -+ unsigned long *pending = __get_cpu_var(ipi_pending); -+ struct pt_regs *regs = get_irq_regs(); -+ irqreturn_t ret = IRQ_NONE; -+ -+ for (;;) { -+ unsigned int ipi = find_first_bit(pending, NR_IPIS); -+ -+ if (ipi >= NR_IPIS) { -+ clear_ipi_evtchn(); -+ ipi = find_first_bit(pending, NR_IPIS); -+ } -+ if (ipi >= NR_IPIS) -+ return ret; -+ ret = IRQ_HANDLED; -+ do { -+ clear_bit(ipi, pending); -+ handlers[ipi](regs); -+ ipi = find_next_bit(pending, NR_IPIS, ipi); -+ } while (ipi < NR_IPIS); -+ } -+} - -- sprintf(reboot_name[cpu], "reboot%u", cpu); -- rc = bind_ipi_to_irqhandler(REBOOT_VECTOR, -- cpu, -- smp_reboot_interrupt, -- IRQF_DISABLED|IRQF_NOBALANCING, -- reboot_name[cpu], -- NULL); -- if (rc < 0) -- goto fail; -- per_cpu(reboot_irq, cpu) = rc; -+static int __cpuinit xen_smp_intr_init(unsigned int cpu) -+{ -+ static struct irqaction ipi_action = { -+ .handler = ipi_interrupt, -+ .flags = IRQF_DISABLED, -+ .name = "ipi" -+ }; -+ 
int rc; - --#ifdef CONFIG_IRQ_WORK -- sprintf(irq_work_name[cpu], "irqwork%u", cpu); -- rc = bind_ipi_to_irqhandler(IRQ_WORK_VECTOR, -- cpu, -- smp_irq_work_interrupt, -- IRQF_DISABLED|IRQF_NOBALANCING, -- irq_work_name[cpu], -- NULL); -+ rc = bind_ipi_to_irqaction(cpu, &ipi_action); - if (rc < 0) -- goto fail; -- per_cpu(irq_work_irq, cpu) = rc; --#endif -+ return rc; -+ if (ipi_irq < 0) -+ ipi_irq = rc; -+ else -+ BUG_ON(ipi_irq != rc); - - rc = xen_spinlock_init(cpu); - if (rc < 0) -- goto fail; -+ goto unbind_ipi; - - if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0)) - goto fail; -@@ -152,19 +123,9 @@ static int __cpuinit xen_smp_intr_init(u - return 0; - - fail: -- if (per_cpu(resched_irq, cpu) >= 0) -- unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); -- if (per_cpu(callfunc_irq, cpu) >= 0) -- unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); -- if (per_cpu(call1func_irq, cpu) >= 0) -- unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL); -- if (per_cpu(reboot_irq, cpu) >= 0) -- unbind_from_irqhandler(per_cpu(reboot_irq, cpu), NULL); --#ifdef CONFIG_IRQ_WORK -- if (per_cpu(irq_work_irq, cpu) >= 0) -- unbind_from_irqhandler(per_cpu(irq_work_irq, cpu), NULL); --#endif - xen_spinlock_cleanup(cpu); -+ unbind_ipi: -+ unbind_from_per_cpu_irq(ipi_irq, cpu); - return rc; - } - -@@ -174,13 +135,7 @@ static void __cpuinit xen_smp_intr_exit( - if (cpu != 0) - local_teardown_timer(cpu); - -- unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); -- unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); -- unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL); -- unbind_from_irqhandler(per_cpu(reboot_irq, cpu), NULL); --#ifdef CONFIG_IRQ_WORK -- unbind_from_irqhandler(per_cpu(irq_work_irq, cpu), NULL); --#endif -+ unbind_from_per_cpu_irq(ipi_irq, cpu); - xen_spinlock_cleanup(cpu); - } - #endif ---- head-2011-02-17.orig/include/xen/evtchn.h 2010-11-23 15:07:01.000000000 +0100 -+++ head-2011-02-17/include/xen/evtchn.h 2011-02-02 
15:09:43.000000000 +0100 -@@ -94,6 +94,8 @@ int bind_virq_to_irqhandler( - unsigned long irqflags, - const char *devname, - void *dev_id); -+#if defined(CONFIG_SMP) && !defined(MODULE) -+#ifndef CONFIG_X86 - int bind_ipi_to_irqhandler( - unsigned int ipi, - unsigned int cpu, -@@ -101,6 +103,13 @@ int bind_ipi_to_irqhandler( - unsigned long irqflags, - const char *devname, - void *dev_id); -+#else -+int bind_ipi_to_irqaction( -+ unsigned int cpu, -+ struct irqaction *action); -+DECLARE_PER_CPU(DECLARE_BITMAP(, NR_IPIS), ipi_pending); -+#endif -+#endif - - /* - * Common unbind function for all event sources. Takes IRQ to unbind from. -@@ -109,6 +118,11 @@ int bind_ipi_to_irqhandler( - */ - void unbind_from_irqhandler(unsigned int irq, void *dev_id); - -+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86) -+/* Specialized unbind function for per-CPU IRQs. */ -+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu); -+#endif -+ - #ifndef CONFIG_XEN - void irq_resume(void); - #endif -@@ -180,5 +194,10 @@ int xen_test_irq_pending(int irq); - void notify_remote_via_irq(int irq); - int irq_to_evtchn_port(int irq); - -+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86) -+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu); -+void clear_ipi_evtchn(void); -+#endif -+ - #endif /* __ASM_EVTCHN_H__ */ - #endif /* CONFIG_PARAVIRT_XEN */ diff --git a/patches.xen/xen-kconfig-compat b/patches.xen/xen-kconfig-compat deleted file mode 100644 index d2d9ae8..0000000 --- a/patches.xen/xen-kconfig-compat +++ /dev/null @@ -1,40 +0,0 @@ -From: jbeulich@novell.com -Subject: add backward-compatibility configure options -Patch-mainline: n/a - ---- head-2011-01-30.orig/drivers/xen/Kconfig 2010-11-26 13:37:36.000000000 +0100 -+++ head-2011-01-30/drivers/xen/Kconfig 2011-02-03 14:48:57.000000000 +0100 -@@ -317,6 +317,21 @@ choice - config XEN_COMPAT_030100_AND_LATER - bool "3.1.0 and later" - -+ config XEN_COMPAT_030200_AND_LATER -+ bool "3.2.0 
and later" -+ -+ config XEN_COMPAT_030300_AND_LATER -+ bool "3.3.0 and later" -+ -+ config XEN_COMPAT_030400_AND_LATER -+ bool "3.4.0 and later" -+ -+ config XEN_COMPAT_040000_AND_LATER -+ bool "4.0.0 and later" -+ -+ config XEN_COMPAT_040100_AND_LATER -+ bool "4.1.0 and later" -+ - config XEN_COMPAT_LATEST_ONLY - bool "no compatibility code" - -@@ -325,6 +340,11 @@ endchoice - config XEN_COMPAT - hex - default 0xffffff if XEN_COMPAT_LATEST_ONLY -+ default 0x040100 if XEN_COMPAT_040100_AND_LATER -+ default 0x040000 if XEN_COMPAT_040000_AND_LATER -+ default 0x030400 if XEN_COMPAT_030400_AND_LATER -+ default 0x030300 if XEN_COMPAT_030300_AND_LATER -+ default 0x030200 if XEN_COMPAT_030200_AND_LATER - default 0x030100 if XEN_COMPAT_030100_AND_LATER - default 0x030004 if XEN_COMPAT_030004_AND_LATER - default 0x030002 if XEN_COMPAT_030002_AND_LATER diff --git a/patches.xen/xen-kzalloc b/patches.xen/xen-kzalloc deleted file mode 100644 index 1d5587c..0000000 --- a/patches.xen/xen-kzalloc +++ /dev/null @@ -1,186 +0,0 @@ -From: jbeulich@novell.com -Subject: use kzalloc() in favor of kmalloc()+memset() -Patch-mainline: n/a - -Also use clear_page() in favor of memset(, 0, PAGE_SIZE). 
- ---- head-2011-02-17.orig/arch/x86/mm/init_32-xen.c 2011-02-02 15:07:16.000000000 +0100 -+++ head-2011-02-17/arch/x86/mm/init_32-xen.c 2011-02-02 15:10:16.000000000 +0100 -@@ -724,7 +724,7 @@ unsigned long __init extend_init_mapping - if (pmd_none(*pmd)) { - unsigned long pa = start_pfn++ << PAGE_SHIFT; - -- memset(__va(pa), 0, PAGE_SIZE); -+ clear_page(__va(pa)); - make_lowmem_page_readonly(__va(pa), - XENFEAT_writable_page_tables); - xen_l2_entry_update(pmd, __pmd(pa | _KERNPG_TABLE)); ---- head-2011-02-17.orig/arch/x86/mm/init_64-xen.c 2011-02-02 15:07:16.000000000 +0100 -+++ head-2011-02-17/arch/x86/mm/init_64-xen.c 2011-02-02 15:10:16.000000000 +0100 -@@ -213,7 +213,7 @@ static __ref void *spp_getpage(void) - else if (e820_table_end < e820_table_top) { - ptr = __va(e820_table_end << PAGE_SHIFT); - e820_table_end++; -- memset(ptr, 0, PAGE_SIZE); -+ clear_page(ptr); - } else - ptr = alloc_bootmem_pages(PAGE_SIZE); - ---- head-2011-02-17.orig/drivers/xen/blkback/blkback.c 2011-02-28 14:23:53.000000000 +0100 -+++ head-2011-02-17/drivers/xen/blkback/blkback.c 2011-02-28 14:26:29.000000000 +0100 -@@ -639,7 +639,7 @@ static int __init blkif_init(void) - - mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; - -- pending_reqs = kmalloc(sizeof(pending_reqs[0]) * -+ pending_reqs = kzalloc(sizeof(pending_reqs[0]) * - blkif_reqs, GFP_KERNEL); - pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) * - mmap_pages, GFP_KERNEL); -@@ -656,7 +656,6 @@ static int __init blkif_init(void) - - blkif_interface_init(); - -- memset(pending_reqs, 0, sizeof(pending_reqs)); - INIT_LIST_HEAD(&pending_free); - - for (i = 0; i < blkif_reqs; i++) ---- head-2011-02-17.orig/drivers/xen/core/gnttab.c 2010-09-23 17:06:35.000000000 +0200 -+++ head-2011-02-17/drivers/xen/core/gnttab.c 2011-02-02 15:10:16.000000000 +0100 -@@ -546,7 +546,7 @@ int gnttab_copy_grant_page(grant_ref_t r - - new_addr = page_address(new_page); - addr = page_address(page); -- memcpy(new_addr, addr, 
PAGE_SIZE); -+ copy_page(new_addr, addr); - - pfn = page_to_pfn(page); - mfn = pfn_to_mfn(pfn); ---- head-2011-02-17.orig/drivers/xen/core/machine_reboot.c 2011-02-02 15:09:52.000000000 +0100 -+++ head-2011-02-17/drivers/xen/core/machine_reboot.c 2011-02-02 15:10:16.000000000 +0100 -@@ -97,7 +97,7 @@ static void post_suspend(int suspend_can - BUG(); - HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO); - -- memset(empty_zero_page, 0, PAGE_SIZE); -+ clear_page(empty_zero_page); - - fpp = PAGE_SIZE/sizeof(unsigned long); - for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) { ---- head-2011-02-17.orig/drivers/xen/core/smpboot.c 2011-02-07 12:28:20.000000000 +0100 -+++ head-2011-02-17/drivers/xen/core/smpboot.c 2011-02-02 15:10:16.000000000 +0100 -@@ -176,17 +176,12 @@ static void __cpuinit cpu_initialize_con - ctxt.flags = VGCF_IN_KERNEL; - ctxt.user_regs.ds = __USER_DS; - ctxt.user_regs.es = __USER_DS; -- ctxt.user_regs.fs = 0; -- ctxt.user_regs.gs = 0; - ctxt.user_regs.ss = __KERNEL_DS; - ctxt.user_regs.eip = (unsigned long)cpu_bringup_and_idle; - ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */ - -- memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt)); -- - smp_trap_init(ctxt.trap_ctxt); - -- ctxt.ldt_ents = 0; - ctxt.gdt_frames[0] = arbitrary_virt_to_mfn(get_cpu_gdt_table(cpu)); - ctxt.gdt_ents = GDT_SIZE / 8; - ---- head-2011-02-17.orig/drivers/xen/netback/interface.c 2011-02-17 10:34:28.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/interface.c 2011-02-17 10:36:11.000000000 +0100 -@@ -270,7 +270,6 @@ netif_t *netif_alloc(struct device *pare - SET_NETDEV_DEV(dev, parent); - - netif = netdev_priv(dev); -- memset(netif, 0, sizeof(*netif)); - netif->domid = domid; - netif->group = UINT_MAX; - netif->handle = handle; ---- head-2011-02-17.orig/drivers/xen/scsiback/emulate.c 2011-02-08 10:04:09.000000000 +0100 -+++ head-2011-02-17/drivers/xen/scsiback/emulate.c 2011-02-08 10:45:57.000000000 +0100 -@@ -243,13 +243,11 @@ 
static void __report_luns(pending_req_t - alloc_len = sizeof(struct scsi_lun) * alloc_luns - + VSCSI_REPORT_LUNS_HEADER; - retry: -- if ((buff = kmalloc(alloc_len, GFP_KERNEL)) == NULL) { -+ if ((buff = kzalloc(alloc_len, GFP_KERNEL)) == NULL) { - pr_err("scsiback:%s kmalloc err\n", __FUNCTION__); - goto fail; - } - -- memset(buff, 0, alloc_len); -- - one_lun = (struct scsi_lun *) &buff[8]; - spin_lock_irqsave(&info->v2p_lock, flags); - list_for_each_entry(entry, head, l) { ---- head-2011-02-17.orig/drivers/xen/scsiback/scsiback.c 2011-02-28 14:51:23.000000000 +0100 -+++ head-2011-02-17/drivers/xen/scsiback/scsiback.c 2011-02-28 14:53:21.000000000 +0100 -@@ -668,7 +668,7 @@ static int __init scsiback_init(void) - - mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE; - -- pending_reqs = kmalloc(sizeof(pending_reqs[0]) * -+ pending_reqs = kzalloc(sizeof(pending_reqs[0]) * - vscsiif_reqs, GFP_KERNEL); - pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) * - mmap_pages, GFP_KERNEL); -@@ -683,7 +683,6 @@ static int __init scsiback_init(void) - if (scsiback_interface_init() < 0) - goto out_of_kmem; - -- memset(pending_reqs, 0, sizeof(pending_reqs)); - INIT_LIST_HEAD(&pending_free); - - for (i = 0; i < vscsiif_reqs; i++) ---- head-2011-02-17.orig/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2011-02-01 15:03:03.000000000 +0100 -+++ head-2011-02-17/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2011-02-02 15:10:16.000000000 +0100 -@@ -77,7 +77,7 @@ int cuckoo_hash_init(cuckoo_hash_table * - BUG_ON(length_bits >= sizeof(unsigned) * 8); - BUG_ON(key_length > sizeof(cuckoo_hash_key)); - -- table_mem = kmalloc(sizeof(cuckoo_hash_entry) * 2 * length, GFP_KERNEL); -+ table_mem = kzalloc(sizeof(cuckoo_hash_entry) * 2 * length, GFP_KERNEL); - - if (table_mem == NULL) - return -ENOMEM; -@@ -93,9 +93,6 @@ int cuckoo_hash_init(cuckoo_hash_table * - - set_hash_parameters(hashtab); - -- /* Zero the table */ -- memset(hashtab->table0, 0, length * 2 * sizeof(cuckoo_hash_entry)); 
-- - return 0; - } - EXPORT_SYMBOL_GPL(cuckoo_hash_init); ---- head-2011-02-17.orig/drivers/xen/tpmback/interface.c 2011-01-31 17:56:27.000000000 +0100 -+++ head-2011-02-17/drivers/xen/tpmback/interface.c 2011-02-02 15:10:16.000000000 +0100 -@@ -128,7 +128,7 @@ int tpmif_map(tpmif_t *tpmif, unsigned l - } - - tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr; -- memset(tpmif->tx, 0, PAGE_SIZE); -+ clear_page(tpmif->tx); - - err = bind_interdomain_evtchn_to_irqhandler( - tpmif->domid, evtchn, tpmif_be_int, 0, tpmif->devname, tpmif); ---- head-2011-02-17.orig/drivers/xen/usbback/usbback.c 2011-02-01 15:03:10.000000000 +0100 -+++ head-2011-02-17/drivers/xen/usbback/usbback.c 2011-02-02 15:10:16.000000000 +0100 -@@ -1140,7 +1140,7 @@ static int __init usbback_init(void) - return -ENODEV; - - mmap_pages = usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST; -- pending_reqs = kmalloc(sizeof(pending_reqs[0]) * -+ pending_reqs = kzalloc(sizeof(pending_reqs[0]) * - usbif_reqs, GFP_KERNEL); - pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) * - mmap_pages, GFP_KERNEL); -@@ -1154,7 +1154,6 @@ static int __init usbback_init(void) - for (i = 0; i < mmap_pages; i++) - pending_grant_handles[i] = USBBACK_INVALID_HANDLE; - -- memset(pending_reqs, 0, sizeof(pending_reqs)); - INIT_LIST_HEAD(&pending_free); - - for (i = 0; i < usbif_reqs; i++) diff --git a/patches.xen/xen-mem-hotplug b/patches.xen/xen-mem-hotplug deleted file mode 100644 index 07fd402..0000000 --- a/patches.xen/xen-mem-hotplug +++ /dev/null @@ -1,285 +0,0 @@ -From: Jiang, Yunhong -Subject: xen/acpi: Add memory hotadd to pvops dom0 -References: bnc#651066 -Patch-mainline: n/a - -When memory hotadd event happen, a Xen hook will be called, to notify -hypervisor of the new added memory. - -Because xen hypervisor will use the new memory to setup frametable/m2p -table, so dom0 will always return success to acpi bios, and notify xen -hypervisor later. 
- -It add a hook in driver/acpi/acpi_memhotplug.c, but that change is quite -small, not sure if it is acceptable. Other method is to provide a xen -specific acpi_memory_device_driver, but I'm not sure if it worth to add -so much changes, to simply avoid two hooks. - -jb: Integrate into base module; cleanup. -Acked-by: jbeulich@novell.com - ---- head-2011-01-30.orig/drivers/acpi/Kconfig 2011-02-01 14:42:26.000000000 +0100 -+++ head-2011-01-30/drivers/acpi/Kconfig 2011-02-02 15:10:05.000000000 +0100 -@@ -349,7 +349,7 @@ config ACPI_CONTAINER - - config ACPI_HOTPLUG_MEMORY - tristate "Memory Hotplug" -- depends on MEMORY_HOTPLUG -+ depends on MEMORY_HOTPLUG || XEN_PRIVILEGED_GUEST - default n - help - This driver supports ACPI memory hotplug. The driver ---- head-2011-01-30.orig/drivers/acpi/acpi_memhotplug.c 2010-05-16 23:17:36.000000000 +0200 -+++ head-2011-01-30/drivers/acpi/acpi_memhotplug.c 2011-02-02 15:10:06.000000000 +0100 -@@ -88,6 +88,14 @@ struct acpi_memory_device { - - static int acpi_hotmem_initialized; - -+#ifdef CONFIG_XEN -+#include "../xen/core/acpi_memhotplug.c" -+#define memory_add_physaddr_to_nid(start) 0 -+#else -+static inline int xen_hotadd_mem_init(void) { return 0; } -+static inline void xen_hotadd_mem_exit(void) {} -+#endif -+ - static acpi_status - acpi_memory_get_resource(struct acpi_resource *resource, void *context) - { -@@ -229,6 +237,10 @@ static int acpi_memory_enable_device(str - return result; - } - -+#ifdef CONFIG_XEN -+ return xen_hotadd_memory(mem_device); -+#endif -+ - node = acpi_get_node(mem_device->device->handle); - /* - * Tell the VM there is more memory here... -@@ -312,6 +324,10 @@ static int acpi_memory_disable_device(st - struct acpi_memory_info *info, *n; - - -+#ifdef CONFIG_XEN -+ return -EOPNOTSUPP; -+#endif -+ - /* - * Ask the VM to offline this memory range. 
- * Note: Assume that this function returns zero on success -@@ -531,6 +547,10 @@ static int __init acpi_memory_device_ini - acpi_status status; - - -+ result = xen_hotadd_mem_init(); -+ if (result < 0) -+ return result; -+ - result = acpi_bus_register_driver(&acpi_memory_device_driver); - - if (result < 0) -@@ -570,6 +590,8 @@ static void __exit acpi_memory_device_ex - - acpi_bus_unregister_driver(&acpi_memory_device_driver); - -+ xen_hotadd_mem_exit(); -+ - return; - } - ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-01-30/drivers/xen/core/acpi_memhotplug.c 2011-02-02 15:10:06.000000000 +0100 -@@ -0,0 +1,192 @@ -+/* -+ * xen_acpi_memhotplug.c - interface to notify Xen on memory device hotadd -+ * -+ * Copyright (C) 2008, Intel corporation -+ * -+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or (at -+ * your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-+ * -+ */ -+ -+#include -+#include -+ -+struct xen_hotmem_entry { -+ struct list_head hotmem_list; -+ uint64_t start; -+ uint64_t end; -+ uint32_t flags; -+ uint32_t pxm; -+}; -+ -+struct xen_hotmem_list { -+ struct list_head list; -+ unsigned int entry_nr; -+}; -+ -+static struct xen_hotmem_list xen_hotmem = { -+ .list = LIST_HEAD_INIT(xen_hotmem.list) -+}; -+static DEFINE_SPINLOCK(xen_hotmem_lock); -+ -+static int xen_hyper_addmem(struct xen_hotmem_entry *entry) -+{ -+ xen_platform_op_t op = { -+ .cmd = XENPF_mem_hotadd, -+ .interface_version = XENPF_INTERFACE_VERSION, -+ }; -+ -+ op.u.mem_add.spfn = entry->start >> PAGE_SHIFT; -+ op.u.mem_add.epfn = entry->end >> PAGE_SHIFT; -+ op.u.mem_add.flags = entry->flags; -+ op.u.mem_add.pxm = entry->pxm; -+ -+ return HYPERVISOR_platform_op(&op); -+} -+ -+static int add_hotmem_entry(int pxm, uint64_t start, -+ uint64_t length, uint32_t flags) -+{ -+ struct xen_hotmem_entry *entry; -+ -+ if (pxm < 0 || !length) -+ return -EINVAL; -+ -+ entry = kzalloc(sizeof(struct xen_hotmem_entry), GFP_ATOMIC); -+ if (!entry) -+ return -ENOMEM; -+ -+ INIT_LIST_HEAD(&entry->hotmem_list); -+ entry->start = start; -+ entry->end = start + length; -+ entry->flags = flags; -+ entry->pxm = pxm; -+ -+ spin_lock(&xen_hotmem_lock); -+ -+ list_add_tail(&entry->hotmem_list, &xen_hotmem.list); -+ xen_hotmem.entry_nr++; -+ -+ spin_unlock(&xen_hotmem_lock); -+ -+ return 0; -+} -+ -+static int free_hotmem_entry(struct xen_hotmem_entry *entry) -+{ -+ list_del(&entry->hotmem_list); -+ kfree(entry); -+ -+ return 0; -+} -+ -+static void xen_hotadd_mem_dpc(struct work_struct *work) -+{ -+ struct list_head *elem, *tmp; -+ struct xen_hotmem_entry *entry; -+ unsigned long flags; -+ int ret; -+ -+ spin_lock_irqsave(&xen_hotmem_lock, flags); -+ list_for_each_safe(elem, tmp, &xen_hotmem.list) { -+ entry = list_entry(elem, struct xen_hotmem_entry, hotmem_list); -+ ret = xen_hyper_addmem(entry); -+ if (ret) -+ pr_warning("xen addmem failed with %x\n", ret); -+ 
free_hotmem_entry(entry); -+ xen_hotmem.entry_nr--; -+ } -+ spin_unlock_irqrestore(&xen_hotmem_lock, flags); -+} -+ -+static DECLARE_WORK(xen_hotadd_mem_work, xen_hotadd_mem_dpc); -+ -+static int xen_acpi_get_pxm(acpi_handle h) -+{ -+ unsigned long long pxm; -+ acpi_status status; -+ acpi_handle handle; -+ acpi_handle phandle = h; -+ -+ do { -+ handle = phandle; -+ status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm); -+ if (ACPI_SUCCESS(status)) -+ return pxm; -+ status = acpi_get_parent(handle, &phandle); -+ } while (ACPI_SUCCESS(status)); -+ -+ return -1; -+} -+ -+static int xen_hotadd_memory(struct acpi_memory_device *mem_device) -+{ -+ int pxm, result; -+ int num_enabled = 0; -+ struct acpi_memory_info *info; -+ -+ if (!mem_device) -+ return -EINVAL; -+ -+ pxm = xen_acpi_get_pxm(mem_device->device->handle); -+ -+ if (pxm < 0) -+ return -EINVAL; -+ -+ /* -+ * Always return success to ACPI driver, and notify hypervisor later -+ * because hypervisor will utilize the memory in memory hotadd hypercall -+ */ -+ list_for_each_entry(info, &mem_device->res_list, list) { -+ if (info->enabled) { /* just sanity check...*/ -+ num_enabled++; -+ continue; -+ } -+ /* -+ * If the memory block size is zero, please ignore it. -+ * Don't try to do the following memory hotplug flowchart. 
-+ */ -+ if (!info->length) -+ continue; -+ -+ result = add_hotmem_entry(pxm, info->start_addr, -+ info->length, 0); -+ if (result) -+ continue; -+ info->enabled = 1; -+ num_enabled++; -+ } -+ -+ if (!num_enabled) -+ return -EINVAL; -+ -+ schedule_work(&xen_hotadd_mem_work); -+ -+ return 0; -+} -+ -+static int xen_hotadd_mem_init(void) -+{ -+ if (!is_initial_xendomain()) -+ return -ENODEV; -+ -+ return 0; -+} -+ -+static void xen_hotadd_mem_exit(void) -+{ -+ flush_scheduled_work(); -+} diff --git a/patches.xen/xen-netback-generalize b/patches.xen/xen-netback-generalize deleted file mode 100644 index 6f84c6d..0000000 --- a/patches.xen/xen-netback-generalize +++ /dev/null @@ -1,1317 +0,0 @@ -From: Dongxiao Xu -Subject: [PATCH 1/3] Netback: Generalize static/global variables into 'struct xen_netbk'. -Patch-mainline: n/a - - Put all the static/global variables in netback.c into xen_netback - structure. Do some preparations for the support of netback multiple - threads. - -Signed-off-by: Dongxiao Xu - -jb: various cleanups -Acked-by: jbeulich@novell.com - ---- head-2011-02-17.orig/drivers/xen/netback/common.h 2011-01-31 17:56:27.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/common.h 2011-02-17 10:33:48.000000000 +0100 -@@ -219,4 +219,74 @@ static inline int netbk_can_sg(struct ne - return netif->can_sg; - } - -+struct pending_tx_info { -+ netif_tx_request_t req; -+ netif_t *netif; -+}; -+typedef unsigned int pending_ring_idx_t; -+ -+struct netbk_rx_meta { -+ skb_frag_t frag; -+ int id; -+ u8 copy:1; -+}; -+ -+struct netbk_tx_pending_inuse { -+ struct list_head list; -+ unsigned long alloc_time; -+}; -+ -+#define MAX_PENDING_REQS (1U << CONFIG_XEN_NETDEV_TX_SHIFT) -+#define MAX_MFN_ALLOC 64 -+ -+struct xen_netbk { -+ struct tasklet_struct net_tx_tasklet; -+ struct tasklet_struct net_rx_tasklet; -+ -+ struct sk_buff_head rx_queue; -+ struct sk_buff_head tx_queue; -+ -+ struct timer_list net_timer; -+ struct timer_list tx_pending_timer; -+ -+ pending_ring_idx_t 
pending_prod; -+ pending_ring_idx_t pending_cons; -+ pending_ring_idx_t dealloc_prod; -+ pending_ring_idx_t dealloc_cons; -+ -+ struct list_head pending_inuse_head; -+ struct list_head schedule_list; -+ -+ spinlock_t schedule_list_lock; -+ spinlock_t release_lock; -+ -+ struct page **mmap_pages; -+ -+ unsigned int alloc_index; -+ -+ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; -+ struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS]; -+ struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS]; -+ struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS]; -+ -+ grant_handle_t grant_tx_handle[MAX_PENDING_REQS]; -+ u16 pending_ring[MAX_PENDING_REQS]; -+ u16 dealloc_ring[MAX_PENDING_REQS]; -+ -+ struct multicall_entry rx_mcl[NET_RX_RING_SIZE+3]; -+ struct mmu_update rx_mmu[NET_RX_RING_SIZE]; -+ struct gnttab_transfer grant_trans_op[NET_RX_RING_SIZE]; -+ struct gnttab_copy grant_copy_op[NET_RX_RING_SIZE]; -+ DECLARE_BITMAP(rx_notify, NR_DYNIRQS); -+#if !defined(NR_DYNIRQS) -+# error -+#elif NR_DYNIRQS <= 0x10000 -+ u16 notify_list[NET_RX_RING_SIZE]; -+#else -+ int notify_list[NET_RX_RING_SIZE]; -+#endif -+ struct netbk_rx_meta meta[NET_RX_RING_SIZE]; -+ -+ unsigned long mfn_list[MAX_MFN_ALLOC]; -+}; - #endif /* __NETIF__BACKEND__COMMON_H__ */ ---- head-2011-02-17.orig/drivers/xen/netback/netback.c 2011-01-03 13:30:15.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/netback.c 2011-03-01 11:53:28.000000000 +0100 -@@ -36,6 +36,7 @@ - - #include "common.h" - #include -+#include - #include - #include - #include -@@ -43,18 +44,12 @@ - - /*define NETBE_DEBUG_INTERRUPT*/ - --struct netbk_rx_meta { -- skb_frag_t frag; -- int id; -- u8 copy:1; --}; -+static struct xen_netbk *__read_mostly xen_netbk; -+static const unsigned int netbk_nr_groups = 1; - --struct netbk_tx_pending_inuse { -- struct list_head list; -- unsigned long alloc_time; --}; -+#define GET_GROUP_INDEX(netif) (0) - --static void netif_idx_release(u16 pending_idx); -+static void 
netif_idx_release(struct xen_netbk *, u16 pending_idx); - static void make_tx_response(netif_t *netif, - netif_tx_request_t *txp, - s8 st); -@@ -65,47 +60,56 @@ static netif_rx_response_t *make_rx_resp - u16 size, - u16 flags); - --static void net_tx_action(unsigned long unused); --static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0); -- --static void net_rx_action(unsigned long unused); --static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0); -- --static struct timer_list net_timer; --static struct timer_list netbk_tx_pending_timer; -+static void net_tx_action(unsigned long group); -+static void net_rx_action(unsigned long group); - --#define MAX_PENDING_REQS (1U << CONFIG_XEN_NETDEV_TX_SHIFT) -- --static struct sk_buff_head rx_queue; -- --static struct page **mmap_pages; --static inline unsigned long idx_to_pfn(unsigned int idx) -+static inline unsigned long idx_to_pfn(struct xen_netbk *netbk, unsigned int idx) - { -- return page_to_pfn(mmap_pages[idx]); -+ return page_to_pfn(netbk->mmap_pages[idx]); - } - --static inline unsigned long idx_to_kaddr(unsigned int idx) -+static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk, unsigned int idx) - { -- return (unsigned long)pfn_to_kaddr(idx_to_pfn(idx)); -+ return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx)); - } - - /* extra field used in struct page */ --static inline void netif_set_page_index(struct page *pg, unsigned int index) -+union page_ext { -+ struct { -+#if BITS_PER_LONG < 64 -+#define GROUP_WIDTH (BITS_PER_LONG - CONFIG_XEN_NETDEV_TX_SHIFT) -+#define MAX_GROUPS ((1U << GROUP_WIDTH) - 1) -+ unsigned int grp:GROUP_WIDTH; -+ unsigned int idx:CONFIG_XEN_NETDEV_TX_SHIFT; -+#else -+#define MAX_GROUPS UINT_MAX -+ unsigned int grp, idx; -+#endif -+ } e; -+ void *mapping; -+}; -+ -+static inline void netif_set_page_ext(struct page *pg, unsigned int group, -+ unsigned int idx) - { -- *(unsigned long *)&pg->mapping = index; -+ union page_ext ext = { .e = { .grp = group + 1, .idx = idx } }; -+ 
-+ BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping)); -+ pg->mapping = ext.mapping; - } - --static inline int netif_page_index(struct page *pg) -+static inline unsigned int netif_page_group(const struct page *pg) - { -- unsigned long idx = (unsigned long)pg->mapping; -+ union page_ext ext = { .mapping = pg->mapping }; - -- if (!PageForeign(pg)) -- return -1; -+ return ext.e.grp - 1; -+} - -- if ((idx >= MAX_PENDING_REQS) || (mmap_pages[idx] != pg)) -- return -1; -+static inline unsigned int netif_page_index(const struct page *pg) -+{ -+ union page_ext ext = { .mapping = pg->mapping }; - -- return idx; -+ return ext.e.idx; - } - - /* -@@ -117,36 +121,13 @@ static inline int netif_page_index(struc - sizeof(struct iphdr) + MAX_IPOPTLEN + \ - sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE) - --static struct pending_tx_info { -- netif_tx_request_t req; -- netif_t *netif; --} pending_tx_info[MAX_PENDING_REQS]; --static u16 pending_ring[MAX_PENDING_REQS]; --typedef unsigned int PEND_RING_IDX; - #define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1)) --static PEND_RING_IDX pending_prod, pending_cons; --#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons) -- --/* Freed TX SKBs get batched on this ring before return to pending_ring. */ --static u16 dealloc_ring[MAX_PENDING_REQS]; --static PEND_RING_IDX dealloc_prod, dealloc_cons; - --/* Doubly-linked list of in-use pending entries. 
*/ --static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS]; --static LIST_HEAD(pending_inuse_head); -- --static struct sk_buff_head tx_queue; -- --static grant_handle_t grant_tx_handle[MAX_PENDING_REQS]; --static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS]; --static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS]; -- --static struct list_head net_schedule_list; --static spinlock_t net_schedule_list_lock; -- --#define MAX_MFN_ALLOC 64 --static unsigned long mfn_list[MAX_MFN_ALLOC]; --static unsigned int alloc_index = 0; -+static inline pending_ring_idx_t nr_pending_reqs(const struct xen_netbk *netbk) -+{ -+ return MAX_PENDING_REQS - -+ netbk->pending_prod + netbk->pending_cons; -+} - - /* Setting this allows the safe use of this driver without netloop. */ - static int MODPARM_copy_skb = 1; -@@ -158,13 +139,13 @@ MODULE_PARM_DESC(permute_returns, "Rando - - int netbk_copy_skb_mode; - --static inline unsigned long alloc_mfn(void) -+static inline unsigned long alloc_mfn(struct xen_netbk *netbk) - { -- BUG_ON(alloc_index == 0); -- return mfn_list[--alloc_index]; -+ BUG_ON(netbk->alloc_index == 0); -+ return netbk->mfn_list[--netbk->alloc_index]; - } - --static int check_mfn(int nr) -+static int check_mfn(struct xen_netbk *netbk, unsigned int nr) - { - struct xen_memory_reservation reservation = { - .extent_order = 0, -@@ -172,24 +153,27 @@ static int check_mfn(int nr) - }; - int rc; - -- if (likely(alloc_index >= nr)) -+ if (likely(netbk->alloc_index >= nr)) - return 0; - -- set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index); -- reservation.nr_extents = MAX_MFN_ALLOC - alloc_index; -+ set_xen_guest_handle(reservation.extent_start, -+ netbk->mfn_list + netbk->alloc_index); -+ reservation.nr_extents = MAX_MFN_ALLOC - netbk->alloc_index; - rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation); - if (likely(rc > 0)) -- alloc_index += rc; -+ netbk->alloc_index += rc; - -- return alloc_index >= nr ? 
0 : -ENOMEM; -+ return netbk->alloc_index >= nr ? 0 : -ENOMEM; - } - --static inline void maybe_schedule_tx_action(void) -+static inline void maybe_schedule_tx_action(unsigned int group) - { -+ struct xen_netbk *netbk = &xen_netbk[group]; -+ - smp_mb(); -- if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) && -- !list_empty(&net_schedule_list)) -- tasklet_schedule(&net_tx_tasklet); -+ if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) && -+ !list_empty(&netbk->schedule_list)) -+ tasklet_schedule(&netbk->net_tx_tasklet); - } - - static struct sk_buff *netbk_copy_skb(struct sk_buff *skb) -@@ -298,6 +282,7 @@ static void tx_queue_callback(unsigned l - int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev) - { - netif_t *netif = netdev_priv(dev); -+ struct xen_netbk *netbk; - - BUG_ON(skb->dev != dev); - -@@ -346,8 +331,9 @@ int netif_be_start_xmit(struct sk_buff * - } - } - -- skb_queue_tail(&rx_queue, skb); -- tasklet_schedule(&net_rx_tasklet); -+ netbk = &xen_netbk[GET_GROUP_INDEX(netif)]; -+ skb_queue_tail(&netbk->rx_queue, skb); -+ tasklet_schedule(&netbk->net_rx_tasklet); - - return NETDEV_TX_OK; - -@@ -402,19 +388,29 @@ static u16 netbk_gop_frag(netif_t *netif - multicall_entry_t *mcl; - netif_rx_request_t *req; - unsigned long old_mfn, new_mfn; -- int idx = netif_page_index(page); -+ struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)]; - - old_mfn = virt_to_mfn(page_address(page)); - - req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i); - if (netif->copying_receiver) { -+ unsigned int group, idx; -+ - /* The fragment needs to be copied rather than - flipped. 
*/ - meta->copy = 1; - copy_gop = npo->copy + npo->copy_prod++; - copy_gop->flags = GNTCOPY_dest_gref; -- if (idx > -1) { -- struct pending_tx_info *src_pend = &pending_tx_info[idx]; -+ if (PageForeign(page) && -+ page->mapping != NULL && -+ (idx = netif_page_index(page)) < MAX_PENDING_REQS && -+ (group = netif_page_group(page)) < netbk_nr_groups) { -+ struct pending_tx_info *src_pend; -+ -+ netbk = &xen_netbk[group]; -+ BUG_ON(netbk->mmap_pages[idx] != page); -+ src_pend = &netbk->pending_tx_info[idx]; -+ BUG_ON(group != GET_GROUP_INDEX(src_pend->netif)); - copy_gop->source.domid = src_pend->netif->domid; - copy_gop->source.u.ref = src_pend->req.gref; - copy_gop->flags |= GNTCOPY_source_gref; -@@ -430,7 +426,7 @@ static u16 netbk_gop_frag(netif_t *netif - } else { - meta->copy = 0; - if (!xen_feature(XENFEAT_auto_translated_physmap)) { -- new_mfn = alloc_mfn(); -+ new_mfn = alloc_mfn(netbk); - - /* - * Set the new P2M table entry before -@@ -570,7 +566,7 @@ static void netbk_add_frag_responses(net - } - } - --static void net_rx_action(unsigned long unused) -+static void net_rx_action(unsigned long group) - { - netif_t *netif = NULL; - s8 status; -@@ -584,47 +580,33 @@ static void net_rx_action(unsigned long - int nr_frags; - int count; - unsigned long offset; -- -- /* -- * Putting hundreds of bytes on the stack is considered rude. -- * Static works because a tasklet can only be on one CPU at any time. 
-- */ -- static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3]; -- static mmu_update_t rx_mmu[NET_RX_RING_SIZE]; -- static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE]; -- static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE]; -- static DECLARE_BITMAP(rx_notify, NR_DYNIRQS); --#if NR_DYNIRQS <= 0x10000 -- static u16 notify_list[NET_RX_RING_SIZE]; --#else -- static int notify_list[NET_RX_RING_SIZE]; --#endif -- static struct netbk_rx_meta meta[NET_RX_RING_SIZE]; -+ struct xen_netbk *netbk = &xen_netbk[group]; - - struct netrx_pending_operations npo = { -- mmu: rx_mmu, -- trans: grant_trans_op, -- copy: grant_copy_op, -- mcl: rx_mcl, -- meta: meta}; -+ .mmu = netbk->rx_mmu, -+ .trans = netbk->grant_trans_op, -+ .copy = netbk->grant_copy_op, -+ .mcl = netbk->rx_mcl, -+ .meta = netbk->meta, -+ }; - - skb_queue_head_init(&rxq); - - count = 0; - -- while ((skb = skb_dequeue(&rx_queue)) != NULL) { -+ while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) { - nr_frags = skb_shinfo(skb)->nr_frags; - *(int *)skb->cb = nr_frags; - - if (!xen_feature(XENFEAT_auto_translated_physmap) && - !((netif_t *)netdev_priv(skb->dev))->copying_receiver && -- check_mfn(nr_frags + 1)) { -+ check_mfn(netbk, nr_frags + 1)) { - /* Memory squeeze? Back off for an arbitrary while. 
*/ - if ( net_ratelimit() ) - WPRINTK("Memory squeeze in netback " - "driver.\n"); -- mod_timer(&net_timer, jiffies + HZ); -- skb_queue_head(&rx_queue, skb); -+ mod_timer(&netbk->net_timer, jiffies + HZ); -+ skb_queue_head(&netbk->rx_queue, skb); - break; - } - -@@ -639,39 +621,39 @@ static void net_rx_action(unsigned long - break; - } - -- BUG_ON(npo.meta_prod > ARRAY_SIZE(meta)); -+ BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta)); - - npo.mmu_mcl = npo.mcl_prod; - if (npo.mcl_prod) { - BUG_ON(xen_feature(XENFEAT_auto_translated_physmap)); -- BUG_ON(npo.mmu_prod > ARRAY_SIZE(rx_mmu)); -+ BUG_ON(npo.mmu_prod > ARRAY_SIZE(netbk->rx_mmu)); - mcl = npo.mcl + npo.mcl_prod++; - - BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping); - mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; - - mcl->op = __HYPERVISOR_mmu_update; -- mcl->args[0] = (unsigned long)rx_mmu; -+ mcl->args[0] = (unsigned long)netbk->rx_mmu; - mcl->args[1] = npo.mmu_prod; - mcl->args[2] = 0; - mcl->args[3] = DOMID_SELF; - } - - if (npo.trans_prod) { -- BUG_ON(npo.trans_prod > ARRAY_SIZE(grant_trans_op)); -+ BUG_ON(npo.trans_prod > ARRAY_SIZE(netbk->grant_trans_op)); - mcl = npo.mcl + npo.mcl_prod++; - mcl->op = __HYPERVISOR_grant_table_op; - mcl->args[0] = GNTTABOP_transfer; -- mcl->args[1] = (unsigned long)grant_trans_op; -+ mcl->args[1] = (unsigned long)netbk->grant_trans_op; - mcl->args[2] = npo.trans_prod; - } - - if (npo.copy_prod) { -- BUG_ON(npo.copy_prod > ARRAY_SIZE(grant_copy_op)); -+ BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op)); - mcl = npo.mcl + npo.mcl_prod++; - mcl->op = __HYPERVISOR_grant_table_op; - mcl->args[0] = GNTTABOP_copy; -- mcl->args[1] = (unsigned long)grant_copy_op; -+ mcl->args[1] = (unsigned long)netbk->grant_copy_op; - mcl->args[2] = npo.copy_prod; - } - -@@ -679,7 +661,7 @@ static void net_rx_action(unsigned long - if (!npo.mcl_prod) - return; - -- BUG_ON(npo.mcl_prod > ARRAY_SIZE(rx_mcl)); -+ BUG_ON(npo.mcl_prod > ARRAY_SIZE(netbk->rx_mcl)); - - 
ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod); - BUG_ON(ret != 0); -@@ -705,13 +687,13 @@ static void net_rx_action(unsigned long - atomic_set(&(skb_shinfo(skb)->dataref), 1); - skb_shinfo(skb)->frag_list = NULL; - skb_shinfo(skb)->nr_frags = 0; -- netbk_free_pages(nr_frags, meta + npo.meta_cons + 1); -+ netbk_free_pages(nr_frags, netbk->meta + npo.meta_cons + 1); - } - - skb->dev->stats.tx_bytes += skb->len; - skb->dev->stats.tx_packets++; - -- id = meta[npo.meta_cons].id; -+ id = netbk->meta[npo.meta_cons].id; - flags = nr_frags ? NETRXF_more_data : 0; - - switch (skb->ip_summed) { -@@ -723,14 +705,14 @@ static void net_rx_action(unsigned long - break; - } - -- if (meta[npo.meta_cons].copy) -+ if (netbk->meta[npo.meta_cons].copy) - offset = 0; - else - offset = offset_in_page(skb->data); - resp = make_rx_response(netif, id, status, offset, - skb_headlen(skb), flags); - -- if (meta[npo.meta_cons].frag.size) { -+ if (netbk->meta[npo.meta_cons].frag.size) { - struct netif_extra_info *gso = - (struct netif_extra_info *) - RING_GET_RESPONSE(&netif->rx, -@@ -738,7 +720,7 @@ static void net_rx_action(unsigned long - - resp->flags |= NETRXF_extra_info; - -- gso->u.gso.size = meta[npo.meta_cons].frag.size; -+ gso->u.gso.size = netbk->meta[npo.meta_cons].frag.size; - gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; - gso->u.gso.pad = 0; - gso->u.gso.features = 0; -@@ -748,13 +730,13 @@ static void net_rx_action(unsigned long - } - - netbk_add_frag_responses(netif, status, -- meta + npo.meta_cons + 1, -+ netbk->meta + npo.meta_cons + 1, - nr_frags); - - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret); - irq = netif->irq - DYNIRQ_BASE; -- if (ret && !__test_and_set_bit(irq, rx_notify)) -- notify_list[notify_nr++] = irq; -+ if (ret && !__test_and_set_bit(irq, netbk->rx_notify)) -+ netbk->notify_list[notify_nr++] = irq; - - if (netif_queue_stopped(netif->dev) && - netif_schedulable(netif) && -@@ -768,38 +750,39 @@ static void net_rx_action(unsigned long - } - - if 
(notify_nr == 1) { -- irq = *notify_list; -- __clear_bit(irq, rx_notify); -+ irq = *netbk->notify_list; -+ __clear_bit(irq, netbk->rx_notify); - notify_remote_via_irq(irq + DYNIRQ_BASE); - } else { - for (count = ret = 0; ret < notify_nr; ++ret) { -- irq = notify_list[ret]; -- __clear_bit(irq, rx_notify); -- if (!multi_notify_remote_via_irq(rx_mcl + count, -+ irq = netbk->notify_list[ret]; -+ __clear_bit(irq, netbk->rx_notify); -+ if (!multi_notify_remote_via_irq(netbk->rx_mcl + count, - irq + DYNIRQ_BASE)) - ++count; - } -- if (HYPERVISOR_multicall(rx_mcl, count)) -+ if (HYPERVISOR_multicall(netbk->rx_mcl, count)) - BUG(); - } - - /* More work to do? */ -- if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer)) -- tasklet_schedule(&net_rx_tasklet); -+ if (!skb_queue_empty(&netbk->rx_queue) && -+ !timer_pending(&netbk->net_timer)) -+ tasklet_schedule(&netbk->net_rx_tasklet); - #if 0 - else - xen_network_done_notify(); - #endif - } - --static void net_alarm(unsigned long unused) -+static void net_alarm(unsigned long group) - { -- tasklet_schedule(&net_rx_tasklet); -+ tasklet_schedule(&xen_netbk[group].net_rx_tasklet); - } - --static void netbk_tx_pending_timeout(unsigned long unused) -+static void netbk_tx_pending_timeout(unsigned long group) - { -- tasklet_schedule(&net_tx_tasklet); -+ tasklet_schedule(&xen_netbk[group].net_tx_tasklet); - } - - static int __on_net_schedule_list(netif_t *netif) -@@ -807,7 +790,7 @@ static int __on_net_schedule_list(netif_ - return netif->list.next != NULL; - } - --/* Must be called with net_schedule_list_lock held. */ -+/* Must be called with netbk->schedule_list_lock held. 
*/ - static void remove_from_net_schedule_list(netif_t *netif) - { - if (likely(__on_net_schedule_list(netif))) { -@@ -817,34 +800,35 @@ static void remove_from_net_schedule_lis - } - } - --static netif_t *poll_net_schedule_list(void) -+static netif_t *poll_net_schedule_list(struct xen_netbk *netbk) - { - netif_t *netif = NULL; - -- spin_lock_irq(&net_schedule_list_lock); -- if (!list_empty(&net_schedule_list)) { -- netif = list_first_entry(&net_schedule_list, netif_t, list); -+ spin_lock_irq(&netbk->schedule_list_lock); -+ if (!list_empty(&netbk->schedule_list)) { -+ netif = list_first_entry(&netbk->schedule_list, netif_t, list); - netif_get(netif); - remove_from_net_schedule_list(netif); - } -- spin_unlock_irq(&net_schedule_list_lock); -+ spin_unlock_irq(&netbk->schedule_list_lock); - return netif; - } - - static void add_to_net_schedule_list_tail(netif_t *netif) - { -+ struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)]; - unsigned long flags; - - if (__on_net_schedule_list(netif)) - return; - -- spin_lock_irqsave(&net_schedule_list_lock, flags); -+ spin_lock_irqsave(&netbk->schedule_list_lock, flags); - if (!__on_net_schedule_list(netif) && - likely(netif_schedulable(netif))) { -- list_add_tail(&netif->list, &net_schedule_list); -+ list_add_tail(&netif->list, &netbk->schedule_list); - netif_get(netif); - } -- spin_unlock_irqrestore(&net_schedule_list_lock, flags); -+ spin_unlock_irqrestore(&netbk->schedule_list_lock, flags); - } - - /* -@@ -867,15 +851,17 @@ void netif_schedule_work(netif_t *netif) - - if (more_to_do) { - add_to_net_schedule_list_tail(netif); -- maybe_schedule_tx_action(); -+ maybe_schedule_tx_action(GET_GROUP_INDEX(netif)); - } - } - - void netif_deschedule_work(netif_t *netif) - { -- spin_lock_irq(&net_schedule_list_lock); -+ struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)]; -+ -+ spin_lock_irq(&netbk->schedule_list_lock); - remove_from_net_schedule_list(netif); -- spin_unlock_irq(&net_schedule_list_lock); -+ 
spin_unlock_irq(&netbk->schedule_list_lock); - } - - -@@ -906,17 +892,19 @@ static void tx_credit_callback(unsigned - netif_schedule_work(netif); - } - --static inline int copy_pending_req(PEND_RING_IDX pending_idx) -+static inline int copy_pending_req(struct xen_netbk *netbk, -+ pending_ring_idx_t pending_idx) - { -- return gnttab_copy_grant_page(grant_tx_handle[pending_idx], -- &mmap_pages[pending_idx]); -+ return gnttab_copy_grant_page(netbk->grant_tx_handle[pending_idx], -+ &netbk->mmap_pages[pending_idx]); - } - --static void permute_dealloc_ring(PEND_RING_IDX dc, PEND_RING_IDX dp) -+static void permute_dealloc_ring(u16 *dealloc_ring, pending_ring_idx_t dc, -+ pending_ring_idx_t dp) - { - static unsigned random_src = 0x12345678; - unsigned dst_offset; -- PEND_RING_IDX dest; -+ pending_ring_idx_t dest; - u16 tmp; - - while (dc != dp) { -@@ -931,67 +919,73 @@ static void permute_dealloc_ring(PEND_RI - } - } - --inline static void net_tx_action_dealloc(void) -+inline static void net_tx_action_dealloc(struct xen_netbk *netbk) - { - struct netbk_tx_pending_inuse *inuse, *n; - gnttab_unmap_grant_ref_t *gop; - u16 pending_idx; -- PEND_RING_IDX dc, dp; -+ pending_ring_idx_t dc, dp; - netif_t *netif; - LIST_HEAD(list); - -- dc = dealloc_cons; -- gop = tx_unmap_ops; -+ dc = netbk->dealloc_cons; -+ gop = netbk->tx_unmap_ops; - - /* - * Free up any grants we have finished using - */ - do { -- dp = dealloc_prod; -+ dp = netbk->dealloc_prod; - - /* Ensure we see all indices enqueued by netif_idx_release(). 
*/ - smp_rmb(); - - if (MODPARM_permute_returns) -- permute_dealloc_ring(dc, dp); -+ permute_dealloc_ring(netbk->dealloc_ring, dc, dp); - - while (dc != dp) { - unsigned long pfn; -+ struct netbk_tx_pending_inuse *pending_inuse = -+ netbk->pending_inuse; - -- pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)]; -+ pending_idx = netbk->dealloc_ring[MASK_PEND_IDX(dc++)]; - list_move_tail(&pending_inuse[pending_idx].list, &list); - -- pfn = idx_to_pfn(pending_idx); -+ pfn = idx_to_pfn(netbk, pending_idx); - /* Already unmapped? */ - if (!phys_to_machine_mapping_valid(pfn)) - continue; - -- gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx), -+ gnttab_set_unmap_op(gop, idx_to_kaddr(netbk, pending_idx), - GNTMAP_host_map, -- grant_tx_handle[pending_idx]); -+ netbk->grant_tx_handle[pending_idx]); - gop++; - } - -- } while (dp != dealloc_prod); -+ } while (dp != netbk->dealloc_prod); - -- dealloc_cons = dc; -+ netbk->dealloc_cons = dc; - - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, -- tx_unmap_ops, gop - tx_unmap_ops)) -+ netbk->tx_unmap_ops, -+ gop - netbk->tx_unmap_ops)) - BUG(); - - /* Copy any entries that have been pending for too long. 
*/ - if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB && -- !list_empty(&pending_inuse_head)) { -- list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) { -+ !list_empty(&netbk->pending_inuse_head)) { -+ list_for_each_entry_safe(inuse, n, &netbk->pending_inuse_head, list) { -+ struct pending_tx_info *pending_tx_info -+ = netbk->pending_tx_info; -+ - if (time_after(inuse->alloc_time + HZ / 2, jiffies)) - break; - -- pending_idx = inuse - pending_inuse; -+ pending_idx = inuse - netbk->pending_inuse; - - pending_tx_info[pending_idx].netif->nr_copied_skbs++; - -- switch (copy_pending_req(pending_idx)) { -+ switch (copy_pending_req(netbk, pending_idx)) { - case 0: - list_move_tail(&inuse->list, &list); - continue; -@@ -1007,17 +1001,20 @@ inline static void net_tx_action_dealloc - } - - list_for_each_entry_safe(inuse, n, &list, list) { -- pending_idx = inuse - pending_inuse; -+ struct pending_tx_info *pending_tx_info = -+ netbk->pending_tx_info; - -+ pending_idx = inuse - netbk->pending_inuse; - netif = pending_tx_info[pending_idx].netif; - - make_tx_response(netif, &pending_tx_info[pending_idx].req, - NETIF_RSP_OKAY); - - /* Ready for next use. 
*/ -- gnttab_reset_grant_page(mmap_pages[pending_idx]); -+ gnttab_reset_grant_page(netbk->mmap_pages[pending_idx]); - -- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; -+ netbk->pending_ring[MASK_PEND_IDX(netbk->pending_prod++)] = -+ pending_idx; - - netif_put(netif); - -@@ -1094,9 +1091,14 @@ static gnttab_map_grant_ref_t *netbk_get - start = ((unsigned long)shinfo->frags[0].page == pending_idx); - - for (i = start; i < shinfo->nr_frags; i++, txp++) { -- pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)]; -+ struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)]; -+ pending_ring_idx_t index = MASK_PEND_IDX(netbk->pending_cons++); -+ struct pending_tx_info *pending_tx_info = -+ netbk->pending_tx_info; - -- gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx), -+ pending_idx = netbk->pending_ring[index]; -+ -+ gnttab_set_map_op(mop++, idx_to_kaddr(netbk, pending_idx), - GNTMAP_host_map | GNTMAP_readonly, - txp->gref, netif->domid); - -@@ -1109,11 +1111,12 @@ static gnttab_map_grant_ref_t *netbk_get - return mop; - } - --static int netbk_tx_check_mop(struct sk_buff *skb, -- gnttab_map_grant_ref_t **mopp) -+static int netbk_tx_check_mop(struct xen_netbk *netbk, struct sk_buff *skb, -+ gnttab_map_grant_ref_t **mopp) - { - gnttab_map_grant_ref_t *mop = *mopp; - int pending_idx = *((u16 *)skb->data); -+ struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; - netif_t *netif = pending_tx_info[pending_idx].netif; - netif_tx_request_t *txp; - struct skb_shared_info *shinfo = skb_shinfo(skb); -@@ -1123,14 +1126,16 @@ static int netbk_tx_check_mop(struct sk_ - /* Check status of header. 
*/ - err = mop->status; - if (unlikely(err != GNTST_okay)) { -+ pending_ring_idx_t index = MASK_PEND_IDX(netbk->pending_prod++); -+ - txp = &pending_tx_info[pending_idx].req; - make_tx_response(netif, txp, NETIF_RSP_ERROR); -- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; -+ netbk->pending_ring[index] = pending_idx; - netif_put(netif); - } else { -- set_phys_to_machine(idx_to_pfn(pending_idx), -+ set_phys_to_machine(idx_to_pfn(netbk, pending_idx), - FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT)); -- grant_tx_handle[pending_idx] = mop->handle; -+ netbk->grant_tx_handle[pending_idx] = mop->handle; - } - - /* Skip first skb fragment if it is on same page as header fragment. */ -@@ -1138,25 +1143,27 @@ static int netbk_tx_check_mop(struct sk_ - - for (i = start; i < nr_frags; i++) { - int j, newerr; -+ pending_ring_idx_t index; - - pending_idx = (unsigned long)shinfo->frags[i].page; - - /* Check error status: if okay then remember grant handle. */ - newerr = (++mop)->status; - if (likely(newerr == GNTST_okay)) { -- set_phys_to_machine(idx_to_pfn(pending_idx), -+ set_phys_to_machine(idx_to_pfn(netbk, pending_idx), - FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT)); -- grant_tx_handle[pending_idx] = mop->handle; -+ netbk->grant_tx_handle[pending_idx] = mop->handle; - /* Had a previous error? Invalidate this fragment. */ - if (unlikely(err != GNTST_okay)) -- netif_idx_release(pending_idx); -+ netif_idx_release(netbk, pending_idx); - continue; - } - - /* Error on this fragment: respond to client with an error. */ - txp = &pending_tx_info[pending_idx].req; - make_tx_response(netif, txp, NETIF_RSP_ERROR); -- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; -+ index = MASK_PEND_IDX(netbk->pending_prod++); -+ netbk->pending_ring[index] = pending_idx; - netif_put(netif); - - /* Not the first error? Preceding frags already invalidated. 
*/ -@@ -1165,10 +1172,10 @@ static int netbk_tx_check_mop(struct sk_ - - /* First error: invalidate header and preceding fragments. */ - pending_idx = *((u16 *)skb->data); -- netif_idx_release(pending_idx); -+ netif_idx_release(netbk, pending_idx); - for (j = start; j < i; j++) { - pending_idx = (unsigned long)shinfo->frags[i].page; -- netif_idx_release(pending_idx); -+ netif_idx_release(netbk, pending_idx); - } - - /* Remember the error: invalidate all subsequent fragments. */ -@@ -1179,7 +1186,7 @@ static int netbk_tx_check_mop(struct sk_ - return err; - } - --static void netbk_fill_frags(struct sk_buff *skb) -+static void netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) - { - struct skb_shared_info *shinfo = skb_shinfo(skb); - int nr_frags = shinfo->nr_frags; -@@ -1192,12 +1199,12 @@ static void netbk_fill_frags(struct sk_b - - pending_idx = (unsigned long)frag->page; - -- pending_inuse[pending_idx].alloc_time = jiffies; -- list_add_tail(&pending_inuse[pending_idx].list, -- &pending_inuse_head); -+ netbk->pending_inuse[pending_idx].alloc_time = jiffies; -+ list_add_tail(&netbk->pending_inuse[pending_idx].list, -+ &netbk->pending_inuse_head); - -- txp = &pending_tx_info[pending_idx].req; -- frag->page = mmap_pages[pending_idx]; -+ txp = &netbk->pending_tx_info[pending_idx].req; -+ frag->page = netbk->mmap_pages[pending_idx]; - frag->size = txp->size; - frag->page_offset = txp->offset; - -@@ -1259,8 +1266,9 @@ static int netbk_set_skb_gso(struct sk_b - } - - /* Called after netfront has transmitted */ --static void net_tx_action(unsigned long unused) -+static void net_tx_action(unsigned long group) - { -+ struct xen_netbk *netbk = &xen_netbk[group]; - struct sk_buff *skb; - netif_t *netif; - netif_tx_request_t txreq; -@@ -1272,14 +1280,14 @@ static void net_tx_action(unsigned long - unsigned int data_len; - int ret, work_to_do; - -- net_tx_action_dealloc(); -+ net_tx_action_dealloc(netbk); - -- mop = tx_map_ops; -+ mop = netbk->tx_map_ops; - 
BUILD_BUG_ON(MAX_SKB_FRAGS >= MAX_PENDING_REQS); -- while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && -- !list_empty(&net_schedule_list)) { -+ while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && -+ !list_empty(&netbk->schedule_list)) { - /* Get a netif from the list with work to do. */ -- netif = poll_net_schedule_list(); -+ netif = poll_net_schedule_list(netbk); - if (!netif) - continue; - -@@ -1361,7 +1369,7 @@ static void net_tx_action(unsigned long - continue; - } - -- pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)]; -+ pending_idx = netbk->pending_ring[MASK_PEND_IDX(netbk->pending_cons)]; - - data_len = (txreq.size > PKT_PROT_LEN && - ret < MAX_SKB_FRAGS) ? -@@ -1389,14 +1397,14 @@ static void net_tx_action(unsigned long - } - } - -- gnttab_set_map_op(mop, idx_to_kaddr(pending_idx), -+ gnttab_set_map_op(mop, idx_to_kaddr(netbk, pending_idx), - GNTMAP_host_map | GNTMAP_readonly, - txreq.gref, netif->domid); - mop++; - -- memcpy(&pending_tx_info[pending_idx].req, -+ memcpy(&netbk->pending_tx_info[pending_idx].req, - &txreq, sizeof(txreq)); -- pending_tx_info[pending_idx].netif = netif; -+ netbk->pending_tx_info[pending_idx].netif = netif; - *((u16 *)skb->data) = pending_idx; - - __skb_put(skb, data_len); -@@ -1411,20 +1419,20 @@ static void net_tx_action(unsigned long - skb_shinfo(skb)->frags[0].page = (void *)~0UL; - } - -- __skb_queue_tail(&tx_queue, skb); -+ __skb_queue_tail(&netbk->tx_queue, skb); - -- pending_cons++; -+ netbk->pending_cons++; - - mop = netbk_get_requests(netif, skb, txfrags, mop); - - netif->tx.req_cons = i; - netif_schedule_work(netif); - -- if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops)) -+ if ((mop - netbk->tx_map_ops) >= ARRAY_SIZE(netbk->tx_map_ops)) - break; - } - -- if (mop == tx_map_ops) -+ if (mop == netbk->tx_map_ops) - goto out; - - /* NOTE: some maps may fail with GNTST_eagain, which could be successfully -@@ -1432,22 +1440,23 @@ static void net_tx_action(unsigned long - * req and let 
the frontend resend the relevant packet again. This is fine - * because it is unlikely that a network buffer will be paged out or shared, - * and therefore it is unlikely to fail with GNTST_eagain. */ -- ret = HYPERVISOR_grant_table_op( -- GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops); -+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, -+ netbk->tx_map_ops, -+ mop - netbk->tx_map_ops); - BUG_ON(ret); - -- mop = tx_map_ops; -- while ((skb = __skb_dequeue(&tx_queue)) != NULL) { -+ mop = netbk->tx_map_ops; -+ while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) { - struct net_device *dev; - netif_tx_request_t *txp; - - pending_idx = *((u16 *)skb->data); -- netif = pending_tx_info[pending_idx].netif; -+ netif = netbk->pending_tx_info[pending_idx].netif; - dev = netif->dev; -- txp = &pending_tx_info[pending_idx].req; -+ txp = &netbk->pending_tx_info[pending_idx].req; - - /* Check the remap error code. */ -- if (unlikely(netbk_tx_check_mop(skb, &mop))) { -+ if (unlikely(netbk_tx_check_mop(netbk, skb, &mop))) { - DPRINTK("netback grant failed.\n"); - skb_shinfo(skb)->nr_frags = 0; - kfree_skb(skb); -@@ -1457,7 +1466,7 @@ static void net_tx_action(unsigned long - - data_len = skb->len; - memcpy(skb->data, -- (void *)(idx_to_kaddr(pending_idx)|txp->offset), -+ (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset), - data_len); - if (data_len < txp->size) { - /* Append the packet payload as a fragment. */ -@@ -1465,7 +1474,7 @@ static void net_tx_action(unsigned long - txp->size -= data_len; - } else { - /* Schedule a response immediately. 
*/ -- netif_idx_release(pending_idx); -+ netif_idx_release(netbk, pending_idx); - } - - if (txp->flags & NETTXF_csum_blank) -@@ -1475,7 +1484,7 @@ static void net_tx_action(unsigned long - else - skb->ip_summed = CHECKSUM_NONE; - -- netbk_fill_frags(skb); -+ netbk_fill_frags(netbk, skb); - - /* - * If the initial fragment was < PKT_PROT_LEN then -@@ -1511,36 +1520,39 @@ static void net_tx_action(unsigned long - - out: - if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB && -- !list_empty(&pending_inuse_head)) { -+ !list_empty(&netbk->pending_inuse_head)) { - struct netbk_tx_pending_inuse *oldest; - -- oldest = list_entry(pending_inuse_head.next, -+ oldest = list_entry(netbk->pending_inuse_head.next, - struct netbk_tx_pending_inuse, list); -- mod_timer(&netbk_tx_pending_timer, oldest->alloc_time + HZ); -+ mod_timer(&netbk->tx_pending_timer, oldest->alloc_time + HZ); - } - } - --static void netif_idx_release(u16 pending_idx) -+static void netif_idx_release(struct xen_netbk *netbk, u16 pending_idx) - { -- static DEFINE_SPINLOCK(_lock); - unsigned long flags; - -- spin_lock_irqsave(&_lock, flags); -- dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx; -+ spin_lock_irqsave(&netbk->release_lock, flags); -+ netbk->dealloc_ring[MASK_PEND_IDX(netbk->dealloc_prod)] = pending_idx; - /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. 
*/ - smp_wmb(); -- dealloc_prod++; -- spin_unlock_irqrestore(&_lock, flags); -+ netbk->dealloc_prod++; -+ spin_unlock_irqrestore(&netbk->release_lock, flags); - -- tasklet_schedule(&net_tx_tasklet); -+ tasklet_schedule(&netbk->net_tx_tasklet); - } - - static void netif_page_release(struct page *page, unsigned int order) - { -- int idx = netif_page_index(page); -+ unsigned int idx = netif_page_index(page); -+ unsigned int group = netif_page_group(page); -+ struct xen_netbk *netbk = &xen_netbk[group]; -+ - BUG_ON(order); -- BUG_ON(idx < 0); -- netif_idx_release(idx); -+ BUG_ON(group >= netbk_nr_groups || idx >= MAX_PENDING_REQS); -+ BUG_ON(netbk->mmap_pages[idx] != page); -+ netif_idx_release(netbk, idx); - } - - irqreturn_t netif_be_int(int irq, void *dev_id) -@@ -1548,7 +1560,7 @@ irqreturn_t netif_be_int(int irq, void * - netif_t *netif = dev_id; - - add_to_net_schedule_list_tail(netif); -- maybe_schedule_tx_action(); -+ maybe_schedule_tx_action(GET_GROUP_INDEX(netif)); - - if (netif_schedulable(netif) && !netbk_queue_full(netif)) - netif_wake_queue(netif->dev); -@@ -1612,33 +1624,38 @@ static netif_rx_response_t *make_rx_resp - #ifdef NETBE_DEBUG_INTERRUPT - static irqreturn_t netif_be_dbg(int irq, void *dev_id) - { -- struct list_head *ent; - netif_t *netif; -- int i = 0; -+ unsigned int i = 0, group; - - pr_alert("netif_schedule_list:\n"); -- spin_lock_irq(&net_schedule_list_lock); - -- list_for_each (ent, &net_schedule_list) { -- netif = list_entry(ent, netif_t, list); -- pr_alert(" %d: private(rx_req_cons=%08x " -- "rx_resp_prod=%08x\n", -- i, netif->rx.req_cons, netif->rx.rsp_prod_pvt); -- pr_alert(" tx_req_cons=%08x tx_resp_prod=%08x)\n", -- netif->tx.req_cons, netif->tx.rsp_prod_pvt); -- pr_alert(" shared(rx_req_prod=%08x " -- "rx_resp_prod=%08x\n", -- netif->rx.sring->req_prod, netif->rx.sring->rsp_prod); -- pr_alert(" rx_event=%08x tx_req_prod=%08x\n", -- netif->rx.sring->rsp_event, -- netif->tx.sring->req_prod); -- pr_alert(" tx_resp_prod=%08x, 
tx_event=%08x)\n", -- netif->tx.sring->rsp_prod, -- netif->tx.sring->rsp_event); -- i++; -+ for (group = 0; group < netbk_nr_groups; ++group) { -+ struct xen_netbk *netbk = &xen_netbk[group]; -+ -+ spin_lock_irq(&netbk->schedule_list_lock); -+ -+ list_for_each_entry(netif, &netbk->schedule_list, list) { -+ pr_alert(" %d: private(rx_req_cons=%08x " -+ "rx_resp_prod=%08x\n", i, -+ netif->rx.req_cons, netif->rx.rsp_prod_pvt); -+ pr_alert(" tx_req_cons=%08x tx_resp_prod=%08x)\n", -+ netif->tx.req_cons, netif->tx.rsp_prod_pvt); -+ pr_alert(" shared(rx_req_prod=%08x " -+ "rx_resp_prod=%08x\n", -+ netif->rx.sring->req_prod, -+ netif->rx.sring->rsp_prod); -+ pr_alert(" rx_event=%08x tx_req_prod=%08x\n", -+ netif->rx.sring->rsp_event, -+ netif->tx.sring->req_prod); -+ pr_alert(" tx_resp_prod=%08x, tx_event=%08x)\n", -+ netif->tx.sring->rsp_prod, -+ netif->tx.sring->rsp_event); -+ i++; -+ } -+ -+ spin_unlock_irq(&netbk->netbk->schedule_list_lock); - } - -- spin_unlock_irq(&net_schedule_list_lock); - pr_alert(" ** End of netif_schedule_list **\n"); - - return IRQ_HANDLED; -@@ -1653,46 +1670,66 @@ static struct irqaction netif_be_dbg_act - - static int __init netback_init(void) - { -- int i; -+ unsigned int i, group; -+ int rc; - struct page *page; - - if (!is_running_on_xen()) - return -ENODEV; - -+ xen_netbk = __vmalloc(netbk_nr_groups * sizeof(*xen_netbk), -+ GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO, -+ PAGE_KERNEL); -+ if (!xen_netbk) { -+ pr_err("%s: out of memory\n", __func__); -+ return -ENOMEM; -+ } -+ - /* We can increase reservation by this much in net_rx_action(). 
*/ -- balloon_update_driver_allowance(NET_RX_RING_SIZE); -+ balloon_update_driver_allowance(netbk_nr_groups * NET_RX_RING_SIZE); - -- skb_queue_head_init(&rx_queue); -- skb_queue_head_init(&tx_queue); -+ for (group = 0; group < netbk_nr_groups; group++) { -+ struct xen_netbk *netbk = &xen_netbk[group]; - -- init_timer(&net_timer); -- net_timer.data = 0; -- net_timer.function = net_alarm; -- -- init_timer(&netbk_tx_pending_timer); -- netbk_tx_pending_timer.data = 0; -- netbk_tx_pending_timer.function = netbk_tx_pending_timeout; -- -- mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS); -- if (mmap_pages == NULL) { -- pr_err("%s: out of memory\n", __FUNCTION__); -- return -ENOMEM; -- } -+ tasklet_init(&netbk->net_tx_tasklet, net_tx_action, group); -+ tasklet_init(&netbk->net_rx_tasklet, net_rx_action, group); - -- for (i = 0; i < MAX_PENDING_REQS; i++) { -- page = mmap_pages[i]; -- SetPageForeign(page, netif_page_release); -- netif_set_page_index(page, i); -- INIT_LIST_HEAD(&pending_inuse[i].list); -- } -+ skb_queue_head_init(&netbk->rx_queue); -+ skb_queue_head_init(&netbk->tx_queue); -+ -+ init_timer(&netbk->net_timer); -+ netbk->net_timer.data = group; -+ netbk->net_timer.function = net_alarm; -+ -+ init_timer(&netbk->tx_pending_timer); -+ netbk->tx_pending_timer.data = group; -+ netbk->tx_pending_timer.function = -+ netbk_tx_pending_timeout; -+ -+ netbk->pending_prod = MAX_PENDING_REQS; - -- pending_cons = 0; -- pending_prod = MAX_PENDING_REQS; -- for (i = 0; i < MAX_PENDING_REQS; i++) -- pending_ring[i] = i; -+ INIT_LIST_HEAD(&netbk->pending_inuse_head); -+ INIT_LIST_HEAD(&netbk->schedule_list); - -- spin_lock_init(&net_schedule_list_lock); -- INIT_LIST_HEAD(&net_schedule_list); -+ spin_lock_init(&netbk->schedule_list_lock); -+ spin_lock_init(&netbk->release_lock); -+ -+ netbk->mmap_pages = -+ alloc_empty_pages_and_pagevec(MAX_PENDING_REQS); -+ if (netbk->mmap_pages == NULL) { -+ pr_err("%s: out of memory\n", __func__); -+ rc = -ENOMEM; -+ goto 
failed_init; -+ } -+ -+ for (i = 0; i < MAX_PENDING_REQS; i++) { -+ page = netbk->mmap_pages[i]; -+ SetPageForeign(page, netif_page_release); -+ netif_set_page_ext(page, group, i); -+ netbk->pending_ring[i] = i; -+ INIT_LIST_HEAD(&netbk->pending_inuse[i].list); -+ } -+ } - - netbk_copy_skb_mode = NETBK_DONT_COPY_SKB; - if (MODPARM_copy_skb) { -@@ -1714,6 +1751,19 @@ static int __init netback_init(void) - #endif - - return 0; -+ -+failed_init: -+ while (group-- > 0) { -+ struct xen_netbk *netbk = &xen_netbk[group]; -+ -+ free_empty_pages_and_pagevec(netbk->mmap_pages, -+ MAX_PENDING_REQS); -+ } -+ vfree(xen_netbk); -+ balloon_update_driver_allowance(-(long)netbk_nr_groups -+ * NET_RX_RING_SIZE); -+ -+ return rc; - } - - module_init(netback_init); diff --git a/patches.xen/xen-netback-kernel-threads b/patches.xen/xen-netback-kernel-threads deleted file mode 100644 index 0284cbc..0000000 --- a/patches.xen/xen-netback-kernel-threads +++ /dev/null @@ -1,320 +0,0 @@ -From: Dongxiao Xu -Subject: [PATCH 3/3] Use Kernel thread to replace the tasklet. -Patch-mainline: n/a - - Kernel thread has more control over QoS, and could improve - dom0's userspace responseness. - -Signed-off-by: Dongxiao Xu - -Subject: xen: ensure locking gnttab_copy_grant_page is safe against interrupts. - -Now that netback processing occurs in a thread instead of a tasklet -gnttab_copy_grant_page needs to be safe against interrupts. - -The code is currently commented out in this tree but on 2.6.18 we observed a -deadlock where the netback thread called gnttab_copy_grant_page, locked -gnttab_dma_lock for writing, was interrupted and on return from interrupt the -network stack's TX tasklet ended up calling __gnttab_dma_map_page via the -hardware driver->swiotlb and tries to take gnttab_dma_lock for reading. - -Signed-off-by: Ian Campbell -Cc: Jeremy Fitzhardinge # -Cc: "Xu, Dongxiao" - -Subject: Add a missing test to tx_work_todo. 
- -Add a test so that, when netback is using worker threads, net_tx_action() -gets called in a timely manner when the pending_inuse list is populated. - -Signed-off-by: Paul Durrant - -jb: changed write_seq{,un}lock_irq() to write_seq{,un}lock_bh(), and - made the use of kernel threads optional (but default) -Acked-by: jbeulich@novell.com - ---- head-2011-02-17.orig/drivers/xen/core/gnttab.c 2011-01-14 15:13:58.000000000 +0100 -+++ head-2011-02-17/drivers/xen/core/gnttab.c 2010-09-23 17:06:35.000000000 +0200 -@@ -552,14 +552,14 @@ int gnttab_copy_grant_page(grant_ref_t r - mfn = pfn_to_mfn(pfn); - new_mfn = virt_to_mfn(new_addr); - -- write_seqlock(&gnttab_dma_lock); -+ write_seqlock_bh(&gnttab_dma_lock); - - /* Make seq visible before checking page_mapped. */ - smp_mb(); - - /* Has the page been DMA-mapped? */ - if (unlikely(page_mapped(page))) { -- write_sequnlock(&gnttab_dma_lock); -+ write_sequnlock_bh(&gnttab_dma_lock); - put_page(new_page); - err = -EBUSY; - goto out; -@@ -576,7 +576,7 @@ int gnttab_copy_grant_page(grant_ref_t r - BUG_ON(err); - BUG_ON(unmap.status != GNTST_okay); - -- write_sequnlock(&gnttab_dma_lock); -+ write_sequnlock_bh(&gnttab_dma_lock); - - if (!xen_feature(XENFEAT_auto_translated_physmap)) { - set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); ---- head-2011-02-17.orig/drivers/xen/netback/common.h 2011-02-09 16:21:50.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/common.h 2011-02-17 10:34:35.000000000 +0100 -@@ -241,8 +241,16 @@ struct netbk_tx_pending_inuse { - #define MAX_MFN_ALLOC 64 - - struct xen_netbk { -- struct tasklet_struct net_tx_tasklet; -- struct tasklet_struct net_rx_tasklet; -+ union { -+ struct { -+ struct tasklet_struct net_tx_tasklet; -+ struct tasklet_struct net_rx_tasklet; -+ }; -+ struct { -+ wait_queue_head_t netbk_action_wq; -+ struct task_struct *task; -+ }; -+ }; - - struct sk_buff_head rx_queue; - struct sk_buff_head tx_queue; ---- head-2011-02-17.orig/drivers/xen/netback/netback.c 
2011-03-01 11:53:33.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/netback.c 2011-03-02 13:33:15.000000000 +0100 -@@ -36,6 +36,7 @@ - - #include "common.h" - #include -+#include - #include - #include - #include -@@ -46,6 +47,8 @@ - - struct xen_netbk *__read_mostly xen_netbk; - unsigned int __read_mostly netbk_nr_groups; -+static bool __read_mostly use_kthreads = true; -+static bool __initdata bind_threads; - - #define GET_GROUP_INDEX(netif) ((netif)->group) - -@@ -137,7 +140,11 @@ static int MODPARM_permute_returns = 0; - module_param_named(permute_returns, MODPARM_permute_returns, bool, S_IRUSR|S_IWUSR); - MODULE_PARM_DESC(permute_returns, "Randomly permute the order in which TX responses are sent to the frontend"); - module_param_named(groups, netbk_nr_groups, uint, 0); --MODULE_PARM_DESC(groups, "Specify the number of tasklet pairs to use"); -+MODULE_PARM_DESC(groups, "Specify the number of tasklet pairs/threads to use"); -+module_param_named(tasklets, use_kthreads, invbool, 0); -+MODULE_PARM_DESC(tasklets, "Use tasklets instead of kernel threads"); -+module_param_named(bind, bind_threads, bool, 0); -+MODULE_PARM_DESC(bind, "Bind kernel threads to (v)CPUs"); - - int netbk_copy_skb_mode; - -@@ -168,6 +175,19 @@ static int check_mfn(struct xen_netbk *n - return netbk->alloc_index >= nr ? 
0 : -ENOMEM; - } - -+static void netbk_schedule(struct xen_netbk *netbk) -+{ -+ if (use_kthreads) -+ wake_up(&netbk->netbk_action_wq); -+ else -+ tasklet_schedule(&netbk->net_tx_tasklet); -+} -+ -+static void netbk_schedule_group(unsigned long group) -+{ -+ netbk_schedule(&xen_netbk[group]); -+} -+ - static inline void maybe_schedule_tx_action(unsigned int group) - { - struct xen_netbk *netbk = &xen_netbk[group]; -@@ -175,7 +195,7 @@ static inline void maybe_schedule_tx_act - smp_mb(); - if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) && - !list_empty(&netbk->schedule_list)) -- tasklet_schedule(&netbk->net_tx_tasklet); -+ netbk_schedule(netbk); - } - - static struct sk_buff *netbk_copy_skb(struct sk_buff *skb) -@@ -335,7 +355,7 @@ int netif_be_start_xmit(struct sk_buff * - - netbk = &xen_netbk[GET_GROUP_INDEX(netif)]; - skb_queue_tail(&netbk->rx_queue, skb); -- tasklet_schedule(&netbk->net_rx_tasklet); -+ netbk_schedule(netbk); - - return NETDEV_TX_OK; - -@@ -772,23 +792,13 @@ static void net_rx_action(unsigned long - /* More work to do? 
*/ - if (!skb_queue_empty(&netbk->rx_queue) && - !timer_pending(&netbk->net_timer)) -- tasklet_schedule(&netbk->net_rx_tasklet); -+ netbk_schedule(netbk); - #if 0 - else - xen_network_done_notify(); - #endif - } - --static void net_alarm(unsigned long group) --{ -- tasklet_schedule(&xen_netbk[group].net_rx_tasklet); --} -- --static void netbk_tx_pending_timeout(unsigned long group) --{ -- tasklet_schedule(&xen_netbk[group].net_tx_tasklet); --} -- - static int __on_net_schedule_list(netif_t *netif) - { - return netif->list.next != NULL; -@@ -1519,7 +1529,10 @@ static void net_tx_action(unsigned long - dev->stats.rx_bytes += skb->len; - dev->stats.rx_packets++; - -- netif_rx(skb); -+ if (use_kthreads) -+ netif_rx_ni(skb); -+ else -+ netif_rx(skb); - } - - out: -@@ -1544,7 +1557,7 @@ static void netif_idx_release(struct xen - netbk->dealloc_prod++; - spin_unlock_irqrestore(&netbk->release_lock, flags); - -- tasklet_schedule(&netbk->net_tx_tasklet); -+ netbk_schedule(netbk); - } - - static void netif_page_release(struct page *page, unsigned int order) -@@ -1683,6 +1696,50 @@ static struct irqaction netif_be_dbg_act - }; - #endif - -+static inline int rx_work_todo(struct xen_netbk *netbk) -+{ -+ return !skb_queue_empty(&netbk->rx_queue); -+} -+ -+static inline int tx_work_todo(struct xen_netbk *netbk) -+{ -+ if (netbk->dealloc_cons != netbk->dealloc_prod) -+ return 1; -+ -+ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB && -+ !list_empty(&netbk->pending_inuse_head)) -+ return 1; -+ -+ if (nr_pending_reqs(netbk) + MAX_SKB_FRAGS < MAX_PENDING_REQS && -+ !list_empty(&netbk->schedule_list)) -+ return 1; -+ -+ return 0; -+} -+ -+static int netbk_action_thread(void *index) -+{ -+ unsigned long group = (unsigned long)index; -+ struct xen_netbk *netbk = &xen_netbk[group]; -+ -+ while (!kthread_should_stop()) { -+ wait_event_interruptible(netbk->netbk_action_wq, -+ rx_work_todo(netbk) || -+ tx_work_todo(netbk) || -+ kthread_should_stop()); -+ cond_resched(); -+ -+ if 
(rx_work_todo(netbk)) -+ net_rx_action(group); -+ -+ if (tx_work_todo(netbk)) -+ net_tx_action(group); -+ } -+ -+ return 0; -+} -+ -+ - static int __init netback_init(void) - { - unsigned int i, group; -@@ -1717,20 +1774,16 @@ static int __init netback_init(void) - for (group = 0; group < netbk_nr_groups; group++) { - struct xen_netbk *netbk = &xen_netbk[group]; - -- tasklet_init(&netbk->net_tx_tasklet, net_tx_action, group); -- tasklet_init(&netbk->net_rx_tasklet, net_rx_action, group); -- - skb_queue_head_init(&netbk->rx_queue); - skb_queue_head_init(&netbk->tx_queue); - - init_timer(&netbk->net_timer); - netbk->net_timer.data = group; -- netbk->net_timer.function = net_alarm; -+ netbk->net_timer.function = netbk_schedule_group; - - init_timer(&netbk->tx_pending_timer); - netbk->tx_pending_timer.data = group; -- netbk->tx_pending_timer.function = -- netbk_tx_pending_timeout; -+ netbk->tx_pending_timer.function = netbk_schedule_group; - - netbk->pending_prod = MAX_PENDING_REQS; - -@@ -1755,6 +1808,25 @@ static int __init netback_init(void) - netbk->pending_ring[i] = i; - INIT_LIST_HEAD(&netbk->pending_inuse[i].list); - } -+ -+ if (use_kthreads) { -+ init_waitqueue_head(&netbk->netbk_action_wq); -+ netbk->task = kthread_create(netbk_action_thread, -+ (void *)(long)group, -+ "netback/%u", group); -+ -+ if (IS_ERR(netbk->task)) { -+ pr_alert("netback: kthread_create() failed\n"); -+ rc = PTR_ERR(netbk->task); -+ goto failed_init; -+ } -+ if (bind_threads) -+ kthread_bind(netbk->task, group); -+ wake_up_process(netbk->task); -+ } else { -+ tasklet_init(&netbk->net_tx_tasklet, net_tx_action, group); -+ tasklet_init(&netbk->net_rx_tasklet, net_rx_action, group); -+ } - } - - netbk_copy_skb_mode = NETBK_DONT_COPY_SKB; -@@ -1779,12 +1851,15 @@ static int __init netback_init(void) - return 0; - - failed_init: -- while (group-- > 0) { -+ do { - struct xen_netbk *netbk = &xen_netbk[group]; - -- free_empty_pages_and_pagevec(netbk->mmap_pages, -- MAX_PENDING_REQS); -- } -+ if 
(use_kthreads && netbk->task && !IS_ERR(netbk->task)) -+ kthread_stop(netbk->task); -+ if (netbk->mmap_pages) -+ free_empty_pages_and_pagevec(netbk->mmap_pages, -+ MAX_PENDING_REQS); -+ } while (group--); - vfree(xen_netbk); - balloon_update_driver_allowance(-(long)netbk_nr_groups - * NET_RX_RING_SIZE); diff --git a/patches.xen/xen-netback-multiple-tasklets b/patches.xen/xen-netback-multiple-tasklets deleted file mode 100644 index 16acbd1..0000000 --- a/patches.xen/xen-netback-multiple-tasklets +++ /dev/null @@ -1,183 +0,0 @@ -From: Dongxiao Xu -Subject: [PATCH 2/3] Netback: Multiple tasklets support. -Patch-mainline: n/a - - Now netback uses one pair of tasklets for Tx/Rx data transaction. Netback - tasklet could only run at one CPU at a time, and it is used to serve all the - netfronts. Therefore it has become a performance bottle neck. This patch is to - use multiple tasklet pairs to replace the current single pair in dom0. - - Assuming that Dom0 has CPUNR VCPUs, we define CPUNR kinds of tasklets pair - (CPUNR for Tx, and CPUNR for Rx). Each pare of tasklets serve specific group of - netfronts. Also for those global and static variables, we duplicated them for - each group in order to avoid the spinlock. - -Signed-off-by: Dongxiao Xu - -jb: some cleanups -Acked-by: jbeulich@novell.com - ---- head-2011-02-17.orig/drivers/xen/netback/common.h 2011-02-17 10:33:48.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/common.h 2011-02-09 16:21:50.000000000 +0100 -@@ -55,6 +55,7 @@ - typedef struct netif_st { - /* Unique identifier for this interface. 
*/ - domid_t domid; -+ unsigned int group; - unsigned int handle; - - u8 fe_dev_addr[6]; -@@ -262,6 +263,7 @@ struct xen_netbk { - - struct page **mmap_pages; - -+ atomic_t nr_groups; - unsigned int alloc_index; - - struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; -@@ -289,4 +291,8 @@ struct xen_netbk { - - unsigned long mfn_list[MAX_MFN_ALLOC]; - }; -+ -+extern struct xen_netbk *xen_netbk; -+extern unsigned int netbk_nr_groups; -+ - #endif /* __NETIF__BACKEND__COMMON_H__ */ ---- head-2011-02-17.orig/drivers/xen/netback/interface.c 2011-02-17 10:33:17.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/interface.c 2011-02-17 10:34:28.000000000 +0100 -@@ -54,14 +54,36 @@ module_param_named(queue_length, netbk_q - - static void __netif_up(netif_t *netif) - { -+ unsigned int group = 0; -+ unsigned int min_groups = atomic_read(&xen_netbk[0].nr_groups); -+ unsigned int i; -+ -+ /* Find the list which contains least number of domains. */ -+ for (i = 1; i < netbk_nr_groups; i++) { -+ unsigned int nr_groups = atomic_read(&xen_netbk[i].nr_groups); -+ -+ if (nr_groups < min_groups) { -+ group = i; -+ min_groups = nr_groups; -+ } -+ } -+ -+ atomic_inc(&xen_netbk[group].nr_groups); -+ netif->group = group; -+ - enable_irq(netif->irq); - netif_schedule_work(netif); - } - - static void __netif_down(netif_t *netif) - { -+ struct xen_netbk *netbk = xen_netbk + netif->group; -+ - disable_irq(netif->irq); - netif_deschedule_work(netif); -+ -+ netif->group = UINT_MAX; -+ atomic_dec(&netbk->nr_groups); - } - - static int net_open(struct net_device *dev) -@@ -250,6 +272,7 @@ netif_t *netif_alloc(struct device *pare - netif = netdev_priv(dev); - memset(netif, 0, sizeof(*netif)); - netif->domid = domid; -+ netif->group = UINT_MAX; - netif->handle = handle; - netif->can_sg = 1; - netif->csum = 1; ---- head-2011-02-17.orig/drivers/xen/netback/netback.c 2011-03-01 11:53:28.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/netback.c 2011-03-01 11:53:33.000000000 +0100 
-@@ -44,10 +44,10 @@ - - /*define NETBE_DEBUG_INTERRUPT*/ - --static struct xen_netbk *__read_mostly xen_netbk; --static const unsigned int netbk_nr_groups = 1; -+struct xen_netbk *__read_mostly xen_netbk; -+unsigned int __read_mostly netbk_nr_groups; - --#define GET_GROUP_INDEX(netif) (0) -+#define GET_GROUP_INDEX(netif) ((netif)->group) - - static void netif_idx_release(struct xen_netbk *, u16 pending_idx); - static void make_tx_response(netif_t *netif, -@@ -136,6 +136,8 @@ MODULE_PARM_DESC(copy_skb, "Copy data re - static int MODPARM_permute_returns = 0; - module_param_named(permute_returns, MODPARM_permute_returns, bool, S_IRUSR|S_IWUSR); - MODULE_PARM_DESC(permute_returns, "Randomly permute the order in which TX responses are sent to the frontend"); -+module_param_named(groups, netbk_nr_groups, uint, 0); -+MODULE_PARM_DESC(groups, "Specify the number of tasklet pairs to use"); - - int netbk_copy_skb_mode; - -@@ -406,11 +408,13 @@ static u16 netbk_gop_frag(netif_t *netif - (idx = netif_page_index(page)) < MAX_PENDING_REQS && - (group = netif_page_group(page)) < netbk_nr_groups) { - struct pending_tx_info *src_pend; -+ unsigned int grp; - - netbk = &xen_netbk[group]; - BUG_ON(netbk->mmap_pages[idx] != page); - src_pend = &netbk->pending_tx_info[idx]; -- BUG_ON(group != GET_GROUP_INDEX(src_pend->netif)); -+ grp = GET_GROUP_INDEX(src_pend->netif); -+ BUG_ON(group != grp && grp != UINT_MAX); - copy_gop->source.domid = src_pend->netif->domid; - copy_gop->source.u.ref = src_pend->req.gref; - copy_gop->flags |= GNTCOPY_source_gref; -@@ -1558,9 +1562,20 @@ static void netif_page_release(struct pa - irqreturn_t netif_be_int(int irq, void *dev_id) - { - netif_t *netif = dev_id; -+ unsigned int group = GET_GROUP_INDEX(netif); -+ -+ if (unlikely(group >= netbk_nr_groups)) { -+ /* -+ * Short of having a way to bind the IRQ in disabled mode -+ * (IRQ_NOAUTOEN), we have to ignore the first invocation(s) -+ * (before we got assigned to a group). 
-+ */ -+ BUG_ON(group != UINT_MAX); -+ return IRQ_HANDLED; -+ } - - add_to_net_schedule_list_tail(netif); -- maybe_schedule_tx_action(GET_GROUP_INDEX(netif)); -+ maybe_schedule_tx_action(group); - - if (netif_schedulable(netif) && !netbk_queue_full(netif)) - netif_wake_queue(netif->dev); -@@ -1677,13 +1692,24 @@ static int __init netback_init(void) - if (!is_running_on_xen()) - return -ENODEV; - -- xen_netbk = __vmalloc(netbk_nr_groups * sizeof(*xen_netbk), -- GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO, -- PAGE_KERNEL); -+ group = netbk_nr_groups; -+ if (!netbk_nr_groups) -+ netbk_nr_groups = (num_online_cpus() + 1) / 2; -+ if (netbk_nr_groups > MAX_GROUPS) -+ netbk_nr_groups = MAX_GROUPS; -+ -+ do { -+ xen_netbk = __vmalloc(netbk_nr_groups * sizeof(*xen_netbk), -+ GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO, -+ PAGE_KERNEL); -+ } while (!xen_netbk && (netbk_nr_groups >>= 1)); - if (!xen_netbk) { - pr_err("%s: out of memory\n", __func__); - return -ENOMEM; - } -+ if (group && netbk_nr_groups != group) -+ pr_warning("netback: only using %u (instead of %u) groups\n", -+ netbk_nr_groups, group); - - /* We can increase reservation by this much in net_rx_action(). */ - balloon_update_driver_allowance(netbk_nr_groups * NET_RX_RING_SIZE); diff --git a/patches.xen/xen-netback-notify-multi b/patches.xen/xen-netback-notify-multi deleted file mode 100644 index a716954..0000000 --- a/patches.xen/xen-netback-notify-multi +++ /dev/null @@ -1,91 +0,0 @@ -From: jbeulich@novell.com -Subject: netback: use multicall for send multiple notifications -Patch-mainline: obsolete - -This also yields a small fairness improvement since now notifications -get sent in the order requests came in rather than in the inverse one. 
- ---- head-2011-02-08.orig/drivers/xen/core/evtchn.c 2011-02-16 08:29:29.000000000 +0100 -+++ head-2011-02-08/drivers/xen/core/evtchn.c 2011-02-16 08:29:50.000000000 +0100 -@@ -1473,6 +1473,27 @@ void notify_remote_via_irq(int irq) - } - EXPORT_SYMBOL_GPL(notify_remote_via_irq); - -+#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) -+int multi_notify_remote_via_irq(multicall_entry_t *mcl, int irq) -+{ -+ const struct irq_cfg *cfg = irq_cfg(irq); -+ int evtchn; -+ -+ if (WARN_ON_ONCE(!cfg)) -+ return -EINVAL; -+ BUG_ON(type_from_irq_cfg(cfg) == IRQT_VIRQ); -+ BUG_IF_IPI(cfg); -+ -+ evtchn = evtchn_from_irq_cfg(cfg); -+ if (!VALID_EVTCHN(evtchn)) -+ return -EINVAL; -+ -+ multi_notify_remote_via_evtchn(mcl, evtchn); -+ return 0; -+} -+EXPORT_SYMBOL_GPL(multi_notify_remote_via_irq); -+#endif -+ - int irq_to_evtchn_port(int irq) - { - const struct irq_cfg *cfg = irq_cfg(irq); ---- head-2011-02-08.orig/drivers/xen/netback/netback.c 2011-01-03 13:30:08.000000000 +0100 -+++ head-2011-02-08/drivers/xen/netback/netback.c 2011-01-03 13:30:15.000000000 +0100 -@@ -767,10 +767,20 @@ static void net_rx_action(unsigned long - npo.meta_cons += nr_frags + 1; - } - -- while (notify_nr != 0) { -- irq = notify_list[--notify_nr]; -+ if (notify_nr == 1) { -+ irq = *notify_list; - __clear_bit(irq, rx_notify); - notify_remote_via_irq(irq + DYNIRQ_BASE); -+ } else { -+ for (count = ret = 0; ret < notify_nr; ++ret) { -+ irq = notify_list[ret]; -+ __clear_bit(irq, rx_notify); -+ if (!multi_notify_remote_via_irq(rx_mcl + count, -+ irq + DYNIRQ_BASE)) -+ ++count; -+ } -+ if (HYPERVISOR_multicall(rx_mcl, count)) -+ BUG(); - } - - /* More work to do? 
*/ ---- head-2011-02-08.orig/include/xen/evtchn.h 2010-11-23 16:18:23.000000000 +0100 -+++ head-2011-02-08/include/xen/evtchn.h 2010-11-23 16:20:08.000000000 +0100 -@@ -193,6 +193,18 @@ static inline void notify_remote_via_evt - VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); - } - -+static inline void -+multi_notify_remote_via_evtchn(multicall_entry_t *mcl, int port) -+{ -+ struct evtchn_send *send = (void *)(mcl->args + 2); -+ -+ BUILD_BUG_ON(sizeof(*send) > sizeof(mcl->args) - 2 * sizeof(*mcl->args)); -+ send->port = port; -+ mcl->op = __HYPERVISOR_event_channel_op; -+ mcl->args[0] = EVTCHNOP_send; -+ mcl->args[1] = (unsigned long)send; -+} -+ - static inline int close_evtchn(int port) - { - struct evtchn_close close = { .port = port }; -@@ -207,6 +219,7 @@ int xen_test_irq_pending(int irq); - * by bind_*_to_irqhandler(). - */ - void notify_remote_via_irq(int irq); -+int multi_notify_remote_via_irq(multicall_entry_t *, int irq); - int irq_to_evtchn_port(int irq); - - #if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86) diff --git a/patches.xen/xen-netback-nr-irqs b/patches.xen/xen-netback-nr-irqs deleted file mode 100644 index c7b5464..0000000 --- a/patches.xen/xen-netback-nr-irqs +++ /dev/null @@ -1,61 +0,0 @@ -From: jbeulich@novell.com -Subject: netback: reduce overhead of IRQ recording -Patch-mainline: obsolete - -Since both NR_PIRQS and NR_DYNIRQS are no longer hardcoded, the -(memory) overhead of tracking which ones to send notifications to can -be pretty unbounded. Also, store the dynirq rather than the raw irq -to push up the limit where the type of notify_list needs to become -'int' rather than 'u16'. 
- ---- head-2011-02-17.orig/drivers/xen/netback/interface.c 2011-02-17 10:18:52.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/interface.c 2011-02-17 10:33:17.000000000 +0100 -@@ -381,6 +381,7 @@ int netif_map(netif_t *netif, unsigned l - netif->dev->name, netif); - if (err < 0) - goto err_hypervisor; -+ BUG_ON(err < DYNIRQ_BASE || err >= DYNIRQ_BASE + NR_DYNIRQS); - netif->irq = err; - disable_irq(netif->irq); - ---- head-2011-02-17.orig/drivers/xen/netback/netback.c 2011-01-03 13:29:58.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/netback.c 2011-01-03 13:30:08.000000000 +0100 -@@ -593,8 +593,12 @@ static void net_rx_action(unsigned long - static mmu_update_t rx_mmu[NET_RX_RING_SIZE]; - static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE]; - static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE]; -- static unsigned char rx_notify[NR_IRQS]; -+ static DECLARE_BITMAP(rx_notify, NR_DYNIRQS); -+#if NR_DYNIRQS <= 0x10000 - static u16 notify_list[NET_RX_RING_SIZE]; -+#else -+ static int notify_list[NET_RX_RING_SIZE]; -+#endif - static struct netbk_rx_meta meta[NET_RX_RING_SIZE]; - - struct netrx_pending_operations npo = { -@@ -748,11 +752,9 @@ static void net_rx_action(unsigned long - nr_frags); - - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret); -- irq = netif->irq; -- if (ret && !rx_notify[irq]) { -- rx_notify[irq] = 1; -+ irq = netif->irq - DYNIRQ_BASE; -+ if (ret && !__test_and_set_bit(irq, rx_notify)) - notify_list[notify_nr++] = irq; -- } - - if (netif_queue_stopped(netif->dev) && - netif_schedulable(netif) && -@@ -767,8 +769,8 @@ static void net_rx_action(unsigned long - - while (notify_nr != 0) { - irq = notify_list[--notify_nr]; -- rx_notify[irq] = 0; -- notify_remote_via_irq(irq); -+ __clear_bit(irq, rx_notify); -+ notify_remote_via_irq(irq + DYNIRQ_BASE); - } - - /* More work to do? 
*/ diff --git a/patches.xen/xen-op-packet b/patches.xen/xen-op-packet deleted file mode 100644 index f0f616e..0000000 --- a/patches.xen/xen-op-packet +++ /dev/null @@ -1,287 +0,0 @@ -From: plc@novell.com -Subject: add support for new operation type BLKIF_OP_PACKET -Patch-mainline: n/a -References: fate#300964 - ---- head-2011-03-17.orig/drivers/xen/blkback/blkback.c 2011-02-28 14:15:32.000000000 +0100 -+++ head-2011-03-17/drivers/xen/blkback/blkback.c 2011-02-28 14:23:53.000000000 +0100 -@@ -194,13 +194,15 @@ static void fast_flush_area(pending_req_ - - static void print_stats(blkif_t *blkif) - { -- printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n", -+ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d | pk %4d\n", - current->comm, blkif->st_oo_req, -- blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req); -+ blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req, -+ blkif->st_pk_req); - blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); - blkif->st_rd_req = 0; - blkif->st_wr_req = 0; - blkif->st_oo_req = 0; -+ blkif->st_pk_req = 0; - } - - int blkif_schedule(void *arg) -@@ -365,6 +367,13 @@ static int do_block_io_op(blkif_t *blkif - blkif->st_wr_req++; - dispatch_rw_block_io(blkif, &req, pending_req); - break; -+ case BLKIF_OP_PACKET: -+ DPRINTK("error: block operation BLKIF_OP_PACKET not implemented\n"); -+ blkif->st_pk_req++; -+ make_response(blkif, req.id, req.operation, -+ BLKIF_RSP_ERROR); -+ free_req(pending_req); -+ break; - default: - /* A good sign something is wrong: sleep for a while to - * avoid excessive CPU consumption by a bad guest. 
*/ ---- head-2011-03-17.orig/drivers/xen/blkback/common.h 2010-09-23 16:58:21.000000000 +0200 -+++ head-2011-03-17/drivers/xen/blkback/common.h 2010-11-23 15:06:50.000000000 +0100 -@@ -89,6 +89,7 @@ typedef struct blkif_st { - int st_wr_req; - int st_oo_req; - int st_br_req; -+ int st_pk_req; - int st_rd_sect; - int st_wr_sect; - ---- head-2011-03-17.orig/drivers/xen/blkfront/blkfront.c 2011-02-03 12:37:02.000000000 +0100 -+++ head-2011-03-17/drivers/xen/blkfront/blkfront.c 2010-12-06 15:01:01.000000000 +0100 -@@ -708,6 +708,8 @@ static int blkif_queue_request(struct re - if (req->cmd_flags & REQ_HARDBARRIER) - #endif - ring_req->operation = BLKIF_OP_WRITE_BARRIER; -+ if (req->cmd_type == REQ_TYPE_BLOCK_PC) -+ ring_req->operation = BLKIF_OP_PACKET; - - ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); - BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); -@@ -765,7 +767,8 @@ void do_blkif_request(struct request_que - - blk_start_request(req); - -- if (req->cmd_type != REQ_TYPE_FS) { -+ if (req->cmd_type != REQ_TYPE_FS -+ && req->cmd_type != REQ_TYPE_BLOCK_PC) { - __blk_end_request_all(req, -EIO); - continue; - } -@@ -852,6 +855,7 @@ static irqreturn_t blkif_int(int irq, vo - /* fall through */ - case BLKIF_OP_READ: - case BLKIF_OP_WRITE: -+ case BLKIF_OP_PACKET: - if (unlikely(bret->status != BLKIF_RSP_OKAY)) - DPRINTK("Bad return from blkdev data " - "request: %x\n", bret->status); ---- head-2011-03-17.orig/drivers/xen/blktap/blktap.c 2011-02-28 14:21:49.000000000 +0100 -+++ head-2011-03-17/drivers/xen/blktap/blktap.c 2011-02-28 14:25:05.000000000 +0100 -@@ -1129,13 +1129,14 @@ static void fast_flush_area(pending_req_ - - static void print_stats(blkif_t *blkif) - { -- printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d\n", -+ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | pk %4d\n", - current->comm, blkif->st_oo_req, -- blkif->st_rd_req, blkif->st_wr_req); -+ blkif->st_rd_req, blkif->st_wr_req, blkif->st_pk_req); - blkif->st_print = jiffies 
+ msecs_to_jiffies(10 * 1000); - blkif->st_rd_req = 0; - blkif->st_wr_req = 0; - blkif->st_oo_req = 0; -+ blkif->st_pk_req = 0; - } - - int tap_blkif_schedule(void *arg) -@@ -1382,6 +1383,11 @@ static int do_block_io_op(blkif_t *blkif - dispatch_rw_block_io(blkif, &req, pending_req); - break; - -+ case BLKIF_OP_PACKET: -+ blkif->st_pk_req++; -+ dispatch_rw_block_io(blkif, &req, pending_req); -+ break; -+ - default: - /* A good sign something is wrong: sleep for a while to - * avoid excessive CPU consumption by a bad guest. */ ---- head-2011-03-17.orig/drivers/xen/blktap/common.h 2011-01-31 17:56:27.000000000 +0100 -+++ head-2011-03-17/drivers/xen/blktap/common.h 2010-11-23 15:06:50.000000000 +0100 -@@ -74,6 +74,7 @@ typedef struct blkif_st { - int st_rd_req; - int st_wr_req; - int st_oo_req; -+ int st_pk_req; - int st_rd_sect; - int st_wr_sect; - ---- head-2011-03-17.orig/drivers/xen/blktap2/blktap.h 2011-02-24 15:24:27.000000000 +0100 -+++ head-2011-03-17/drivers/xen/blktap2/blktap.h 2010-11-23 15:06:50.000000000 +0100 -@@ -140,6 +140,7 @@ struct blktap_statistics { - int st_rd_req; - int st_wr_req; - int st_oo_req; -+ int st_pk_req; - int st_rd_sect; - int st_wr_sect; - s64 st_rd_cnt; ---- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2011-02-07 14:13:37.000000000 +0100 -+++ head-2011-03-17/drivers/xen/blktap2/device.c 2010-11-23 15:06:50.000000000 +0100 -@@ -366,7 +366,8 @@ blktap_device_fail_pending_requests(stru - - BTERR("%u:%u: failing pending %s of %d pages\n", - blktap_device_major, tap->minor, -- (request->operation == BLKIF_OP_READ ? -+ (request->operation == BLKIF_OP_PACKET ? -+ "packet" : request->operation == BLKIF_OP_READ ? 
- "read" : "write"), request->nr_pages); - - blktap_unmap(tap, request); -@@ -407,6 +408,7 @@ blktap_device_finish_request(struct blkt - switch (request->operation) { - case BLKIF_OP_READ: - case BLKIF_OP_WRITE: -+ case BLKIF_OP_PACKET: - if (unlikely(res->status != BLKIF_RSP_OKAY)) - BTERR("Bad return from device data " - "request: %x\n", res->status); -@@ -644,6 +646,8 @@ blktap_device_process_request(struct blk - blkif_req.handle = 0; - blkif_req.operation = rq_data_dir(req) ? - BLKIF_OP_WRITE : BLKIF_OP_READ; -+ if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) -+ blkif_req.operation = BLKIF_OP_PACKET; - - request->id = (unsigned long)req; - request->operation = blkif_req.operation; -@@ -709,7 +713,9 @@ blktap_device_process_request(struct blk - wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */ - ring->ring.req_prod_pvt++; - -- if (rq_data_dir(req)) { -+ if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) -+ tap->stats.st_pk_req++; -+ else if (rq_data_dir(req)) { - tap->stats.st_wr_sect += nr_sects; - tap->stats.st_wr_req++; - } else { ---- head-2011-03-17.orig/drivers/xen/blktap2-new/blktap.h 2011-02-24 15:00:29.000000000 +0100 -+++ head-2011-03-17/drivers/xen/blktap2-new/blktap.h 2011-02-24 15:27:07.000000000 +0100 -@@ -113,6 +113,7 @@ struct blktap_statistics { - int st_rd_req; - int st_wr_req; - int st_oo_req; -+ int st_pk_req; - int st_rd_sect; - int st_wr_sect; - s64 st_rd_cnt; ---- head-2011-03-17.orig/drivers/xen/blktap2-new/device.c 2011-03-11 00:00:00.000000000 +0100 -+++ head-2011-03-17/drivers/xen/blktap2-new/device.c 2011-03-11 11:09:10.000000000 +0100 -@@ -189,6 +189,8 @@ blktap_device_make_request(struct blktap - - request->rq = rq; - request->operation = write ? 
BLKIF_OP_WRITE : BLKIF_OP_READ; -+ if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) -+ request->operation = BLKIF_OP_PACKET; - - err = blktap_request_get_pages(tap, request, nsegs); - if (err) ---- head-2011-03-17.orig/drivers/xen/blktap2-new/ring.c 2011-02-24 15:10:15.000000000 +0100 -+++ head-2011-03-17/drivers/xen/blktap2-new/ring.c 2011-02-24 15:39:28.000000000 +0100 -@@ -153,11 +153,11 @@ blktap_ring_map_request(struct blktap *t - int seg, err = 0; - int write; - -- write = request->operation == BLKIF_OP_WRITE; -+ write = request->operation != BLKIF_OP_READ; - - for (seg = 0; seg < request->nr_pages; seg++) { - if (write) -- blktap_request_bounce(tap, request, seg, write); -+ blktap_request_bounce(tap, request, seg, 1); - - err = blktap_ring_map_segment(tap, request, seg); - if (err) -@@ -181,11 +181,11 @@ blktap_ring_unmap_request(struct blktap - - uaddr = MMAP_VADDR(ring->user_vstart, request->usr_idx, 0); - size = request->nr_pages << PAGE_SHIFT; -- read = request->operation == BLKIF_OP_READ; -+ read = request->operation != BLKIF_OP_WRITE; - - if (read) - for (seg = 0; seg < request->nr_pages; seg++) -- blktap_request_bounce(tap, request, seg, !read); -+ blktap_request_bounce(tap, request, seg, 0); - - zap_page_range(ring->vma, uaddr, size, NULL); - } -@@ -269,14 +269,20 @@ blktap_ring_submit_request(struct blktap - do_gettimeofday(&request->time); - - -- if (request->operation == BLKIF_OP_WRITE) { -+ switch (request->operation) { -+ case BLKIF_OP_WRITE: - tap->stats.st_wr_sect += nsecs; - tap->stats.st_wr_req++; -- } -+ break; - -- if (request->operation == BLKIF_OP_READ) { -+ case BLKIF_OP_READ: - tap->stats.st_rd_sect += nsecs; - tap->stats.st_rd_req++; -+ break; -+ -+ case BLKIF_OP_PACKET: -+ tap->stats.st_pk_req++; -+ break; - } - } - -@@ -483,20 +489,24 @@ blktap_ring_debug(struct blktap *tap, ch - for (usr_idx = 0; usr_idx < MAX_PENDING_REQS; usr_idx++) { - struct blktap_request *request; - struct timeval *time; -- int write; -+ char op = '?'; - - 
request = ring->pending[usr_idx]; - if (!request) - continue; - -- write = request->operation == BLKIF_OP_WRITE; -+ switch (request->operation) { -+ case BLKIF_OP_WRITE: op = 'W'; break; -+ case BLKIF_OP_READ: op = 'R'; break; -+ case BLKIF_OP_PACKET: op = 'P'; break; -+ } - time = &request->time; - - s += snprintf(s, end - s, - "%02d: usr_idx:%02d " - "op:%c nr_pages:%02d time:%lu.%09lu\n", - usr_idx, request->usr_idx, -- write ? 'W' : 'R', request->nr_pages, -+ op, request->nr_pages, - time->tv_sec, time->tv_usec); - } - ---- head-2011-03-17.orig/include/xen/interface/io/blkif.h 2011-03-17 13:50:24.000000000 +0100 -+++ head-2011-03-17/include/xen/interface/io/blkif.h 2011-03-17 14:34:04.000000000 +0100 -@@ -77,10 +77,9 @@ - */ - #define BLKIF_OP_FLUSH_DISKCACHE 3 - /* -- * Used in SLES sources for device specific command packet -- * contained within the request. Reserved for that purpose. -+ * Device specific command packet contained within the request - */ --#define BLKIF_OP_RESERVED_1 4 -+#define BLKIF_OP_PACKET 4 - /* - * Recognised only if "feature-trim" is present in backend xenbus info. - * The "feature-trim" node contains a boolean indicating whether trim diff --git a/patches.xen/xen-pcpu-hotplug b/patches.xen/xen-pcpu-hotplug deleted file mode 100644 index b1ef1c6..0000000 --- a/patches.xen/xen-pcpu-hotplug +++ /dev/null @@ -1,644 +0,0 @@ -From: Jiang, Yunhong -Subject: xen/acpi: Export host physical CPU information to dom0 -References: bnc#651066 -Patch-mainline: n/a - -This patch expose host's physical CPU information to dom0 in sysfs, so -that dom0's management tools can control the physical CPU if needed. - -It also provides interface in sysfs to logical online/offline a -physical CPU. - -Notice: The information in dom0 is synced with xen hypervisor -asynchronously. - -From: Jiang, Yunhong -Subject: Add cpu hotplug support for 2.6.32 branch - -Add physical CPU hotplug support to origin/xen/next-2.6.32 branch. 
-Please notice that, even with this change, the acpi_processor->id is -still always -1. This is because several workaround in PM side depends -on acpi_processor->id == -1. As the CPU hotplug logic does not depends -on acpi_processor->id, I'd still keep it no changes. - -But we need change the acpi_processor->id in the future. - -Signed-off-by: Jiang, Yunhong - -jb: ported over glue logic; retry loops around XENPF_get_cpuinfo; - cleanup. -Acked-by: jbeulich@novell.com - ---- head-2011-01-30.orig/arch/x86/kernel/acpi/processor_extcntl_xen.c 2011-02-02 15:09:40.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/acpi/processor_extcntl_xen.c 2011-02-02 15:09:57.000000000 +0100 -@@ -181,9 +181,69 @@ static int xen_tx_notifier(struct acpi_p - { - return -EINVAL; - } -+ - static int xen_hotplug_notifier(struct acpi_processor *pr, int event) - { -- return -EINVAL; -+ int ret = -EINVAL; -+#ifdef CONFIG_ACPI_HOTPLUG_CPU -+ acpi_status status = 0; -+ acpi_object_type type; -+ uint32_t apic_id; -+ int device_decl = 0; -+ unsigned long long pxm; -+ xen_platform_op_t op = { -+ .interface_version = XENPF_INTERFACE_VERSION, -+ }; -+ -+ status = acpi_get_type(pr->handle, &type); -+ if (ACPI_FAILURE(status)) { -+ pr_warning("can't get object type for acpi_id %#x\n", -+ pr->acpi_id); -+ return -ENXIO; -+ } -+ -+ switch (type) { -+ case ACPI_TYPE_PROCESSOR: -+ break; -+ case ACPI_TYPE_DEVICE: -+ device_decl = 1; -+ break; -+ default: -+ pr_warning("unsupported object type %#x for acpi_id %#x\n", -+ type, pr->acpi_id); -+ return -EOPNOTSUPP; -+ } -+ -+ apic_id = acpi_get_cpuid(pr->handle, ~device_decl, pr->acpi_id); -+ if (apic_id < 0) { -+ pr_warning("can't get apic_id for acpi_id %#x\n", -+ pr->acpi_id); -+ return -ENODATA; -+ } -+ -+ status = acpi_evaluate_integer(pr->handle, "_PXM", NULL, &pxm); -+ if (ACPI_FAILURE(status)) { -+ pr_warning("can't get pxm for acpi_id %#x\n", -+ pr->acpi_id); -+ return -ENODATA; -+ } -+ -+ switch (event) { -+ case HOTPLUG_TYPE_ADD: -+ op.cmd = 
XENPF_cpu_hotadd; -+ op.u.cpu_add.apic_id = apic_id; -+ op.u.cpu_add.acpi_id = pr->acpi_id; -+ op.u.cpu_add.pxm = pxm; -+ ret = HYPERVISOR_platform_op(&op); -+ break; -+ case HOTPLUG_TYPE_REMOVE: -+ pr_warning("Xen doesn't support CPU hot remove\n"); -+ ret = -EOPNOTSUPP; -+ break; -+ } -+#endif -+ -+ return ret; - } - - static struct processor_extcntl_ops xen_extcntl_ops = { -@@ -194,8 +254,10 @@ static int __init init_extcntl(void) - { - unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8; - -+#ifndef CONFIG_ACPI_HOTPLUG_CPU - if (!pmbits) - return 0; -+#endif - if (pmbits & XEN_PROCESSOR_PM_CX) - xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier; - if (pmbits & XEN_PROCESSOR_PM_PX) ---- head-2011-01-30.orig/drivers/acpi/processor_driver.c 2011-02-01 15:03:10.000000000 +0100 -+++ head-2011-01-30/drivers/acpi/processor_driver.c 2011-02-02 15:09:57.000000000 +0100 -@@ -82,7 +82,7 @@ MODULE_LICENSE("GPL"); - static int acpi_processor_add(struct acpi_device *device); - static int acpi_processor_remove(struct acpi_device *device, int type); - static void acpi_processor_notify(struct acpi_device *device, u32 event); --static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu); -+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr); - static int acpi_processor_handle_eject(struct acpi_processor *pr); - - -@@ -324,8 +324,7 @@ static int acpi_processor_get_info(struc - * they are physically not present. 
- */ - if (pr->id == -1) { -- if (ACPI_FAILURE -- (acpi_processor_hotadd_init(pr->handle, &pr->id)) && -+ if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)) && - acpi_get_cpuid(pr->handle, ~device_declaration, - pr->acpi_id) < 0) { - return -ENODEV; -@@ -789,13 +788,26 @@ processor_walk_namespace_cb(acpi_handle - return (AE_OK); - } - --static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) -+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) - { -+ acpi_handle handle = pr->handle; -+ int *p_cpu = &pr->id; -+ -+#ifdef CONFIG_XEN -+ if (xen_pcpu_index(pr->acpi_id, 1) != -1) -+ return AE_OK; -+#endif - - if (!is_processor_present(handle)) { - return AE_ERROR; - } - -+ if (processor_cntl_external()) { -+ processor_notify_external(pr, PROCESSOR_HOTPLUG, -+ HOTPLUG_TYPE_ADD); -+ return AE_OK; -+ } -+ - if (acpi_map_lsapic(handle, p_cpu)) - return AE_ERROR; - -@@ -809,10 +821,11 @@ static acpi_status acpi_processor_hotadd - - static int acpi_processor_handle_eject(struct acpi_processor *pr) - { --#ifdef CONFIG_XEN -- if (pr->id == -1) -+ if (processor_cntl_external()) { -+ processor_notify_external(pr, PROCESSOR_HOTPLUG, -+ HOTPLUG_TYPE_REMOVE); - return (0); --#endif -+ } - - if (cpu_online(pr->id)) - cpu_down(pr->id); -@@ -822,7 +835,7 @@ static int acpi_processor_handle_eject(s - return (0); - } - #else --static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) -+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) - { - return AE_ERROR; - } ---- head-2011-01-30.orig/drivers/acpi/processor_extcntl.c 2011-02-01 15:03:03.000000000 +0100 -+++ head-2011-01-30/drivers/acpi/processor_extcntl.c 2011-02-02 15:09:57.000000000 +0100 -@@ -83,10 +83,13 @@ int processor_notify_external(struct acp - - ret = processor_extcntl_ops->pm_ops[type](pr, event); - break; -+#ifdef CONFIG_ACPI_HOTPLUG_CPU - case PROCESSOR_HOTPLUG: - if (processor_extcntl_ops->hotplug) - ret = 
processor_extcntl_ops->hotplug(pr, type); -+ xen_pcpu_hotplug(type); - break; -+#endif - default: - pr_err("Unsupported processor event %d.\n", event); - break; ---- head-2011-01-30.orig/drivers/xen/core/Makefile 2011-02-02 15:09:52.000000000 +0100 -+++ head-2011-01-30/drivers/xen/core/Makefile 2011-02-02 15:09:57.000000000 +0100 -@@ -5,6 +5,7 @@ - obj-y := evtchn.o gnttab.o reboot.o machine_reboot.o firmware.o - - obj-$(CONFIG_PCI) += pci.o -+obj-$(CONFIG_ACPI_HOTPLUG_CPU) += pcpu.o - obj-$(CONFIG_PROC_FS) += xen_proc.o - obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o - obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-01-30/drivers/xen/core/pcpu.c 2011-02-02 15:09:57.000000000 +0100 -@@ -0,0 +1,416 @@ -+/* -+ * pcpu.c - management physical cpu in dom0 environment -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct pcpu { -+ struct list_head pcpu_list; -+ struct sys_device sysdev; -+ uint32_t xen_id; -+ uint32_t apic_id; -+ uint32_t acpi_id; -+ uint32_t flags; -+}; -+ -+static inline int xen_pcpu_online(uint32_t flags) -+{ -+ return !!(flags & XEN_PCPU_FLAGS_ONLINE); -+} -+ -+static DEFINE_MUTEX(xen_pcpu_lock); -+ -+/* No need for irq disable since hotplug notify is in workqueue context */ -+#define get_pcpu_lock() mutex_lock(&xen_pcpu_lock); -+#define put_pcpu_lock() mutex_unlock(&xen_pcpu_lock); -+ -+static LIST_HEAD(xen_pcpus); -+ -+static int xen_pcpu_down(uint32_t xen_id) -+{ -+ xen_platform_op_t op = { -+ .cmd = XENPF_cpu_offline, -+ .interface_version = XENPF_INTERFACE_VERSION, -+ .u.cpu_ol.cpuid = xen_id, -+ }; -+ -+ return HYPERVISOR_platform_op(&op); -+} -+ -+static int xen_pcpu_up(uint32_t xen_id) -+{ -+ xen_platform_op_t op = { -+ .cmd = XENPF_cpu_online, -+ .interface_version = XENPF_INTERFACE_VERSION, -+ .u.cpu_ol.cpuid = xen_id, -+ }; -+ -+ return HYPERVISOR_platform_op(&op); -+} -+ -+static ssize_t 
show_online(struct sys_device *dev, -+ struct sysdev_attribute *attr, -+ char *buf) -+{ -+ struct pcpu *cpu = container_of(dev, struct pcpu, sysdev); -+ -+ return sprintf(buf, "%d\n", xen_pcpu_online(cpu->flags)); -+} -+ -+static ssize_t store_online(struct sys_device *dev, -+ struct sysdev_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct pcpu *cpu = container_of(dev, struct pcpu, sysdev); -+ ssize_t ret; -+ -+ switch (buf[0]) { -+ case '0': -+ ret = xen_pcpu_down(cpu->xen_id); -+ break; -+ case '1': -+ ret = xen_pcpu_up(cpu->xen_id); -+ break; -+ default: -+ ret = -EINVAL; -+ } -+ -+ if (ret >= 0) -+ ret = count; -+ return ret; -+} -+ -+static SYSDEV_ATTR(online, 0644, show_online, store_online); -+ -+static ssize_t show_apicid(struct sys_device *dev, -+ struct sysdev_attribute *attr, -+ char *buf) -+{ -+ struct pcpu *cpu = container_of(dev, struct pcpu, sysdev); -+ -+ return sprintf(buf, "%#x\n", cpu->apic_id); -+} -+static SYSDEV_ATTR(apic_id, 0444, show_apicid, NULL); -+ -+static ssize_t show_acpiid(struct sys_device *dev, -+ struct sysdev_attribute *attr, -+ char *buf) -+{ -+ struct pcpu *cpu = container_of(dev, struct pcpu, sysdev); -+ -+ return sprintf(buf, "%#x\n", cpu->acpi_id); -+} -+static SYSDEV_ATTR(acpi_id, 0444, show_acpiid, NULL); -+ -+static struct sysdev_class xen_pcpu_sysdev_class = { -+ .name = "xen_pcpu", -+}; -+ -+static int xen_pcpu_free(struct pcpu *pcpu) -+{ -+ if (!pcpu) -+ return 0; -+ -+ sysdev_remove_file(&pcpu->sysdev, &attr_online); -+ sysdev_remove_file(&pcpu->sysdev, &attr_apic_id); -+ sysdev_remove_file(&pcpu->sysdev, &attr_acpi_id); -+ sysdev_unregister(&pcpu->sysdev); -+ list_del(&pcpu->pcpu_list); -+ kfree(pcpu); -+ -+ return 0; -+} -+ -+static inline int same_pcpu(struct xenpf_pcpuinfo *info, -+ struct pcpu *pcpu) -+{ -+ return (pcpu->apic_id == info->apic_id) && -+ (pcpu->xen_id == info->xen_cpuid); -+} -+ -+/* -+ * Return 1 if online status changed -+ */ -+static int xen_pcpu_online_check(struct xenpf_pcpuinfo 
*info, -+ struct pcpu *pcpu) -+{ -+ int result = 0; -+ -+ if (info->xen_cpuid != pcpu->xen_id) -+ return 0; -+ -+ if (xen_pcpu_online(info->flags) && !xen_pcpu_online(pcpu->flags)) { -+ /* the pcpu is onlined */ -+ pcpu->flags |= XEN_PCPU_FLAGS_ONLINE; -+ kobject_uevent(&pcpu->sysdev.kobj, KOBJ_ONLINE); -+ result = 1; -+ } else if (!xen_pcpu_online(info->flags) && -+ xen_pcpu_online(pcpu->flags)) { -+ /* The pcpu is offlined now */ -+ pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE; -+ kobject_uevent(&pcpu->sysdev.kobj, KOBJ_OFFLINE); -+ result = 1; -+ } -+ -+ return result; -+} -+ -+static int pcpu_sysdev_init(struct pcpu *cpu) -+{ -+ int error; -+ -+ error = sysdev_register(&cpu->sysdev); -+ if (error) { -+ pr_warning("xen_pcpu_add: Failed to register pcpu\n"); -+ kfree(cpu); -+ return -1; -+ } -+ sysdev_create_file(&cpu->sysdev, &attr_online); -+ sysdev_create_file(&cpu->sysdev, &attr_apic_id); -+ sysdev_create_file(&cpu->sysdev, &attr_acpi_id); -+ return 0; -+} -+ -+static struct pcpu *get_pcpu(unsigned int xen_id) -+{ -+ struct pcpu *pcpu; -+ -+ list_for_each_entry(pcpu, &xen_pcpus, pcpu_list) -+ if (pcpu->xen_id == xen_id) -+ return pcpu; -+ -+ return NULL; -+} -+ -+static struct pcpu *init_pcpu(struct xenpf_pcpuinfo *info) -+{ -+ struct pcpu *pcpu; -+ -+ if (info->flags & XEN_PCPU_FLAGS_INVALID) -+ return NULL; -+ -+ /* The PCPU is just added */ -+ pcpu = kzalloc(sizeof(struct pcpu), GFP_KERNEL); -+ if (!pcpu) -+ return NULL; -+ -+ INIT_LIST_HEAD(&pcpu->pcpu_list); -+ pcpu->xen_id = info->xen_cpuid; -+ pcpu->apic_id = info->apic_id; -+ pcpu->acpi_id = info->acpi_id; -+ pcpu->flags = info->flags; -+ -+ pcpu->sysdev.cls = &xen_pcpu_sysdev_class; -+ pcpu->sysdev.id = info->xen_cpuid; -+ -+ if (pcpu_sysdev_init(pcpu)) { -+ kfree(pcpu); -+ return NULL; -+ } -+ -+ list_add_tail(&pcpu->pcpu_list, &xen_pcpus); -+ return pcpu; -+} -+ -+#define PCPU_NO_CHANGE 0 -+#define PCPU_ADDED 1 -+#define PCPU_ONLINE_OFFLINE 2 -+#define PCPU_REMOVED 3 -+/* -+ * Caller should hold the pcpu 
lock -+ * < 0: Something wrong -+ * 0: No changes -+ * > 0: State changed -+ */ -+static struct pcpu *_sync_pcpu(unsigned int cpu_num, unsigned int *max_id, -+ int *result) -+{ -+ struct pcpu *pcpu; -+ struct xenpf_pcpuinfo *info; -+ xen_platform_op_t op = { -+ .cmd = XENPF_get_cpuinfo, -+ .interface_version = XENPF_INTERFACE_VERSION, -+ }; -+ int ret; -+ -+ *result = -1; -+ -+ info = &op.u.pcpu_info; -+ info->xen_cpuid = cpu_num; -+ -+ do { -+ ret = HYPERVISOR_platform_op(&op); -+ } while (ret == -EBUSY); -+ if (ret) -+ return NULL; -+ -+ if (max_id) -+ *max_id = op.u.pcpu_info.max_present; -+ -+ pcpu = get_pcpu(cpu_num); -+ -+ if (info->flags & XEN_PCPU_FLAGS_INVALID) { -+ /* The pcpu has been removed */ -+ *result = PCPU_NO_CHANGE; -+ if (pcpu) { -+ xen_pcpu_free(pcpu); -+ *result = PCPU_REMOVED; -+ } -+ return NULL; -+ } -+ -+ -+ if (!pcpu) { -+ *result = PCPU_ADDED; -+ pcpu = init_pcpu(info); -+ if (pcpu == NULL) { -+ pr_warning("Failed to init pcpu %x\n", -+ info->xen_cpuid); -+ *result = -1; -+ } -+ } else { -+ *result = PCPU_NO_CHANGE; -+ /* -+ * Old PCPU is replaced with a new pcpu, this means -+ * several virq is missed, will it happen? 
-+ */ -+ if (!same_pcpu(info, pcpu)) { -+ pr_warning("Pcpu %x changed!\n", pcpu->xen_id); -+ pcpu->apic_id = info->apic_id; -+ pcpu->acpi_id = info->acpi_id; -+ } -+ if (xen_pcpu_online_check(info, pcpu)) -+ *result = PCPU_ONLINE_OFFLINE; -+ } -+ return pcpu; -+} -+ -+/* -+ * Sync dom0's pcpu information with xen hypervisor's -+ */ -+static int xen_sync_pcpus(void) -+{ -+ /* -+ * Boot cpu always have cpu_id 0 in xen -+ */ -+ unsigned int cpu_num = 0, max_id = 0; -+ int result = 0; -+ struct pcpu *pcpu; -+ -+ get_pcpu_lock(); -+ -+ while ((result >= 0) && (cpu_num <= max_id)) { -+ pcpu = _sync_pcpu(cpu_num, &max_id, &result); -+ -+ switch (result) { -+ case PCPU_NO_CHANGE: -+ case PCPU_ADDED: -+ case PCPU_ONLINE_OFFLINE: -+ case PCPU_REMOVED: -+ break; -+ default: -+ pr_warning("Failed to sync pcpu %x\n", cpu_num); -+ break; -+ } -+ cpu_num++; -+ } -+ -+ if (result < 0) { -+ struct pcpu *tmp; -+ -+ list_for_each_entry_safe(pcpu, tmp, &xen_pcpus, pcpu_list) -+ xen_pcpu_free(pcpu); -+ } -+ -+ put_pcpu_lock(); -+ -+ return 0; -+} -+ -+static void xen_pcpu_dpc(struct work_struct *work) -+{ -+ if (xen_sync_pcpus() < 0) -+ pr_warning("xen_pcpu_dpc: Failed to sync pcpu information\n"); -+} -+static DECLARE_WORK(xen_pcpu_work, xen_pcpu_dpc); -+ -+static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id) -+{ -+ schedule_work(&xen_pcpu_work); -+ -+ return IRQ_HANDLED; -+} -+ -+int xen_pcpu_hotplug(int type) -+{ -+ schedule_work(&xen_pcpu_work); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(xen_pcpu_hotplug); -+ -+int xen_pcpu_index(uint32_t id, bool is_acpiid) -+{ -+ unsigned int cpu_num, max_id; -+ xen_platform_op_t op = { -+ .cmd = XENPF_get_cpuinfo, -+ .interface_version = XENPF_INTERFACE_VERSION, -+ }; -+ struct xenpf_pcpuinfo *info = &op.u.pcpu_info; -+ -+ for (max_id = cpu_num = 0; cpu_num <= max_id; ++cpu_num) { -+ int ret; -+ -+ info->xen_cpuid = cpu_num; -+ do { -+ ret = HYPERVISOR_platform_op(&op); -+ } while (ret == -EBUSY); -+ if (ret) -+ continue; -+ -+ if 
(info->max_present > max_id) -+ max_id = info->max_present; -+ if (id == (is_acpiid ? info->acpi_id : info->apic_id)) -+ return cpu_num; -+ } -+ -+ return -1; -+} -+EXPORT_SYMBOL_GPL(xen_pcpu_index); -+ -+static int __init xen_pcpu_init(void) -+{ -+ int err; -+ -+ if (!is_initial_xendomain()) -+ return 0; -+ -+ err = sysdev_class_register(&xen_pcpu_sysdev_class); -+ if (err) { -+ pr_warning("xen_pcpu_init: " -+ "Failed to register sysdev class (%d)\n", err); -+ return err; -+ } -+ -+ xen_sync_pcpus(); -+ -+ if (!list_empty(&xen_pcpus)) -+ err = bind_virq_to_irqhandler(VIRQ_PCPU_STATE, 0, -+ xen_pcpu_interrupt, 0, -+ "pcpu", NULL); -+ if (err < 0) -+ pr_warning("xen_pcpu_init: " -+ "Failed to bind pcpu_state virq (%d)\n", err); -+ -+ return err; -+} -+subsys_initcall(xen_pcpu_init); ---- head-2011-01-30.orig/include/acpi/processor.h 2011-02-01 15:03:10.000000000 +0100 -+++ head-2011-01-30/include/acpi/processor.h 2011-02-02 15:09:57.000000000 +0100 -@@ -509,6 +509,8 @@ static inline void xen_convert_psd_pack( - xpsd->num_processors = apsd->num_processors; - } - -+extern int xen_pcpu_hotplug(int type); -+extern int xen_pcpu_index(uint32_t id, bool is_acpiid); - #endif /* CONFIG_XEN */ - - #endif diff --git a/patches.xen/xen-sections b/patches.xen/xen-sections deleted file mode 100644 index 44cd67e..0000000 --- a/patches.xen/xen-sections +++ /dev/null @@ -1,105 +0,0 @@ -From: jbeulich@novell.com -Subject: fix placement of some routines/data -Patch-mainline: obsolete - ---- head-2011-01-22.orig/arch/x86/kernel/time-xen.c 2010-11-22 13:21:13.000000000 +0100 -+++ head-2011-01-22/arch/x86/kernel/time-xen.c 2010-09-16 16:49:59.000000000 +0200 -@@ -648,7 +648,7 @@ int xen_update_persistent_clock(void) - /* Dynamically-mapped IRQ. 
*/ - DEFINE_PER_CPU(int, timer_irq); - --static void setup_cpu0_timer_irq(void) -+static void __init setup_cpu0_timer_irq(void) - { - per_cpu(timer_irq, 0) = - bind_virq_to_irqhandler( ---- head-2011-01-22.orig/drivers/xen/core/cpu_hotplug.c 2011-01-24 12:14:25.000000000 +0100 -+++ head-2011-01-22/drivers/xen/core/cpu_hotplug.c 2011-01-24 12:15:51.000000000 +0100 -@@ -25,7 +25,7 @@ static int local_cpu_hotplug_request(voi - return (current->mm != NULL); - } - --static void vcpu_hotplug(unsigned int cpu, struct sys_device *dev) -+static void __cpuinit vcpu_hotplug(unsigned int cpu, struct sys_device *dev) - { - int err; - char dir[32], state[32]; -@@ -54,7 +54,7 @@ static void vcpu_hotplug(unsigned int cp - } - } - --static void handle_vcpu_hotplug_event( -+static void __cpuinit handle_vcpu_hotplug_event( - struct xenbus_watch *watch, const char **vec, unsigned int len) - { - unsigned int cpu; -@@ -83,12 +83,12 @@ static int smpboot_cpu_notify(struct not - return NOTIFY_OK; - } - --static int setup_cpu_watcher(struct notifier_block *notifier, -- unsigned long event, void *data) -+static int __cpuinit setup_cpu_watcher(struct notifier_block *notifier, -+ unsigned long event, void *data) - { - unsigned int i; - -- static struct xenbus_watch cpu_watch = { -+ static struct xenbus_watch __cpuinitdata cpu_watch = { - .node = "cpu", - .callback = handle_vcpu_hotplug_event, - .flags = XBWF_new_thread }; -@@ -107,7 +107,7 @@ static int __init setup_vcpu_hotplug_eve - { - static struct notifier_block hotplug_cpu = { - .notifier_call = smpboot_cpu_notify }; -- static struct notifier_block xsn_cpu = { -+ static struct notifier_block __cpuinitdata xsn_cpu = { - .notifier_call = setup_cpu_watcher }; - - if (!is_running_on_xen()) -@@ -121,7 +121,7 @@ static int __init setup_vcpu_hotplug_eve - - arch_initcall(setup_vcpu_hotplug_event); - --int smp_suspend(void) -+int __ref smp_suspend(void) - { - unsigned int cpu; - int err; -@@ -141,7 +141,7 @@ int smp_suspend(void) - return 0; - 
} - --void smp_resume(void) -+void __ref smp_resume(void) - { - unsigned int cpu; - ---- head-2011-01-22.orig/drivers/xen/core/gnttab.c 2010-11-22 13:21:03.000000000 +0100 -+++ head-2011-01-22/drivers/xen/core/gnttab.c 2011-01-14 15:00:13.000000000 +0100 -@@ -794,7 +794,12 @@ static int gnttab_expand(unsigned int re - return rc; - } - --int __devinit gnttab_init(void) -+#ifdef CONFIG_XEN -+static int __init -+#else -+int __devinit -+#endif -+gnttab_init(void) - { - int i; - unsigned int max_nr_glist_frames, nr_glist_frames; ---- head-2011-01-22.orig/drivers/xen/pcifront/pci_op.c 2010-11-22 13:10:22.000000000 +0100 -+++ head-2011-01-22/drivers/xen/pcifront/pci_op.c 2010-10-04 11:10:07.000000000 +0200 -@@ -416,7 +416,7 @@ void pci_frontend_disable_msi(struct pci - #endif /* CONFIG_PCI_MSI */ - - /* Claim resources for the PCI frontend as-is, backend won't allow changes */ --static int pcifront_claim_resource(struct pci_dev *dev, void *data) -+static int __devinit pcifront_claim_resource(struct pci_dev *dev, void *data) - { - struct pcifront_device *pdev = data; - int i; diff --git a/patches.xen/xen-setup-gsi b/patches.xen/xen-setup-gsi deleted file mode 100644 index 5813f6e..0000000 --- a/patches.xen/xen-setup-gsi +++ /dev/null @@ -1,158 +0,0 @@ -From: jbeulich@novell.com -Subject: pass trigger mode and polarity information to Xen for all interrupts -Patch-mainline: n/a - -For Xen to be able to use non-legacy IRQs e.g. for its serial console, -it needs to know trigger mode and polarity for them regardless of -whether the kernel is also going to (try to) use those interrupts. 
- ---- head-2011-01-30.orig/arch/x86/kernel/apic/io_apic-xen.c 2011-02-01 16:05:51.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/apic/io_apic-xen.c 2011-02-02 15:10:38.000000000 +0100 -@@ -1400,6 +1400,10 @@ static int setup_ioapic_entry(int apic_i - return 0; - } - -+static struct { -+ DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); -+} mp_ioapic_routing[MAX_IO_APICS]; -+ - static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, - struct irq_cfg *cfg, int trigger, int polarity) - { -@@ -1416,6 +1420,42 @@ static void setup_ioapic_irq(int apic_id - */ - if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain)) - apic->vector_allocation_domain(0, cfg->domain); -+#else -+ /* -+ * For legacy IRQs we may get here before trigger mode and polarity -+ * get obtained, but Xen refuses to set those through -+ * PHYSDEVOP_setup_gsi more than once (perhaps even at all). -+ */ -+ if (irq >= legacy_pic->nr_legacy_irqs -+ || test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { -+ struct physdev_setup_gsi setup_gsi = { -+ .gsi = irq, -+ .triggering = trigger, -+ .polarity = polarity -+ }; -+ struct physdev_map_pirq map_pirq = { -+ .domid = DOMID_SELF, -+ .type = MAP_PIRQ_TYPE_GSI, -+ .index = irq, -+ .pirq = irq -+ }; -+ -+ switch (HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, -+ &setup_gsi)) { -+ case -EEXIST: -+ if (irq < legacy_pic->nr_legacy_irqs) -+ break; -+ /* fall through */ -+ case 0: -+ evtchn_register_pirq(irq); -+ if (HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, -+ &map_pirq) == 0) { -+ /* fake (for init_IO_APIC_traps()): */ -+ cfg->vector = irq; -+ return; -+ } -+ } -+ } - #endif - - if (assign_irq_vector(irq, cfg, apic->target_cpus())) -@@ -1451,10 +1491,6 @@ static void setup_ioapic_irq(int apic_id - ioapic_write_entry(apic_id, pin, entry); - } - --static struct { -- DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); --} mp_ioapic_routing[MAX_IO_APICS]; -- - static void __init setup_IO_APIC_irqs(void) - { - int 
apic_id, pin, idx, irq, notcon = 0; ---- head-2011-01-30.orig/drivers/acpi/pci_irq.c 2011-01-05 01:50:19.000000000 +0100 -+++ head-2011-01-30/drivers/acpi/pci_irq.c 2011-02-02 15:10:38.000000000 +0100 -@@ -469,3 +469,80 @@ void acpi_pci_irq_disable(struct pci_dev - dev_info(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); - acpi_unregister_gsi(gsi); - } -+ -+#if defined(CONFIG_XEN) && defined(CONFIG_PCI) -+static int __init xen_setup_gsi(void) -+{ -+ struct pci_dev *dev = NULL; -+ -+ if (acpi_noirq) -+ return 0; -+ -+ /* Loop body is a clone of acpi_pci_irq_enable(). */ -+ for_each_pci_dev(dev) { -+ const struct acpi_prt_entry *entry; -+ int gsi; -+ int triggering = ACPI_LEVEL_SENSITIVE; -+ int polarity = ACPI_ACTIVE_LOW; -+ struct physdev_setup_gsi setup_gsi; -+ -+ if (!dev->pin) -+ continue; -+ -+ entry = acpi_pci_irq_lookup(dev, dev->pin); -+ if (!entry) { -+ /* -+ * IDE legacy mode controller IRQs are magic. Why do -+ * compat extensions always make such a nasty mess. -+ */ -+ if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && -+ (dev->class & 0x05) == 0) -+ continue; -+ } -+ -+ gsi = entry -+ ? entry->link -+ ? acpi_pci_link_allocate_irq(entry->link, -+ entry->index, -+ &triggering, &polarity, -+ NULL) -+ : entry->index -+ : -1; -+ -+ if (gsi >= 0) { -+ setup_gsi.gsi = gsi; -+ setup_gsi.triggering -+ = (triggering == ACPI_LEVEL_SENSITIVE); -+ setup_gsi.polarity = (polarity == ACPI_ACTIVE_LOW); -+ if (HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, -+ &setup_gsi) < 0) -+ continue; -+ -+ dev_info(&dev->dev, "GSI%d: %s-%s\n", gsi, -+ triggering == ACPI_LEVEL_SENSITIVE ? "level" -+ : "edge", -+ polarity == ACPI_ACTIVE_LOW ? "low" : "high"); -+ } else { -+ /* -+ * No IRQ known to the ACPI subsystem - maybe the -+ * BIOS / driver reported one, then use it. 
-+ */ -+ dev_warn(&dev->dev, "PCI INT %c: no GSI", -+ pin_name(dev->pin)); -+ /* Interrupt Line values above 0xF are forbidden */ -+ if (dev->irq > 0 && (dev->irq <= 0xF)) { -+ pr_cont(" - using IRQ %d\n", dev->irq); -+ setup_gsi.gsi = dev->irq; -+ setup_gsi.triggering = 1; -+ setup_gsi.polarity = 1; -+ VOID(HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, -+ &setup_gsi)); -+ } else -+ pr_cont("\n"); -+ } -+ } -+ -+ return 0; -+} -+subsys_initcall(xen_setup_gsi); -+#endif diff --git a/patches.xen/xen-spinlock-poll-early b/patches.xen/xen-spinlock-poll-early deleted file mode 100644 index fb22288..0000000 --- a/patches.xen/xen-spinlock-poll-early +++ /dev/null @@ -1,184 +0,0 @@ -From: jbeulich@novell.com -Subject: Go into polling mode early if lock owner is not running -Patch-mainline: n/a - -This could be merged into the original ticket spinlock code once -validated, if there wasn't the dependency on smp-processor-id.h, which -only gets introduced in the 2.6.32 merge. - ---- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock.h 2011-01-18 15:47:44.000000000 +0100 -@@ -41,11 +41,12 @@ - #ifdef TICKET_SHIFT - - #include -+#include - - int xen_spinlock_init(unsigned int cpu); - void xen_spinlock_cleanup(unsigned int cpu); --bool xen_spin_wait(arch_spinlock_t *, unsigned int *token, -- unsigned int flags); -+unsigned int xen_spin_wait(arch_spinlock_t *, unsigned int *token, -+ unsigned int flags); - unsigned int xen_spin_adjust(const arch_spinlock_t *, unsigned int token); - void xen_spin_kick(arch_spinlock_t *, unsigned int token); - -@@ -113,6 +114,9 @@ static __always_inline int __ticket_spin - : - : "memory", "cc"); - -+ if (tmp) -+ lock->owner = raw_smp_processor_id(); -+ - return tmp; - } - #elif TICKET_SHIFT == 16 -@@ -179,10 +183,15 @@ static __always_inline int __ticket_spin - : - : "memory", "cc"); - -+ if (tmp) -+ lock->owner = raw_smp_processor_id(); -+ - 
return tmp; - } - #endif - -+#define __ticket_spin_count(lock) (vcpu_running((lock)->owner) ? 1 << 10 : 1) -+ - static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) - { - int tmp = ACCESS_ONCE(lock->slock); -@@ -204,16 +213,18 @@ static __always_inline void __ticket_spi - bool free; - - __ticket_spin_lock_preamble; -- if (likely(free)) { -+ if (likely(free)) - arch_local_irq_restore(flags); -- return; -+ else { -+ token = xen_spin_adjust(lock, token); -+ arch_local_irq_restore(flags); -+ count = __ticket_spin_count(lock); -+ do { -+ __ticket_spin_lock_body; -+ } while (unlikely(!count) -+ && (count = xen_spin_wait(lock, &token, flags))); - } -- token = xen_spin_adjust(lock, token); -- arch_local_irq_restore(flags); -- do { -- count = 1 << 10; -- __ticket_spin_lock_body; -- } while (unlikely(!count) && !xen_spin_wait(lock, &token, flags)); -+ lock->owner = raw_smp_processor_id(); - } - - static __always_inline void __ticket_spin_lock_flags(arch_spinlock_t *lock, -@@ -223,13 +234,15 @@ static __always_inline void __ticket_spi - bool free; - - __ticket_spin_lock_preamble; -- if (likely(free)) -- return; -- token = xen_spin_adjust(lock, token); -- do { -- count = 1 << 10; -- __ticket_spin_lock_body; -- } while (unlikely(!count) && !xen_spin_wait(lock, &token, flags)); -+ if (unlikely(!free)) { -+ token = xen_spin_adjust(lock, token); -+ count = __ticket_spin_count(lock); -+ do { -+ __ticket_spin_lock_body; -+ } while (unlikely(!count) -+ && (count = xen_spin_wait(lock, &token, flags))); -+ } -+ lock->owner = raw_smp_processor_id(); - } - - static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) -@@ -246,6 +259,7 @@ static __always_inline void __ticket_spi - #undef __ticket_spin_lock_preamble - #undef __ticket_spin_lock_body - #undef __ticket_spin_unlock_body -+#undef __ticket_spin_count - #endif - - #define __arch_spin(n) __ticket_spin_##n ---- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-02-01 14:55:46.000000000 
+0100 -+++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-01-26 11:27:24.000000000 +0100 -@@ -26,6 +26,11 @@ typedef union { - # define TICKET_SHIFT 16 - u16 cur, seq; - #endif -+#if CONFIG_NR_CPUS <= 256 -+ u8 owner; -+#else -+ u16 owner; -+#endif - #else - /* - * This differs from the pre-2.6.24 spinlock by always using xchgb ---- head-2011-03-11.orig/drivers/xen/core/spinlock.c 2011-03-15 16:18:37.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/spinlock.c 2011-03-15 16:19:26.000000000 +0100 -@@ -39,6 +39,8 @@ int __cpuinit xen_spinlock_init(unsigned - struct evtchn_bind_ipi bind_ipi; - int rc; - -+ setup_runstate_area(cpu); -+ - WARN_ON(per_cpu(poll_evtchn, cpu)); - bind_ipi.vcpu = cpu; - rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi); -@@ -124,18 +126,17 @@ unsigned int xen_spin_adjust(const arch_ - return spin_adjust(percpu_read(_spinning), lock, token); - } - --bool xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok, -- unsigned int flags) -+unsigned int xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok, -+ unsigned int flags) - { -+ unsigned int rm_idx, cpu = raw_smp_processor_id(); - bool rc; - typeof(vcpu_info(0)->evtchn_upcall_mask) upcall_mask; -- unsigned int rm_idx; - struct spinning spinning, *other; - - /* If kicker interrupt not initialized yet, just spin. */ -- if (unlikely(!cpu_online(raw_smp_processor_id())) -- || unlikely(!percpu_read(poll_evtchn))) -- return false; -+ if (unlikely(!cpu_online(cpu)) || unlikely(!percpu_read(poll_evtchn))) -+ return UINT_MAX; - - /* announce we're spinning */ - spinning.ticket = *ptok >> TICKET_SHIFT; -@@ -155,6 +156,7 @@ bool xen_spin_wait(arch_spinlock_t *lock - * we weren't looking. 
- */ - if (lock->cur == spinning.ticket) { -+ lock->owner = cpu; - /* - * If we interrupted another spinlock while it was - * blocking, make sure it doesn't block (again) -@@ -251,6 +253,8 @@ bool xen_spin_wait(arch_spinlock_t *lock - if (!free) - token = spin_adjust(other->prev, lock, token); - other->ticket = token >> TICKET_SHIFT; -+ if (lock->cur == other->ticket) -+ lock->owner = cpu; - } while ((other = other->prev) != NULL); - lock = spinning.lock; - } -@@ -261,7 +265,7 @@ bool xen_spin_wait(arch_spinlock_t *lock - arch_local_irq_restore(upcall_mask); - *ptok = lock->cur | (spinning.ticket << TICKET_SHIFT); - -- return rc; -+ return rc ? 0 : __ticket_spin_count(lock); - } - - void xen_spin_kick(arch_spinlock_t *lock, unsigned int token) diff --git a/patches.xen/xen-swiotlb-heuristics b/patches.xen/xen-swiotlb-heuristics deleted file mode 100644 index bd087af..0000000 --- a/patches.xen/xen-swiotlb-heuristics +++ /dev/null @@ -1,32 +0,0 @@ -From: jbeulich@novell.com -Subject: adjust Xen's swiotlb default size setting -Patch-mainline: obsolete - ---- head-2010-08-24.orig/lib/swiotlb-xen.c 2010-08-24 16:49:11.000000000 +0200 -+++ head-2010-08-24/lib/swiotlb-xen.c 2010-08-25 14:41:33.000000000 +0200 -@@ -228,8 +228,8 @@ swiotlb_init_with_default_size(size_t de - void __init - swiotlb_init(int verbose) - { -- long ram_end; -- size_t defsz = 64 * (1 << 20); /* 64MB default size */ -+ unsigned long ram_end; -+ size_t defsz = 64 << 20; /* 64MB default size */ - - if (swiotlb_force == 1) { - swiotlb = 1; -@@ -238,8 +238,12 @@ swiotlb_init(int verbose) - is_initial_xendomain()) { - /* Domain 0 always has a swiotlb. */ - ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL); -- if (ram_end <= 0x7ffff) -- defsz = 2 * (1 << 20); /* 2MB on <2GB on systems. */ -+ if (ram_end <= 0x1ffff) -+ defsz = 2 << 20; /* 2MB on <512MB systems. */ -+ else if (ram_end <= 0x3ffff) -+ defsz = 4 << 20; /* 4MB on <1GB systems. 
*/ -+ else if (ram_end <= 0x7ffff) -+ defsz = 8 << 20; /* 8MB on <2GB systems. */ - swiotlb = 1; - } - diff --git a/patches.xen/xen-sysdev-suspend b/patches.xen/xen-sysdev-suspend deleted file mode 100644 index 1b81c68..0000000 --- a/patches.xen/xen-sysdev-suspend +++ /dev/null @@ -1,532 +0,0 @@ -From: jbeulich@novell.com -Subject: use base kernel suspend/resume infrastructure -Patch-mainline: n/a - -... rather than calling just a few functions explicitly. - ---- head-2011-03-11.orig/arch/x86/kernel/time-xen.c 2010-09-16 16:49:59.000000000 +0200 -+++ head-2011-03-11/arch/x86/kernel/time-xen.c 2010-11-23 15:07:01.000000000 +0100 -@@ -69,6 +69,10 @@ DEFINE_PER_CPU(struct vcpu_runstate_info - /* Must be signed, as it's compared with s64 quantities which can be -ve. */ - #define NS_PER_TICK (1000000000LL/HZ) - -+static struct vcpu_set_periodic_timer xen_set_periodic_tick = { -+ .period_ns = NS_PER_TICK -+}; -+ - /* - * GCC 4.3 can turn loops over an induction variable into division. We do - * not support arbitrary 64-bit division, and so must break the induction. -@@ -533,6 +537,17 @@ void mark_tsc_unstable(char *reason) - } - EXPORT_SYMBOL_GPL(mark_tsc_unstable); - -+static void init_missing_ticks_accounting(unsigned int cpu) -+{ -+ struct vcpu_runstate_info *runstate = setup_runstate_area(cpu); -+ -+ per_cpu(processed_blocked_time, cpu) = -+ runstate->time[RUNSTATE_blocked]; -+ per_cpu(processed_stolen_time, cpu) = -+ runstate->time[RUNSTATE_runnable] + -+ runstate->time[RUNSTATE_offline]; -+} -+ - static cycle_t cs_last; - - static cycle_t xen_clocksource_read(struct clocksource *cs) -@@ -569,11 +584,32 @@ static cycle_t xen_clocksource_read(stru - #endif - } - -+/* No locking required. Interrupts are disabled on all CPUs. 
*/ - static void xen_clocksource_resume(struct clocksource *cs) - { -- extern void time_resume(void); -+ unsigned int cpu; -+ -+ init_cpu_khz(); -+ -+ for_each_online_cpu(cpu) { -+ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu, -+ &xen_set_periodic_tick)) { -+ case 0: -+#if CONFIG_XEN_COMPAT <= 0x030004 -+ case -ENOSYS: -+#endif -+ break; -+ default: -+ BUG(); -+ } -+ get_time_values_from_xen(cpu); -+ per_cpu(processed_system_time, cpu) = -+ per_cpu(shadow_time, 0).system_timestamp; -+ init_missing_ticks_accounting(cpu); -+ } -+ -+ processed_system_time = per_cpu(shadow_time, 0).system_timestamp; - -- time_resume(); - cs_last = local_clock(); - } - -@@ -605,17 +641,6 @@ struct vcpu_runstate_info *setup_runstat - return rs; - } - --static void init_missing_ticks_accounting(unsigned int cpu) --{ -- struct vcpu_runstate_info *runstate = setup_runstate_area(cpu); -- -- per_cpu(processed_blocked_time, cpu) = -- runstate->time[RUNSTATE_blocked]; -- per_cpu(processed_stolen_time, cpu) = -- runstate->time[RUNSTATE_runnable] + -- runstate->time[RUNSTATE_offline]; --} -- - void xen_read_persistent_clock(struct timespec *ts) - { - const shared_info_t *s = HYPERVISOR_shared_info; -@@ -661,10 +686,6 @@ static void __init setup_cpu0_timer_irq( - BUG_ON(per_cpu(timer_irq, 0) < 0); - } - --static struct vcpu_set_periodic_timer xen_set_periodic_tick = { -- .period_ns = NS_PER_TICK --}; -- - static void __init _late_time_init(void) - { - update_wallclock(); -@@ -807,35 +828,6 @@ void xen_halt(void) - VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL)); - } - --/* No locking required. Interrupts are disabled on all CPUs. 
*/ --void time_resume(void) --{ -- unsigned int cpu; -- -- init_cpu_khz(); -- -- for_each_online_cpu(cpu) { -- switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu, -- &xen_set_periodic_tick)) { -- case 0: --#if CONFIG_XEN_COMPAT <= 0x030004 -- case -ENOSYS: --#endif -- break; -- default: -- BUG(); -- } -- get_time_values_from_xen(cpu); -- per_cpu(processed_system_time, cpu) = -- per_cpu(shadow_time, 0).system_timestamp; -- init_missing_ticks_accounting(cpu); -- } -- -- processed_system_time = per_cpu(shadow_time, 0).system_timestamp; -- -- update_wallclock(); --} -- - #ifdef CONFIG_SMP - static char timer_name[NR_CPUS][15]; - ---- head-2011-03-11.orig/drivers/xen/core/evtchn.c 2011-02-10 16:24:57.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/evtchn.c 2011-02-10 16:18:00.000000000 +0100 -@@ -1097,6 +1097,8 @@ int xen_test_irq_pending(int irq) - } - - #ifdef CONFIG_PM_SLEEP -+#include -+ - static void restore_cpu_virqs(unsigned int cpu) - { - struct evtchn_bind_virq bind_virq; -@@ -1155,9 +1157,20 @@ static void restore_cpu_ipis(unsigned in - } - } - --void irq_resume(void) -+static int evtchn_resume(struct sys_device *dev) - { - unsigned int cpu, irq, evtchn; -+ struct evtchn_status status; -+ -+ /* Avoid doing anything in the 'suspend cancelled' case. 
*/ -+ status.dom = DOMID_SELF; -+ status.port = evtchn_from_irq(percpu_read(virq_to_irq[VIRQ_TIMER])); -+ if (HYPERVISOR_event_channel_op(EVTCHNOP_status, &status)) -+ BUG(); -+ if (status.status == EVTCHNSTAT_virq -+ && status.vcpu == smp_processor_id() -+ && status.u.virq == VIRQ_TIMER) -+ return 0; - - init_evtchn_cpu_bindings(); - -@@ -1198,7 +1211,32 @@ void irq_resume(void) - restore_cpu_ipis(cpu); - } - -+ return 0; -+} -+ -+static struct sysdev_class evtchn_sysclass = { -+ .name = "evtchn", -+ .resume = evtchn_resume, -+}; -+ -+static struct sys_device device_evtchn = { -+ .id = 0, -+ .cls = &evtchn_sysclass, -+}; -+ -+static int __init evtchn_register(void) -+{ -+ int err; -+ -+ if (is_initial_xendomain()) -+ return 0; -+ -+ err = sysdev_class_register(&evtchn_sysclass); -+ if (!err) -+ err = sysdev_register(&device_evtchn); -+ return err; - } -+core_initcall(evtchn_register); - #endif - - int __init arch_early_irq_init(void) ---- head-2011-03-11.orig/drivers/xen/core/gnttab.c 2011-01-14 15:00:13.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/gnttab.c 2011-01-14 15:13:58.000000000 +0100 -@@ -707,23 +707,40 @@ EXPORT_SYMBOL(gnttab_post_map_adjust); - - #endif /* __HAVE_ARCH_PTE_SPECIAL */ - --int gnttab_resume(void) -+struct sys_device; -+static int gnttab_resume(struct sys_device *dev) - { - if (max_nr_grant_frames() < nr_grant_frames) - return -ENOSYS; - return gnttab_map(0, nr_grant_frames - 1); - } -+#define gnttab_resume() gnttab_resume(NULL) - - #ifdef CONFIG_PM_SLEEP --int gnttab_suspend(void) --{ -+#include -+ - #ifdef CONFIG_X86 -+static int gnttab_suspend(struct sys_device *dev, pm_message_t state) -+{ - apply_to_page_range(&init_mm, (unsigned long)shared, - PAGE_SIZE * nr_grant_frames, - unmap_pte_fn, NULL); --#endif - return 0; - } -+#else -+#define gnttab_suspend NULL -+#endif -+ -+static struct sysdev_class gnttab_sysclass = { -+ .name = "gnttab", -+ .resume = gnttab_resume, -+ .suspend = gnttab_suspend, -+}; -+ -+static struct 
sys_device device_gnttab = { -+ .id = 0, -+ .cls = &gnttab_sysclass, -+}; - #endif - - #else /* !CONFIG_XEN */ -@@ -808,6 +825,17 @@ gnttab_init(void) - if (!is_running_on_xen()) - return -ENODEV; - -+#if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP) -+ if (!is_initial_xendomain()) { -+ int err = sysdev_class_register(&gnttab_sysclass); -+ -+ if (!err) -+ err = sysdev_register(&device_gnttab); -+ if (err) -+ return err; -+ } -+#endif -+ - nr_grant_frames = 1; - boot_max_nr_grant_frames = __max_nr_grant_frames(); - ---- head-2011-03-11.orig/drivers/xen/core/machine_reboot.c 2011-02-01 15:03:10.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/machine_reboot.c 2011-01-13 16:21:42.000000000 +0100 -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include "../../base/base.h" - - #if defined(__i386__) || defined(__x86_64__) - #include -@@ -140,50 +141,28 @@ struct suspend { - static int take_machine_down(void *_suspend) - { - struct suspend *suspend = _suspend; -- int suspend_cancelled, err; -- extern void time_resume(void); -+ int suspend_cancelled; - -- if (suspend->fast_suspend) { -- BUG_ON(!irqs_disabled()); -- } else { -- BUG_ON(irqs_disabled()); -- -- for (;;) { -- err = smp_suspend(); -- if (err) -- return err; -- -- xenbus_suspend(); -- preempt_disable(); -- -- if (num_online_cpus() == 1) -- break; -- -- preempt_enable(); -- xenbus_suspend_cancel(); -- } -- -- local_irq_disable(); -- } -+ BUG_ON(!irqs_disabled()); - - mm_pin_all(); -- gnttab_suspend(); -- pre_suspend(); -- -- /* -- * This hypercall returns 1 if suspend was cancelled or the domain was -- * merely checkpointed, and 0 if it is resuming in a new domain. -- */ -- suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info)); -+ suspend_cancelled = sysdev_suspend(PMSG_SUSPEND); -+ if (!suspend_cancelled) { -+ pre_suspend(); - -+ /* -+ * This hypercall returns 1 if suspend was cancelled or the domain was -+ * merely checkpointed, and 0 if it is resuming in a new domain. 
-+ */ -+ suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info)); -+ } else -+ BUG_ON(suspend_cancelled > 0); - suspend->resume_notifier(suspend_cancelled); -- post_suspend(suspend_cancelled); -- gnttab_resume(); -+ if (suspend_cancelled >= 0) { -+ post_suspend(suspend_cancelled); -+ sysdev_resume(); -+ } - if (!suspend_cancelled) { -- extern void spinlock_resume(void); -- -- spinlock_resume(); -- irq_resume(); - #ifdef __x86_64__ - /* - * Older versions of Xen do not save/restore the user %cr3. -@@ -195,10 +174,6 @@ static int take_machine_down(void *_susp - current->active_mm->pgd))); - #endif - } -- time_resume(); -- -- if (!suspend->fast_suspend) -- local_irq_enable(); - - return suspend_cancelled; - } -@@ -206,8 +181,14 @@ static int take_machine_down(void *_susp - int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)) - { - int err, suspend_cancelled; -+ const char *what; - struct suspend suspend; - -+#define _check(fn, args...) ({ \ -+ what = #fn; \ -+ err = (fn)(args); \ -+}) -+ - BUG_ON(smp_processor_id() != 0); - BUG_ON(in_interrupt()); - -@@ -225,30 +206,77 @@ int __xen_suspend(int fast_suspend, void - suspend.fast_suspend = fast_suspend; - suspend.resume_notifier = resume_notifier; - -+ if (_check(dpm_suspend_start, PMSG_SUSPEND)) { -+ dpm_resume_end(PMSG_RESUME); -+ pr_err("%s() failed: %d\n", what, err); -+ return err; -+ } -+ - if (fast_suspend) { - xenbus_suspend(); -+ -+ if (_check(dpm_suspend_noirq, PMSG_SUSPEND)) { -+ xenbus_suspend_cancel(); -+ dpm_resume_end(PMSG_RESUME); -+ pr_err("%s() failed: %d\n", what, err); -+ return err; -+ } -+ - err = stop_machine(take_machine_down, &suspend, - &cpumask_of_cpu(0)); - if (err < 0) - xenbus_suspend_cancel(); - } else { -+ BUG_ON(irqs_disabled()); -+ -+ for (;;) { -+ xenbus_suspend(); -+ -+ if (!_check(dpm_suspend_noirq, PMSG_SUSPEND) -+ && _check(smp_suspend)) -+ dpm_resume_noirq(PMSG_RESUME); -+ if (err) { -+ xenbus_suspend_cancel(); -+ dpm_resume_end(PMSG_RESUME); -+ 
pr_err("%s() failed: %d\n", what, err); -+ return err; -+ } -+ -+ preempt_disable(); -+ -+ if (num_online_cpus() == 1) -+ break; -+ -+ preempt_enable(); -+ -+ dpm_resume_noirq(PMSG_RESUME); -+ -+ xenbus_suspend_cancel(); -+ } -+ -+ local_irq_disable(); - err = take_machine_down(&suspend); -+ local_irq_enable(); - } - -- if (err < 0) -- return err; -+ dpm_resume_noirq(PMSG_RESUME); - -- suspend_cancelled = err; -- if (!suspend_cancelled) { -- xencons_resume(); -- xenbus_resume(); -- } else { -- xenbus_suspend_cancel(); -+ if (err >= 0) { -+ suspend_cancelled = err; -+ if (!suspend_cancelled) { -+ xencons_resume(); -+ xenbus_resume(); -+ } else { -+ xenbus_suspend_cancel(); -+ err = 0; -+ } -+ -+ if (!fast_suspend) -+ smp_resume(); - } - -- if (!fast_suspend) -- smp_resume(); -+ dpm_resume_end(PMSG_RESUME); - -- return 0; -+ return err; - } - #endif ---- head-2011-03-11.orig/drivers/xen/core/spinlock.c 2011-03-15 16:17:10.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/spinlock.c 2011-03-15 16:18:17.000000000 +0100 -@@ -60,7 +60,9 @@ void __cpuinit xen_spinlock_cleanup(unsi - } - - #ifdef CONFIG_PM_SLEEP --void __cpuinit spinlock_resume(void) -+#include -+ -+static int __cpuinit spinlock_resume(struct sys_device *dev) - { - unsigned int cpu; - -@@ -68,7 +70,33 @@ void __cpuinit spinlock_resume(void) - per_cpu(poll_evtchn, cpu) = 0; - xen_spinlock_init(cpu); - } -+ -+ return 0; -+} -+ -+static struct sysdev_class __cpuinitdata spinlock_sysclass = { -+ .name = "spinlock", -+ .resume = spinlock_resume -+}; -+ -+static struct sys_device __cpuinitdata device_spinlock = { -+ .id = 0, -+ .cls = &spinlock_sysclass -+}; -+ -+static int __init spinlock_register(void) -+{ -+ int rc; -+ -+ if (is_initial_xendomain()) -+ return 0; -+ -+ rc = sysdev_class_register(&spinlock_sysclass); -+ if (!rc) -+ rc = sysdev_register(&device_spinlock); -+ return rc; - } -+core_initcall(spinlock_register); - #endif - - static unsigned int spin_adjust(struct spinning *spinning, ---- 
head-2011-03-11.orig/include/xen/evtchn.h 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-03-11/include/xen/evtchn.h 2010-11-23 15:07:01.000000000 +0100 -@@ -109,7 +109,9 @@ int bind_ipi_to_irqhandler( - */ - void unbind_from_irqhandler(unsigned int irq, void *dev_id); - -+#ifndef CONFIG_XEN - void irq_resume(void); -+#endif - - /* Entry point for notifications into Linux subsystems. */ - asmlinkage void evtchn_do_upcall(struct pt_regs *regs); ---- head-2011-03-11.orig/include/xen/gnttab.h 2011-01-31 17:56:27.000000000 +0100 -+++ head-2011-03-11/include/xen/gnttab.h 2010-11-23 15:07:01.000000000 +0100 -@@ -111,8 +111,9 @@ static inline void __gnttab_dma_unmap_pa - - void gnttab_reset_grant_page(struct page *page); - --int gnttab_suspend(void); -+#ifndef CONFIG_XEN - int gnttab_resume(void); -+#endif - - void *arch_gnttab_alloc_shared(unsigned long *frames); - diff --git a/patches.xen/xen-tmem-v1 b/patches.xen/xen-tmem-v1 deleted file mode 100644 index 48fad80..0000000 --- a/patches.xen/xen-tmem-v1 +++ /dev/null @@ -1,348 +0,0 @@ -From: jbeulich@novell.com -Subject: update tmem interface to v1 -Patch-mainline: n/a - ---- head-2010-10-05.orig/include/xen/interface/tmem.h 2010-01-04 11:56:34.000000000 +0100 -+++ head-2010-10-05/include/xen/interface/tmem.h 2010-10-06 12:12:59.000000000 +0200 -@@ -29,6 +29,9 @@ - - #include "xen.h" - -+/* version of ABI */ -+#define TMEM_SPEC_VERSION 1 -+ - /* Commands to HYPERVISOR_tmem_op() */ - #define TMEM_CONTROL 0 - #define TMEM_NEW_POOL 1 -@@ -75,10 +78,12 @@ - /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ - #define TMEM_POOL_PERSIST 1 - #define TMEM_POOL_SHARED 2 -+#define TMEM_POOL_PRECOMPRESSED 4 - #define TMEM_POOL_PAGESIZE_SHIFT 4 - #define TMEM_POOL_PAGESIZE_MASK 0xf - #define TMEM_POOL_VERSION_SHIFT 24 - #define TMEM_POOL_VERSION_MASK 0xff -+#define TMEM_POOL_RESERVED_BITS 0x00ffff00 - - /* Bits for client flags (save/restore) */ - #define TMEM_CLIENT_COMPRESS 1 -@@ -106,12 +111,12 @@ struct tmem_op { - uint32_t 
cli_id; - uint32_t arg1; - uint32_t arg2; -- uint64_t arg3; -+ uint64_t oid[3]; - tmem_cli_va_t buf; - } ctrl; /* for cmd == TMEM_CONTROL */ - struct { - -- uint64_t object; -+ uint64_t oid[3]; - uint32_t index; - uint32_t tmem_offset; - uint32_t pfn_offset; -@@ -126,9 +131,8 @@ DEFINE_XEN_GUEST_HANDLE(tmem_op_t); - struct tmem_handle { - uint32_t pool_id; - uint32_t index; -- uint64_t oid; -+ uint64_t oid[3]; - }; -- - #endif - - #endif /* __XEN_PUBLIC_TMEM_H__ */ ---- head-2010-10-05.orig/mm/precache.c 2010-10-08 16:25:45.000000000 +0200 -+++ head-2010-10-05/mm/precache.c 2010-10-11 10:58:42.000000000 +0200 -@@ -31,16 +31,57 @@ - */ - - #include -+#include - #include - #include "tmem.h" - - static int precache_auto_allocate; /* set to 1 to auto_allocate */ - -+union precache_filekey { -+ struct tmem_oid oid; -+ u32 fh[0]; -+}; -+ -+/* -+ * If the filesystem uses exportable filehandles, use the filehandle as -+ * the key, else use the inode number. -+ */ -+static int precache_get_key(struct inode *inode, union precache_filekey *key) -+{ -+#define PRECACHE_KEY_MAX (sizeof(key->oid) / sizeof(*key->fh)) -+ struct super_block *sb = inode->i_sb; -+ -+ memset(key, 0, sizeof(key)); -+ if (sb->s_export_op) { -+ int (*fhfn)(struct dentry *, __u32 *fh, int *, int); -+ -+ fhfn = sb->s_export_op->encode_fh; -+ if (fhfn) { -+ struct dentry *d; -+ int ret, maxlen = PRECACHE_KEY_MAX; -+ -+ d = list_first_entry(&inode->i_dentry, -+ struct dentry, d_alias); -+ ret = fhfn(d, key->fh, &maxlen, 0); -+ if (ret < 0) -+ return ret; -+ if (ret >= 255 || maxlen > PRECACHE_KEY_MAX) -+ return -EPERM; -+ if (maxlen > 0) -+ return 0; -+ } -+ } -+ key->oid.oid[0] = inode->i_ino; -+ key->oid.oid[1] = inode->i_generation; -+ return 0; -+#undef PRECACHE_KEY_MAX -+} -+ - int precache_put(struct address_space *mapping, unsigned long index, - struct page *page) - { - u32 tmem_pool = mapping->host->i_sb->precache_poolid; -- u64 obj = (unsigned long) mapping->host->i_ino; -+ union precache_filekey 
key; - u32 ind = (u32) index; - unsigned long mfn = pfn_to_mfn(page_to_pfn(page)); - int ret; -@@ -56,53 +97,53 @@ int precache_put(struct address_space *m - mapping->host->i_sb->s_id, tmem_pool); - mapping->host->i_sb->precache_poolid = tmem_pool; - } -- if (ind != index) -+ if (ind != index || precache_get_key(mapping->host, &key)) - return 0; - mb(); /* ensure page is quiescent; tmem may address it with an alias */ -- return tmem_put_page(tmem_pool, obj, ind, mfn); -+ return tmem_put_page(tmem_pool, key.oid, ind, mfn); - } - - int precache_get(struct address_space *mapping, unsigned long index, - struct page *empty_page) - { - u32 tmem_pool = mapping->host->i_sb->precache_poolid; -- u64 obj = (unsigned long) mapping->host->i_ino; -+ union precache_filekey key; - u32 ind = (u32) index; - unsigned long mfn = pfn_to_mfn(page_to_pfn(empty_page)); - - if ((s32)tmem_pool < 0) - return 0; -- if (ind != index) -+ if (ind != index || precache_get_key(mapping->host, &key)) - return 0; - -- return tmem_get_page(tmem_pool, obj, ind, mfn); -+ return tmem_get_page(tmem_pool, key.oid, ind, mfn); - } - EXPORT_SYMBOL(precache_get); - - int precache_flush(struct address_space *mapping, unsigned long index) - { - u32 tmem_pool = mapping->host->i_sb->precache_poolid; -- u64 obj = (unsigned long) mapping->host->i_ino; -+ union precache_filekey key; - u32 ind = (u32) index; - - if ((s32)tmem_pool < 0) - return 0; -- if (ind != index) -+ if (ind != index || precache_get_key(mapping->host, &key)) - return 0; - -- return tmem_flush_page(tmem_pool, obj, ind); -+ return tmem_flush_page(tmem_pool, key.oid, ind); - } - EXPORT_SYMBOL(precache_flush); - - int precache_flush_inode(struct address_space *mapping) - { - u32 tmem_pool = mapping->host->i_sb->precache_poolid; -- u64 obj = (unsigned long) mapping->host->i_ino; -+ union precache_filekey key; - -- if ((s32)tmem_pool < 0) -+ if ((s32)tmem_pool < 0 || precache_get_key(mapping->host, &key)) - return 0; - -- return 
tmem_flush_object(tmem_pool, obj); -+ return tmem_flush_object(tmem_pool, key.oid); - } - EXPORT_SYMBOL(precache_flush_inode); - ---- head-2010-10-05.orig/mm/preswap.c 2010-08-24 11:19:44.000000000 +0200 -+++ head-2010-10-05/mm/preswap.c 2010-10-06 13:08:11.000000000 +0200 -@@ -46,7 +46,8 @@ const unsigned long preswap_zero = 0, pr - */ - #define SWIZ_BITS 4 - #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) --#define oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK)) -+#define oswiz(_type, _ind) ((struct tmem_oid){ \ -+ .oid[0] = (_type << SWIZ_BITS) | (_ind & SWIZ_MASK) }) - #define iswiz(_ind) (_ind >> SWIZ_BITS) - - /* ---- head-2010-10-05.orig/mm/tmem.h 2010-08-24 11:19:44.000000000 +0200 -+++ head-2010-10-05/mm/tmem.h 2010-10-06 14:27:10.000000000 +0200 -@@ -14,71 +14,58 @@ - #define TMEM_POOL_MIN_PAGESHIFT 12 - #define TMEM_POOL_PAGEORDER (PAGE_SHIFT - TMEM_POOL_MIN_PAGESHIFT) - --extern int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, u64 object, u32 index, -+struct tmem_pool_uuid { -+ u64 lo; -+ u64 hi; -+}; -+ -+struct tmem_oid { -+ u64 oid[3]; -+}; -+ -+extern int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid, u32 index, - unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len); --extern int xen_tmem_new_pool(u32 tmem_cmd, u64 uuid_lo, u64 uuid_hi, u32 flags); -+extern int xen_tmem_new_pool(struct tmem_pool_uuid, u32 flags); - --static inline int tmem_put_page(u32 pool_id, u64 object, u32 index, -+static inline int tmem_put_page(u32 pool_id, struct tmem_oid oid, u32 index, - unsigned long gmfn) - { -- return xen_tmem_op(TMEM_PUT_PAGE, pool_id, object, index, -+ return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index, - gmfn, 0, 0, 0); - } - --static inline int tmem_get_page(u32 pool_id, u64 object, u32 index, -+static inline int tmem_get_page(u32 pool_id, struct tmem_oid oid, u32 index, - unsigned long gmfn) - { -- return xen_tmem_op(TMEM_GET_PAGE, pool_id, object, index, -+ return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index, - gmfn, 0, 
0, 0); - } - --static inline int tmem_flush_page(u32 pool_id, u64 object, u32 index) -+static inline int tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index) - { -- return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, object, index, -+ return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index, - 0, 0, 0, 0); - } - --static inline int tmem_flush_object(u32 pool_id, u64 object) -+static inline int tmem_flush_object(u32 pool_id, struct tmem_oid oid) - { -- return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, object, 0, 0, 0, 0, 0); -+ return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0); - } - - static inline int tmem_new_pool(u64 uuid_lo, u64 uuid_hi, u32 flags) - { -+ struct tmem_pool_uuid uuid = { .lo = uuid_lo, .hi = uuid_hi }; -+ - BUILD_BUG_ON((TMEM_POOL_PAGEORDER < 0) || - (TMEM_POOL_PAGEORDER >= TMEM_POOL_PAGESIZE_MASK)); - flags |= TMEM_POOL_PAGEORDER << TMEM_POOL_PAGESIZE_SHIFT; -- return xen_tmem_new_pool(TMEM_NEW_POOL, uuid_lo, uuid_hi, flags); -+ return xen_tmem_new_pool(uuid, flags); - } - - static inline int tmem_destroy_pool(u32 pool_id) - { -- return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, 0, 0, 0, 0, 0, 0); -+ static const struct tmem_oid oid = {}; -+ -+ return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0); - } --#else --struct tmem_op { -- u32 cmd; -- s32 pool_id; /* private > 0; shared < 0; 0 is invalid */ -- union { -- struct { /* for cmd == TMEM_NEW_POOL */ -- u64 uuid[2]; -- u32 flags; -- } new; -- struct { /* for cmd == TMEM_CONTROL */ -- u32 subop; -- u32 cli_id; -- u32 arg1; -- u32 arg2; -- void *buf; -- } ctrl; -- struct { -- u64 object; -- u32 index; -- u32 tmem_offset; -- u32 pfn_offset; -- u32 len; -- unsigned long pfn; /* page frame */ -- } gen; -- } u; --}; - #endif ---- head-2010-10-05.orig/mm/tmem-xen.c 2009-06-23 09:28:21.000000000 +0200 -+++ head-2010-10-05/mm/tmem-xen.c 2010-10-06 14:27:25.000000000 +0200 -@@ -7,8 +7,9 @@ - #include - #include - #include -+#include "tmem.h" - --int xen_tmem_op(u32 tmem_cmd, u32 
tmem_pool, u64 object, u32 index, -+int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid, u32 index, - unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len) - { - struct tmem_op op; -@@ -16,7 +17,8 @@ int xen_tmem_op(u32 tmem_cmd, u32 tmem_p - - op.cmd = tmem_cmd; - op.pool_id = tmem_pool; -- op.u.gen.object = object; -+ BUILD_BUG_ON(sizeof(op.u.gen.oid) != sizeof(oid.oid)); -+ memcpy(op.u.gen.oid, oid.oid, sizeof(op.u.gen.oid)); - op.u.gen.index = index; - op.u.gen.tmem_offset = tmem_offset; - op.u.gen.pfn_offset = pfn_offset; -@@ -26,15 +28,27 @@ int xen_tmem_op(u32 tmem_cmd, u32 tmem_p - return rc; - } - --int xen_tmem_new_pool(uint32_t tmem_cmd, uint64_t uuid_lo, -- uint64_t uuid_hi, uint32_t flags) -+int xen_tmem_new_pool(struct tmem_pool_uuid uuid, uint32_t flags) - { - struct tmem_op op; - int rc = 0; - -- op.cmd = tmem_cmd; -- op.u.new.uuid[0] = uuid_lo; -- op.u.new.uuid[1] = uuid_hi; -+ op.cmd = TMEM_NEW_POOL; -+ op.u.new.uuid[0] = uuid.lo; -+ op.u.new.uuid[1] = uuid.hi; -+#ifdef TMEM_SPEC_VERSION -+ switch (flags >> TMEM_POOL_VERSION_SHIFT) { -+ case 0: -+ flags |= TMEM_SPEC_VERSION << TMEM_POOL_VERSION_SHIFT; -+ break; -+ case TMEM_SPEC_VERSION: -+ break; -+ default: -+ WARN(1, "TMEM: Bogus version %u, expecting %u\n", -+ flags >> TMEM_POOL_VERSION_SHIFT, TMEM_SPEC_VERSION); -+ return -ENOSYS; -+ } -+#endif - op.u.new.flags = flags; - rc = HYPERVISOR_tmem_op(&op); - return rc; diff --git a/patches.xen/xen-unpriv-build b/patches.xen/xen-unpriv-build deleted file mode 100644 index 72e133e..0000000 --- a/patches.xen/xen-unpriv-build +++ /dev/null @@ -1,372 +0,0 @@ -From: jbeulich@novell.com -Subject: no need to build certain bits when building non-privileged kernel -Patch-mainline: n/a - ---- head-2011-02-08.orig/arch/x86/Kconfig 2011-02-02 15:09:52.000000000 +0100 -+++ head-2011-02-08/arch/x86/Kconfig 2011-02-02 15:10:34.000000000 +0100 -@@ -665,6 +665,7 @@ config APB_TIMER - config DMI - default y - bool "Enable DMI scanning" if EXPERT 
-+ depends on !XEN_UNPRIVILEGED_GUEST - ---help--- - Enabled scanning of DMI to identify machine quirks. Say Y - here unless you have verified that your setup is not -@@ -745,6 +746,7 @@ config AMD_IOMMU_STATS - # need this always selected by IOMMU for the VIA workaround - config SWIOTLB - def_bool y if X86_64 || XEN -+ prompt "Software I/O TLB" if XEN_UNPRIVILEGED_GUEST && !XEN_PCIDEV_FRONTEND - ---help--- - Support for software bounce buffers used on x86-64 systems - which don't have a hardware IOMMU (e.g. the current generation -@@ -1968,13 +1970,15 @@ config PCI_GOBIOS - - config PCI_GOMMCONFIG - bool "MMConfig" -+ depends on !XEN_UNPRIVILEGED_GUEST - - config PCI_GODIRECT - bool "Direct" -+ depends on !XEN_UNPRIVILEGED_GUEST - - config PCI_GOOLPC - bool "OLPC XO-1" -- depends on OLPC -+ depends on OLPC && !XEN_UNPRIVILEGED_GUEST - - config PCI_GOXEN_FE - bool "Xen PCI Frontend" -@@ -1985,6 +1989,7 @@ config PCI_GOXEN_FE - - config PCI_GOANY - bool "Any" -+ depends on !XEN_UNPRIVILEGED_GUEST - - endchoice - -@@ -2185,7 +2190,7 @@ endif # X86_32 - - config AMD_NB - def_bool y -- depends on CPU_SUP_AMD && PCI -+ depends on CPU_SUP_AMD && PCI && !XEN_UNPRIVILEGED_GUEST - - source "drivers/pcmcia/Kconfig" - -@@ -2240,7 +2245,9 @@ source "net/Kconfig" - - source "drivers/Kconfig" - -+if !XEN_UNPRIVILEGED_GUEST - source "drivers/firmware/Kconfig" -+endif - - source "fs/Kconfig" - ---- head-2011-02-08.orig/arch/x86/include/mach-xen/asm/swiotlb.h 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-02-08/arch/x86/include/mach-xen/asm/swiotlb.h 2011-02-02 15:10:34.000000000 +0100 -@@ -1,4 +1,8 @@ - #include_next - -+#ifndef CONFIG_SWIOTLB -+#define swiotlb_init(verbose) ((void)(verbose)) -+#endif -+ - dma_addr_t swiotlb_map_single_phys(struct device *, phys_addr_t, size_t size, - int dir); ---- head-2011-02-08.orig/drivers/firmware/Kconfig 2010-11-23 16:20:20.000000000 +0100 -+++ head-2011-02-08/drivers/firmware/Kconfig 2011-02-02 15:10:34.000000000 +0100 -@@ -116,7 
+116,7 @@ config DMIID - - config ISCSI_IBFT_FIND - bool "iSCSI Boot Firmware Table Attributes" -- depends on X86 && !XEN_UNPRIVILEGED_GUEST -+ depends on X86 - default n - help - This option enables the kernel to find the region of memory ---- head-2011-02-08.orig/drivers/pci/Kconfig 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-02-08/drivers/pci/Kconfig 2011-02-02 15:10:34.000000000 +0100 -@@ -74,7 +74,7 @@ config PARAVIRT_XEN_PCIDEV_FRONTEND - - config XEN_PCIDEV_FRONTEND - def_bool y -- prompt "Xen PCI Frontend" if X86_64 -+ prompt "Xen PCI Frontend" if X86_64 && !XEN_UNPRIVILEGED_GUEST - depends on PCI && XEN && (PCI_GOXEN_FE || PCI_GOANY || X86_64) - select HOTPLUG - help ---- head-2011-02-08.orig/drivers/xen/Kconfig 2010-11-26 13:38:08.000000000 +0100 -+++ head-2011-02-08/drivers/xen/Kconfig 2011-02-09 16:23:14.000000000 +0100 -@@ -19,7 +19,8 @@ config XEN_PRIVILEGED_GUEST - Support for privileged operation (domain 0) - - config XEN_UNPRIVILEGED_GUEST -- def_bool !XEN_PRIVILEGED_GUEST -+ def_bool y -+ depends on !XEN_PRIVILEGED_GUEST - select PM - select SUSPEND - -@@ -271,6 +272,7 @@ config XEN_USB_FRONTEND_HCD_PM - - config XEN_GRANT_DEV - tristate "User-space granted page access driver" -+ depends on XEN_BACKEND != n - default XEN_PRIVILEGED_GUEST - help - Device for accessing (in user-space) pages that have been granted ---- head-2011-02-08.orig/drivers/xen/balloon/balloon.c 2010-11-25 13:47:01.000000000 +0100 -+++ head-2011-02-08/drivers/xen/balloon/balloon.c 2011-02-02 15:10:34.000000000 +0100 -@@ -660,6 +660,9 @@ void balloon_update_driver_allowance(lon - bs.driver_pages += delta; - balloon_unlock(flags); - } -+EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); -+ -+#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) - - #ifdef CONFIG_XEN - static int dealloc_pte_fn( -@@ -768,6 +771,7 @@ struct page **alloc_empty_pages_and_page - pagevec = NULL; - goto out; - } -+EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); - - void 
free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) - { -@@ -788,6 +792,9 @@ void free_empty_pages_and_pagevec(struct - - schedule_work(&balloon_worker); - } -+EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); -+ -+#endif /* CONFIG_XEN_BACKEND */ - - void balloon_release_driver_page(struct page *page) - { -@@ -801,10 +808,6 @@ void balloon_release_driver_page(struct - - schedule_work(&balloon_worker); - } -- --EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); --EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); --EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); - EXPORT_SYMBOL_GPL(balloon_release_driver_page); - - MODULE_LICENSE("Dual BSD/GPL"); ---- head-2011-02-08.orig/drivers/xen/console/console.c 2011-02-01 15:04:27.000000000 +0100 -+++ head-2011-02-08/drivers/xen/console/console.c 2011-02-02 15:10:34.000000000 +0100 -@@ -47,7 +47,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -244,6 +243,7 @@ static int __init xen_console_init(void) - } - console_initcall(xen_console_init); - -+#ifdef CONFIG_XEN_PRIVILEGED_GUEST - /*** Useful function for console debugging -- goes straight to Xen. ***/ - asmlinkage int xprintk(const char *fmt, ...) - { -@@ -261,6 +261,7 @@ asmlinkage int xprintk(const char *fmt, - - return 0; - } -+#endif - - /*** Forcibly flush console data before dying. 
***/ - void xencons_force_flush(void) -@@ -285,6 +286,9 @@ void xencons_force_flush(void) - } - - -+#ifdef CONFIG_XEN_PRIVILEGED_GUEST -+#include -+ - void __init dom0_init_screen_info(const struct dom0_vga_console_info *info, size_t size) - { - /* This is drawn from a dump from vgacon:startup in -@@ -340,6 +344,7 @@ void __init dom0_init_screen_info(const - break; - } - } -+#endif - - - /******************** User-space console driver (/dev/console) ************/ ---- head-2011-02-08.orig/drivers/xen/core/Makefile 2011-02-02 15:09:57.000000000 +0100 -+++ head-2011-02-08/drivers/xen/core/Makefile 2011-02-02 15:10:34.000000000 +0100 -@@ -2,10 +2,11 @@ - # Makefile for the linux kernel. - # - --obj-y := evtchn.o gnttab.o reboot.o machine_reboot.o firmware.o -+obj-y := evtchn.o gnttab.o reboot.o machine_reboot.o - --obj-$(CONFIG_PCI) += pci.o --obj-$(CONFIG_ACPI_HOTPLUG_CPU) += pcpu.o -+priv-$(CONFIG_PCI) += pci.o -+priv-$(CONFIG_ACPI_HOTPLUG_CPU) += pcpu.o -+obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += firmware.o $(priv-y) - obj-$(CONFIG_PROC_FS) += xen_proc.o - obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o - obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o ---- head-2011-02-08.orig/drivers/xen/core/evtchn.c 2011-02-16 08:29:50.000000000 +0100 -+++ head-2011-02-08/drivers/xen/core/evtchn.c 2011-02-16 08:30:09.000000000 +0100 -@@ -1854,6 +1854,7 @@ void evtchn_register_pirq(int irq) - "fasteoi"); - } - -+#ifdef CONFIG_PCI_MSI - int evtchn_map_pirq(int irq, int xen_pirq) - { - if (irq < 0) { -@@ -1928,6 +1929,7 @@ int evtchn_map_pirq(int irq, int xen_pir - } - return index_from_irq(irq) ? 
irq : -EINVAL; - } -+#endif - - int evtchn_get_xen_pirq(int irq) - { ---- head-2011-02-08.orig/drivers/xen/core/gnttab.c 2011-02-02 15:10:16.000000000 +0100 -+++ head-2011-02-08/drivers/xen/core/gnttab.c 2011-02-02 15:10:34.000000000 +0100 -@@ -436,8 +436,6 @@ static inline unsigned int max_nr_grant_ - - #ifdef CONFIG_XEN - --static DEFINE_SEQLOCK(gnttab_dma_lock); -- - #ifdef CONFIG_X86 - static int map_pte_fn(pte_t *pte, struct page *pmd_page, - unsigned long addr, void *data) -@@ -507,6 +505,10 @@ static int gnttab_map(unsigned int start - return 0; - } - -+#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) -+ -+static DEFINE_SEQLOCK(gnttab_dma_lock); -+ - static void gnttab_page_free(struct page *page, unsigned int order) - { - BUG_ON(order); -@@ -638,6 +640,8 @@ void __gnttab_dma_map_page(struct page * - } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); - } - -+#endif /* CONFIG_XEN_BACKEND */ -+ - #ifdef __HAVE_ARCH_PTE_SPECIAL - - static unsigned int GNTMAP_pte_special; ---- head-2011-02-08.orig/drivers/xen/privcmd/Makefile 2007-07-10 09:42:30.000000000 +0200 -+++ head-2011-02-08/drivers/xen/privcmd/Makefile 2011-02-02 15:10:34.000000000 +0100 -@@ -1,3 +1,3 @@ -- --obj-y += privcmd.o --obj-$(CONFIG_COMPAT) += compat_privcmd.o -+priv-$(CONFIG_COMPAT) := compat_privcmd.o -+obj-y := privcmd.o -+obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += $(priv-y) ---- head-2011-02-08.orig/drivers/xen/privcmd/privcmd.c 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-02-08/drivers/xen/privcmd/privcmd.c 2011-02-02 15:10:34.000000000 +0100 -@@ -32,6 +32,9 @@ - static struct proc_dir_entry *privcmd_intf; - static struct proc_dir_entry *capabilities_intf; - -+#ifndef CONFIG_XEN_PRIVILEGED_GUEST -+#define HAVE_ARCH_PRIVCMD_MMAP -+#endif - #ifndef HAVE_ARCH_PRIVCMD_MMAP - static int enforce_singleshot_mapping_fn(pte_t *pte, struct page *pmd_page, - unsigned long addr, void *data) -@@ -56,12 +59,14 @@ static long privcmd_ioctl(struct file *f - { - long ret; - 
void __user *udata = (void __user *) data; -+#ifdef CONFIG_XEN_PRIVILEGED_GUEST - unsigned long i, addr, nr, nr_pages; - int paged_out; - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - LIST_HEAD(pagelist); - struct list_head *l, *l2; -+#endif - - switch (cmd) { - case IOCTL_PRIVCMD_HYPERCALL: { -@@ -86,6 +91,8 @@ static long privcmd_ioctl(struct file *f - } - break; - -+#ifdef CONFIG_XEN_PRIVILEGED_GUEST -+ - case IOCTL_PRIVCMD_MMAP: { - #define MMAP_NR_PER_PAGE \ - (unsigned long)((PAGE_SIZE - sizeof(*l)) / sizeof(*msg)) -@@ -391,6 +398,8 @@ static long privcmd_ioctl(struct file *f - } - break; - -+#endif /* CONFIG_XEN_PRIVILEGED_GUEST */ -+ - default: - ret = -EINVAL; - break; -@@ -429,7 +438,9 @@ static const struct file_operations priv - .open = nonseekable_open, - .llseek = no_llseek, - .unlocked_ioctl = privcmd_ioctl, -+#ifdef CONFIG_XEN_PRIVILEGED_GUEST - .mmap = privcmd_mmap, -+#endif - }; - - static int capabilities_read(char *page, char **start, off_t off, ---- head-2011-02-08.orig/fs/compat_ioctl.c 2011-02-01 14:55:46.000000000 +0100 -+++ head-2011-02-08/fs/compat_ioctl.c 2011-02-02 15:10:34.000000000 +0100 -@@ -1481,7 +1481,7 @@ static long do_ioctl_trans(int fd, unsig - return do_video_stillpicture(fd, cmd, argp); - case VIDEO_SET_SPU_PALETTE: - return do_video_set_spu_palette(fd, cmd, argp); --#ifdef CONFIG_XEN -+#ifdef CONFIG_XEN_PRIVILEGED_GUEST - case IOCTL_PRIVCMD_MMAP_32: - case IOCTL_PRIVCMD_MMAPBATCH_32: - case IOCTL_PRIVCMD_MMAPBATCH_V2_32: ---- head-2011-02-08.orig/include/xen/firmware.h 2007-07-02 08:16:19.000000000 +0200 -+++ head-2011-02-08/include/xen/firmware.h 2011-02-02 15:10:34.000000000 +0100 -@@ -5,6 +5,10 @@ - void copy_edd(void); - #endif - -+#ifdef CONFIG_XEN_PRIVILEGED_GUEST - void copy_edid(void); -+#else -+static inline void copy_edid(void) {} -+#endif - - #endif /* __XEN_FIRMWARE_H__ */ ---- head-2011-02-08.orig/include/xen/gnttab.h 2010-11-23 15:07:01.000000000 +0100 -+++ 
head-2011-02-08/include/xen/gnttab.h 2011-02-02 15:10:34.000000000 +0100 -@@ -104,7 +104,11 @@ void gnttab_grant_foreign_transfer_ref(g - unsigned long pfn); - - int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep); -+#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) - void __gnttab_dma_map_page(struct page *page); -+#else -+#define __gnttab_dma_map_page __gnttab_dma_unmap_page -+#endif - static inline void __gnttab_dma_unmap_page(struct page *page) - { - } diff --git a/patches.xen/xen-virq-per-cpu-irq b/patches.xen/xen-virq-per-cpu-irq deleted file mode 100644 index 0175588..0000000 --- a/patches.xen/xen-virq-per-cpu-irq +++ /dev/null @@ -1,652 +0,0 @@ -From: jbeulich@novell.com -Subject: fold per-CPU VIRQs onto a single IRQ each -Patch-mainline: n/a - ---- head-2011-02-17.orig/arch/x86/kernel/time-xen.c 2010-11-23 15:07:01.000000000 +0100 -+++ head-2011-02-17/arch/x86/kernel/time-xen.c 2010-10-05 16:57:34.000000000 +0200 -@@ -671,19 +671,17 @@ int xen_update_persistent_clock(void) - } - - /* Dynamically-mapped IRQ. 
*/ --DEFINE_PER_CPU(int, timer_irq); -+static int __read_mostly timer_irq = -1; -+static struct irqaction timer_action = { -+ .handler = timer_interrupt, -+ .flags = IRQF_DISABLED|IRQF_TIMER, -+ .name = "timer" -+}; - - static void __init setup_cpu0_timer_irq(void) - { -- per_cpu(timer_irq, 0) = -- bind_virq_to_irqhandler( -- VIRQ_TIMER, -- 0, -- timer_interrupt, -- IRQF_DISABLED|IRQF_TIMER|IRQF_NOBALANCING, -- "timer0", -- NULL); -- BUG_ON(per_cpu(timer_irq, 0) < 0); -+ timer_irq = bind_virq_to_irqaction(VIRQ_TIMER, 0, &timer_action); -+ BUG_ON(timer_irq < 0); - } - - static void __init _late_time_init(void) -@@ -829,8 +827,6 @@ void xen_halt(void) - } - - #ifdef CONFIG_SMP --static char timer_name[NR_CPUS][15]; -- - int __cpuinit local_setup_timer(unsigned int cpu) - { - int seq, irq; -@@ -856,16 +852,10 @@ int __cpuinit local_setup_timer(unsigned - init_missing_ticks_accounting(cpu); - } while (read_seqretry(&xtime_lock, seq)); - -- sprintf(timer_name[cpu], "timer%u", cpu); -- irq = bind_virq_to_irqhandler(VIRQ_TIMER, -- cpu, -- timer_interrupt, -- IRQF_DISABLED|IRQF_TIMER|IRQF_NOBALANCING, -- timer_name[cpu], -- NULL); -+ irq = bind_virq_to_irqaction(VIRQ_TIMER, cpu, &timer_action); - if (irq < 0) - return irq; -- per_cpu(timer_irq, cpu) = irq; -+ BUG_ON(timer_irq != irq); - - return 0; - } -@@ -873,7 +863,7 @@ int __cpuinit local_setup_timer(unsigned - void __cpuinit local_teardown_timer(unsigned int cpu) - { - BUG_ON(cpu == 0); -- unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL); -+ unbind_from_per_cpu_irq(timer_irq, cpu, &timer_action); - } - #endif - ---- head-2011-02-17.orig/drivers/xen/core/evtchn.c 2011-02-15 17:52:39.000000000 +0100 -+++ head-2011-02-17/drivers/xen/core/evtchn.c 2011-02-16 08:29:06.000000000 +0100 -@@ -59,6 +59,23 @@ static DEFINE_SPINLOCK(irq_mapping_updat - static int evtchn_to_irq[NR_EVENT_CHANNELS] = { - [0 ... 
NR_EVENT_CHANNELS-1] = -1 }; - -+#if defined(CONFIG_SMP) && defined(CONFIG_X86) -+static struct percpu_irqaction { -+ struct irqaction action; /* must be first */ -+ struct percpu_irqaction *next; -+ cpumask_var_t cpus; -+} *virq_actions[NR_VIRQS]; -+/* IRQ <-> VIRQ mapping. */ -+static DECLARE_BITMAP(virq_per_cpu, NR_VIRQS) __read_mostly; -+static DEFINE_PER_CPU_READ_MOSTLY(int[NR_VIRQS], virq_to_evtchn); -+#define BUG_IF_VIRQ_PER_CPU(irq_cfg) \ -+ BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_VIRQ \ -+ && test_bit(index_from_irq_cfg(irq_cfg), virq_per_cpu)) -+#else -+#define BUG_IF_VIRQ_PER_CPU(irq_cfg) ((void)0) -+#define PER_CPU_VIRQ_IRQ -+#endif -+ - /* IRQ <-> IPI mapping. */ - #if defined(CONFIG_SMP) && defined(CONFIG_X86) - static int __read_mostly ipi_irq = -1; -@@ -160,21 +177,34 @@ static inline unsigned int type_from_irq - return cfg ? type_from_irq_cfg(cfg) : IRQT_UNBOUND; - } - --#ifndef PER_CPU_IPI_IRQ - static inline unsigned int evtchn_from_per_cpu_irq(const struct irq_cfg *cfg, - unsigned int cpu) - { -- BUG_ON(type_from_irq_cfg(cfg) != IRQT_IPI); -- return per_cpu(ipi_evtchn, cpu); --} -+ switch (type_from_irq_cfg(cfg)) { -+#ifndef PER_CPU_VIRQ_IRQ -+ case IRQT_VIRQ: -+ return per_cpu(virq_to_evtchn, cpu)[index_from_irq_cfg(cfg)]; -+#endif -+#ifndef PER_CPU_IPI_IRQ -+ case IRQT_IPI: -+ return per_cpu(ipi_evtchn, cpu); - #endif -+ } -+ BUG(); -+ return 0; -+} - - static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg) - { -+ switch (type_from_irq_cfg(cfg)) { -+#ifndef PER_CPU_VIRQ_IRQ -+ case IRQT_VIRQ: -+#endif - #ifndef PER_CPU_IPI_IRQ -- if (type_from_irq_cfg(cfg) == IRQT_IPI) -- return evtchn_from_per_cpu_irq(cfg, smp_processor_id()); -+ case IRQT_IPI: - #endif -+ return evtchn_from_per_cpu_irq(cfg, smp_processor_id()); -+ } - return cfg->info & ((1U << _EVTCHN_BITS) - 1); - } - -@@ -357,13 +387,22 @@ asmlinkage void __irq_entry evtchn_do_up - * hardirq handlers see an up-to-date system time even if we - * have just woken from a 
long idle period. - */ -+#ifdef PER_CPU_VIRQ_IRQ - if ((irq = percpu_read(virq_to_irq[VIRQ_TIMER])) != -1) { - port = evtchn_from_irq(irq); -+#else -+ port = percpu_read(virq_to_evtchn[VIRQ_TIMER]); -+ if (VALID_EVTCHN(port)) { -+#endif - l1i = port / BITS_PER_LONG; - l2i = port % BITS_PER_LONG; - if (active_evtchns(l1i) & (1ul<info = mk_irq_info(IRQT_VIRQ, virq, evtchn); - - per_cpu(virq_to_irq, cpu)[virq] = irq; -@@ -646,7 +693,9 @@ static void unbind_from_irq(unsigned int - struct irq_cfg *cfg = irq_cfg(irq); - int evtchn = evtchn_from_irq_cfg(cfg); - -+ BUG_IF_VIRQ_PER_CPU(cfg); - BUG_IF_IPI(cfg); -+ - spin_lock(&irq_mapping_update_lock); - - if (!--cfg->bindcount && VALID_EVTCHN(evtchn)) { -@@ -658,6 +707,11 @@ static void unbind_from_irq(unsigned int - case IRQT_VIRQ: - per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) - [index_from_irq_cfg(cfg)] = -1; -+#ifndef PER_CPU_VIRQ_IRQ -+ for_each_possible_cpu(cpu) -+ per_cpu(virq_to_evtchn, cpu) -+ [index_from_irq_cfg(cfg)] = 0; -+#endif - break; - #if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ) - case IRQT_IPI: -@@ -691,13 +745,34 @@ static void unbind_from_irq(unsigned int - spin_unlock(&irq_mapping_update_lock); - } - --#ifndef PER_CPU_IPI_IRQ --void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu) -+#if !defined(PER_CPU_IPI_IRQ) || !defined(PER_CPU_VIRQ_IRQ) -+static inline struct percpu_irqaction *alloc_percpu_irqaction(gfp_t gfp) -+{ -+ struct percpu_irqaction *new = kzalloc(sizeof(*new), GFP_ATOMIC); -+ -+ if (new && !zalloc_cpumask_var(&new->cpus, gfp)) { -+ kfree(new); -+ new = NULL; -+ } -+ return new; -+} -+ -+static inline void free_percpu_irqaction(struct percpu_irqaction *action) -+{ -+ if (!action) -+ return; -+ free_cpumask_var(action->cpus); -+ kfree(action); -+} -+ -+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu, -+ struct irqaction *action) - { - struct evtchn_close close; - struct irq_data *data = irq_get_irq_data(irq); - struct irq_cfg *cfg = irq_data_cfg(data); - 
int evtchn = evtchn_from_per_cpu_irq(cfg, cpu); -+ struct percpu_irqaction *free_action = NULL; - - spin_lock(&irq_mapping_update_lock); - -@@ -706,6 +781,34 @@ void unbind_from_per_cpu_irq(unsigned in - - BUG_ON(cfg->bindcount <= 1); - cfg->bindcount--; -+ -+#ifndef PER_CPU_VIRQ_IRQ -+ if (type_from_irq_cfg(cfg) == IRQT_VIRQ) { -+ unsigned int virq = index_from_irq_cfg(cfg); -+ struct percpu_irqaction *cur, *prev = NULL; -+ -+ cur = virq_actions[virq]; -+ while (cur) { -+ if (cur->action.dev_id == action) { -+ cpumask_clear_cpu(cpu, cur->cpus); -+ if (cpumask_empty(cur->cpus)) { -+ WARN_ON(free_action); -+ if (prev) -+ prev->next = cur->next; -+ else -+ virq_actions[virq] -+ = cur->next; -+ free_action = cur; -+ } -+ } else if (cpumask_test_cpu(cpu, cur->cpus)) -+ evtchn = 0; -+ cur = (prev = cur)->next; -+ } -+ if (!VALID_EVTCHN(evtchn)) -+ goto done; -+ } -+#endif -+ - cpumask_clear_cpu(cpu, data->affinity); - - close.port = evtchn; -@@ -713,9 +816,17 @@ void unbind_from_per_cpu_irq(unsigned in - BUG(); - - switch (type_from_irq_cfg(cfg)) { -+#ifndef PER_CPU_VIRQ_IRQ -+ case IRQT_VIRQ: -+ per_cpu(virq_to_evtchn, cpu) -+ [index_from_irq_cfg(cfg)] = 0; -+ break; -+#endif -+#ifndef PER_CPU_IPI_IRQ - case IRQT_IPI: - per_cpu(ipi_evtchn, cpu) = 0; - break; -+#endif - default: - BUG(); - break; -@@ -727,9 +838,18 @@ void unbind_from_per_cpu_irq(unsigned in - evtchn_to_irq[evtchn] = -1; - } - -+#ifndef PER_CPU_VIRQ_IRQ -+done: -+#endif - spin_unlock(&irq_mapping_update_lock); -+ -+ if (free_action) { -+ free_irq(irq, free_action->action.dev_id); -+ free_percpu_irqaction(free_action); -+ } - } --#endif /* !PER_CPU_IPI_IRQ */ -+EXPORT_SYMBOL_GPL(unbind_from_per_cpu_irq); -+#endif /* !PER_CPU_IPI_IRQ || !PER_CPU_VIRQ_IRQ */ - - int bind_caller_port_to_irqhandler( - unsigned int caller_port, -@@ -811,6 +931,10 @@ int bind_virq_to_irqhandler( - { - int irq, retval; - -+#ifndef PER_CPU_VIRQ_IRQ -+ BUG_ON(test_bit(virq, virq_per_cpu)); -+#endif -+ - irq = 
bind_virq_to_irq(virq, cpu); - if (irq < 0) - return irq; -@@ -826,6 +950,109 @@ int bind_virq_to_irqhandler( - EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); - - #ifdef CONFIG_SMP -+#ifndef PER_CPU_VIRQ_IRQ -+int bind_virq_to_irqaction( -+ unsigned int virq, -+ unsigned int cpu, -+ struct irqaction *action) -+{ -+ struct evtchn_bind_virq bind_virq; -+ struct irq_cfg *cfg; -+ int evtchn, irq, retval = 0; -+ struct percpu_irqaction *cur = NULL, *new; -+ -+ BUG_ON(!test_bit(virq, virq_per_cpu)); -+ -+ if (action->dev_id) -+ return -EINVAL; -+ -+ new = alloc_percpu_irqaction(GFP_ATOMIC); -+ if (new) { -+ new->action = *action; -+ new->action.dev_id = action; -+ } -+ -+ spin_lock(&irq_mapping_update_lock); -+ -+ for (cur = virq_actions[virq]; cur; cur = cur->next) -+ if (cur->action.dev_id == action) -+ break; -+ if (!cur) { -+ if (!new) { -+ spin_unlock(&irq_mapping_update_lock); -+ return -ENOMEM; -+ } -+ new->next = virq_actions[virq]; -+ virq_actions[virq] = cur = new; -+ new = NULL; -+ retval = 1; -+ } -+ cpumask_set_cpu(cpu, cur->cpus); -+ action = &cur->action; -+ -+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) { -+ unsigned int nr; -+ -+ BUG_ON(!retval); -+ -+ if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg, -+ &dynirq_chip, true)) < 0) { -+ virq_actions[virq] = cur->next; -+ spin_unlock(&irq_mapping_update_lock); -+ free_percpu_irqaction(new); -+ return irq; -+ } -+ -+ /* Extra reference so count will never drop to zero. 
*/ -+ cfg->bindcount++; -+ -+ for_each_possible_cpu(nr) -+ per_cpu(virq_to_irq, nr)[virq] = irq; -+ cfg->info = mk_irq_info(IRQT_VIRQ, virq, 0); -+ } else -+ cfg = irq_cfg(irq); -+ -+ evtchn = per_cpu(virq_to_evtchn, cpu)[virq]; -+ if (!VALID_EVTCHN(evtchn)) { -+ bind_virq.virq = virq; -+ bind_virq.vcpu = cpu; -+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, -+ &bind_virq) != 0) -+ BUG(); -+ evtchn = bind_virq.port; -+ evtchn_to_irq[evtchn] = irq; -+ per_cpu(virq_to_evtchn, cpu)[virq] = evtchn; -+ -+ bind_evtchn_to_cpu(evtchn, cpu); -+ } -+ -+ cfg->bindcount++; -+ -+ spin_unlock(&irq_mapping_update_lock); -+ -+ free_percpu_irqaction(new); -+ -+ if (retval == 0) { -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ unmask_evtchn(evtchn); -+ local_irq_restore(flags); -+ } else { -+ action->flags |= IRQF_PERCPU; -+ retval = setup_irq(irq, action); -+ if (retval) { -+ unbind_from_per_cpu_irq(irq, cpu, action); -+ BUG_ON(retval > 0); -+ irq = retval; -+ } -+ } -+ -+ return irq; -+} -+EXPORT_SYMBOL_GPL(bind_virq_to_irqaction); -+#endif -+ - #ifdef PER_CPU_IPI_IRQ - int bind_ipi_to_irqhandler( - unsigned int ipi, -@@ -905,7 +1132,7 @@ int __cpuinit bind_ipi_to_irqaction( - action->flags |= IRQF_PERCPU | IRQF_NO_SUSPEND; - retval = setup_irq(ipi_irq, action); - if (retval) { -- unbind_from_per_cpu_irq(ipi_irq, cpu); -+ unbind_from_per_cpu_irq(ipi_irq, cpu, NULL); - BUG_ON(retval > 0); - ipi_irq = retval; - } -@@ -941,7 +1168,9 @@ static void rebind_irq_to_cpu(struct irq - const struct irq_cfg *cfg = irq_data_cfg(data); - int evtchn = evtchn_from_irq_cfg(cfg); - -+ BUG_IF_VIRQ_PER_CPU(cfg); - BUG_IF_IPI(cfg); -+ - if (VALID_EVTCHN(evtchn)) - rebind_evtchn_to_cpu(evtchn, tcpu); - } -@@ -1233,7 +1462,9 @@ void notify_remote_via_irq(int irq) - - if (WARN_ON_ONCE(!cfg)) - return; -+ BUG_ON(type_from_irq_cfg(cfg) == IRQT_VIRQ); - BUG_IF_IPI(cfg); -+ - evtchn = evtchn_from_irq_cfg(cfg); - if (VALID_EVTCHN(evtchn)) - notify_remote_via_evtchn(evtchn); -@@ -1246,6 +1477,7 
@@ int irq_to_evtchn_port(int irq) - - if (!cfg) - return 0; -+ BUG_IF_VIRQ_PER_CPU(cfg); - BUG_IF_IPI(cfg); - return evtchn_from_irq_cfg(cfg); - } -@@ -1313,6 +1545,12 @@ static void restore_cpu_virqs(unsigned i - if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) - continue; - -+#ifndef PER_CPU_VIRQ_IRQ -+ if (test_bit(virq, virq_per_cpu) -+ && !VALID_EVTCHN(per_cpu(virq_to_evtchn, cpu)[virq])) -+ continue; -+#endif -+ - BUG_ON(irq_cfg(irq)->info != mk_irq_info(IRQT_VIRQ, virq, 0)); - - /* Get a new binding from Xen. */ -@@ -1325,7 +1563,20 @@ static void restore_cpu_virqs(unsigned i - - /* Record the new mapping. */ - evtchn_to_irq[evtchn] = irq; -+#ifdef PER_CPU_VIRQ_IRQ - irq_cfg(irq)->info = mk_irq_info(IRQT_VIRQ, virq, evtchn); -+#else -+ if (test_bit(virq, virq_per_cpu)) -+ per_cpu(virq_to_evtchn, cpu)[virq] = evtchn; -+ else { -+ unsigned int cpu; -+ -+ irq_cfg(irq)->info = mk_irq_info(IRQT_VIRQ, virq, -+ evtchn); -+ for_each_possible_cpu(cpu) -+ per_cpu(virq_to_evtchn, cpu)[virq] = evtchn; -+ } -+#endif - bind_evtchn_to_cpu(evtchn, cpu); - - /* Ready for use. */ -@@ -1389,7 +1640,11 @@ static int evtchn_resume(struct sys_devi - - /* Avoid doing anything in the 'suspend cancelled' case. 
*/ - status.dom = DOMID_SELF; -+#ifdef PER_CPU_VIRQ_IRQ - status.port = evtchn_from_irq(percpu_read(virq_to_irq[VIRQ_TIMER])); -+#else -+ status.port = percpu_read(virq_to_evtchn[VIRQ_TIMER]); -+#endif - if (HYPERVISOR_event_channel_op(EVTCHNOP_status, &status)) - BUG(); - if (status.status == EVTCHNSTAT_virq -@@ -1666,6 +1921,15 @@ void __init xen_init_IRQ(void) - unsigned int i; - struct physdev_pirq_eoi_gmfn eoi_gmfn; - -+#ifndef PER_CPU_VIRQ_IRQ -+ __set_bit(VIRQ_TIMER, virq_per_cpu); -+ __set_bit(VIRQ_DEBUG, virq_per_cpu); -+ __set_bit(VIRQ_XENOPROF, virq_per_cpu); -+#ifdef CONFIG_IA64 -+ __set_bit(VIRQ_ITC, virq_per_cpu); -+#endif -+#endif -+ - init_evtchn_cpu_bindings(); - - #ifdef CONFIG_SPARSE_IRQ ---- head-2011-02-17.orig/drivers/xen/core/smpboot.c 2011-03-03 16:14:20.000000000 +0100 -+++ head-2011-02-17/drivers/xen/core/smpboot.c 2011-03-03 16:14:51.000000000 +0100 -@@ -125,7 +125,7 @@ static int __cpuinit xen_smp_intr_init(u - fail: - xen_spinlock_cleanup(cpu); - unbind_ipi: -- unbind_from_per_cpu_irq(ipi_irq, cpu); -+ unbind_from_per_cpu_irq(ipi_irq, cpu, NULL); - return rc; - } - -@@ -135,7 +135,7 @@ static void __cpuinit xen_smp_intr_exit( - if (cpu != 0) - local_teardown_timer(cpu); - -- unbind_from_per_cpu_irq(ipi_irq, cpu); -+ unbind_from_per_cpu_irq(ipi_irq, cpu, NULL); - xen_spinlock_cleanup(cpu); - } - #endif ---- head-2011-02-17.orig/drivers/xen/netback/netback.c 2011-03-01 11:52:43.000000000 +0100 -+++ head-2011-02-17/drivers/xen/netback/netback.c 2011-03-01 11:53:15.000000000 +0100 -@@ -1630,6 +1630,12 @@ static irqreturn_t netif_be_dbg(int irq, - - return IRQ_HANDLED; - } -+ -+static struct irqaction netif_be_dbg_action = { -+ .handler = netif_be_dbg, -+ .flags = IRQF_SHARED, -+ .name = "net-be-dbg" -+}; - #endif - - static int __init netback_init(void) -@@ -1689,12 +1695,9 @@ static int __init netback_init(void) - netif_xenbus_init(); - - #ifdef NETBE_DEBUG_INTERRUPT -- (void)bind_virq_to_irqhandler(VIRQ_DEBUG, -- 0, -- netif_be_dbg, -- 
IRQF_SHARED, -- "net-be-dbg", -- &netif_be_dbg); -+ (void)bind_virq_to_irqaction(VIRQ_DEBUG, -+ 0, -+ &netif_be_dbg_action); - #endif - - return 0; ---- head-2011-02-17.orig/drivers/xen/xenoprof/xenoprofile.c 2011-02-01 14:42:26.000000000 +0100 -+++ head-2011-02-17/drivers/xen/xenoprof/xenoprofile.c 2010-09-09 16:53:30.000000000 +0200 -@@ -209,6 +209,11 @@ static irqreturn_t xenoprof_ovf_interrup - return IRQ_HANDLED; - } - -+static struct irqaction ovf_action = { -+ .handler = xenoprof_ovf_interrupt, -+ .flags = IRQF_DISABLED, -+ .name = "xenoprof" -+}; - - static void unbind_virq(void) - { -@@ -216,7 +221,7 @@ static void unbind_virq(void) - - for_each_online_cpu(i) { - if (ovf_irq[i] >= 0) { -- unbind_from_irqhandler(ovf_irq[i], NULL); -+ unbind_from_per_cpu_irq(ovf_irq[i], i, &ovf_action); - ovf_irq[i] = -1; - } - } -@@ -229,12 +234,7 @@ static int bind_virq(void) - int result; - - for_each_online_cpu(i) { -- result = bind_virq_to_irqhandler(VIRQ_XENOPROF, -- i, -- xenoprof_ovf_interrupt, -- IRQF_DISABLED|IRQF_NOBALANCING, -- "xenoprof", -- NULL); -+ result = bind_virq_to_irqaction(VIRQ_XENOPROF, i, &ovf_action); - - if (result < 0) { - unbind_virq(); ---- head-2011-02-17.orig/include/xen/evtchn.h 2011-02-02 15:09:43.000000000 +0100 -+++ head-2011-02-17/include/xen/evtchn.h 2010-11-23 16:18:23.000000000 +0100 -@@ -94,6 +94,17 @@ int bind_virq_to_irqhandler( - unsigned long irqflags, - const char *devname, - void *dev_id); -+#if defined(CONFIG_SMP) && defined(CONFIG_XEN) && defined(CONFIG_X86) -+int bind_virq_to_irqaction( -+ unsigned int virq, -+ unsigned int cpu, -+ struct irqaction *action); -+#else -+#define bind_virq_to_irqaction(virq, cpu, action) \ -+ bind_virq_to_irqhandler(virq, cpu, (action)->handler, \ -+ (action)->flags | IRQF_NOBALANCING, \ -+ (action)->name, action) -+#endif - #if defined(CONFIG_SMP) && !defined(MODULE) - #ifndef CONFIG_X86 - int bind_ipi_to_irqhandler( -@@ -118,9 +129,13 @@ DECLARE_PER_CPU(DECLARE_BITMAP(, NR_IPIS - */ - void 
unbind_from_irqhandler(unsigned int irq, void *dev_id); - --#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86) -+#if defined(CONFIG_SMP) && defined(CONFIG_XEN) && defined(CONFIG_X86) - /* Specialized unbind function for per-CPU IRQs. */ --void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu); -+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu, -+ struct irqaction *); -+#else -+#define unbind_from_per_cpu_irq(irq, cpu, action) \ -+ unbind_from_irqhandler(irq, action) - #endif - - #ifndef CONFIG_XEN diff --git a/patches.xen/xen-watchdog b/patches.xen/xen-watchdog deleted file mode 100644 index ee6c113..0000000 --- a/patches.xen/xen-watchdog +++ /dev/null @@ -1,398 +0,0 @@ -From: jbeulich@novell.com -Subject: Xen: para-virtual watchdog driver -Patch-mainline: n/a - ---- head-2011-01-30.orig/drivers/watchdog/Kconfig 2011-01-31 12:42:35.000000000 +0100 -+++ head-2011-01-30/drivers/watchdog/Kconfig 2011-02-02 15:10:41.000000000 +0100 -@@ -1119,6 +1119,16 @@ config WATCHDOG_RIO - - # XTENSA Architecture - -+# Xen Architecture -+ -+config XEN_WDT -+ tristate "Xen Watchdog support" -+ depends on PARAVIRT_XEN || XEN -+ help -+ Say Y here to support the hypervisor watchdog capability provided -+ by Xen 4.0 and newer. The watchdog timeout period is normally one -+ minute but can be changed with a boot-time parameter. 
-+ - # - # ISA-based Watchdog Cards - # ---- head-2011-01-30.orig/drivers/watchdog/Makefile 2011-01-31 12:42:35.000000000 +0100 -+++ head-2011-01-30/drivers/watchdog/Makefile 2011-02-02 15:10:41.000000000 +0100 -@@ -148,6 +148,9 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o - - # XTENSA Architecture - -+# Xen -+obj-$(CONFIG_XEN_WDT) += xen_wdt.o -+ - # Architecture Independant - obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o - obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-01-30/drivers/watchdog/xen_wdt.c 2011-02-02 15:10:41.000000000 +0100 -@@ -0,0 +1,360 @@ -+/* -+ * Xen Watchdog Driver -+ * -+ * (c) Copyright 2010 Novell, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version -+ * 2 of the License, or (at your option) any later version. -+ */ -+ -+#define DRV_NAME "wdt" -+#define DRV_VERSION "0.01" -+#define PFX DRV_NAME ": " -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#ifdef CONFIG_PARAVIRT_XEN -+#include -+#include -+#endif -+#include -+ -+static struct platform_device *platform_device; -+static DEFINE_SPINLOCK(wdt_lock); -+static struct sched_watchdog wdt; -+static __kernel_time_t wdt_expires; -+static bool is_active, expect_release; -+ -+#define WATCHDOG_TIMEOUT 60 /* in seconds */ -+static unsigned int timeout = WATCHDOG_TIMEOUT; -+module_param(timeout, uint, S_IRUGO); -+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds " -+ "(default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); -+ -+static bool nowayout = WATCHDOG_NOWAYOUT; -+module_param(nowayout, bool, S_IRUGO); -+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " -+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -+ -+static inline __kernel_time_t 
set_timeout(void) -+{ -+ wdt.timeout = timeout; -+ return ktime_to_timespec(ktime_get()).tv_sec + timeout; -+} -+ -+static int xen_wdt_start(void) -+{ -+ __kernel_time_t expires; -+ int err; -+ -+ spin_lock(&wdt_lock); -+ -+ expires = set_timeout(); -+ if (!wdt.id) -+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt); -+ else -+ err = -EBUSY; -+ if (err > 0) { -+ wdt.id = err; -+ wdt_expires = expires; -+ err = 0; -+ } else -+ BUG_ON(!err); -+ -+ spin_unlock(&wdt_lock); -+ -+ return err; -+} -+ -+static int xen_wdt_stop(void) -+{ -+ int err = 0; -+ -+ spin_lock(&wdt_lock); -+ -+ wdt.timeout = 0; -+ if (wdt.id) -+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt); -+ if (!err) -+ wdt.id = 0; -+ -+ spin_unlock(&wdt_lock); -+ -+ return err; -+} -+ -+static int xen_wdt_kick(void) -+{ -+ __kernel_time_t expires; -+ int err; -+ -+ spin_lock(&wdt_lock); -+ -+ expires = set_timeout(); -+ if (wdt.id) -+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt); -+ else -+ err = -ENXIO; -+ if (!err) -+ wdt_expires = expires; -+ -+ spin_unlock(&wdt_lock); -+ -+ return err; -+} -+ -+static int xen_wdt_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ -+ /* /dev/watchdog can only be opened once */ -+ if (xchg(&is_active, true)) -+ return -EBUSY; -+ -+ err = xen_wdt_start(); -+ if (err == -EBUSY) -+ err = xen_wdt_kick(); -+ return err ?: nonseekable_open(inode, file); -+} -+ -+static int xen_wdt_release(struct inode *inode, struct file *file) -+{ -+ if (expect_release) -+ xen_wdt_stop(); -+ else { -+ pr_crit(PFX "unexpected close, not stopping watchdog!\n"); -+ xen_wdt_kick(); -+ } -+ is_active = false; -+ expect_release = false; -+ return 0; -+} -+ -+static ssize_t xen_wdt_write(struct file *file, const char __user *data, -+ size_t len, loff_t *ppos) -+{ -+ /* See if we got the magic character 'V' and reload the timer */ -+ if (len) { -+ if (!nowayout) { -+ size_t i; -+ -+ /* in case it was set long ago */ -+ expect_release = false; -+ -+ /* scan to see whether or not we 
got the magic -+ character */ -+ for (i = 0; i != len; i++) { -+ char c; -+ if (get_user(c, data + i)) -+ return -EFAULT; -+ if (c == 'V') -+ expect_release = true; -+ } -+ } -+ -+ /* someone wrote to us, we should reload the timer */ -+ xen_wdt_kick(); -+ } -+ return len; -+} -+ -+static long xen_wdt_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ int new_options, retval = -EINVAL; -+ int new_timeout; -+ int __user *argp = (void __user *)arg; -+ static const struct watchdog_info ident = { -+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, -+ .firmware_version = 0, -+ .identity = DRV_NAME, -+ }; -+ -+ switch (cmd) { -+ case WDIOC_GETSUPPORT: -+ return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; -+ -+ case WDIOC_GETSTATUS: -+ case WDIOC_GETBOOTSTATUS: -+ return put_user(0, argp); -+ -+ case WDIOC_SETOPTIONS: -+ if (get_user(new_options, argp)) -+ return -EFAULT; -+ -+ if (new_options & WDIOS_DISABLECARD) -+ retval = xen_wdt_stop(); -+ if (new_options & WDIOS_ENABLECARD) { -+ retval = xen_wdt_start(); -+ if (retval == -EBUSY) -+ retval = xen_wdt_kick(); -+ } -+ return retval; -+ -+ case WDIOC_KEEPALIVE: -+ xen_wdt_kick(); -+ return 0; -+ -+ case WDIOC_SETTIMEOUT: -+ if (get_user(new_timeout, argp)) -+ return -EFAULT; -+ if (!new_timeout) -+ return -EINVAL; -+ timeout = new_timeout; -+ xen_wdt_kick(); -+ /* fall through */ -+ case WDIOC_GETTIMEOUT: -+ return put_user(timeout, argp); -+ -+ case WDIOC_GETTIMELEFT: -+ retval = wdt_expires - ktime_to_timespec(ktime_get()).tv_sec; -+ return put_user(retval, argp); -+ } -+ -+ return -ENOTTY; -+} -+ -+static const struct file_operations xen_wdt_fops = { -+ .owner = THIS_MODULE, -+ .llseek = no_llseek, -+ .write = xen_wdt_write, -+ .unlocked_ioctl = xen_wdt_ioctl, -+ .open = xen_wdt_open, -+ .release = xen_wdt_release, -+}; -+ -+static struct miscdevice xen_wdt_miscdev = { -+ .minor = WATCHDOG_MINOR, -+ .name = "watchdog", -+ .fops = &xen_wdt_fops, -+}; -+ -+static int __devinit 
xen_wdt_probe(struct platform_device *dev) -+{ -+ struct sched_watchdog wd = { .id = ~0 }; -+ int ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd); -+ -+ switch (ret) { -+ case -EINVAL: -+ if (!timeout) { -+ timeout = WATCHDOG_TIMEOUT; -+ pr_info(PFX "timeout value invalid, using %d\n", -+ timeout); -+ } -+ -+ ret = misc_register(&xen_wdt_miscdev); -+ if (ret) { -+ pr_err(PFX "can't register miscdev on minor=%d (%d)\n", -+ WATCHDOG_MINOR, ret); -+ break; -+ } -+ -+ pr_info(PFX "initialized (timeout=%ds, nowayout=%d)\n", -+ timeout, nowayout); -+ break; -+ -+ case -ENOSYS: -+ pr_info(PFX "not supported\n"); -+ ret = -ENODEV; -+ break; -+ -+ default: -+ pr_warning(PFX "bogus return value %d\n", ret); -+ break; -+ } -+ -+ return ret; -+} -+ -+static int __devexit xen_wdt_remove(struct platform_device *dev) -+{ -+ /* Stop the timer before we leave */ -+ if (!nowayout) -+ xen_wdt_stop(); -+ -+ misc_deregister(&xen_wdt_miscdev); -+ -+ return 0; -+} -+ -+static void xen_wdt_shutdown(struct platform_device *dev) -+{ -+ xen_wdt_stop(); -+} -+ -+static int xen_wdt_suspend(struct platform_device *dev, pm_message_t state) -+{ -+ return xen_wdt_stop(); -+} -+ -+static int xen_wdt_resume(struct platform_device *dev) -+{ -+ return xen_wdt_start(); -+} -+ -+static struct platform_driver xen_wdt_driver = { -+ .probe = xen_wdt_probe, -+ .remove = __devexit_p(xen_wdt_remove), -+ .shutdown = xen_wdt_shutdown, -+ .suspend = xen_wdt_suspend, -+ .resume = xen_wdt_resume, -+ .driver = { -+ .owner = THIS_MODULE, -+ .name = DRV_NAME, -+ }, -+}; -+ -+static int __init xen_wdt_init_module(void) -+{ -+ int err; -+ -+#ifdef CONFIG_PARAVIRT_XEN -+ if (!xen_domain()) -+ return -ENODEV; -+#endif -+ -+ pr_info(PFX "Xen WatchDog Timer Driver v%s\n", DRV_VERSION); -+ -+ err = platform_driver_register(&xen_wdt_driver); -+ if (err) -+ return err; -+ -+ platform_device = platform_device_register_simple(DRV_NAME, -+ -1, NULL, 0); -+ if (IS_ERR(platform_device)) { -+ err = PTR_ERR(platform_device); -+ 
platform_driver_unregister(&xen_wdt_driver); -+ } -+ -+ return err; -+} -+ -+static void __exit xen_wdt_cleanup_module(void) -+{ -+ platform_device_unregister(platform_device); -+ platform_driver_unregister(&xen_wdt_driver); -+ pr_info(PFX "module unloaded\n"); -+} -+ -+module_init(xen_wdt_init_module); -+module_exit(xen_wdt_cleanup_module); -+ -+MODULE_AUTHOR("Jan Beulich "); -+MODULE_DESCRIPTION("Xen WatchDog Timer Driver"); -+MODULE_VERSION(DRV_VERSION); -+MODULE_LICENSE("GPL"); -+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/patches.xen/xen-x86-bigmem b/patches.xen/xen-x86-bigmem deleted file mode 100644 index c74929e..0000000 --- a/patches.xen/xen-x86-bigmem +++ /dev/null @@ -1,143 +0,0 @@ -From: jbeulich@novell.com -Subject: fix issues with the assignment of huge amounts of memory -Patch-mainline: obsolete -References: bnc#482614, bnc#537435 - ---- head-2011-01-30.orig/arch/x86/kernel/e820-xen.c 2011-02-01 15:41:35.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/e820-xen.c 2011-02-03 14:42:11.000000000 +0100 -@@ -951,6 +951,26 @@ static int __init parse_memopt(char *p) - - userdef = 1; - mem_size = memparse(p, &p); -+#ifdef CONFIG_XEN -+ /* -+ * A little less than 2% of available memory are needed for page -+ * tables, p2m map, and mem_map. Hence the maximum amount of memory -+ * we can potentially balloon up to can in no case exceed about 50 -+ * times of what we've been given initially. Since even with that we -+ * won't be able to boot (due to various calculations done based on -+ * the total number of pages) we further restrict this to factor 32. 
-+ */ -+ if ((mem_size >> (PAGE_SHIFT + 5)) > xen_start_info->nr_pages) { -+ u64 size = (u64)xen_start_info->nr_pages << 5; -+ -+ pr_warning("mem=%Luk is invalid for an initial" -+ " allocation of %luk, using %Luk\n", -+ (unsigned long long)mem_size >> 10, -+ xen_start_info->nr_pages << (PAGE_SHIFT - 10), -+ (unsigned long long)size << (PAGE_SHIFT - 10)); -+ mem_size = size << PAGE_SHIFT; -+ } -+#endif - e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); - - i = e820.nr_map - 1; -@@ -1149,6 +1169,7 @@ void __init e820_reserve_resources_late( - char *__init default_machine_specific_memory_setup(void) - { - int rc, nr_map; -+ unsigned long long maxmem; - struct xen_memory_map memmap; - static struct e820entry __initdata map[E820MAX]; - -@@ -1174,6 +1195,22 @@ char *__init default_machine_specific_me - BUG(); - - #ifdef CONFIG_XEN -+ /* See the comment in parse_memopt(). */ -+ for (maxmem = rc = 0; rc < e820.nr_map; ++rc) -+ if (e820.map[rc].type == E820_RAM) -+ maxmem += e820.map[rc].size; -+ if ((maxmem >> (PAGE_SHIFT + 5)) > xen_start_info->nr_pages) { -+ unsigned long long size = (u64)xen_start_info->nr_pages << 5; -+ -+ pr_warning("maxmem of %LuM is invalid for an initial" -+ " allocation of %luM, using %LuM\n", -+ maxmem >> 20, -+ xen_start_info->nr_pages >> (20 - PAGE_SHIFT), -+ size >> (20 - PAGE_SHIFT)); -+ size <<= PAGE_SHIFT; -+ e820_remove_range(size, ULLONG_MAX - size, E820_RAM, 1); -+ } -+ - if (is_initial_xendomain()) { - memmap.nr_entries = E820MAX; - set_xen_guest_handle(memmap.buffer, machine_e820.map); ---- head-2011-01-30.orig/arch/x86/kernel/setup-xen.c 2011-01-03 14:07:52.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/setup-xen.c 2011-02-03 14:42:11.000000000 +0100 -@@ -132,12 +132,7 @@ static struct notifier_block xen_panic_b - unsigned long *phys_to_machine_mapping; - EXPORT_SYMBOL(phys_to_machine_mapping); - --unsigned long *pfn_to_mfn_frame_list_list, --#ifdef CONFIG_X86_64 -- *pfn_to_mfn_frame_list[512]; --#else -- 
*pfn_to_mfn_frame_list[128]; --#endif -+unsigned long *pfn_to_mfn_frame_list_list, **pfn_to_mfn_frame_list; - - /* Raw start-of-day parameters from the hypervisor. */ - start_info_t *xen_start_info; -@@ -1188,17 +1183,17 @@ void __init setup_arch(char **cmdline_p) - p2m_pages = xen_start_info->nr_pages; - - if (!xen_feature(XENFEAT_auto_translated_physmap)) { -- unsigned long i, j; -+ unsigned long i, j, size; - unsigned int k, fpp; - - /* Make sure we have a large enough P->M table. */ - phys_to_machine_mapping = alloc_bootmem_pages( - max_pfn * sizeof(unsigned long)); -- memset(phys_to_machine_mapping, ~0, -- max_pfn * sizeof(unsigned long)); - memcpy(phys_to_machine_mapping, - (unsigned long *)xen_start_info->mfn_list, - p2m_pages * sizeof(unsigned long)); -+ memset(phys_to_machine_mapping + p2m_pages, ~0, -+ (max_pfn - p2m_pages) * sizeof(unsigned long)); - free_bootmem( - __pa(xen_start_info->mfn_list), - PFN_PHYS(PFN_UP(xen_start_info->nr_pages * -@@ -1208,15 +1203,26 @@ void __init setup_arch(char **cmdline_p) - * Initialise the list of the frames that specify the list of - * frames that make up the p2m table. Used by save/restore. 
- */ -- pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE); -- - fpp = PAGE_SIZE/sizeof(unsigned long); -+ size = (max_pfn + fpp - 1) / fpp; -+ size = (size + fpp - 1) / fpp; -+ ++size; /* include a zero terminator for crash tools */ -+ size *= sizeof(unsigned long); -+ pfn_to_mfn_frame_list_list = alloc_bootmem_pages(size); -+ if (size > PAGE_SIZE -+ && xen_create_contiguous_region((unsigned long) -+ pfn_to_mfn_frame_list_list, -+ get_order(size), 0)) -+ BUG(); -+ size -= sizeof(unsigned long); -+ pfn_to_mfn_frame_list = alloc_bootmem(size); -+ - for (i = j = 0, k = -1; i < max_pfn; i += fpp, j++) { - if (j == fpp) - j = 0; - if (j == 0) { - k++; -- BUG_ON(k>=ARRAY_SIZE(pfn_to_mfn_frame_list)); -+ BUG_ON(k * sizeof(unsigned long) >= size); - pfn_to_mfn_frame_list[k] = - alloc_bootmem_pages(PAGE_SIZE); - pfn_to_mfn_frame_list_list[k] = ---- head-2011-01-30.orig/drivers/xen/core/machine_reboot.c 2011-02-02 15:10:16.000000000 +0100 -+++ head-2011-01-30/drivers/xen/core/machine_reboot.c 2011-02-03 14:42:11.000000000 +0100 -@@ -75,7 +75,7 @@ static void post_suspend(int suspend_can - unsigned long shinfo_mfn; - extern unsigned long max_pfn; - extern unsigned long *pfn_to_mfn_frame_list_list; -- extern unsigned long *pfn_to_mfn_frame_list[]; -+ extern unsigned long **pfn_to_mfn_frame_list; - - if (suspend_cancelled) { - xen_start_info->store_mfn = diff --git a/patches.xen/xen-x86-dcr-fallback b/patches.xen/xen-x86-dcr-fallback deleted file mode 100644 index 5c9a6fe..0000000 --- a/patches.xen/xen-x86-dcr-fallback +++ /dev/null @@ -1,168 +0,0 @@ -Subject: Add fallback when XENMEM_exchange fails to replace contiguous region -From: jbeulich@novell.com -Patch-mainline: obsolete -References: 181869 - -This avoids losing precious special memory in places where any memory can be -used. 
- ---- head-2010-12-08.orig/arch/x86/mm/hypervisor.c 2010-12-08 10:38:40.000000000 +0100 -+++ head-2010-12-08/arch/x86/mm/hypervisor.c 2010-12-08 10:45:24.000000000 +0100 -@@ -43,6 +43,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -717,6 +718,83 @@ void xen_destroy_contiguous_region(unsig - BUG(); - - balloon_unlock(flags); -+ -+ if (unlikely(!success)) { -+ /* Try hard to get the special memory back to Xen. */ -+ exchange.in.extent_order = 0; -+ set_xen_guest_handle(exchange.in.extent_start, &in_frame); -+ -+ for (i = 0; i < (1U<> PAGE_SHIFT; -+ set_phys_to_machine(pfn, frame); -+ if (!xen_feature(XENFEAT_auto_translated_physmap)) { -+ mmu.ptr = ((uint64_t)frame << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; -+ mmu.val = pfn; -+ cr_mcl[j].op = __HYPERVISOR_mmu_update; -+ cr_mcl[j].args[0] = (unsigned long)&mmu; -+ cr_mcl[j].args[1] = 1; -+ cr_mcl[j].args[2] = 0; -+ cr_mcl[j].args[3] = DOMID_SELF; -+ ++j; -+ } -+ -+ cr_mcl[j].op = __HYPERVISOR_memory_op; -+ cr_mcl[j].args[0] = XENMEM_decrease_reservation; -+ cr_mcl[j].args[1] = (unsigned long)&exchange.in; -+ -+ if (HYPERVISOR_multicall(cr_mcl, j + 1)) -+ BUG(); -+ BUG_ON(cr_mcl[j].result != 1); -+ while (j--) -+ BUG_ON(cr_mcl[j].result != 0); -+ -+ balloon_unlock(flags); -+ -+ free_empty_pages(&page, 1); -+ -+ in_frame++; -+ vstart += PAGE_SIZE; -+ } -+ } - } - EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); - ---- head-2010-12-08.orig/drivers/xen/balloon/balloon.c 2010-11-25 13:47:53.000000000 +0100 -+++ head-2010-12-08/drivers/xen/balloon/balloon.c 2010-11-25 13:48:02.000000000 +0100 -@@ -773,7 +773,11 @@ struct page **alloc_empty_pages_and_page - } - EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); - --void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) -+#endif /* CONFIG_XEN_BACKEND */ -+ -+#ifdef CONFIG_XEN -+static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, -+ bool free_vec) - { - unsigned long flags; - int i; -@@ -784,17 +788,33 @@ 
void free_empty_pages_and_pagevec(struct - balloon_lock(flags); - for (i = 0; i < nr_pages; i++) { - BUG_ON(page_count(pagevec[i]) != 1); -- balloon_append(pagevec[i], 0); -+ balloon_append(pagevec[i], !free_vec); -+ } -+ if (!free_vec) { -+ bs.current_pages -= nr_pages; -+ totalram_pages = bs.current_pages - totalram_bias; - } - balloon_unlock(flags); - -- kfree(pagevec); -+ if (free_vec) -+ kfree(pagevec); - - schedule_work(&balloon_worker); - } --EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); - --#endif /* CONFIG_XEN_BACKEND */ -+void free_empty_pages(struct page **pagevec, int nr_pages) -+{ -+ _free_empty_pages_and_pagevec(pagevec, nr_pages, false); -+} -+#endif -+ -+#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) -+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) -+{ -+ _free_empty_pages_and_pagevec(pagevec, nr_pages, true); -+} -+EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); -+#endif - - void balloon_release_driver_page(struct page *page) - { ---- head-2010-12-08.orig/include/xen/balloon.h 2010-11-22 12:57:58.000000000 +0100 -+++ head-2010-12-08/include/xen/balloon.h 2009-06-09 15:52:17.000000000 +0200 -@@ -47,6 +47,10 @@ void balloon_update_driver_allowance(lon - struct page **alloc_empty_pages_and_pagevec(int nr_pages); - void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages); - -+/* Free an empty page range (not allocated through -+ alloc_empty_pages_and_pagevec), adding to the balloon. */ -+void free_empty_pages(struct page **pagevec, int nr_pages); -+ - void balloon_release_driver_page(struct page *page); - - /* diff --git a/patches.xen/xen-x86-exit-mmap b/patches.xen/xen-x86-exit-mmap deleted file mode 100644 index 7ca2af6..0000000 --- a/patches.xen/xen-x86-exit-mmap +++ /dev/null @@ -1,73 +0,0 @@ -Subject: be more aggressive about de-activating mm-s under destruction -From: jbeulich@novell.com -Patch-mainline: obsolete - -... 
by not only handling the current task on the CPU arch_exit_mmap() -gets executed on, but also forcing remote CPUs to do so. - ---- head-2010-04-15.orig/arch/x86/mm/pgtable-xen.c 2010-04-15 11:47:53.000000000 +0200 -+++ head-2010-04-15/arch/x86/mm/pgtable-xen.c 2010-04-15 11:48:29.000000000 +0200 -@@ -1,6 +1,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -438,27 +439,44 @@ void arch_dup_mmap(struct mm_struct *old - mm_pin(mm); - } - --void arch_exit_mmap(struct mm_struct *mm) -+/* -+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas() *much* -+ * faster this way, as no hypercalls are needed for the page table updates. -+ */ -+static void leave_active_mm(struct task_struct *tsk, struct mm_struct *mm) -+ __releases(tsk->alloc_lock) - { -- struct task_struct *tsk = current; -- -- task_lock(tsk); -- -- /* -- * We aggressively remove defunct pgd from cr3. We execute unmap_vmas() -- * *much* faster this way, as no tlb flushes means bigger wrpt batches. 
-- */ - if (tsk->active_mm == mm) { - tsk->active_mm = &init_mm; - atomic_inc(&init_mm.mm_count); - - switch_mm(mm, &init_mm, tsk); - -- atomic_dec(&mm->mm_count); -- BUG_ON(atomic_read(&mm->mm_count) == 0); -+ if (atomic_dec_and_test(&mm->mm_count)) -+ BUG(); - } - - task_unlock(tsk); -+} -+ -+static void _leave_active_mm(void *mm) -+{ -+ struct task_struct *tsk = current; -+ -+ if (spin_trylock(&tsk->alloc_lock)) -+ leave_active_mm(tsk, mm); -+} -+ -+void arch_exit_mmap(struct mm_struct *mm) -+{ -+ struct task_struct *tsk = current; -+ -+ task_lock(tsk); -+ leave_active_mm(tsk, mm); -+ -+ preempt_disable(); -+ smp_call_function_many(mm_cpumask(mm), _leave_active_mm, mm, 1); -+ preempt_enable(); - - if (PagePinned(virt_to_page(mm->pgd)) - && atomic_read(&mm->mm_count) == 1 diff --git a/patches.xen/xen-x86-msr-on-pcpu b/patches.xen/xen-x86-msr-on-pcpu deleted file mode 100644 index a7f135a..0000000 --- a/patches.xen/xen-x86-msr-on-pcpu +++ /dev/null @@ -1,822 +0,0 @@ -From: jbeulich@novell.com -Subject: introduce {rd,wr}msr_safe_on_pcpu() and add/enable users -Patch-mainline: n/a - ---- head-2011-03-11.orig/arch/x86/Kconfig 2011-02-02 15:10:34.000000000 +0100 -+++ head-2011-03-11/arch/x86/Kconfig 2011-02-03 14:42:26.000000000 +0100 -@@ -1048,6 +1048,7 @@ config MICROCODE_OLD_INTERFACE - - config X86_MSR - tristate "/dev/cpu/*/msr - Model-specific register support" -+ select XEN_DOMCTL if XEN_PRIVILEGED_GUEST - ---help--- - This device gives privileged processes access to the x86 - Model-Specific Registers (MSRs). It is a character device with ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/arch/x86/kernel/msr-xen.c 2011-02-03 14:42:26.000000000 +0100 -@@ -0,0 +1,339 @@ -+#ifndef CONFIG_XEN_PRIVILEGED_GUEST -+#include "msr.c" -+#else -+/* ----------------------------------------------------------------------- * -+ * -+ * Copyright 2010 Novell, Inc. 
-+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, -+ * USA; either version 2 of the License, or (at your option) any later -+ * version; incorporated herein by reference. -+ * -+ * ----------------------------------------------------------------------- */ -+ -+/* -+ * x86 MSR access device -+ * -+ * This device is accessed by lseek() to the appropriate register number -+ * and then read/write in chunks of 8 bytes. A larger size means multiple -+ * reads or writes of the same register. -+ * -+ * This driver uses /dev/xen/cpu/%d/msr where %d correlates to the minor -+ * number, and on an SMP box will direct the access to pCPU %d. -+ */ -+ -+static int msr_init(void); -+static void msr_exit(void); -+ -+#define msr_init(args...) _msr_init(args) -+#define msr_exit(args...) _msr_exit(args) -+#include "msr.c" -+#undef msr_exit -+#undef msr_init -+ -+#include -+#include -+ -+static struct class *pmsr_class; -+static unsigned int minor_bias = 10; -+static unsigned int nr_xen_cpu_ids; -+static unsigned long *xen_cpu_online_map; -+ -+#define PMSR_DEV(cpu) MKDEV(MSR_MAJOR, (cpu) + minor_bias) -+ -+static unsigned int pmsr_minor(struct inode *inode) -+{ -+ return iminor(inode) - minor_bias; -+} -+ -+static ssize_t pmsr_read(struct file *file, char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ u32 __user *tmp = (u32 __user *) buf; -+ u32 data[2]; -+ u32 reg = *ppos; -+ unsigned int cpu = pmsr_minor(file->f_path.dentry->d_inode); -+ int err = 0; -+ ssize_t bytes = 0; -+ -+ if (count % 8) -+ return -EINVAL; /* Invalid chunk size */ -+ -+ for (; count; count -= 8) { -+ err = rdmsr_safe_on_pcpu(cpu, reg, &data[0], &data[1]); -+ if (err) -+ break; -+ if (copy_to_user(tmp, &data, 8)) { -+ err = -EFAULT; -+ break; -+ } -+ tmp += 2; -+ bytes += 8; -+ } -+ -+ return bytes ? 
bytes : err; -+} -+ -+static ssize_t pmsr_write(struct file *file, const char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ const u32 __user *tmp = (const u32 __user *)buf; -+ u32 data[2]; -+ u32 reg = *ppos; -+ unsigned int cpu = pmsr_minor(file->f_path.dentry->d_inode); -+ int err = 0; -+ ssize_t bytes = 0; -+ -+ if (count % 8) -+ return -EINVAL; /* Invalid chunk size */ -+ -+ for (; count; count -= 8) { -+ if (copy_from_user(&data, tmp, 8)) { -+ err = -EFAULT; -+ break; -+ } -+ err = wrmsr_safe_on_pcpu(cpu, reg, data[0], data[1]); -+ if (err) -+ break; -+ tmp += 2; -+ bytes += 8; -+ } -+ -+ return bytes ? bytes : err; -+} -+ -+static long pmsr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) -+{ -+ u32 __user *uregs = (u32 __user *)arg; -+ u32 regs[8]; -+ unsigned int cpu = pmsr_minor(file->f_path.dentry->d_inode); -+ int err; -+ -+ switch (ioc) { -+ case X86_IOC_RDMSR_REGS: -+ if (!(file->f_mode & FMODE_READ)) { -+ err = -EBADF; -+ break; -+ } -+ if (copy_from_user(®s, uregs, sizeof regs)) { -+ err = -EFAULT; -+ break; -+ } -+ err = rdmsr_safe_regs_on_pcpu(cpu, regs); -+ if (err) -+ break; -+ if (copy_to_user(uregs, ®s, sizeof regs)) -+ err = -EFAULT; -+ break; -+ -+ case X86_IOC_WRMSR_REGS: -+ if (!(file->f_mode & FMODE_WRITE)) { -+ err = -EBADF; -+ break; -+ } -+ if (copy_from_user(®s, uregs, sizeof regs)) { -+ err = -EFAULT; -+ break; -+ } -+ err = wrmsr_safe_regs_on_pcpu(cpu, regs); -+ if (err) -+ break; -+ if (copy_to_user(uregs, ®s, sizeof regs)) -+ err = -EFAULT; -+ break; -+ -+ default: -+ err = -ENOTTY; -+ break; -+ } -+ -+ return err; -+} -+ -+static int pmsr_open(struct inode *inode, struct file *file) -+{ -+ unsigned int cpu; -+ -+ cpu = pmsr_minor(file->f_path.dentry->d_inode); -+ if (cpu >= nr_xen_cpu_ids || !test_bit(cpu, xen_cpu_online_map)) -+ return -ENXIO; /* No such CPU */ -+ -+ return 0; -+} -+ -+/* -+ * File operations we support -+ */ -+static const struct file_operations pmsr_fops = { -+ .owner = THIS_MODULE, -+ .llseek 
= msr_seek, -+ .read = pmsr_read, -+ .write = pmsr_write, -+ .open = pmsr_open, -+ .unlocked_ioctl = pmsr_ioctl, -+ .compat_ioctl = pmsr_ioctl, -+}; -+ -+static int pmsr_device_create(unsigned int cpu) -+{ -+ struct device *dev; -+ -+ if (cpu >= nr_xen_cpu_ids) { -+ static bool warned; -+ unsigned long *map; -+ -+ if ((minor_bias + cpu) >> MINORBITS) { -+ if (!warned) { -+ warned = true; -+ pr_warning("Physical MSRs of CPUs beyond %u" -+ " will not be accessible\n", -+ MINORMASK - minor_bias); -+ } -+ return -EDOM; -+ } -+ -+ map = kzalloc(BITS_TO_LONGS(cpu + 1) * sizeof(*map), -+ GFP_KERNEL); -+ if (!map) { -+ if (!warned) { -+ warned = true; -+ pr_warning("Physical MSRs of CPUs beyond %u" -+ " may not be accessible\n", -+ nr_xen_cpu_ids - 1); -+ } -+ return -ENOMEM; -+ } -+ -+ memcpy(map, xen_cpu_online_map, -+ BITS_TO_LONGS(nr_xen_cpu_ids) -+ * sizeof(*xen_cpu_online_map)); -+ nr_xen_cpu_ids = min_t(unsigned int, -+ BITS_TO_LONGS(cpu + 1) * BITS_PER_LONG, -+ MINORMASK + 1 - minor_bias); -+ kfree(xchg(&xen_cpu_online_map, map)); -+ } -+ set_bit(cpu, xen_cpu_online_map); -+ dev = device_create(pmsr_class, NULL, PMSR_DEV(cpu), NULL, -+ "pmsr%d", cpu); -+ return IS_ERR(dev) ? 
PTR_ERR(dev) : 0; -+} -+ -+static void pmsr_device_destroy(unsigned int cpu) -+{ -+ clear_bit(cpu, xen_cpu_online_map); -+ device_destroy(pmsr_class, PMSR_DEV(cpu)); -+} -+ -+static int pmsr_cpu_callback(struct notifier_block *nfb, -+ unsigned long action, void *hcpu) -+{ -+ unsigned int cpu = (unsigned long)hcpu; -+ -+ switch (action) { -+ case CPU_ONLINE: -+ pmsr_device_create(cpu); -+ break; -+ case CPU_DEAD: -+ pmsr_device_destroy(cpu); -+ break; -+ } -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block pmsr_cpu_notifier = { -+ .notifier_call = pmsr_cpu_callback, -+}; -+ -+static char *pmsr_devnode(struct device *dev, mode_t *mode) -+{ -+ return kasprintf(GFP_KERNEL, "xen/cpu/%u/msr", -+ MINOR(dev->devt) - minor_bias); -+} -+ -+static int __init msr_init(void) -+{ -+ int err; -+ xen_platform_op_t op = { -+ .cmd = XENPF_get_cpuinfo, -+ .interface_version = XENPF_INTERFACE_VERSION, -+ .u.pcpu_info.xen_cpuid = 0 -+ }; -+ -+ err = _msr_init(); -+ if (err || !is_initial_xendomain()) -+ return err; -+ -+ do { -+ err = HYPERVISOR_platform_op(&op); -+ } while (err == -EBUSY); -+ if (err) -+ goto out; -+ nr_xen_cpu_ids = BITS_TO_LONGS(op.u.pcpu_info.max_present + 1) -+ * BITS_PER_LONG; -+ -+ while (minor_bias < NR_CPUS) -+ minor_bias *= 10; -+ if ((minor_bias + nr_xen_cpu_ids - 1) >> MINORBITS) -+ minor_bias = NR_CPUS; -+ if ((minor_bias + nr_xen_cpu_ids - 1) >> MINORBITS) -+ nr_xen_cpu_ids = MINORMASK + 1 - NR_CPUS; -+ -+ xen_cpu_online_map = kzalloc(BITS_TO_LONGS(nr_xen_cpu_ids) -+ * sizeof(*xen_cpu_online_map), -+ GFP_KERNEL); -+ if (!xen_cpu_online_map) { -+ err = -ENOMEM; -+ goto out; -+ } -+ -+ if (__register_chrdev(MSR_MAJOR, minor_bias, -+ MINORMASK + 1 - minor_bias, -+ "pcpu/msr", &pmsr_fops)) { -+ pr_err("msr: unable to get minors for pmsr\n"); -+ goto out; -+ } -+ pmsr_class = class_create(THIS_MODULE, "pmsr"); -+ if (IS_ERR(pmsr_class)) { -+ err = PTR_ERR(pmsr_class); -+ goto out_chrdev; -+ } -+ pmsr_class->devnode = pmsr_devnode; -+ err = 
register_pcpu_notifier(&pmsr_cpu_notifier); -+ -+ if (!err && !nr_xen_cpu_ids) -+ err = -ENODEV; -+ if (!err) -+ return 0; -+ -+ class_destroy(pmsr_class); -+ -+out_chrdev: -+ __unregister_chrdev(MSR_MAJOR, minor_bias, -+ MINORMASK + 1 - minor_bias, "pcpu/msr"); -+out: -+ if (err) -+ pr_warning("msr: can't initialize physical MSR access (%d)\n", -+ err); -+ nr_xen_cpu_ids = 0; -+ kfree(xen_cpu_online_map); -+ return 0; -+} -+ -+static void __exit msr_exit(void) -+{ -+ if (nr_xen_cpu_ids) { -+ unsigned int cpu = 0; -+ -+ unregister_pcpu_notifier(&pmsr_cpu_notifier); -+ for_each_set_bit(cpu, xen_cpu_online_map, nr_xen_cpu_ids) -+ msr_device_destroy(cpu); -+ class_destroy(pmsr_class); -+ __unregister_chrdev(MSR_MAJOR, minor_bias, -+ MINORMASK + 1 - minor_bias, "pcpu/msr"); -+ kfree(xen_cpu_online_map); -+ } -+ _msr_exit(); -+} -+#endif /* CONFIG_XEN_PRIVILEGED_GUEST */ ---- head-2011-03-11.orig/drivers/hwmon/Kconfig 2011-02-01 15:04:27.000000000 +0100 -+++ head-2011-03-11/drivers/hwmon/Kconfig 2011-03-11 11:17:24.000000000 +0100 -@@ -392,7 +392,8 @@ config SENSORS_GPIO_FAN - - config SENSORS_CORETEMP - tristate "Intel Core/Core2/Atom temperature sensor" -- depends on X86 && PCI && !XEN && EXPERIMENTAL -+ depends on X86 && PCI && !XEN_UNPRIVILEGED_GUEST && EXPERIMENTAL -+ select XEN_DOMCTL if XEN - help - If you say yes here you get support for the temperature - sensor inside your CPU. Most of the family 6 CPUs -@@ -400,7 +401,8 @@ config SENSORS_CORETEMP - - config SENSORS_PKGTEMP - tristate "Intel processor package temperature sensor" -- depends on X86 && !XEN && EXPERIMENTAL -+ depends on X86 && !XEN_UNPRIVILEGED_GUEST && EXPERIMENTAL -+ select XEN_DOMCTL if XEN - help - If you say yes here you get support for the package level temperature - sensor inside your CPU. Check documentation/driver for details. 
-@@ -943,7 +945,8 @@ config SENSORS_TMP421 - - config SENSORS_VIA_CPUTEMP - tristate "VIA CPU temperature sensor" -- depends on X86 && !XEN -+ depends on X86 && !XEN_UNPRIVILEGED_GUEST -+ select XEN_DOMCTL if XEN - help - If you say yes here you get support for the temperature - sensor inside your CPU. Supported are all known variants of ---- head-2011-03-11.orig/drivers/xen/core/Makefile 2011-02-02 15:10:34.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/Makefile 2011-02-03 14:42:26.000000000 +0100 -@@ -5,8 +5,7 @@ - obj-y := evtchn.o gnttab.o reboot.o machine_reboot.o - - priv-$(CONFIG_PCI) += pci.o --priv-$(CONFIG_ACPI_HOTPLUG_CPU) += pcpu.o --obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += firmware.o $(priv-y) -+obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += firmware.o pcpu.o $(priv-y) - obj-$(CONFIG_PROC_FS) += xen_proc.o - obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o - obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o -@@ -17,4 +16,4 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o - obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o - obj-$(CONFIG_XEN_DOMCTL) += domctl.o - CFLAGS_domctl.o := -D__XEN_PUBLIC_XEN_H__ -D__XEN_PUBLIC_GRANT_TABLE_H__ --CFLAGS_domctl.o += -D__XEN_TOOLS__ -imacros xen/interface/domctl.h -+CFLAGS_domctl.o += -D__XEN_TOOLS__ -imacros xen/interface/domctl.h -imacros xen/interface/sysctl.h ---- head-2011-03-11.orig/drivers/xen/core/domctl.c 2010-11-23 16:20:20.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/domctl.c 2011-02-03 14:42:26.000000000 +0100 -@@ -92,6 +92,110 @@ union xen_domctl { - } v5, v6, v7; - }; - -+struct xen_sysctl_physinfo_v6 { -+ uint32_t threads_per_core; -+ uint32_t cores_per_socket; -+ uint32_t nr_cpus; -+ uint32_t nr_nodes; -+ uint32_t cpu_khz; -+ uint64_aligned_t total_pages; -+ uint64_aligned_t free_pages; -+ uint64_aligned_t scrub_pages; -+ uint32_t hw_cap[8]; -+ uint32_t max_cpu_id; -+ union { -+ XEN_GUEST_HANDLE(uint32) cpu_to_node; -+ uint64_aligned_t _ctn_align; -+ }; -+ uint32_t capabilities; -+}; -+ -+struct 
xen_sysctl_physinfo_v7 { -+ uint32_t threads_per_core; -+ uint32_t cores_per_socket; -+ uint32_t nr_cpus; -+ uint32_t max_node_id; -+ uint32_t cpu_khz; -+ uint64_aligned_t total_pages; -+ uint64_aligned_t free_pages; -+ uint64_aligned_t scrub_pages; -+ uint32_t hw_cap[8]; -+ uint32_t max_cpu_id; -+ union { -+ XEN_GUEST_HANDLE(uint32) cpu_to_node; -+ uint64_aligned_t _ctn_align; -+ }; -+ uint32_t capabilities; -+}; -+ -+#define XEN_SYSCTL_pm_op_get_cputopo 0x20 -+struct xen_get_cputopo_v6 { -+ uint32_t max_cpus; -+ union { -+ XEN_GUEST_HANDLE(uint32) cpu_to_core; -+ uint64_aligned_t _ctc_align; -+ }; -+ union { -+ XEN_GUEST_HANDLE(uint32) cpu_to_socket; -+ uint64_aligned_t _cts_align; -+ }; -+ uint32_t nr_cpus; -+}; -+ -+struct xen_sysctl_pm_op_v6 { -+ uint32_t cmd; -+ uint32_t cpuid; -+ union { -+ struct xen_get_cputopo_v6 get_topo; -+ }; -+}; -+#define xen_sysctl_pm_op_v7 xen_sysctl_pm_op_v6 -+ -+struct xen_sysctl_topologyinfo_v8 { -+ uint32_t max_cpu_index; -+ union { -+ XEN_GUEST_HANDLE(uint32) cpu_to_core; -+ uint64_aligned_t _ctc_align; -+ }; -+ union { -+ XEN_GUEST_HANDLE(uint32) cpu_to_socket; -+ uint64_aligned_t _cts_align; -+ }; -+ union { -+ XEN_GUEST_HANDLE(uint32) cpu_to_node; -+ uint64_aligned_t _ctn_align; -+ }; -+}; -+ -+union xen_sysctl { -+ /* v6: Xen 3.4.x */ -+ struct { -+ uint32_t cmd; -+ uint32_t interface_version; -+ union { -+ struct xen_sysctl_physinfo_v6 physinfo; -+ struct xen_sysctl_pm_op_v6 pm_op; -+ }; -+ } v6; -+ /* v7: Xen 4.0.x */ -+ struct { -+ uint32_t cmd; -+ uint32_t interface_version; -+ union { -+ struct xen_sysctl_physinfo_v7 physinfo; -+ struct xen_sysctl_pm_op_v7 pm_op; -+ }; -+ } v7; -+ /* v8: Xen 4.1+ */ -+ struct { -+ uint32_t cmd; -+ uint32_t interface_version; -+ union { -+ struct xen_sysctl_topologyinfo_v8 topologyinfo; -+ }; -+ } v8; -+}; -+ - /* The actual code comes here */ - - static inline int hypervisor_domctl(void *domctl) -@@ -99,6 +203,11 @@ static inline int hypervisor_domctl(void - return _hypercall1(int, 
domctl, domctl); - } - -+static inline int hypervisor_sysctl(void *sysctl) -+{ -+ return _hypercall1(int, sysctl, sysctl); -+} -+ - int xen_guest_address_size(int domid) - { - union xen_domctl domctl; -@@ -263,6 +372,172 @@ int xen_set_physical_cpu_affinity(int pc - } - EXPORT_SYMBOL_GPL(xen_set_physical_cpu_affinity); - -+int xen_get_topology_info(unsigned int cpu, u32 *core, u32 *sock, u32 *node) -+{ -+ union xen_sysctl sysctl; -+ uint32_t *cores = NULL, *socks = NULL, *nodes = NULL; -+ unsigned int nr; -+ int rc; -+ -+ if (core) -+ cores = kmalloc((cpu + 1) * sizeof(*cores), GFP_KERNEL); -+ if (sock) -+ socks = kmalloc((cpu + 1) * sizeof(*socks), GFP_KERNEL); -+ if (node) -+ nodes = kmalloc((cpu + 1) * sizeof(*nodes), GFP_KERNEL); -+ if ((core && !cores) || (sock && !socks) || (node && !nodes)) { -+ kfree(cores); -+ kfree(socks); -+ kfree(nodes); -+ return -ENOMEM; -+ } -+ -+#define topologyinfo(ver) do { \ -+ memset(&sysctl, 0, sizeof(sysctl)); \ -+ sysctl.v##ver.cmd = XEN_SYSCTL_topologyinfo; \ -+ sysctl.v##ver.interface_version = ver; \ -+ sysctl.v##ver.topologyinfo.max_cpu_index = cpu; \ -+ set_xen_guest_handle(sysctl.v##ver.topologyinfo.cpu_to_core, \ -+ cores); \ -+ set_xen_guest_handle(sysctl.v##ver.topologyinfo.cpu_to_socket, \ -+ socks); \ -+ set_xen_guest_handle(sysctl.v##ver.topologyinfo.cpu_to_node, \ -+ nodes); \ -+ rc = hypervisor_sysctl(&sysctl); \ -+ nr = sysctl.v##ver.topologyinfo.max_cpu_index + 1; \ -+} while (0) -+ -+ BUILD_BUG_ON(XEN_SYSCTL_INTERFACE_VERSION > 8); -+ topologyinfo(8); -+ -+#if CONFIG_XEN_COMPAT < 0x040100 -+#define pm_op_cputopo(ver) do { \ -+ memset(&sysctl, 0, sizeof(sysctl)); \ -+ sysctl.v##ver.cmd = XEN_SYSCTL_pm_op; \ -+ sysctl.v##ver.interface_version = ver; \ -+ sysctl.v##ver.pm_op.cmd = XEN_SYSCTL_pm_op_get_cputopo; \ -+ sysctl.v##ver.pm_op.cpuid = 0; \ -+ sysctl.v##ver.pm_op.get_topo.max_cpus = cpu + 1; \ -+ set_xen_guest_handle(sysctl.v##ver.pm_op.get_topo.cpu_to_core, \ -+ cores); \ -+ 
set_xen_guest_handle(sysctl.v##ver.pm_op.get_topo.cpu_to_socket,\ -+ socks); \ -+ rc = hypervisor_sysctl(&sysctl); \ -+ memset(&sysctl, 0, sizeof(sysctl)); \ -+ sysctl.v##ver.cmd = XEN_SYSCTL_physinfo; \ -+ sysctl.v##ver.interface_version = ver; \ -+ sysctl.v##ver.physinfo.max_cpu_id = cpu; \ -+ set_xen_guest_handle(sysctl.v##ver.physinfo.cpu_to_node, nodes);\ -+ rc = hypervisor_sysctl(&sysctl) ?: rc; \ -+ nr = sysctl.v##ver.physinfo.max_cpu_id + 1; \ -+} while (0) -+ -+ if (rc) -+ pm_op_cputopo(7); -+#endif -+#if CONFIG_XEN_COMPAT < 0x040000 -+ if (rc) -+ pm_op_cputopo(6); -+#endif -+ -+ if (!rc && cpu >= nr) -+ rc = -EDOM; -+ -+ if (!rc && core && (*core = cores[cpu]) == INVALID_TOPOLOGY_ID) -+ rc = -ENOENT; -+ kfree(cores); -+ -+ if (!rc && sock && (*sock = socks[cpu]) == INVALID_TOPOLOGY_ID) -+ rc = -ENOENT; -+ kfree(socks); -+ -+ if (!rc && node && (*node = nodes[cpu]) == INVALID_TOPOLOGY_ID) -+ rc = -ENOENT; -+ kfree(nodes); -+ -+ return rc; -+} -+EXPORT_SYMBOL_GPL(xen_get_topology_info); -+ -+#include -+#include -+ -+int rdmsr_safe_on_pcpu(unsigned int pcpu, u32 msr_no, u32 *l, u32 *h) -+{ -+ int err = xen_set_physical_cpu_affinity(pcpu); -+ -+ switch (err) { -+ case 0: -+ err = rdmsr_safe(msr_no, l, h); -+ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); -+ break; -+ case -EINVAL: -+ /* Fall back in case this is due to dom0_vcpus_pinned. */ -+ err = rdmsr_safe_on_cpu(pcpu, msr_no, l, h) ?: 1; -+ break; -+ } -+ -+ return err; -+} -+EXPORT_SYMBOL_GPL(rdmsr_safe_on_pcpu); -+ -+int wrmsr_safe_on_pcpu(unsigned int pcpu, u32 msr_no, u32 l, u32 h) -+{ -+ int err = xen_set_physical_cpu_affinity(pcpu); -+ -+ switch (err) { -+ case 0: -+ err = wrmsr_safe(msr_no, l, h); -+ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); -+ break; -+ case -EINVAL: -+ /* Fall back in case this is due to dom0_vcpus_pinned. 
*/ -+ err = wrmsr_safe_on_cpu(pcpu, msr_no, l, h) ?: 1; -+ break; -+ } -+ -+ return err; -+} -+EXPORT_SYMBOL_GPL(wrmsr_safe_on_pcpu); -+ -+int rdmsr_safe_regs_on_pcpu(unsigned int pcpu, u32 *regs) -+{ -+ int err = xen_set_physical_cpu_affinity(pcpu); -+ -+ switch (err) { -+ case 0: -+ err = rdmsr_safe_regs(regs); -+ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); -+ break; -+ case -EINVAL: -+ /* Fall back in case this is due to dom0_vcpus_pinned. */ -+ err = rdmsr_safe_regs_on_cpu(pcpu, regs) ?: 1; -+ break; -+ } -+ -+ return err; -+} -+EXPORT_SYMBOL_GPL(rdmsr_safe_regs_on_pcpu); -+ -+int wrmsr_safe_regs_on_pcpu(unsigned int pcpu, u32 *regs) -+{ -+ int err = xen_set_physical_cpu_affinity(pcpu); -+ -+ switch (err) { -+ case 0: -+ err = wrmsr_safe_regs(regs); -+ WARN_ON_ONCE(xen_set_physical_cpu_affinity(-1)); -+ break; -+ case -EINVAL: -+ /* Fall back in case this is due to dom0_vcpus_pinned. */ -+ err = wrmsr_safe_regs_on_cpu(pcpu, regs) ?: 1; -+ break; -+ } -+ -+ return err; -+} -+EXPORT_SYMBOL_GPL(wrmsr_safe_regs_on_pcpu); -+ - #endif /* CONFIG_X86 */ - - MODULE_LICENSE("GPL"); ---- head-2011-03-11.orig/drivers/xen/core/domctl.h 2010-11-23 16:20:20.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/domctl.h 2011-02-03 14:42:26.000000000 +0100 -@@ -1,3 +1,4 @@ - int xen_guest_address_size(int domid); - int xen_guest_blkif_protocol(int domid); - int xen_set_physical_cpu_affinity(int pcpu); -+int xen_get_topology_info(unsigned int cpu, u32 *core, u32 *socket, u32 *node); ---- head-2011-03-11.orig/drivers/xen/core/pcpu.c 2011-02-02 15:09:57.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/pcpu.c 2011-02-03 14:42:26.000000000 +0100 -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - - struct pcpu { -@@ -35,6 +36,44 @@ static DEFINE_MUTEX(xen_pcpu_lock); - - static LIST_HEAD(xen_pcpus); - -+static BLOCKING_NOTIFIER_HEAD(pcpu_chain); -+ -+static inline void *notifier_param(const struct pcpu *pcpu) -+{ -+ return (void *)(unsigned 
long)pcpu->xen_id; -+} -+ -+int register_pcpu_notifier(struct notifier_block *nb) -+{ -+ int err; -+ -+ get_pcpu_lock(); -+ -+ err = blocking_notifier_chain_register(&pcpu_chain, nb); -+ -+ if (!err) { -+ struct pcpu *pcpu; -+ -+ list_for_each_entry(pcpu, &xen_pcpus, pcpu_list) -+ if (xen_pcpu_online(pcpu->flags)) -+ nb->notifier_call(nb, CPU_ONLINE, -+ notifier_param(pcpu)); -+ } -+ -+ put_pcpu_lock(); -+ -+ return err; -+} -+EXPORT_SYMBOL_GPL(register_pcpu_notifier); -+ -+void unregister_pcpu_notifier(struct notifier_block *nb) -+{ -+ get_pcpu_lock(); -+ blocking_notifier_chain_unregister(&pcpu_chain, nb); -+ put_pcpu_lock(); -+} -+EXPORT_SYMBOL_GPL(unregister_pcpu_notifier); -+ - static int xen_pcpu_down(uint32_t xen_id) - { - xen_platform_op_t op = { -@@ -151,12 +190,16 @@ static int xen_pcpu_online_check(struct - if (xen_pcpu_online(info->flags) && !xen_pcpu_online(pcpu->flags)) { - /* the pcpu is onlined */ - pcpu->flags |= XEN_PCPU_FLAGS_ONLINE; -+ blocking_notifier_call_chain(&pcpu_chain, CPU_ONLINE, -+ notifier_param(pcpu)); - kobject_uevent(&pcpu->sysdev.kobj, KOBJ_ONLINE); - result = 1; - } else if (!xen_pcpu_online(info->flags) && - xen_pcpu_online(pcpu->flags)) { - /* The pcpu is offlined now */ - pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE; -+ blocking_notifier_call_chain(&pcpu_chain, CPU_DEAD, -+ notifier_param(pcpu)); - kobject_uevent(&pcpu->sysdev.kobj, KOBJ_OFFLINE); - result = 1; - } -@@ -350,6 +393,8 @@ static irqreturn_t xen_pcpu_interrupt(in - return IRQ_HANDLED; - } - -+#ifdef CONFIG_ACPI_HOTPLUG_CPU -+ - int xen_pcpu_hotplug(int type) - { - schedule_work(&xen_pcpu_work); -@@ -387,6 +432,8 @@ int xen_pcpu_index(uint32_t id, bool is_ - } - EXPORT_SYMBOL_GPL(xen_pcpu_index); - -+#endif /* CONFIG_ACPI_HOTPLUG_CPU */ -+ - static int __init xen_pcpu_init(void) - { - int err; ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/include/xen/pcpu.h 2011-02-03 14:42:26.000000000 +0100 -@@ -0,0 +1,18 @@ -+#ifndef _XEN_SYSCTL_H -+#define 
_XEN_SYSCTL_H -+ -+#include -+ -+int register_pcpu_notifier(struct notifier_block *); -+void unregister_pcpu_notifier(struct notifier_block *); -+ -+#ifdef CONFIG_X86 -+int __must_check rdmsr_safe_on_pcpu(unsigned int pcpu, u32 msr_no, -+ u32 *l, u32 *h); -+int __must_check wrmsr_safe_on_pcpu(unsigned int pcpu, u32 msr_no, -+ u32 l, u32 h); -+int __must_check rdmsr_safe_regs_on_pcpu(unsigned int pcpu, u32 *regs); -+int __must_check wrmsr_safe_regs_on_pcpu(unsigned int pcpu, u32 *regs); -+#endif -+ -+#endif /* _XEN_SYSCTL_H */ diff --git a/patches.xen/xen-x86-no-lapic b/patches.xen/xen-x86-no-lapic deleted file mode 100644 index a7a538f..0000000 --- a/patches.xen/xen-x86-no-lapic +++ /dev/null @@ -1,274 +0,0 @@ -From: jbeulich@novell.com -Subject: Disallow all accesses to the local APIC page -Patch-mainline: n/a -References: bnc#191115 - ---- head-2011-03-11.orig/arch/x86/include/asm/apic.h 2011-02-17 10:23:17.000000000 +0100 -+++ head-2011-03-11/arch/x86/include/asm/apic.h 2011-02-17 10:36:33.000000000 +0100 -@@ -10,7 +10,9 @@ - #include - #include - #include -+#ifndef CONFIG_XEN - #include -+#endif - #include - #include - #include -@@ -49,6 +51,7 @@ static inline void generic_apic_probe(vo - #ifdef CONFIG_X86_LOCAL_APIC - - extern unsigned int apic_verbosity; -+#ifndef CONFIG_XEN - extern int local_apic_timer_c2_ok; - - extern int disable_apic; -@@ -121,6 +124,8 @@ extern u64 native_apic_icr_read(void); - - extern int x2apic_mode; - -+#endif /* CONFIG_XEN */ -+ - #ifdef CONFIG_X86_X2APIC - /* - * Make previous memory operations globally visible before -@@ -371,6 +376,8 @@ struct apic { - */ - extern struct apic *apic; - -+#ifndef CONFIG_XEN -+ - /* - * APIC functionality to boot other CPUs - only used on SMP: - */ -@@ -473,6 +480,8 @@ static inline void default_wait_for_init - - extern void generic_bigsmp_probe(void); - -+#endif /* CONFIG_XEN */ -+ - - #ifdef CONFIG_X86_LOCAL_APIC - -@@ -492,6 +501,8 @@ static inline const struct cpumask *defa - 
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); - - -+#ifndef CONFIG_XEN -+ - static inline unsigned int read_apic_id(void) - { - unsigned int reg; -@@ -600,6 +611,8 @@ extern int default_cpu_present_to_apicid - extern int default_check_phys_apicid_present(int phys_apicid); - #endif - -+#endif /* CONFIG_XEN */ -+ - #endif /* CONFIG_X86_LOCAL_APIC */ - - #ifdef CONFIG_X86_32 ---- head-2011-03-11.orig/arch/x86/include/asm/apicdef.h 2011-03-11 10:41:53.000000000 +0100 -+++ head-2011-03-11/arch/x86/include/asm/apicdef.h 2011-02-03 14:34:58.000000000 +0100 -@@ -17,6 +17,8 @@ - */ - #define IO_APIC_SLOT_SIZE 1024 - -+#ifndef CONFIG_XEN -+ - #define APIC_ID 0x20 - - #define APIC_LVR 0x30 -@@ -143,6 +145,16 @@ - #define APIC_BASE_MSR 0x800 - #define X2APIC_ENABLE (1UL << 10) - -+#else /* CONFIG_XEN */ -+ -+enum { -+ APIC_DEST_ALLBUT = 0x1, -+ APIC_DEST_SELF, -+ APIC_DEST_ALLINC -+}; -+ -+#endif /* CONFIG_XEN */ -+ - #ifdef CONFIG_X86_32 - # define MAX_IO_APICS 64 - # define MAX_LOCAL_APIC 256 -@@ -151,6 +163,8 @@ - # define MAX_LOCAL_APIC 32768 - #endif - -+#ifndef CONFIG_XEN -+ - /* - * All x86-64 systems are xAPIC compatible. - * In the following, "apicid" is a physical APIC ID. 
-@@ -421,6 +435,8 @@ struct local_apic { - - #undef u32 - -+#endif /* CONFIG_XEN */ -+ - #ifdef CONFIG_X86_32 - #define BAD_APICID 0xFFu - #else ---- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-01 15:41:35.000000000 +0100 -+++ head-2011-03-11/arch/x86/include/mach-xen/asm/fixmap.h 2011-02-03 14:34:58.000000000 +0100 -@@ -17,7 +17,6 @@ - #ifndef __ASSEMBLY__ - #include - #include --#include - #include - #ifdef CONFIG_X86_32 - #include -@@ -85,10 +84,10 @@ enum fixed_addresses { - #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT - FIX_OHCI1394_BASE, - #endif -+#ifndef CONFIG_XEN - #ifdef CONFIG_X86_LOCAL_APIC - FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ - #endif --#ifndef CONFIG_XEN - #ifdef CONFIG_X86_IO_APIC - FIX_IO_APIC_BASE_0, - FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, ---- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:12:54.000000000 +0100 -+++ head-2011-03-11/arch/x86/include/mach-xen/asm/smp.h 2011-03-03 16:50:42.000000000 +0100 -@@ -15,7 +15,7 @@ - # include - # endif - #endif --#include -+#include - #include - - extern unsigned int num_processors; -@@ -190,7 +190,7 @@ extern unsigned disabled_cpus __cpuinitd - - #include - --#ifdef CONFIG_X86_LOCAL_APIC -+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) - - #ifndef CONFIG_X86_64 - static inline int logical_smp_processor_id(void) ---- head-2011-03-11.orig/arch/x86/kernel/acpi/boot.c 2011-03-11 11:06:22.000000000 +0100 -+++ head-2011-03-11/arch/x86/kernel/acpi/boot.c 2011-03-11 11:17:06.000000000 +0100 -@@ -74,14 +74,14 @@ int acpi_sci_override_gsi __initdata; - int acpi_skip_timer_override __initdata; - int acpi_use_timer_override __initdata; - int acpi_fix_pin2_polarity __initdata; --#else --#define acpi_skip_timer_override 0 --#define acpi_fix_pin2_polarity 0 --#endif - - #ifdef CONFIG_X86_LOCAL_APIC - static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; - #endif -+#else -+#define 
acpi_skip_timer_override 0 -+#define acpi_fix_pin2_polarity 0 -+#endif - - #ifndef __HAVE_ARCH_CMPXCHG - #warning ACPI uses CMPXCHG, i486 and later hardware -@@ -187,6 +187,7 @@ static int __init acpi_parse_madt(struct - return -ENODEV; - } - -+#ifndef CONFIG_XEN - if (madt->address) { - acpi_lapic_addr = (u64) madt->address; - -@@ -194,7 +195,6 @@ static int __init acpi_parse_madt(struct - madt->address); - } - --#ifndef CONFIG_XEN - default_acpi_madt_oem_check(madt->header.oem_id, - madt->header.oem_table_id); - #endif -@@ -300,6 +300,7 @@ static int __init - acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, - const unsigned long end) - { -+#ifndef CONFIG_XEN - struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL; - - lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header; -@@ -308,6 +309,7 @@ acpi_parse_lapic_addr_ovr(struct acpi_su - return -EINVAL; - - acpi_lapic_addr = lapic_addr_ovr->address; -+#endif - - return 0; - } ---- head-2011-03-11.orig/arch/x86/kernel/irq-xen.c 2011-02-02 15:09:43.000000000 +0100 -+++ head-2011-03-11/arch/x86/kernel/irq-xen.c 2011-02-03 14:34:58.000000000 +0100 -@@ -16,9 +16,9 @@ - #include - #include - -+#ifndef CONFIG_XEN - atomic_t irq_err_count; - --#ifndef CONFIG_XEN - /* Function pointer for generic interrupt vector handling */ - void (*x86_platform_ipi_callback)(void) = NULL; - #endif -@@ -58,7 +58,7 @@ static int show_other_interrupts(struct - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->__nmi_count); - seq_printf(p, " Non-maskable interrupts\n"); --#ifdef CONFIG_X86_LOCAL_APIC -+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) - seq_printf(p, "%*s: ", prec, "LOC"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); -@@ -128,10 +128,12 @@ static int show_other_interrupts(struct - seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); - seq_printf(p, " Machine check polls\n"); - #endif -+#ifndef CONFIG_XEN - seq_printf(p, "%*s: %10u\n", prec, 
"ERR", atomic_read(&irq_err_count)); - #if defined(CONFIG_X86_IO_APIC) - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); - #endif -+#endif - return 0; - } - -@@ -229,12 +231,16 @@ u64 arch_irq_stat_cpu(unsigned int cpu) - - u64 arch_irq_stat(void) - { -+#ifndef CONFIG_XEN - u64 sum = atomic_read(&irq_err_count); - - #ifdef CONFIG_X86_IO_APIC - sum += atomic_read(&irq_mis_count); - #endif - return sum; -+#else -+ return 0; -+#endif - } - - ---- head-2011-03-11.orig/drivers/xen/core/smpboot.c 2011-02-02 15:10:16.000000000 +0100 -+++ head-2011-03-11/drivers/xen/core/smpboot.c 2011-03-03 16:50:49.000000000 +0100 -@@ -283,7 +283,7 @@ void __init smp_prepare_cpus(unsigned in - * Here we can be sure that there is an IO-APIC in the system. Let's - * go and set it up: - */ -- if (!skip_ioapic_setup && nr_ioapics) -+ if (cpu_has_apic && !skip_ioapic_setup && nr_ioapics) - setup_IO_APIC(); - #endif - } diff --git a/patches.xen/xen-x86-panic-no-reboot b/patches.xen/xen-x86-panic-no-reboot deleted file mode 100644 index b80124a..0000000 --- a/patches.xen/xen-x86-panic-no-reboot +++ /dev/null @@ -1,32 +0,0 @@ -From: jbeulich@novell.com -Subject: Don't automatically reboot Dom0 on panic (match native) -Patch-mainline: obsolete - -$subject says it all. - ---- head-2011-01-03.orig/arch/x86/kernel/setup-xen.c 2011-01-03 13:29:09.000000000 +0100 -+++ head-2011-01-03/arch/x86/kernel/setup-xen.c 2011-01-03 14:07:52.000000000 +0100 -@@ -784,15 +784,17 @@ void __init setup_arch(char **cmdline_p) - unsigned long p2m_pages; - struct physdev_set_iopl set_iopl; - -+ if (!is_initial_xendomain()) { - #ifdef CONFIG_X86_32 -- /* Force a quick death if the kernel panics (not domain 0). */ -- extern int panic_timeout; -- if (!panic_timeout && !is_initial_xendomain()) -- panic_timeout = 1; -+ /* Force a quick death if the kernel panics (not domain 0). 
*/ -+ extern int panic_timeout; -+ if (!panic_timeout) -+ panic_timeout = 1; - #endif - -- /* Register a call for panic conditions. */ -- atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); -+ /* Register a call for panic conditions. */ -+ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); -+ } - - set_iopl.iopl = 1; - WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl)); diff --git a/patches.xen/xen-x86-per-cpu-vcpu-info b/patches.xen/xen-x86-per-cpu-vcpu-info deleted file mode 100644 index 76c0768..0000000 --- a/patches.xen/xen-x86-per-cpu-vcpu-info +++ /dev/null @@ -1,636 +0,0 @@ -From: jbeulich@novell.com -Subject: x86: use per-cpu storage for shared vcpu_info structure -Patch-mainline: obsolete - -... reducing access code size and latency, as well as being the -prerequisite for removing the limitation on 32 vCPU-s per guest. - ---- head-2011-03-17.orig/arch/x86/include/asm/percpu.h 2011-03-17 14:35:41.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/asm/percpu.h 2011-02-07 11:41:40.000000000 +0100 -@@ -309,6 +309,40 @@ do { \ - pxo_ret__; \ - }) - -+#define percpu_exchange_op(op, var, val) \ -+({ \ -+ typedef typeof(var) pxo_T__; \ -+ pxo_T__ pxo_ret__; \ -+ if (0) { \ -+ pxo_ret__ = (val); \ -+ (void)pxo_ret__; \ -+ } \ -+ switch (sizeof(var)) { \ -+ case 1: \ -+ asm(op "b %0,"__percpu_arg(1) \ -+ : "=q" (pxo_ret__), "+m" (var) \ -+ : "0" ((pxo_T__)(val))); \ -+ break; \ -+ case 2: \ -+ asm(op "w %0,"__percpu_arg(1) \ -+ : "=r" (pxo_ret__), "+m" (var) \ -+ : "0" ((pxo_T__)(val))); \ -+ break; \ -+ case 4: \ -+ asm(op "l %0,"__percpu_arg(1) \ -+ : "=r" (pxo_ret__), "+m" (var) \ -+ : "0" ((pxo_T__)(val))); \ -+ break; \ -+ case 8: \ -+ asm(op "q %0,"__percpu_arg(1) \ -+ : "=r" (pxo_ret__), "+m" (var) \ -+ : "0" ((pxo_T__)(val))); \ -+ break; \ -+ default: __bad_percpu_size(); \ -+ } \ -+ pxo_ret__; \ -+}) -+ - /* - * cmpxchg has no such implied lock semantics as a result it is much - * more efficient for 
cpu local operations. -@@ -366,6 +400,10 @@ do { \ - #define percpu_or(var, val) percpu_to_op("or", var, val) - #define percpu_xor(var, val) percpu_to_op("xor", var, val) - #define percpu_inc(var) percpu_unary_op("inc", var) -+#define percpu_xchg(var, val) percpu_exchange_op("xchg", var, val) -+#ifdef CONFIG_X86_XADD -+#define percpu_xadd(var, val) percpu_exchange_op("xadd", var, val) -+#endif - - #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) - #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-03 14:41:13.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-03 14:42:15.000000000 +0100 -@@ -51,12 +51,26 @@ - - extern shared_info_t *HYPERVISOR_shared_info; - -+#ifdef CONFIG_XEN_VCPU_INFO_PLACEMENT -+DECLARE_PER_CPU(struct vcpu_info, vcpu_info); -+#define vcpu_info(cpu) (&per_cpu(vcpu_info, cpu)) -+#define current_vcpu_info() (&__get_cpu_var(vcpu_info)) -+#define vcpu_info_read(fld) percpu_read(vcpu_info.fld) -+#define vcpu_info_write(fld, val) percpu_write(vcpu_info.fld, val) -+#define vcpu_info_xchg(fld, val) percpu_xchg(vcpu_info.fld, val) -+void setup_vcpu_info(unsigned int cpu); -+void adjust_boot_vcpu_info(void); -+#else - #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) - #ifdef CONFIG_SMP - #define current_vcpu_info() vcpu_info(smp_processor_id()) - #else - #define current_vcpu_info() vcpu_info(0) - #endif -+#define vcpu_info_read(fld) (current_vcpu_info()->fld) -+#define vcpu_info_write(fld, val) (current_vcpu_info()->fld = (val)) -+static inline void setup_vcpu_info(unsigned int cpu) {} -+#endif - - #ifdef CONFIG_X86_32 - extern unsigned long hypervisor_virt_start; ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-02 15:09:52.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/irqflags.h 2011-02-03 14:42:15.000000000 +0100 -@@ -14,7 +14,7 @@ - * 
includes these barriers, for example. - */ - --#define xen_save_fl(void) (current_vcpu_info()->evtchn_upcall_mask) -+#define xen_save_fl(void) vcpu_info_read(evtchn_upcall_mask) - - #define xen_restore_fl(f) \ - do { \ -@@ -30,7 +30,7 @@ do { \ - - #define xen_irq_disable() \ - do { \ -- current_vcpu_info()->evtchn_upcall_mask = 1; \ -+ vcpu_info_write(evtchn_upcall_mask, 1); \ - barrier(); \ - } while (0) - -@@ -85,8 +85,6 @@ do { \ - #define evtchn_upcall_pending /* 0 */ - #define evtchn_upcall_mask 1 - --#define sizeof_vcpu_shift 6 -- - #ifdef CONFIG_X86_64 - # define __REG_si %rsi - # define __CPU_num PER_CPU_VAR(cpu_number) -@@ -95,6 +93,22 @@ do { \ - # define __CPU_num TI_cpu(%ebp) - #endif - -+#ifdef CONFIG_XEN_VCPU_INFO_PLACEMENT -+ -+#define GET_VCPU_INFO PER_CPU(vcpu_info, __REG_si) -+#define __DISABLE_INTERRUPTS movb $1,PER_CPU_VAR(vcpu_info+evtchn_upcall_mask) -+#define __ENABLE_INTERRUPTS movb $0,PER_CPU_VAR(vcpu_info+evtchn_upcall_mask) -+#define __TEST_PENDING cmpb $0,PER_CPU_VAR(vcpu_info+evtchn_upcall_pending+0) -+#define DISABLE_INTERRUPTS(clb) __DISABLE_INTERRUPTS -+#define ENABLE_INTERRUPTS(clb) __ENABLE_INTERRUPTS -+ -+#define __SIZEOF_DISABLE_INTERRUPTS 8 -+#define __SIZEOF_TEST_PENDING 8 -+ -+#else /* CONFIG_XEN_VCPU_INFO_PLACEMENT */ -+ -+#define sizeof_vcpu_shift 6 -+ - #ifdef CONFIG_SMP - #define GET_VCPU_INFO movl __CPU_num,%esi ; \ - shl $sizeof_vcpu_shift,%esi ; \ -@@ -111,15 +125,21 @@ do { \ - #define ENABLE_INTERRUPTS(clb) GET_VCPU_INFO ; \ - __ENABLE_INTERRUPTS - -+#define __SIZEOF_DISABLE_INTERRUPTS 4 -+#define __SIZEOF_TEST_PENDING 3 -+ -+#endif /* CONFIG_XEN_VCPU_INFO_PLACEMENT */ -+ - #ifndef CONFIG_X86_64 - #define INTERRUPT_RETURN iret --#define ENABLE_INTERRUPTS_SYSEXIT __ENABLE_INTERRUPTS ; \ -+#define ENABLE_INTERRUPTS_SYSEXIT \ -+ movb $0,evtchn_upcall_mask(%esi) /* __ENABLE_INTERRUPTS */ ; \ - sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/ ; \ -- __TEST_PENDING ; \ -+ cmpb $0,evtchn_upcall_pending(%esi) /* 
__TEST_PENDING */ ; \ - jnz 14f /* process more events if necessary... */ ; \ - movl PT_ESI(%esp), %esi ; \ - sysexit ; \ --14: __DISABLE_INTERRUPTS ; \ -+14: movb $1,evtchn_upcall_mask(%esi) /* __DISABLE_INTERRUPTS */ ; \ - TRACE_IRQS_OFF ; \ - sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/ ; \ - mov $__KERNEL_PERCPU, %ecx ; \ ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:41:13.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:42:15.000000000 +0100 -@@ -128,6 +128,8 @@ static inline void xen_set_pgd(pgd_t *pg - - #define __pte_mfn(_pte) (((_pte).pte & PTE_PFN_MASK) >> PAGE_SHIFT) - -+extern unsigned long early_arbitrary_virt_to_mfn(void *va); -+ - extern void sync_global_pgds(unsigned long start, unsigned long end); - - /* ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:13:47.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/system.h 2011-03-03 16:48:41.000000000 +0100 -@@ -247,8 +247,8 @@ static inline void xen_write_cr0(unsigne - asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); - } - --#define xen_read_cr2() (current_vcpu_info()->arch.cr2) --#define xen_write_cr2(val) ((void)(current_vcpu_info()->arch.cr2 = (val))) -+#define xen_read_cr2() vcpu_info_read(arch.cr2) -+#define xen_write_cr2(val) vcpu_info_write(arch.cr2, val) - - static inline unsigned long xen_read_cr3(void) - { ---- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:43:14.000000000 +0100 -+++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:44:07.000000000 +0100 -@@ -346,8 +346,16 @@ static const char *__cpuinit table_looku - __u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata; - __u32 cpu_caps_set[NCAPINTS] __cpuinitdata; - --void load_percpu_segment(int cpu) -+void __ref load_percpu_segment(int cpu) - { -+#ifdef CONFIG_XEN_VCPU_INFO_PLACEMENT -+ static bool done; -+ -+ if (!done) { -+ done = true; -+ 
adjust_boot_vcpu_info(); -+ } -+#endif - #ifdef CONFIG_X86_32 - loadsegment(fs, __KERNEL_PERCPU); - #else ---- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-02-02 15:07:22.000000000 +0100 -+++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-02-03 14:42:15.000000000 +0100 -@@ -439,6 +439,9 @@ sysenter_exit: - movl PT_EIP(%esp), %edx - movl PT_OLDESP(%esp), %ecx - xorl %ebp,%ebp -+#ifdef CONFIG_XEN_VCPU_INFO_PLACEMENT -+ GET_VCPU_INFO -+#endif - TRACE_IRQS_ON - 1: mov PT_FS(%esp), %fs - PTGS_TO_GS -@@ -997,7 +1000,9 @@ critical_region_fixup: - - .section .rodata,"a" - critical_fixup_table: -- .byte -1,-1,-1 # testb $0xff,(%esi) = __TEST_PENDING -+ .rept __SIZEOF_TEST_PENDING -+ .byte -1 -+ .endr - .byte -1,-1 # jnz 14f - .byte 0 # pop %ebx - .byte 1 # pop %ecx -@@ -1016,7 +1021,9 @@ critical_fixup_table: - .byte 10,10,10 # add $8,%esp - #endif - .byte 12 # iret -- .byte -1,-1,-1,-1 # movb $1,1(%esi) = __DISABLE_INTERRUPTS -+ .rept __SIZEOF_DISABLE_INTERRUPTS -+ .byte -1 -+ .endr - .previous - - # Hypervisor uses this for application faults while it executes. ---- head-2011-03-17.orig/arch/x86/kernel/head-xen.c 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-03-17/arch/x86/kernel/head-xen.c 2011-02-03 14:42:15.000000000 +0100 -@@ -144,6 +144,8 @@ void __init xen_start_kernel(void) - HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO); - clear_page(empty_zero_page); - -+ setup_vcpu_info(0); -+ - /* Set up mapping of lowest 1MB of physical memory. 
*/ - for (i = 0; i < NR_FIX_ISAMAPS; i++) - if (is_initial_xendomain()) ---- head-2011-03-17.orig/arch/x86/kernel/time-xen.c 2011-02-02 15:09:52.000000000 +0100 -+++ head-2011-03-17/arch/x86/kernel/time-xen.c 2011-02-03 14:42:15.000000000 +0100 -@@ -247,16 +247,10 @@ static void get_time_values_from_xen(uns - local_irq_restore(flags); - } - --static inline int time_values_up_to_date(unsigned int cpu) -+static inline int time_values_up_to_date(void) - { -- struct vcpu_time_info *src; -- struct shadow_time_info *dst; -- -- src = &vcpu_info(cpu)->time; -- dst = &per_cpu(shadow_time, cpu); -- - rmb(); -- return (dst->version == src->version); -+ return percpu_read(shadow_time.version) == vcpu_info_read(time.version); - } - - static void sync_xen_wallclock(unsigned long dummy); -@@ -301,7 +295,7 @@ unsigned long long xen_local_clock(void) - local_time_version = shadow->version; - rdtsc_barrier(); - time = shadow->system_timestamp + get_nsec_offset(shadow); -- if (!time_values_up_to_date(cpu)) -+ if (!time_values_up_to_date()) - get_time_values_from_xen(cpu); - barrier(); - } while (local_time_version != shadow->version); ---- head-2011-03-17.orig/arch/x86/mm/hypervisor.c 2011-02-03 14:41:13.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/hypervisor.c 2011-02-03 14:42:15.000000000 +0100 -@@ -41,6 +41,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -50,7 +51,103 @@ - EXPORT_SYMBOL(hypercall_page); - - shared_info_t *__read_mostly HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page; -+#ifndef CONFIG_XEN_VCPU_INFO_PLACEMENT - EXPORT_SYMBOL(HYPERVISOR_shared_info); -+#else -+DEFINE_PER_CPU(struct vcpu_info, vcpu_info) __aligned(sizeof(struct vcpu_info)); -+EXPORT_PER_CPU_SYMBOL(vcpu_info); -+ -+void __ref setup_vcpu_info(unsigned int cpu) -+{ -+ struct vcpu_info *v = &per_cpu(vcpu_info, cpu); -+ struct vcpu_register_vcpu_info info; -+#ifdef CONFIG_X86_64 -+ static bool first = true; -+ -+ if (first) { -+ first = false; -+ info.mfn 
= early_arbitrary_virt_to_mfn(v); -+ } else -+#endif -+ info.mfn = arbitrary_virt_to_mfn(v); -+ info.offset = offset_in_page(v); -+ -+ if (HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info)) -+ BUG(); -+} -+ -+void __init adjust_boot_vcpu_info(void) -+{ -+ unsigned long lpfn, rpfn, lmfn, rmfn; -+ pte_t *lpte, *rpte; -+ unsigned int level; -+ mmu_update_t mmu[2]; -+ -+ /* -+ * setup_vcpu_info() cannot be used more than once for a given (v)CPU, -+ * hence we must swap the underlying MFNs of the two pages holding old -+ * and new vcpu_info of the boot CPU. -+ * -+ * Do *not* use __get_cpu_var() or percpu_{write,...}() here, as the per- -+ * CPU segment didn't get reloaded yet. Using percpu_read(), as in -+ * arch_use_lazy_mmu_mode(), though undesirable, is safe except for the -+ * accesses to variables that were updated in setup_percpu_areas(). -+ */ -+ lpte = lookup_address((unsigned long)&vcpu_info -+ + (__per_cpu_load - __per_cpu_start), -+ &level); -+ rpte = lookup_address((unsigned long)&per_cpu(vcpu_info, 0), &level); -+ BUG_ON(!lpte || !(pte_flags(*lpte) & _PAGE_PRESENT)); -+ BUG_ON(!rpte || !(pte_flags(*rpte) & _PAGE_PRESENT)); -+ lmfn = __pte_mfn(*lpte); -+ rmfn = __pte_mfn(*rpte); -+ -+ if (lmfn == rmfn) -+ return; -+ -+ lpfn = mfn_to_local_pfn(lmfn); -+ rpfn = mfn_to_local_pfn(rmfn); -+ -+ pr_info("Swapping MFNs for PFN %lx and %lx (MFN %lx and %lx)\n", -+ lpfn, rpfn, lmfn, rmfn); -+ -+ xen_l1_entry_update(lpte, pfn_pte_ma(rmfn, pte_pgprot(*lpte))); -+ xen_l1_entry_update(rpte, pfn_pte_ma(lmfn, pte_pgprot(*rpte))); -+#ifdef CONFIG_X86_64 -+ if (HYPERVISOR_update_va_mapping((unsigned long)__va(lpfn<> PAGE_SHIFT; -+} -+ - #ifndef CONFIG_XEN - static int __init parse_direct_gbpages_off(char *arg) - { ---- head-2011-03-17.orig/drivers/xen/Kconfig 2011-02-09 16:23:14.000000000 +0100 -+++ head-2011-03-17/drivers/xen/Kconfig 2011-02-09 16:23:27.000000000 +0100 -@@ -372,6 +372,18 @@ config XEN_COMPAT - default 0x030002 if XEN_COMPAT_030002_AND_LATER - 
default 0 - -+config XEN_VCPU_INFO_PLACEMENT -+ bool "Place shared vCPU info in per-CPU storage" -+# depends on X86 && (XEN_COMPAT >= 0x00030101) -+ depends on X86 -+ depends on !XEN_COMPAT_030002_AND_LATER -+ depends on !XEN_COMPAT_030004_AND_LATER -+ depends on !XEN_COMPAT_030100_AND_LATER -+ default SMP -+ ---help--- -+ This allows faster access to the per-vCPU shared info -+ structure. -+ - endmenu - - config HAVE_IRQ_IGNORE_UNHANDLED ---- head-2011-03-17.orig/drivers/xen/core/evtchn.c 2011-02-16 08:30:09.000000000 +0100 -+++ head-2011-03-17/drivers/xen/core/evtchn.c 2011-02-16 08:30:33.000000000 +0100 -@@ -355,6 +355,24 @@ static DEFINE_PER_CPU(unsigned int, upca - static DEFINE_PER_CPU(unsigned int, current_l1i); - static DEFINE_PER_CPU(unsigned int, current_l2i); - -+#ifndef vcpu_info_xchg -+#define vcpu_info_xchg(fld, val) xchg(¤t_vcpu_info()->fld, val) -+#endif -+ -+#ifndef percpu_xadd -+#define percpu_xadd(var, val) \ -+({ \ -+ typeof(var) __tmp_var__; \ -+ unsigned long flags; \ -+ local_irq_save(flags); \ -+ __tmp_var__ = get_cpu_var(var); \ -+ __get_cpu_var(var) += (val); \ -+ put_cpu_var(var); \ -+ local_irq_restore(flags); \ -+ __tmp_var__; \ -+}) -+#endif -+ - /* NB. Interrupts are disabled on entry. */ - asmlinkage void __irq_entry evtchn_do_upcall(struct pt_regs *regs) - { -@@ -363,23 +381,23 @@ asmlinkage void __irq_entry evtchn_do_up - unsigned long masked_l1, masked_l2; - unsigned int l1i, l2i, start_l1i, start_l2i, port, count, i; - int irq; -- vcpu_info_t *vcpu_info = current_vcpu_info(); - - exit_idle(); - irq_enter(); - - do { - /* Avoid a callback storm when we reenable delivery. */ -- vcpu_info->evtchn_upcall_pending = 0; -+ vcpu_info_write(evtchn_upcall_pending, 0); - - /* Nested invocations bail immediately. */ -- percpu_add(upcall_count, 1); -- if (unlikely(percpu_read(upcall_count) != 1)) -+ if (unlikely(percpu_xadd(upcall_count, 1))) - break; - - #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. 
*/ - /* Clear master flag /before/ clearing selector flag. */ - wmb(); -+#else -+ barrier(); - #endif - - #ifndef CONFIG_NO_HZ -@@ -410,7 +428,7 @@ asmlinkage void __irq_entry evtchn_do_up - } - #endif /* CONFIG_NO_HZ */ - -- l1 = xchg(&vcpu_info->evtchn_pending_sel, 0); -+ l1 = vcpu_info_xchg(evtchn_pending_sel, 0); - - start_l1i = l1i = percpu_read(current_l1i); - start_l2i = percpu_read(current_l2i); -@@ -1517,7 +1535,6 @@ void unmask_evtchn(int port) - { - shared_info_t *s = HYPERVISOR_shared_info; - unsigned int cpu = smp_processor_id(); -- vcpu_info_t *vcpu_info = &s->vcpu_info[cpu]; - - BUG_ON(!irqs_disabled()); - -@@ -1531,10 +1548,13 @@ void unmask_evtchn(int port) - synch_clear_bit(port, s->evtchn_mask); - - /* Did we miss an interrupt 'edge'? Re-fire if so. */ -- if (synch_test_bit(port, s->evtchn_pending) && -- !synch_test_and_set_bit(port / BITS_PER_LONG, -- &vcpu_info->evtchn_pending_sel)) -- vcpu_info->evtchn_upcall_pending = 1; -+ if (synch_test_bit(port, s->evtchn_pending)) { -+ vcpu_info_t *v = current_vcpu_info(); -+ -+ if (!synch_test_and_set_bit(port / BITS_PER_LONG, -+ &v->evtchn_pending_sel)) -+ v->evtchn_upcall_pending = 1; -+ } - } - EXPORT_SYMBOL_GPL(unmask_evtchn); - ---- head-2011-03-17.orig/drivers/xen/core/machine_reboot.c 2011-02-03 14:42:11.000000000 +0100 -+++ head-2011-03-17/drivers/xen/core/machine_reboot.c 2011-02-03 14:42:15.000000000 +0100 -@@ -69,7 +69,7 @@ static void pre_suspend(void) - mfn_to_pfn(xen_start_info->console.domU.mfn); - } - --static void post_suspend(int suspend_cancelled) -+static void post_suspend(int suspend_cancelled, int fast_suspend) - { - int i, j, k, fpp; - unsigned long shinfo_mfn; -@@ -86,8 +86,21 @@ static void post_suspend(int suspend_can - #ifdef CONFIG_SMP - cpumask_copy(vcpu_initialized_mask, cpu_online_mask); - #endif -- for_each_possible_cpu(i) -+ for_each_possible_cpu(i) { - setup_runstate_area(i); -+ -+#ifdef CONFIG_XEN_VCPU_INFO_PLACEMENT -+ if (fast_suspend && i != smp_processor_id() -+ && 
HYPERVISOR_vcpu_op(VCPUOP_down, i, NULL)) -+ BUG(); -+ -+ setup_vcpu_info(i); -+ -+ if (fast_suspend && i != smp_processor_id() -+ && HYPERVISOR_vcpu_op(VCPUOP_up, i, NULL)) -+ BUG(); -+#endif -+ } - } - - shinfo_mfn = xen_start_info->shared_info >> PAGE_SHIFT; -@@ -129,7 +142,7 @@ static void post_suspend(int suspend_can - #define switch_idle_mm() ((void)0) - #define mm_pin_all() ((void)0) - #define pre_suspend() xen_pre_suspend() --#define post_suspend(x) xen_post_suspend(x) -+#define post_suspend(x, f) xen_post_suspend(x) - - #endif - -@@ -160,7 +173,7 @@ static int take_machine_down(void *_susp - BUG_ON(suspend_cancelled > 0); - suspend->resume_notifier(suspend_cancelled); - if (suspend_cancelled >= 0) -- post_suspend(suspend_cancelled); -+ post_suspend(suspend_cancelled, suspend->fast_suspend); - if (!suspend_cancelled) - xen_clockevents_resume(); - if (suspend_cancelled >= 0) ---- head-2011-03-17.orig/drivers/xen/core/smpboot.c 2011-03-03 16:50:49.000000000 +0100 -+++ head-2011-03-17/drivers/xen/core/smpboot.c 2011-02-03 14:42:15.000000000 +0100 -@@ -290,8 +290,13 @@ void __init smp_prepare_cpus(unsigned in - - void __init smp_prepare_boot_cpu(void) - { -+ unsigned int cpu; -+ - switch_to_new_gdt(smp_processor_id()); - prefill_possible_map(); -+ for_each_possible_cpu(cpu) -+ if (cpu != smp_processor_id()) -+ setup_vcpu_info(cpu); - } - - #ifdef CONFIG_HOTPLUG_CPU ---- head-2011-03-17.orig/drivers/xen/core/spinlock.c 2011-03-15 16:19:26.000000000 +0100 -+++ head-2011-03-17/drivers/xen/core/spinlock.c 2011-03-15 16:19:57.000000000 +0100 -@@ -144,7 +144,7 @@ unsigned int xen_spin_wait(arch_spinlock - spinning.prev = percpu_read(_spinning); - smp_wmb(); - percpu_write(_spinning, &spinning); -- upcall_mask = current_vcpu_info()->evtchn_upcall_mask; -+ upcall_mask = vcpu_info_read(evtchn_upcall_mask); - - do { - bool nested = false; -@@ -210,13 +210,13 @@ unsigned int xen_spin_wait(arch_spinlock - * intended event processing will happen with the poll - * call. 
- */ -- current_vcpu_info()->evtchn_upcall_mask = -- nested ? upcall_mask : flags; -+ vcpu_info_write(evtchn_upcall_mask, -+ nested ? upcall_mask : flags); - - if (HYPERVISOR_poll_no_timeout(&__get_cpu_var(poll_evtchn), 1)) - BUG(); - -- current_vcpu_info()->evtchn_upcall_mask = upcall_mask; -+ vcpu_info_write(evtchn_upcall_mask, upcall_mask); - - rc = !test_evtchn(percpu_read(poll_evtchn)); - if (!rc) diff --git a/patches.xen/xen-x86-pmd-handling b/patches.xen/xen-x86-pmd-handling deleted file mode 100644 index ce0aad4..0000000 --- a/patches.xen/xen-x86-pmd-handling +++ /dev/null @@ -1,617 +0,0 @@ -From: jbeulich@novell.com -Subject: consolidate pmd/pud/pgd entry handling -Patch-mainline: obsolete - ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-02 15:09:52.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-03 14:41:13.000000000 +0100 -@@ -101,10 +101,12 @@ void xen_invlpg(unsigned long ptr); - void xen_l1_entry_update(pte_t *ptr, pte_t val); - void xen_l2_entry_update(pmd_t *ptr, pmd_t val); - void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ --void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ -+void xen_l4_entry_update(pgd_t *ptr, int user, pgd_t val); /* x86_64 only */ - void xen_pgd_pin(unsigned long ptr); - void xen_pgd_unpin(unsigned long ptr); - -+void xen_init_pgd_pin(void); -+ - void xen_set_ldt(const void *ptr, unsigned int ents); - - #ifdef CONFIG_SMP -@@ -337,6 +339,18 @@ MULTI_update_va_mapping( - } - - static inline void -+MULTI_mmu_update(multicall_entry_t *mcl, mmu_update_t *req, -+ unsigned int count, unsigned int *success_count, -+ domid_t domid) -+{ -+ mcl->op = __HYPERVISOR_mmu_update; -+ mcl->args[0] = (unsigned long)req; -+ mcl->args[1] = count; -+ mcl->args[2] = (unsigned long)success_count; -+ mcl->args[3] = domid; -+} -+ -+static inline void - MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, - void *uop, unsigned int count) - { 
---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-01 15:41:35.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-03 14:41:13.000000000 +0100 -@@ -75,20 +75,16 @@ static inline void pmd_populate(struct m - struct page *pte) - { - unsigned long pfn = page_to_pfn(pte); -+ pmd_t ent = __pmd(((pmdval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE); - - paravirt_alloc_pte(mm, pfn); -- if (PagePinned(virt_to_page(mm->pgd))) { -- if (!PageHighMem(pte)) -- BUG_ON(HYPERVISOR_update_va_mapping( -- (unsigned long)__va(pfn << PAGE_SHIFT), -- pfn_pte(pfn, PAGE_KERNEL_RO), 0)); --#ifndef CONFIG_X86_64 -- else if (!TestSetPagePinned(pte)) -- kmap_flush_unused(); -+ if (PagePinned(virt_to_page(pmd))) { -+#ifndef CONFIG_HIGHPTE -+ BUG_ON(PageHighMem(pte)); - #endif -- set_pmd(pmd, __pmd(((pmdval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); -+ set_pmd(pmd, ent); - } else -- *pmd = __pmd(((pmdval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE); -+ *pmd = ent; - } - - #define pmd_pgtable(pmd) pmd_page(pmd) -@@ -116,39 +112,28 @@ extern void pud_populate(struct mm_struc - #else /* !CONFIG_X86_PAE */ - static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) - { -+ pud_t ent = __pud(_PAGE_TABLE | __pa(pmd)); -+ - paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); -- if (unlikely(PagePinned(virt_to_page((mm)->pgd)))) { -- BUG_ON(HYPERVISOR_update_va_mapping( -- (unsigned long)pmd, -- pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, -- PAGE_KERNEL_RO), 0)); -- set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))); -- } else -- *pud = __pud(_PAGE_TABLE | __pa(pmd)); -+ if (PagePinned(virt_to_page(pud))) -+ set_pud(pud, ent); -+ else -+ *pud = ent; - } - #endif /* CONFIG_X86_PAE */ - - #if PAGETABLE_LEVELS > 3 - #define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD) - --/* -- * We need to use the batch mode here, but pgd_pupulate() won't be -- * be called frequently. 
-- */ - static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) - { -+ pgd_t ent = __pgd(_PAGE_TABLE | __pa(pud)); -+ - paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); -- if (unlikely(PagePinned(virt_to_page((mm)->pgd)))) { -- BUG_ON(HYPERVISOR_update_va_mapping( -- (unsigned long)pud, -- pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, -- PAGE_KERNEL_RO), 0)); -- set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud))); -- set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud))); -- } else { -- *(pgd) = __pgd(_PAGE_TABLE | __pa(pud)); -- *__user_pgd(pgd) = *(pgd); -- } -+ if (unlikely(PagePinned(virt_to_page(pgd)))) -+ xen_l4_entry_update(pgd, 1, ent); -+ else -+ *__user_pgd(pgd) = *pgd = ent; - } - - static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-01 15:55:04.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable.h 2011-02-07 12:14:31.000000000 +0100 -@@ -621,7 +621,7 @@ static inline pmd_t xen_local_pmdp_get_a - { - pmd_t res = *pmdp; - -- xen_pmd_clear(pmdp); -+ xen_set_pmd(pmdp, __pmd(0)); - return res; - } - ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-01 15:47:16.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-02-03 14:41:13.000000000 +0100 -@@ -61,12 +61,15 @@ static inline void __xen_pte_clear(pte_t - ptep->pte_high = 0; - } - --static inline void xen_pmd_clear(pmd_t *pmd) --{ -- xen_l2_entry_update(pmd, __pmd(0)); --} -+#define xen_pmd_clear(pmd) \ -+({ \ -+ pmd_t *__pmdp = (pmd); \ -+ PagePinned(virt_to_page(__pmdp)) \ -+ ? 
set_pmd(__pmdp, __pmd(0)) \ -+ : (void)(*__pmdp = __pmd(0)); \ -+}) - --static inline void pud_clear(pud_t *pudp) -+static inline void __xen_pud_clear(pud_t *pudp) - { - pgdval_t pgd; - -@@ -87,6 +90,14 @@ static inline void pud_clear(pud_t *pudp - xen_tlb_flush(); - } - -+#define xen_pud_clear(pudp) \ -+({ \ -+ pud_t *__pudp = (pudp); \ -+ PagePinned(virt_to_page(__pudp)) \ -+ ? __xen_pud_clear(__pudp) \ -+ : (void)(*__pudp = __pud(0)); \ -+}) -+ - #ifdef CONFIG_SMP - static inline pte_t xen_ptep_get_and_clear(pte_t *ptep, pte_t res) - { ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:39:36.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:41:13.000000000 +0100 -@@ -70,10 +70,13 @@ static inline void xen_set_pmd(pmd_t *pm - xen_l2_entry_update(pmdp, pmd); - } - --static inline void xen_pmd_clear(pmd_t *pmd) --{ -- xen_set_pmd(pmd, xen_make_pmd(0)); --} -+#define xen_pmd_clear(pmd) \ -+({ \ -+ pmd_t *__pmdp = (pmd); \ -+ PagePinned(virt_to_page(__pmdp)) \ -+ ? set_pmd(__pmdp, xen_make_pmd(0)) \ -+ : (void)(*__pmdp = xen_make_pmd(0)); \ -+}) - - #ifdef CONFIG_SMP - static inline pte_t xen_ptep_get_and_clear(pte_t *xp, pte_t ret) -@@ -100,23 +103,28 @@ static inline void xen_set_pud(pud_t *pu - xen_l3_entry_update(pudp, pud); - } - --static inline void xen_pud_clear(pud_t *pud) --{ -- xen_set_pud(pud, xen_make_pud(0)); --} -+#define xen_pud_clear(pud) \ -+({ \ -+ pud_t *__pudp = (pud); \ -+ PagePinned(virt_to_page(__pudp)) \ -+ ? 
set_pud(__pudp, xen_make_pud(0)) \ -+ : (void)(*__pudp = xen_make_pud(0)); \ -+}) - - #define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD) - - static inline void xen_set_pgd(pgd_t *pgdp, pgd_t pgd) - { -- xen_l4_entry_update(pgdp, pgd); -+ xen_l4_entry_update(pgdp, 0, pgd); - } - --static inline void xen_pgd_clear(pgd_t *pgd) --{ -- xen_set_pgd(pgd, xen_make_pgd(0)); -- xen_set_pgd(__user_pgd(pgd), xen_make_pgd(0)); --} -+#define xen_pgd_clear(pgd) \ -+({ \ -+ pgd_t *__pgdp = (pgd); \ -+ PagePinned(virt_to_page(__pgdp)) \ -+ ? xen_l4_entry_update(__pgdp, 1, xen_make_pgd(0)) \ -+ : (void)(*__user_pgd(__pgdp) = *__pgdp = xen_make_pgd(0)); \ -+}) - - #define __pte_mfn(_pte) (((_pte).pte & PTE_PFN_MASK) >> PAGE_SHIFT) - ---- head-2011-03-17.orig/arch/x86/mm/hypervisor.c 2010-12-08 10:45:24.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/hypervisor.c 2011-02-03 14:41:13.000000000 +0100 -@@ -358,31 +358,91 @@ void xen_l1_entry_update(pte_t *ptr, pte - } - EXPORT_SYMBOL_GPL(xen_l1_entry_update); - -+static void do_lN_entry_update(mmu_update_t *mmu, unsigned int mmu_count, -+ struct page *page) -+{ -+ if (likely(page)) { -+ multicall_entry_t mcl[2]; -+ unsigned long pfn = page_to_pfn(page); -+ -+ MULTI_update_va_mapping(mcl, -+ (unsigned long)__va(pfn << PAGE_SHIFT), -+ pfn_pte(pfn, PAGE_KERNEL_RO), 0); -+ SetPagePinned(page); -+ MULTI_mmu_update(mcl + 1, mmu, mmu_count, NULL, DOMID_SELF); -+ if (unlikely(HYPERVISOR_multicall_check(mcl, 2, NULL))) -+ BUG(); -+ } else if (unlikely(HYPERVISOR_mmu_update(mmu, mmu_count, -+ NULL, DOMID_SELF) < 0)) -+ BUG(); -+} -+ - void xen_l2_entry_update(pmd_t *ptr, pmd_t val) - { - mmu_update_t u; -+ struct page *page = NULL; -+ -+ if (likely(pmd_present(val)) && likely(!pmd_large(val)) -+ && likely(mem_map) -+ && likely(PagePinned(virt_to_page(ptr)))) { -+ page = pmd_page(val); -+ if (unlikely(PagePinned(page))) -+ page = NULL; -+ else if (PageHighMem(page)) { -+#ifndef CONFIG_HIGHPTE -+ BUG(); -+#endif -+ kmap_flush_unused(); -+ page = NULL; 
-+ } -+ } - u.ptr = virt_to_machine(ptr); - u.val = __pmd_val(val); -- BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0); -+ do_lN_entry_update(&u, 1, page); - } - - #if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) - void xen_l3_entry_update(pud_t *ptr, pud_t val) - { - mmu_update_t u; -+ struct page *page = NULL; -+ -+ if (likely(pud_present(val)) -+#ifdef CONFIG_X86_64 -+ && likely(!pud_large(val)) -+#endif -+ && likely(mem_map) -+ && likely(PagePinned(virt_to_page(ptr)))) { -+ page = pud_page(val); -+ if (unlikely(PagePinned(page))) -+ page = NULL; -+ } - u.ptr = virt_to_machine(ptr); - u.val = __pud_val(val); -- BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0); -+ do_lN_entry_update(&u, 1, page); - } - #endif - - #ifdef CONFIG_X86_64 --void xen_l4_entry_update(pgd_t *ptr, pgd_t val) -+void xen_l4_entry_update(pgd_t *ptr, int user, pgd_t val) - { -- mmu_update_t u; -- u.ptr = virt_to_machine(ptr); -- u.val = __pgd_val(val); -- BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0); -+ mmu_update_t u[2]; -+ struct page *page = NULL; -+ -+ if (likely(pgd_present(val)) && likely(mem_map) -+ && likely(PagePinned(virt_to_page(ptr)))) { -+ page = pgd_page(val); -+ if (unlikely(PagePinned(page))) -+ page = NULL; -+ } -+ u[0].ptr = virt_to_machine(ptr); -+ u[0].val = __pgd_val(val); -+ if (user) { -+ u[1].ptr = virt_to_machine(__user_pgd(ptr)); -+ u[1].val = __pgd_val(val); -+ do_lN_entry_update(u, 2, page); -+ } else -+ do_lN_entry_update(u, 1, page); - } - #endif /* CONFIG_X86_64 */ - ---- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-02 15:10:16.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-03 14:41:13.000000000 +0100 -@@ -689,6 +689,8 @@ static void __init zone_sizes_init(void) - #endif - - free_area_init_nodes(max_zone_pfns); -+ -+ xen_init_pgd_pin(); - } - - void __init setup_bootmem_allocator(void) -@@ -908,8 +910,6 @@ void __init mem_init(void) - - if (boot_cpu_data.wp_works_ok < 0) - test_wp_bit(); 
-- -- SetPagePinned(virt_to_page(init_mm.pgd)); - } - - #ifdef CONFIG_MEMORY_HOTPLUG ---- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-02 15:10:16.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-03 14:41:13.000000000 +0100 -@@ -231,8 +231,11 @@ static pud_t *fill_pud(pgd_t *pgd, unsig - { - if (pgd_none(*pgd)) { - pud_t *pud = (pud_t *)spp_getpage(); -- make_page_readonly(pud, XENFEAT_writable_page_tables); -- pgd_populate(&init_mm, pgd, pud); -+ if (!after_bootmem) { -+ make_page_readonly(pud, XENFEAT_writable_page_tables); -+ xen_l4_entry_update(pgd, __pgd(__pa(pud) | _PAGE_TABLE)); -+ } else -+ pgd_populate(&init_mm, pgd, pud); - if (pud != pud_offset(pgd, 0)) - printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", - pud, pud_offset(pgd, 0)); -@@ -244,8 +247,11 @@ static pmd_t *fill_pmd(pud_t *pud, unsig - { - if (pud_none(*pud)) { - pmd_t *pmd = (pmd_t *) spp_getpage(); -- make_page_readonly(pmd, XENFEAT_writable_page_tables); -- pud_populate(&init_mm, pud, pmd); -+ if (!after_bootmem) { -+ make_page_readonly(pmd, XENFEAT_writable_page_tables); -+ xen_l3_entry_update(pud, __pud(__pa(pmd) | _PAGE_TABLE)); -+ } else -+ pud_populate(&init_mm, pud, pmd); - if (pmd != pmd_offset(pud, 0)) - printk(KERN_ERR "PAGETABLE BUG #01! 
%p <-> %p\n", - pmd, pmd_offset(pud, 0)); -@@ -578,7 +584,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned - XENFEAT_writable_page_tables); - *pmd = __pmd(pte_phys | _PAGE_TABLE); - } else { -- make_page_readonly(pte, XENFEAT_writable_page_tables); - spin_lock(&init_mm.page_table_lock); - pmd_populate_kernel(&init_mm, pmd, __va(pte_phys)); - spin_unlock(&init_mm.page_table_lock); -@@ -667,7 +672,6 @@ phys_pud_init(pud_t *pud_page, unsigned - else - *pud = __pud(pmd_phys | _PAGE_TABLE); - } else { -- make_page_readonly(pmd, XENFEAT_writable_page_tables); - spin_lock(&init_mm.page_table_lock); - pud_populate(&init_mm, pud, __va(pmd_phys)); - spin_unlock(&init_mm.page_table_lock); -@@ -843,7 +847,6 @@ kernel_physical_mapping_init(unsigned lo - XENFEAT_writable_page_tables); - xen_l4_entry_update(pgd, __pgd(pud_phys | _PAGE_TABLE)); - } else { -- make_page_readonly(pud, XENFEAT_writable_page_tables); - spin_lock(&init_mm.page_table_lock); - pgd_populate(&init_mm, pgd, __va(pud_phys)); - spin_unlock(&init_mm.page_table_lock); -@@ -892,7 +895,7 @@ void __init paging_init(void) - - free_area_init_nodes(max_zone_pfns); - -- SetPagePinned(virt_to_page(init_mm.pgd)); -+ xen_init_pgd_pin(); - } - - /* ---- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2011-03-17 14:26:03.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-03-17 14:34:34.000000000 +0100 -@@ -66,16 +66,16 @@ early_param("userpte", setup_userpte); - void __pte_free(pgtable_t pte) - { - if (!PageHighMem(pte)) { -- unsigned long va = (unsigned long)page_address(pte); -- unsigned int level; -- pte_t *ptep = lookup_address(va, &level); -- -- BUG_ON(!ptep || level != PG_LEVEL_4K || !pte_present(*ptep)); -- if (!pte_write(*ptep) -- && HYPERVISOR_update_va_mapping(va, -- mk_pte(pte, PAGE_KERNEL), -- 0)) -- BUG(); -+ if (PagePinned(pte)) { -+ unsigned long pfn = page_to_pfn(pte); -+ -+ if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT), -+ pfn_pte(pfn, -+ PAGE_KERNEL), -+ 0)) -+ BUG(); 
-+ ClearPagePinned(pte); -+ } - } else - #ifdef CONFIG_HIGHPTE - ClearPagePinned(pte); -@@ -117,14 +117,15 @@ pmd_t *pmd_alloc_one(struct mm_struct *m - - void __pmd_free(pgtable_t pmd) - { -- unsigned long va = (unsigned long)page_address(pmd); -- unsigned int level; -- pte_t *ptep = lookup_address(va, &level); -- -- BUG_ON(!ptep || level != PG_LEVEL_4K || !pte_present(*ptep)); -- if (!pte_write(*ptep) -- && HYPERVISOR_update_va_mapping(va, mk_pte(pmd, PAGE_KERNEL), 0)) -- BUG(); -+ if (PagePinned(pmd)) { -+ unsigned long pfn = page_to_pfn(pmd); -+ -+ if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT), -+ pfn_pte(pfn, PAGE_KERNEL), -+ 0)) -+ BUG(); -+ ClearPagePinned(pmd); -+ } - - ClearPageForeign(pmd); - init_page_count(pmd); -@@ -212,21 +213,20 @@ static inline unsigned int pgd_walk_set_ - { - unsigned long pfn = page_to_pfn(page); - -- if (PageHighMem(page)) { -- if (pgprot_val(flags) & _PAGE_RW) -- ClearPagePinned(page); -- else -- SetPagePinned(page); -- } else { -- MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq, -- (unsigned long)__va(pfn << PAGE_SHIFT), -- pfn_pte(pfn, flags), 0); -- if (unlikely(++seq == PIN_BATCH)) { -- if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu), -- PIN_BATCH, NULL))) -- BUG(); -- seq = 0; -- } -+ if (pgprot_val(flags) & _PAGE_RW) -+ ClearPagePinned(page); -+ else -+ SetPagePinned(page); -+ if (PageHighMem(page)) -+ return seq; -+ MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq, -+ (unsigned long)__va(pfn << PAGE_SHIFT), -+ pfn_pte(pfn, flags), 0); -+ if (unlikely(++seq == PIN_BATCH)) { -+ if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu), -+ PIN_BATCH, NULL))) -+ BUG(); -+ seq = 0; - } - - return seq; -@@ -273,6 +273,16 @@ static void pgd_walk(pgd_t *pgd_base, pg - } - } - -+#ifdef CONFIG_X86_PAE -+ for (; g < PTRS_PER_PGD; g++, pgd++) { -+ BUG_ON(pgd_none(*pgd)); -+ pud = pud_offset(pgd, 0); -+ BUG_ON(pud_none(*pud)); -+ pmd = pmd_offset(pud, 0); -+ seq = 
pgd_walk_set_prot(virt_to_page(pmd),flags,cpu,seq); -+ } -+#endif -+ - mcl = per_cpu(pb_mcl, cpu); - #ifdef CONFIG_X86_64 - if (unlikely(seq > PIN_BATCH - 2)) { -@@ -308,6 +318,51 @@ static void pgd_walk(pgd_t *pgd_base, pg - put_cpu(); - } - -+void __init xen_init_pgd_pin(void) -+{ -+ pgd_t *pgd = init_mm.pgd; -+ pud_t *pud; -+ pmd_t *pmd; -+ unsigned int g, u, m; -+ -+ if (xen_feature(XENFEAT_auto_translated_physmap)) -+ return; -+ -+ SetPagePinned(virt_to_page(pgd)); -+ for (g = 0; g < PTRS_PER_PGD; g++, pgd++) { -+#ifndef CONFIG_X86_PAE -+ if (g >= pgd_index(HYPERVISOR_VIRT_START) -+ && g <= pgd_index(HYPERVISOR_VIRT_END - 1)) -+ continue; -+#endif -+ if (!pgd_present(*pgd)) -+ continue; -+ pud = pud_offset(pgd, 0); -+ if (PTRS_PER_PUD > 1) /* not folded */ -+ SetPagePinned(virt_to_page(pud)); -+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) { -+ if (!pud_present(*pud)) -+ continue; -+ pmd = pmd_offset(pud, 0); -+ if (PTRS_PER_PMD > 1) /* not folded */ -+ SetPagePinned(virt_to_page(pmd)); -+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) { -+#ifdef CONFIG_X86_PAE -+ if (g == pgd_index(HYPERVISOR_VIRT_START) -+ && m >= pmd_index(HYPERVISOR_VIRT_START)) -+ continue; -+#endif -+ if (!pmd_present(*pmd)) -+ continue; -+ SetPagePinned(pmd_page(*pmd)); -+ } -+ } -+ } -+#ifdef CONFIG_X86_64 -+ SetPagePinned(virt_to_page(level3_user_pgt)); -+#endif -+} -+ - static void __pgd_pin(pgd_t *pgd) - { - pgd_walk(pgd, PAGE_KERNEL_RO); -@@ -506,21 +561,18 @@ static void pgd_dtor(pgd_t *pgd) - - void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) - { -- struct page *page = virt_to_page(pmd); -- unsigned long pfn = page_to_pfn(page); -- -- paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); -- - /* Note: almost everything apart from _PAGE_PRESENT is - reserved at the pmd (PDPT) level. 
*/ -- if (PagePinned(virt_to_page(mm->pgd))) { -- BUG_ON(PageHighMem(page)); -- BUG_ON(HYPERVISOR_update_va_mapping( -- (unsigned long)__va(pfn << PAGE_SHIFT), -- pfn_pte(pfn, PAGE_KERNEL_RO), 0)); -- set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); -- } else -- *pudp = __pud(__pa(pmd) | _PAGE_PRESENT); -+ pud_t pud = __pud(__pa(pmd) | _PAGE_PRESENT); -+ -+ paravirt_alloc_pmd(mm, page_to_pfn(virt_to_page(pmd))); -+ -+ if (likely(!PagePinned(virt_to_page(pudp)))) { -+ *pudp = pud; -+ return; -+ } -+ -+ set_pud(pudp, pud); - - /* - * According to Intel App note "TLBs, Paging-Structure Caches, -@@ -615,13 +667,10 @@ static void pgd_prepopulate_pmd(struct m - i++, pud++, addr += PUD_SIZE) { - pmd_t *pmd = pmds[i]; - -- if (i >= KERNEL_PGD_BOUNDARY) { -+ if (i >= KERNEL_PGD_BOUNDARY) - memcpy(pmd, - (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), - sizeof(pmd_t) * PTRS_PER_PMD); -- make_lowmem_page_readonly( -- pmd, XENFEAT_writable_page_tables); -- } - - /* It is safe to poke machine addresses of pmds under the pgd_lock. 
*/ - pud_populate(mm, pud, pmd); diff --git a/patches.xen/xen-x86_64-dump-user-pgt b/patches.xen/xen-x86_64-dump-user-pgt deleted file mode 100644 index e31273b..0000000 --- a/patches.xen/xen-x86_64-dump-user-pgt +++ /dev/null @@ -1,51 +0,0 @@ -From: jbeulich@novell.com -Subject: dump the correct page tables for user mode faults -Patch-mainline: obsolete - ---- head-2011-03-17.orig/arch/x86/mm/fault-xen.c 2011-03-17 14:22:21.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/fault-xen.c 2011-03-17 14:35:18.000000000 +0100 -@@ -345,6 +345,7 @@ static void dump_pagetable(unsigned long - out: - printk(KERN_CONT "\n"); - } -+#define dump_pagetable(addr, krnl) dump_pagetable(addr) - - #else /* CONFIG_X86_64: */ - -@@ -449,7 +450,7 @@ static int bad_address(void *p) - return probe_kernel_address((unsigned long *)p, dummy); - } - --static void dump_pagetable(unsigned long address) -+static void dump_pagetable(unsigned long address, bool kernel) - { - pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK); - pgd_t *pgd = base + pgd_index(address); -@@ -457,6 +458,9 @@ static void dump_pagetable(unsigned long - pmd_t *pmd; - pte_t *pte; - -+ if (!kernel) -+ pgd = __user_pgd(base) + pgd_index(address); -+ - if (bad_address(pgd)) - goto bad; - -@@ -595,7 +599,7 @@ show_fault_oops(struct pt_regs *regs, un - printk(KERN_ALERT "IP:"); - printk_address(regs->ip, 1); - -- dump_pagetable(address); -+ dump_pagetable(address, !(error_code & PF_USER)); - } - - static noinline void -@@ -612,7 +616,7 @@ pgtable_bad(struct pt_regs *regs, unsign - - printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", - tsk->comm, address); -- dump_pagetable(address); -+ dump_pagetable(address, !(error_code & PF_USER)); - - tsk->thread.cr2 = address; - tsk->thread.trap_no = 14; diff --git a/patches.xen/xen-x86_64-note-init-p2m b/patches.xen/xen-x86_64-note-init-p2m deleted file mode 100644 index cee1511..0000000 --- a/patches.xen/xen-x86_64-note-init-p2m +++ /dev/null @@ -1,343 +0,0 @@ -From: 
jbeulich@novell.com -Subject: eliminate scalability issues from initial mapping setup -Patch-mainline: obsolete -References: bnc#417417 - -Direct Xen to place the initial P->M table outside of the initial -mapping, as otherwise the 1G (implementation) / 2G (theoretical) -restriction on the size of the initial mapping limits the amount -of memory a domain can be handed initially. - -Note that the flags passed to HYPERVISOR_update_va_mapping() from -__make_page_writable() and make_lowmem_page_writable() are -intentionally not including UVMF_ALL. This is intended to be on optimal -choice between the overhead of a potential spurious page fault (as -remote CPUs may still have read-only translations in their TLBs) and -the overhead of cross processor flushes. Flushing on the local CPU -shouldn't be as expensive (and hence can be viewed as an optimization -avoiding the spurious page fault on the local CPU), but is required -when the functions are used before the page fault handler gets set up. 
- ---- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-02-01 15:09:47.000000000 +0100 -+++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-02-03 14:42:41.000000000 +0100 -@@ -124,6 +124,14 @@ void __init x86_64_start_reservations(ch - - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); - -+ if (xen_feature(XENFEAT_auto_translated_physmap)) -+ xen_start_info->mfn_list = ~0UL; -+ else if (xen_start_info->mfn_list < __START_KERNEL_map) -+ memblock_x86_reserve_range(xen_start_info->first_p2m_pfn << PAGE_SHIFT, -+ (xen_start_info->first_p2m_pfn -+ + xen_start_info->nr_p2m_frames) << PAGE_SHIFT, -+ "INITP2M"); -+ - /* - * At this point everything still needed from the boot loader - * or BIOS or kernel text should be early reserved or marked not ---- head-2011-03-17.orig/arch/x86/kernel/head_64-xen.S 2011-02-03 14:42:36.000000000 +0100 -+++ head-2011-03-17/arch/x86/kernel/head_64-xen.S 2011-02-03 14:42:41.000000000 +0100 -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -146,6 +147,7 @@ ENTRY(empty_zero_page) - ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad startup_64) - ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad hypercall_page) - ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad _PAGE_PRESENT, _PAGE_PRESENT) -+ ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad VMEMMAP_START) - ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel") - ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic") - ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1) ---- head-2011-03-17.orig/arch/x86/kernel/setup-xen.c 2011-02-03 14:42:11.000000000 +0100 -+++ head-2011-03-17/arch/x86/kernel/setup-xen.c 2011-02-03 14:42:41.000000000 +0100 -@@ -1173,7 +1173,7 @@ void __init setup_arch(char **cmdline_p) - difference = xen_start_info->nr_pages - max_pfn; - - set_xen_guest_handle(reservation.extent_start, -- ((unsigned long 
*)xen_start_info->mfn_list) + max_pfn); -+ phys_to_machine_mapping + max_pfn); - reservation.nr_extents = difference; - ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, - &reservation); -@@ -1190,14 +1190,86 @@ void __init setup_arch(char **cmdline_p) - phys_to_machine_mapping = alloc_bootmem_pages( - max_pfn * sizeof(unsigned long)); - memcpy(phys_to_machine_mapping, -- (unsigned long *)xen_start_info->mfn_list, -+ __va(__pa(xen_start_info->mfn_list)), - p2m_pages * sizeof(unsigned long)); - memset(phys_to_machine_mapping + p2m_pages, ~0, - (max_pfn - p2m_pages) * sizeof(unsigned long)); -- free_bootmem( -- __pa(xen_start_info->mfn_list), -- PFN_PHYS(PFN_UP(xen_start_info->nr_pages * -- sizeof(unsigned long)))); -+ -+#ifdef CONFIG_X86_64 -+ if (xen_start_info->mfn_list == VMEMMAP_START) { -+ /* -+ * Since it is well isolated we can (and since it is -+ * perhaps large we should) also free the page tables -+ * mapping the initial P->M table. -+ */ -+ unsigned long va = VMEMMAP_START, pa; -+ pgd_t *pgd = pgd_offset_k(va); -+ pud_t *pud_page = pud_offset(pgd, 0); -+ -+ BUILD_BUG_ON(VMEMMAP_START & ~PGDIR_MASK); -+ xen_l4_entry_update(pgd, __pgd(0)); -+ for(;;) { -+ pud_t *pud = pud_page + pud_index(va); -+ -+ if (pud_none(*pud)) -+ va += PUD_SIZE; -+ else if (pud_large(*pud)) { -+ pa = pud_val(*pud) & PHYSICAL_PAGE_MASK; -+ make_pages_writable(__va(pa), -+ PUD_SIZE >> PAGE_SHIFT, -+ XENFEAT_writable_page_tables); -+ free_bootmem(pa, PUD_SIZE); -+ va += PUD_SIZE; -+ } else { -+ pmd_t *pmd = pmd_offset(pud, va); -+ -+ if (pmd_large(*pmd)) { -+ pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK; -+ make_pages_writable(__va(pa), -+ PMD_SIZE >> PAGE_SHIFT, -+ XENFEAT_writable_page_tables); -+ free_bootmem(pa, PMD_SIZE); -+ } else if (!pmd_none(*pmd)) { -+ pte_t *pte = pte_offset_kernel(pmd, va); -+ -+ for (i = 0; i < PTRS_PER_PTE; ++i) { -+ if (pte_none(pte[i])) -+ break; -+ pa = pte_pfn(pte[i]) << PAGE_SHIFT; -+ make_page_writable(__va(pa), -+ XENFEAT_writable_page_tables); 
-+ free_bootmem(pa, PAGE_SIZE); -+ } -+ ClearPagePinned(virt_to_page(pte)); -+ make_page_writable(pte, -+ XENFEAT_writable_page_tables); -+ free_bootmem(__pa(pte), PAGE_SIZE); -+ } -+ va += PMD_SIZE; -+ if (pmd_index(va)) -+ continue; -+ ClearPagePinned(virt_to_page(pmd)); -+ make_page_writable(pmd, -+ XENFEAT_writable_page_tables); -+ free_bootmem(__pa((unsigned long)pmd -+ & PAGE_MASK), -+ PAGE_SIZE); -+ } -+ if (!pud_index(va)) -+ break; -+ } -+ ClearPagePinned(virt_to_page(pud_page)); -+ make_page_writable(pud_page, -+ XENFEAT_writable_page_tables); -+ free_bootmem(__pa((unsigned long)pud_page & PAGE_MASK), -+ PAGE_SIZE); -+ } else if (!WARN_ON(xen_start_info->mfn_list -+ < __START_KERNEL_map)) -+#endif -+ free_bootmem(__pa(xen_start_info->mfn_list), -+ PFN_PHYS(PFN_UP(xen_start_info->nr_pages * -+ sizeof(unsigned long)))); -+ - - /* - * Initialise the list of the frames that specify the list of ---- head-2011-03-17.orig/arch/x86/mm/init-xen.c 2011-02-01 15:41:35.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/init-xen.c 2011-02-03 14:42:41.000000000 +0100 -@@ -340,9 +340,22 @@ unsigned long __init_refok init_memory_m - - __flush_tlb_all(); - -- if (!after_bootmem && e820_table_top > e820_table_start) -+ if (!after_bootmem && e820_table_top > e820_table_start) { -+#ifdef CONFIG_X86_64 -+ if (xen_start_info->mfn_list < __START_KERNEL_map -+ && e820_table_start <= xen_start_info->first_p2m_pfn -+ && e820_table_top > xen_start_info->first_p2m_pfn) { -+ memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, -+ xen_start_info->first_p2m_pfn -+ << PAGE_SHIFT, -+ "PGTABLE"); -+ e820_table_start = xen_start_info->first_p2m_pfn -+ + xen_start_info->nr_p2m_frames; -+ } -+#endif - memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, - e820_table_top << PAGE_SHIFT, "PGTABLE"); -+ } - - if (!after_bootmem) - early_memtest(start, end); ---- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:36.000000000 +0100 -+++ 
head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:41.000000000 +0100 -@@ -220,6 +220,17 @@ void sync_global_pgds(unsigned long star - } - } - -+static __init unsigned long get_table_end(void) -+{ -+ BUG_ON(!e820_table_end); -+ if (xen_start_info->mfn_list < __START_KERNEL_map -+ && e820_table_end == xen_start_info->first_p2m_pfn) { -+ e820_table_end += xen_start_info->nr_p2m_frames; -+ e820_table_top += xen_start_info->nr_p2m_frames; -+ } -+ return e820_table_end++; -+} -+ - /* - * NOTE: This function is marked __ref because it calls __init function - * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. -@@ -231,8 +242,7 @@ static __ref void *spp_getpage(void) - if (after_bootmem) - ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); - else if (e820_table_end < e820_table_top) { -- ptr = __va(e820_table_end << PAGE_SHIFT); -- e820_table_end++; -+ ptr = __va(get_table_end() << PAGE_SHIFT); - clear_page(ptr); - } else - ptr = alloc_bootmem_pages(PAGE_SIZE); -@@ -427,8 +437,7 @@ static __ref void *alloc_low_page(unsign - return adr; - } - -- BUG_ON(!e820_table_end); -- pfn = e820_table_end++; -+ pfn = get_table_end(); - if (pfn >= e820_table_top) - panic("alloc_low_page: ran out of memory"); - -@@ -454,14 +463,29 @@ static inline int __meminit make_readonl - /* Make new page tables read-only on the first pass. */ - if (!xen_feature(XENFEAT_writable_page_tables) - && !max_pfn_mapped -- && (paddr >= (e820_table_start << PAGE_SHIFT)) -- && (paddr < (e820_table_top << PAGE_SHIFT))) -- readonly = 1; -+ && (paddr >= (e820_table_start << PAGE_SHIFT))) { -+ unsigned long top = e820_table_top; -+ -+ /* Account for the range get_table_end() skips. */ -+ if (xen_start_info->mfn_list < __START_KERNEL_map -+ && e820_table_end <= xen_start_info->first_p2m_pfn -+ && top > xen_start_info->first_p2m_pfn) -+ top += xen_start_info->nr_p2m_frames; -+ if (paddr < (top << PAGE_SHIFT)) -+ readonly = 1; -+ } - /* Make old page tables read-only. 
*/ - if (!xen_feature(XENFEAT_writable_page_tables) - && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map)) - && (paddr < (e820_table_end << PAGE_SHIFT))) - readonly = 1; -+ /* Make P->M table (and its page tables) read-only. */ -+ if (!xen_feature(XENFEAT_writable_page_tables) -+ && xen_start_info->mfn_list < __START_KERNEL_map -+ && paddr >= (xen_start_info->first_p2m_pfn << PAGE_SHIFT) -+ && paddr < (xen_start_info->first_p2m_pfn -+ + xen_start_info->nr_p2m_frames) << PAGE_SHIFT) -+ readonly = 1; - - /* - * No need for writable mapping of kernel image. This also ensures that -@@ -761,6 +785,12 @@ void __init xen_init_pt(void) - (PTRS_PER_PUD - pud_index(__START_KERNEL_map)) - * sizeof(*level3_kernel_pgt)); - -+ /* Copy the initial P->M table mappings if necessary. */ -+ addr = pgd_index(xen_start_info->mfn_list); -+ if (addr < pgd_index(__START_KERNEL_map)) -+ init_level4_pgt[addr] = -+ ((pgd_t *)xen_start_info->pt_base)[addr]; -+ - /* Do an early initialization of the fixmap area. */ - addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE); - if (pud_present(level3_kernel_pgt[pud_index(addr)])) { -@@ -792,22 +822,27 @@ void __init xen_init_pt(void) - void __init xen_finish_init_mapping(void) - { - unsigned long start, end; -+ struct mmuext_op mmuext; - - /* Re-vector virtual addresses pointing into the initial - mapping to the just-established permanent ones. 
*/ - xen_start_info = __va(__pa(xen_start_info)); - xen_start_info->pt_base = (unsigned long) - __va(__pa(xen_start_info->pt_base)); -- if (!xen_feature(XENFEAT_auto_translated_physmap)) { -+ if (!xen_feature(XENFEAT_auto_translated_physmap) -+ && xen_start_info->mfn_list >= __START_KERNEL_map) - phys_to_machine_mapping = - __va(__pa(xen_start_info->mfn_list)); -- xen_start_info->mfn_list = (unsigned long) -- phys_to_machine_mapping; -- } - if (xen_start_info->mod_start) - xen_start_info->mod_start = (unsigned long) - __va(__pa(xen_start_info->mod_start)); - -+ /* Unpin the no longer used Xen provided page tables. */ -+ mmuext.cmd = MMUEXT_UNPIN_TABLE; -+ mmuext.arg1.mfn = virt_to_mfn(xen_start_info->pt_base); -+ if (HYPERVISOR_mmuext_op(&mmuext, 1, NULL, DOMID_SELF)) -+ BUG(); -+ - /* Destroy the Xen-created mappings beyond the kernel image. */ - start = PAGE_ALIGN(_brk_end); - end = __START_KERNEL_map + (e820_table_start << PAGE_SHIFT); ---- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-03-17 14:33:38.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-03-17 14:35:24.000000000 +0100 -@@ -1500,7 +1500,7 @@ static void __make_page_writable(unsigne - - pte = lookup_address(va, &level); - BUG_ON(!pte || level != PG_LEVEL_4K); -- if (HYPERVISOR_update_va_mapping(va, pte_mkwrite(*pte), 0)) -+ if (HYPERVISOR_update_va_mapping(va, pte_mkwrite(*pte), UVMF_INVLPG)) - BUG(); - if (in_secondary_range(va)) { - unsigned long pfn = pte_pfn(*pte); ---- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2011-03-17 14:35:10.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-02-03 14:42:41.000000000 +0100 -@@ -344,7 +344,7 @@ void __init xen_init_pgd_pin(void) - if (PTRS_PER_PUD > 1) /* not folded */ - SetPagePinned(virt_to_page(pud)); - for (u = 0; u < PTRS_PER_PUD; u++, pud++) { -- if (!pud_present(*pud)) -+ if (!pud_present(*pud) || pud_large(*pud)) - continue; - pmd = pmd_offset(pud, 0); - if (PTRS_PER_PMD > 1) /* not folded */ -@@ -355,7 
+355,7 @@ void __init xen_init_pgd_pin(void) - && m >= pmd_index(HYPERVISOR_VIRT_START)) - continue; - #endif -- if (!pmd_present(*pmd)) -+ if (!pmd_present(*pmd) || pmd_large(*pmd)) - continue; - SetPagePinned(pmd_page(*pmd)); - } ---- head-2011-03-17.orig/arch/x86/mm/pgtable_32-xen.c 2011-02-01 15:03:10.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/pgtable_32-xen.c 2011-02-03 14:42:41.000000000 +0100 -@@ -174,6 +174,6 @@ void make_lowmem_page_writable(void *va, - pte = lookup_address((unsigned long)va, &level); - BUG_ON(!pte || level != PG_LEVEL_4K || !pte_present(*pte)); - rc = HYPERVISOR_update_va_mapping( -- (unsigned long)va, pte_mkwrite(*pte), 0); -+ (unsigned long)va, pte_mkwrite(*pte), UVMF_INVLPG); - BUG_ON(rc); - } diff --git a/patches.xen/xen-x86_64-pgd-alloc-order b/patches.xen/xen-x86_64-pgd-alloc-order deleted file mode 100644 index fb5a46e..0000000 --- a/patches.xen/xen-x86_64-pgd-alloc-order +++ /dev/null @@ -1,337 +0,0 @@ -From: jbeulich@novell.com -Subject: don't require order-1 allocations for pgd-s -Patch-mainline: n/a - -At the same time remove the useless user mode pair of init_level4_pgt. - ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-11-23 16:31:40.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/hypervisor.h 2011-02-03 14:42:36.000000000 +0100 -@@ -106,8 +106,8 @@ void do_hypervisor_callback(struct pt_re - * be MACHINE addresses. 
- */ - --void xen_pt_switch(unsigned long ptr); --void xen_new_user_pt(unsigned long ptr); /* x86_64 only */ -+void xen_pt_switch(pgd_t *); -+void xen_new_user_pt(pgd_t *); /* x86_64 only */ - void xen_load_gs(unsigned int selector); /* x86_64 only */ - void xen_tlb_flush(void); - void xen_invlpg(unsigned long ptr); -@@ -115,7 +115,7 @@ void xen_invlpg(unsigned long ptr); - void xen_l1_entry_update(pte_t *ptr, pte_t val); - void xen_l2_entry_update(pmd_t *ptr, pmd_t val); - void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ --void xen_l4_entry_update(pgd_t *ptr, int user, pgd_t val); /* x86_64 only */ -+void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ - void xen_pgd_pin(pgd_t *); - void xen_pgd_unpin(pgd_t *); - ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-08 10:25:49.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context.h 2011-02-08 10:46:27.000000000 +0100 -@@ -82,6 +82,9 @@ static inline void switch_mm(struct mm_s - { - unsigned cpu = smp_processor_id(); - struct mmuext_op _op[2 + (sizeof(long) > 4)], *op = _op; -+#ifdef CONFIG_X86_64 -+ pgd_t *upgd; -+#endif - - if (likely(prev != next)) { - BUG_ON(!xen_feature(XENFEAT_writable_page_tables) && -@@ -98,10 +101,11 @@ static inline void switch_mm(struct mm_s - op->arg1.mfn = virt_to_mfn(next->pgd); - op++; - -- /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */ -+ /* xen_new_user_pt(next->pgd) */ - #ifdef CONFIG_X86_64 - op->cmd = MMUEXT_NEW_USER_BASEPTR; -- op->arg1.mfn = virt_to_mfn(__user_pgd(next->pgd)); -+ upgd = __user_pgd(next->pgd); -+ op->arg1.mfn = likely(upgd) ? virt_to_mfn(upgd) : 0; - op++; - #endif - -@@ -132,7 +136,7 @@ static inline void switch_mm(struct mm_s - * to make sure to use no freed page tables. 
- */ - load_cr3(next->pgd); -- xen_new_user_pt(__pa(__user_pgd(next->pgd))); -+ xen_new_user_pt(next->pgd); - load_LDT_nolock(&next->context); - } - } ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-03 14:41:13.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc.h 2011-02-03 14:42:36.000000000 +0100 -@@ -123,15 +123,13 @@ static inline void pud_populate(struct m - #endif /* CONFIG_X86_PAE */ - - #if PAGETABLE_LEVELS > 3 --#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD) -- - static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) - { - pgd_t ent = __pgd(_PAGE_TABLE | __pa(pud)); - - paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); - if (unlikely(PagePinned(virt_to_page(pgd)))) -- xen_l4_entry_update(pgd, 1, ent); -+ xen_l4_entry_update(pgd, ent); - else - *__user_pgd(pgd) = *pgd = ent; - } ---- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:42:15.000000000 +0100 -+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-03 14:42:36.000000000 +0100 -@@ -111,18 +111,25 @@ static inline void xen_set_pud(pud_t *pu - : (void)(*__pudp = xen_make_pud(0)); \ - }) - --#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD) -+static inline pgd_t *__user_pgd(pgd_t *pgd) -+{ -+ if (unlikely(((unsigned long)pgd & PAGE_MASK) -+ == (unsigned long)init_level4_pgt)) -+ return NULL; -+ return (pgd_t *)(virt_to_page(pgd)->private -+ + ((unsigned long)pgd & ~PAGE_MASK)); -+} - - static inline void xen_set_pgd(pgd_t *pgdp, pgd_t pgd) - { -- xen_l4_entry_update(pgdp, 0, pgd); -+ xen_l4_entry_update(pgdp, pgd); - } - - #define xen_pgd_clear(pgd) \ - ({ \ - pgd_t *__pgdp = (pgd); \ - PagePinned(virt_to_page(__pgdp)) \ -- ? xen_l4_entry_update(__pgdp, 1, xen_make_pgd(0)) \ -+ ? 
xen_l4_entry_update(__pgdp, xen_make_pgd(0)) \ - : (void)(*__user_pgd(__pgdp) = *__pgdp = xen_make_pgd(0)); \ - }) - ---- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:44:07.000000000 +0100 -+++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-03-17 14:44:15.000000000 +0100 -@@ -1064,8 +1064,7 @@ DEFINE_PER_CPU_FIRST(union irq_stack_uni - void xen_switch_pt(void) - { - #ifdef CONFIG_XEN -- xen_pt_switch(__pa_symbol(init_level4_pgt)); -- xen_new_user_pt(__pa_symbol(__user_pgd(init_level4_pgt))); -+ xen_pt_switch(init_level4_pgt); - #endif - } - ---- head-2011-03-17.orig/arch/x86/kernel/head_64-xen.S 2011-02-01 14:55:46.000000000 +0100 -+++ head-2011-03-17/arch/x86/kernel/head_64-xen.S 2011-02-03 14:42:36.000000000 +0100 -@@ -56,14 +56,6 @@ ENTRY(name) - __PAGE_ALIGNED_BSS - NEXT_PAGE(init_level4_pgt) - .fill 512,8,0 -- /* -- * We update two pgd entries to make kernel and user pgd consistent -- * at pgd_populate(). It can be used for kernel modules. So we place -- * this page here for those cases to avoid memory corruption. -- * We also use this page to establish the initial mapping for the -- * vsyscall area. 
-- */ -- .fill 512,8,0 - - NEXT_PAGE(level3_kernel_pgt) - .fill 512,8,0 ---- head-2011-03-17.orig/arch/x86/mm/hypervisor.c 2010-12-08 10:45:40.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/hypervisor.c 2011-02-03 14:42:36.000000000 +0100 -@@ -521,7 +521,7 @@ void xen_l3_entry_update(pud_t *ptr, pud - #endif - - #ifdef CONFIG_X86_64 --void xen_l4_entry_update(pgd_t *ptr, int user, pgd_t val) -+void xen_l4_entry_update(pgd_t *ptr, pgd_t val) - { - mmu_update_t u[2]; - struct page *page = NULL; -@@ -534,8 +534,11 @@ void xen_l4_entry_update(pgd_t *ptr, int - } - u[0].ptr = virt_to_machine(ptr); - u[0].val = __pgd_val(val); -- if (user) { -- u[1].ptr = virt_to_machine(__user_pgd(ptr)); -+ if (((unsigned long)ptr & ~PAGE_MASK) -+ <= pgd_index(TASK_SIZE_MAX) * sizeof(*ptr)) { -+ ptr = __user_pgd(ptr); -+ BUG_ON(!ptr); -+ u[1].ptr = virt_to_machine(ptr); - u[1].val = __pgd_val(val); - do_lN_entry_update(u, 2, page); - } else -@@ -543,21 +546,25 @@ void xen_l4_entry_update(pgd_t *ptr, int - } - #endif /* CONFIG_X86_64 */ - --void xen_pt_switch(unsigned long ptr) -+#ifdef CONFIG_X86_64 -+void xen_pt_switch(pgd_t *pgd) - { - struct mmuext_op op; - op.cmd = MMUEXT_NEW_BASEPTR; -- op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT); -+ op.arg1.mfn = virt_to_mfn(pgd); - BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0); - } - --void xen_new_user_pt(unsigned long ptr) -+void xen_new_user_pt(pgd_t *pgd) - { - struct mmuext_op op; -+ -+ pgd = __user_pgd(pgd); - op.cmd = MMUEXT_NEW_USER_BASEPTR; -- op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT); -+ op.arg1.mfn = pgd ? 
virt_to_mfn(pgd) : 0; - BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0); - } -+#endif - - void xen_tlb_flush(void) - { -@@ -634,7 +641,14 @@ void xen_pgd_pin(pgd_t *pgd) - op[0].arg1.mfn = virt_to_mfn(pgd); - #ifdef CONFIG_X86_64 - op[1].cmd = op[0].cmd = MMUEXT_PIN_L4_TABLE; -- op[1].arg1.mfn = virt_to_mfn(__user_pgd(pgd)); -+ pgd = __user_pgd(pgd); -+ if (pgd) -+ op[1].arg1.mfn = virt_to_mfn(pgd); -+ else { -+ op[1].cmd = MMUEXT_PIN_L3_TABLE; -+ op[1].arg1.mfn = pfn_to_mfn(__pa_symbol(level3_user_pgt) -+ >> PAGE_SHIFT); -+ } - #endif - if (HYPERVISOR_mmuext_op(op, NR_PGD_PIN_OPS, NULL, DOMID_SELF) < 0) - BUG(); -@@ -647,8 +661,10 @@ void xen_pgd_unpin(pgd_t *pgd) - op[0].cmd = MMUEXT_UNPIN_TABLE; - op[0].arg1.mfn = virt_to_mfn(pgd); - #ifdef CONFIG_X86_64 -+ pgd = __user_pgd(pgd); -+ BUG_ON(!pgd); - op[1].cmd = MMUEXT_UNPIN_TABLE; -- op[1].arg1.mfn = virt_to_mfn(__user_pgd(pgd)); -+ op[1].arg1.mfn = virt_to_mfn(pgd); - #endif - if (HYPERVISOR_mmuext_op(op, NR_PGD_PIN_OPS, NULL, DOMID_SELF) < 0) - BUG(); ---- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2010-11-23 16:31:40.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:36.000000000 +0100 -@@ -761,9 +761,6 @@ void __init xen_init_pt(void) - (PTRS_PER_PUD - pud_index(__START_KERNEL_map)) - * sizeof(*level3_kernel_pgt)); - -- __user_pgd(init_level4_pgt)[pgd_index(VSYSCALL_START)] = -- __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE); -- - /* Do an early initialization of the fixmap area. 
*/ - addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE); - if (pud_present(level3_kernel_pgt[pud_index(addr)])) { -@@ -779,8 +776,6 @@ void __init xen_init_pt(void) - - early_make_page_readonly(init_level4_pgt, - XENFEAT_writable_page_tables); -- early_make_page_readonly(__user_pgd(init_level4_pgt), -- XENFEAT_writable_page_tables); - early_make_page_readonly(level3_kernel_pgt, - XENFEAT_writable_page_tables); - early_make_page_readonly(level3_user_pgt, ---- head-2011-03-17.orig/arch/x86/mm/pgtable-xen.c 2010-11-23 16:31:40.000000000 +0100 -+++ head-2011-03-17/arch/x86/mm/pgtable-xen.c 2011-03-17 14:35:10.000000000 +0100 -@@ -291,9 +291,11 @@ static void pgd_walk(pgd_t *pgd_base, pg - BUG(); - seq = 0; - } -+ pgd = __user_pgd(pgd_base); -+ BUG_ON(!pgd); - MULTI_update_va_mapping(mcl + seq, -- (unsigned long)__user_pgd(pgd_base), -- pfn_pte(virt_to_phys(__user_pgd(pgd_base))>>PAGE_SHIFT, flags), -+ (unsigned long)pgd, -+ pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, flags), - 0); - MULTI_update_va_mapping(mcl + seq + 1, - (unsigned long)pgd_base, -@@ -689,19 +691,37 @@ static void pgd_prepopulate_pmd(struct m - } - } - -+static inline pgd_t *user_pgd_alloc(pgd_t *pgd) -+{ - #ifdef CONFIG_X86_64 --/* We allocate two contiguous pages for kernel and user. 
*/ --#define PGD_ORDER 1 --#else --#define PGD_ORDER 0 -+ if (pgd) { -+ pgd_t *upgd = (void *)__get_free_page(PGALLOC_GFP); -+ -+ if (upgd) -+ set_page_private(virt_to_page(pgd), -+ (unsigned long)upgd); -+ else { -+ free_page((unsigned long)pgd); -+ pgd = NULL; -+ } -+ } -+#endif -+ return pgd; -+} -+ -+static inline void user_pgd_free(pgd_t *pgd) -+{ -+#ifdef CONFIG_X86_64 -+ free_page(page_private(virt_to_page(pgd))); - #endif -+} - - pgd_t *pgd_alloc(struct mm_struct *mm) - { - pgd_t *pgd; - pmd_t *pmds[PREALLOCATED_PMDS]; - -- pgd = (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ORDER); -+ pgd = user_pgd_alloc((void *)__get_free_page(PGALLOC_GFP)); - - if (pgd == NULL) - goto out; -@@ -740,7 +760,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm) - out_free_pmds: - free_pmds(pmds, mm, !xen_feature(XENFEAT_pae_pgdir_above_4gb)); - out_free_pgd: -- free_pages((unsigned long)pgd, PGD_ORDER); -+ user_pgd_free(pgd); -+ free_page((unsigned long)pgd); - out: - return NULL; - } -@@ -759,7 +780,8 @@ void pgd_free(struct mm_struct *mm, pgd_ - - pgd_mop_up_pmds(mm, pgd); - paravirt_pgd_free(mm, pgd); -- free_pages((unsigned long)pgd, PGD_ORDER); -+ user_pgd_free(pgd); -+ free_page((unsigned long)pgd); - } - - /* blktap and gntdev need this, as otherwise they would implicitly (and ---- head-2011-03-17.orig/drivers/xen/core/machine_reboot.c 2011-02-03 14:42:15.000000000 +0100 -+++ head-2011-03-17/drivers/xen/core/machine_reboot.c 2011-02-03 14:42:36.000000000 +0100 -@@ -186,8 +186,7 @@ static int take_machine_down(void *_susp - * in fast-suspend mode as that implies a new enough Xen. 
- */ - if (!suspend->fast_suspend) -- xen_new_user_pt(__pa(__user_pgd( -- current->active_mm->pgd))); -+ xen_new_user_pt(current->active_mm->pgd); - #endif - } - diff --git a/patches.xen/xen-x86_64-pgd-pin b/patches.xen/xen-x86_64-pgd-pin deleted file mode 100644 index b025956..0000000 --- a/patches.xen/xen-x86_64-pgd-pin +++ /dev/null @@ -1,111 +0,0 @@ -From: jbeulich@novell.com -Subject: make pinning of pgd pairs transparent to callers -Patch-mainline: obsolete - ---- head-2010-12-08.orig/arch/x86/include/mach-xen/asm/hypervisor.h 2010-11-23 16:30:41.000000000 +0100 -+++ head-2010-12-08/arch/x86/include/mach-xen/asm/hypervisor.h 2010-11-23 16:31:40.000000000 +0100 -@@ -116,8 +116,8 @@ void xen_l1_entry_update(pte_t *ptr, pte - void xen_l2_entry_update(pmd_t *ptr, pmd_t val); - void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ - void xen_l4_entry_update(pgd_t *ptr, int user, pgd_t val); /* x86_64 only */ --void xen_pgd_pin(unsigned long ptr); --void xen_pgd_unpin(unsigned long ptr); -+void xen_pgd_pin(pgd_t *); -+void xen_pgd_unpin(pgd_t *); - - void xen_init_pgd_pin(void); - ---- head-2010-12-08.orig/arch/x86/mm/hypervisor.c 2010-11-23 16:30:41.000000000 +0100 -+++ head-2010-12-08/arch/x86/mm/hypervisor.c 2010-12-08 10:45:40.000000000 +0100 -@@ -620,26 +620,38 @@ EXPORT_SYMBOL_GPL(xen_invlpg_mask); - - #endif /* CONFIG_SMP */ - --void xen_pgd_pin(unsigned long ptr) --{ -- struct mmuext_op op; - #ifdef CONFIG_X86_64 -- op.cmd = MMUEXT_PIN_L4_TABLE; --#elif defined(CONFIG_X86_PAE) -- op.cmd = MMUEXT_PIN_L3_TABLE; -+#define NR_PGD_PIN_OPS 2 - #else -- op.cmd = MMUEXT_PIN_L2_TABLE; -+#define NR_PGD_PIN_OPS 1 - #endif -- op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT); -- BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0); -+ -+void xen_pgd_pin(pgd_t *pgd) -+{ -+ struct mmuext_op op[NR_PGD_PIN_OPS]; -+ -+ op[0].cmd = MMUEXT_PIN_L3_TABLE; -+ op[0].arg1.mfn = virt_to_mfn(pgd); -+#ifdef CONFIG_X86_64 -+ op[1].cmd = op[0].cmd = MMUEXT_PIN_L4_TABLE; -+ 
op[1].arg1.mfn = virt_to_mfn(__user_pgd(pgd)); -+#endif -+ if (HYPERVISOR_mmuext_op(op, NR_PGD_PIN_OPS, NULL, DOMID_SELF) < 0) -+ BUG(); - } - --void xen_pgd_unpin(unsigned long ptr) -+void xen_pgd_unpin(pgd_t *pgd) - { -- struct mmuext_op op; -- op.cmd = MMUEXT_UNPIN_TABLE; -- op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT); -- BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0); -+ struct mmuext_op op[NR_PGD_PIN_OPS]; -+ -+ op[0].cmd = MMUEXT_UNPIN_TABLE; -+ op[0].arg1.mfn = virt_to_mfn(pgd); -+#ifdef CONFIG_X86_64 -+ op[1].cmd = MMUEXT_UNPIN_TABLE; -+ op[1].arg1.mfn = virt_to_mfn(__user_pgd(pgd)); -+#endif -+ if (HYPERVISOR_mmuext_op(op, NR_PGD_PIN_OPS, NULL, DOMID_SELF) < 0) -+ BUG(); - } - - void xen_set_ldt(const void *ptr, unsigned int ents) ---- head-2010-12-08.orig/arch/x86/mm/init_64-xen.c 2010-11-23 16:30:41.000000000 +0100 -+++ head-2010-12-08/arch/x86/mm/init_64-xen.c 2010-11-23 16:31:40.000000000 +0100 -@@ -790,10 +790,8 @@ void __init xen_init_pt(void) - early_make_page_readonly(level1_fixmap_pgt, - XENFEAT_writable_page_tables); - -- if (!xen_feature(XENFEAT_writable_page_tables)) { -- xen_pgd_pin(__pa_symbol(init_level4_pgt)); -- xen_pgd_pin(__pa_symbol(__user_pgd(init_level4_pgt))); -- } -+ if (!xen_feature(XENFEAT_writable_page_tables)) -+ xen_pgd_pin(init_level4_pgt); - } - - void __init xen_finish_init_mapping(void) ---- head-2010-12-08.orig/arch/x86/mm/pgtable-xen.c 2010-04-15 11:48:29.000000000 +0200 -+++ head-2010-12-08/arch/x86/mm/pgtable-xen.c 2010-11-23 16:31:40.000000000 +0100 -@@ -368,19 +368,13 @@ static void __pgd_pin(pgd_t *pgd) - { - pgd_walk(pgd, PAGE_KERNEL_RO); - kmap_flush_unused(); -- xen_pgd_pin(__pa(pgd)); /* kernel */ --#ifdef CONFIG_X86_64 -- xen_pgd_pin(__pa(__user_pgd(pgd))); /* user */ --#endif -+ xen_pgd_pin(pgd); - SetPagePinned(virt_to_page(pgd)); - } - - static void __pgd_unpin(pgd_t *pgd) - { -- xen_pgd_unpin(__pa(pgd)); --#ifdef CONFIG_X86_64 -- xen_pgd_unpin(__pa(__user_pgd(pgd))); --#endif -+ 
xen_pgd_unpin(pgd); - pgd_walk(pgd, PAGE_KERNEL); - ClearPagePinned(virt_to_page(pgd)); - } diff --git a/patches.xen/xen-x86_64-unmapped-initrd b/patches.xen/xen-x86_64-unmapped-initrd deleted file mode 100644 index 94cc7b8..0000000 --- a/patches.xen/xen-x86_64-unmapped-initrd +++ /dev/null @@ -1,252 +0,0 @@ -From: jbeulich@novell.com -Subject: eliminate scalability issues from initrd handling -Patch-mainline: n/a - -Size restrictions native kernels wouldn't have resulted from the initrd -getting mapped into the initial mapping. The kernel doesn't really need -the initrd to be mapped, so use new infrastructure available in 4.1+ -Xen to avoid the mapping and hence the restriction. - ---- head-2011-01-30.orig/arch/x86/include/mach-xen/asm/setup.h 2011-02-01 14:54:13.000000000 +0100 -+++ head-2011-01-30/arch/x86/include/mach-xen/asm/setup.h 2011-02-03 14:42:45.000000000 +0100 -@@ -3,6 +3,13 @@ - void xen_start_kernel(void); - void xen_arch_setup(void); - -+#ifdef CONFIG_X86_64 -+void reserve_pfn_range(unsigned long pfn, unsigned long nr, char *); -+void reserve_pgtable_low(void); -+#endif -+ -+extern unsigned long xen_initrd_start; -+ - #endif - - #include_next ---- head-2011-01-30.orig/arch/x86/kernel/head-xen.c 2011-02-03 14:42:15.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/head-xen.c 2011-02-03 14:42:45.000000000 +0100 -@@ -75,6 +75,8 @@ extern void nmi(void); - #define CALLBACK_ADDR(fn) { __KERNEL_CS, (unsigned long)(fn) } - #endif - -+unsigned long __initdata xen_initrd_start; -+ - unsigned long *__read_mostly machine_to_phys_mapping = - (void *)MACH2PHYS_VIRT_START; - EXPORT_SYMBOL(machine_to_phys_mapping); ---- head-2011-01-30.orig/arch/x86/kernel/head32-xen.c 2011-02-01 15:41:35.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/head32-xen.c 2011-02-03 14:42:45.000000000 +0100 -@@ -89,6 +89,11 @@ void __init i386_start_kernel(void) - break; - } - #else -+#ifdef CONFIG_BLK_DEV_INITRD -+ BUG_ON(xen_start_info->flags & SIF_MOD_START_PFN); -+ if 
(xen_start_info->mod_start) -+ xen_initrd_start = __pa(xen_start_info->mod_start); -+#endif - { - int max_cmdline; - ---- head-2011-01-30.orig/arch/x86/kernel/head64-xen.c 2011-02-03 14:42:41.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/head64-xen.c 2011-02-03 14:42:45.000000000 +0100 -@@ -124,13 +124,23 @@ void __init x86_64_start_reservations(ch - - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); - -+#ifdef CONFIG_BLK_DEV_INITRD -+ /* Reserve INITRD if needed. */ -+ if (xen_start_info->flags & SIF_MOD_START_PFN) { -+ reserve_pfn_range(xen_start_info->mod_start, -+ PFN_UP(xen_start_info->mod_len), -+ "RAMDISK"); -+ xen_initrd_start = xen_start_info->mod_start << PAGE_SHIFT; -+ } else if (xen_start_info->mod_start) -+ xen_initrd_start = __pa(xen_start_info->mod_start); -+#endif -+ - if (xen_feature(XENFEAT_auto_translated_physmap)) - xen_start_info->mfn_list = ~0UL; - else if (xen_start_info->mfn_list < __START_KERNEL_map) -- memblock_x86_reserve_range(xen_start_info->first_p2m_pfn << PAGE_SHIFT, -- (xen_start_info->first_p2m_pfn -- + xen_start_info->nr_p2m_frames) << PAGE_SHIFT, -- "INITP2M"); -+ reserve_pfn_range(xen_start_info->first_p2m_pfn, -+ xen_start_info->nr_p2m_frames, -+ "INITP2M"); - - /* - * At this point everything still needed from the boot loader ---- head-2011-01-30.orig/arch/x86/kernel/head_64-xen.S 2011-02-03 14:42:41.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/head_64-xen.S 2011-02-03 14:42:45.000000000 +0100 -@@ -147,6 +147,7 @@ ENTRY(empty_zero_page) - ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad startup_64) - ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad hypercall_page) - ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad _PAGE_PRESENT, _PAGE_PRESENT) -+ ELFNOTE(Xen, XEN_ELFNOTE_MOD_START_PFN, .long 1) - ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad VMEMMAP_START) - ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz 
"writable_page_tables|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel") - ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic") ---- head-2011-01-30.orig/arch/x86/kernel/setup-xen.c 2011-02-03 14:42:41.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/setup-xen.c 2011-02-03 14:42:45.000000000 +0100 -@@ -406,7 +406,7 @@ static void __init relocate_initrd(void) - #else - printk(KERN_ERR "initrd extends beyond end of memory " - "(0x%08lx > 0x%08lx)\ndisabling initrd\n", -- __pa(xen_start_info->mod_start) + xen_start_info->mod_len, -+ xen_initrd_start + xen_start_info->mod_len, - max_low_pfn_mapped << PAGE_SHIFT); - initrd_start = 0; - #endif -@@ -425,7 +425,7 @@ static void __init reserve_initrd(void) - !ramdisk_image || !ramdisk_size) - return; /* No initrd provided by bootloader */ - #else -- unsigned long ramdisk_image = __pa(xen_start_info->mod_start); -+ unsigned long ramdisk_image = xen_initrd_start; - unsigned long ramdisk_size = xen_start_info->mod_len; - unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); - unsigned long end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; ---- head-2011-01-30.orig/arch/x86/mm/init-xen.c 2011-02-03 14:42:41.000000000 +0100 -+++ head-2011-01-30/arch/x86/mm/init-xen.c 2011-02-03 14:42:45.000000000 +0100 -@@ -342,16 +342,7 @@ unsigned long __init_refok init_memory_m - - if (!after_bootmem && e820_table_top > e820_table_start) { - #ifdef CONFIG_X86_64 -- if (xen_start_info->mfn_list < __START_KERNEL_map -- && e820_table_start <= xen_start_info->first_p2m_pfn -- && e820_table_top > xen_start_info->first_p2m_pfn) { -- memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, -- xen_start_info->first_p2m_pfn -- << PAGE_SHIFT, -- "PGTABLE"); -- e820_table_start = xen_start_info->first_p2m_pfn -- + xen_start_info->nr_p2m_frames; -- } -+ reserve_pgtable_low(); - #endif - memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, - e820_table_top << PAGE_SHIFT, "PGTABLE"); ---- 
head-2011-01-30.orig/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:41.000000000 +0100 -+++ head-2011-01-30/arch/x86/mm/init_64-xen.c 2011-02-03 14:42:45.000000000 +0100 -@@ -220,13 +220,73 @@ void sync_global_pgds(unsigned long star - } - } - -+static struct reserved_pfn_range { -+ unsigned long pfn, nr; -+} reserved_pfn_ranges[3] __meminitdata; -+ -+void __init reserve_pfn_range(unsigned long pfn, unsigned long nr, char *name) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < ARRAY_SIZE(reserved_pfn_ranges); ++i) { -+ struct reserved_pfn_range *range = reserved_pfn_ranges + i; -+ -+ if (!range->nr) { -+ range->pfn = pfn; -+ range->nr = nr; -+ break; -+ } -+ BUG_ON(range->pfn < pfn + nr && pfn < range->pfn + range->nr); -+ if (range->pfn > pfn) { -+ i = ARRAY_SIZE(reserved_pfn_ranges) - 1; -+ if (reserved_pfn_ranges[i].nr) -+ continue; -+ for (; reserved_pfn_ranges + i > range; --i) -+ reserved_pfn_ranges[i] -+ = reserved_pfn_ranges[i - 1]; -+ range->pfn = pfn; -+ range->nr = nr; -+ break; -+ } -+ } -+ BUG_ON(i >= ARRAY_SIZE(reserved_pfn_ranges)); -+ memblock_x86_reserve_range(pfn << PAGE_SHIFT, -+ (pfn + nr) << PAGE_SHIFT, name); -+} -+ -+void __init reserve_pgtable_low(void) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < ARRAY_SIZE(reserved_pfn_ranges); ++i) { -+ struct reserved_pfn_range *range = reserved_pfn_ranges + i; -+ -+ if (!range->nr) -+ break; -+ if (e820_table_start <= range->pfn -+ && e820_table_top > range->pfn) { -+ memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, -+ range->pfn << PAGE_SHIFT, -+ "PGTABLE"); -+ e820_table_start = range->pfn + range->nr; -+ } -+ } -+} -+ - static __init unsigned long get_table_end(void) - { -+ unsigned int i; -+ - BUG_ON(!e820_table_end); -- if (xen_start_info->mfn_list < __START_KERNEL_map -- && e820_table_end == xen_start_info->first_p2m_pfn) { -- e820_table_end += xen_start_info->nr_p2m_frames; -- e820_table_top += xen_start_info->nr_p2m_frames; -+ for (i = 0; i < ARRAY_SIZE(reserved_pfn_ranges); ++i) { -+ struct 
reserved_pfn_range *range = reserved_pfn_ranges + i; -+ -+ if (!range->nr) -+ break; -+ if (e820_table_end == range->pfn) { -+ e820_table_end += range->nr; -+ e820_table_top += range->nr; -+ } - } - return e820_table_end++; - } -@@ -465,14 +525,25 @@ static inline int __meminit make_readonl - && !max_pfn_mapped - && (paddr >= (e820_table_start << PAGE_SHIFT))) { - unsigned long top = e820_table_top; -+ unsigned int i; -+ -+ /* Account for the ranges get_table_end() skips. */ -+ for (i = 0; i < ARRAY_SIZE(reserved_pfn_ranges); ++i) { -+ const struct reserved_pfn_range *range; - -- /* Account for the range get_table_end() skips. */ -- if (xen_start_info->mfn_list < __START_KERNEL_map -- && e820_table_end <= xen_start_info->first_p2m_pfn -- && top > xen_start_info->first_p2m_pfn) -- top += xen_start_info->nr_p2m_frames; -+ range = reserved_pfn_ranges + i; -+ if (!range->nr) -+ continue; -+ if (e820_table_end <= range->pfn && top > range->pfn) { -+ if (paddr > (range->pfn << PAGE_SHIFT) -+ && paddr < ((range->pfn + range->nr) -+ << PAGE_SHIFT)) -+ break; -+ top += range->nr; -+ } -+ } - if (paddr < (top << PAGE_SHIFT)) -- readonly = 1; -+ readonly = (i >= ARRAY_SIZE(reserved_pfn_ranges)); - } - /* Make old page tables read-only. */ - if (!xen_feature(XENFEAT_writable_page_tables) -@@ -833,9 +904,6 @@ void __init xen_finish_init_mapping(void - && xen_start_info->mfn_list >= __START_KERNEL_map) - phys_to_machine_mapping = - __va(__pa(xen_start_info->mfn_list)); -- if (xen_start_info->mod_start) -- xen_start_info->mod_start = (unsigned long) -- __va(__pa(xen_start_info->mod_start)); - - /* Unpin the no longer used Xen provided page tables. 
*/ - mmuext.cmd = MMUEXT_UNPIN_TABLE; diff --git a/patches.xen/xen3-auto-arch-i386.diff b/patches.xen/xen3-auto-arch-i386.diff deleted file mode 100644 index 4c0744d..0000000 --- a/patches.xen/xen3-auto-arch-i386.diff +++ /dev/null @@ -1,193 +0,0 @@ -Subject: xen3 arch-i386 -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) -Patch-mainline: n/a -Acked-by: jbeulich@novell.com - ---- head-2011-01-30.orig/arch/x86/kernel/asm-offsets_32.c 2011-02-01 13:47:44.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/asm-offsets_32.c 2011-01-31 14:54:00.000000000 +0100 -@@ -93,9 +93,14 @@ void foo(void) - OFFSET(pbe_orig_address, pbe, orig_address); - OFFSET(pbe_next, pbe, next); - -+#ifndef CONFIG_X86_NO_TSS - /* Offset from the sysenter stack to tss.sp0 */ -- DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - -+ DEFINE(SYSENTER_stack_sp0, offsetof(struct tss_struct, x86_tss.sp0) - - sizeof(struct tss_struct)); -+#else -+ /* sysenter stack points directly to sp0 */ -+ DEFINE(SYSENTER_stack_sp0, 0); -+#endif - - DEFINE(PAGE_SIZE_asm, PAGE_SIZE); - DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); ---- head-2011-01-30.orig/arch/x86/kernel/entry_32.S 2011-02-01 13:57:16.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/entry_32.S 2011-02-01 14:10:27.000000000 +0100 -@@ -375,7 +375,7 @@ ENTRY(ia32_sysenter_target) - CFI_SIGNAL_FRAME - CFI_DEF_CFA esp, 0 - CFI_REGISTER esp, ebp -- movl TSS_sysenter_sp0(%esp),%esp -+ movl SYSENTER_stack_sp0(%esp),%esp - sysenter_past_esp: - /* - * Interrupts are disabled here, but we can't trace it until -@@ -1307,7 +1307,7 @@ END(page_fault) - * that sets up the real kernel stack. Check here, since we can't - * allow the wrong stack to be used. - * -- * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have -+ * "SYSENTER_stack_sp0+12" is because the NMI/debug handler will have - * already pushed 3 words if it hits on the sysenter instruction: - * eflags, cs and eip. 
- * -@@ -1319,7 +1319,7 @@ END(page_fault) - cmpw $__KERNEL_CS, 4(%esp) - jne \ok - \label: -- movl TSS_sysenter_sp0 + \offset(%esp), %esp -+ movl SYSENTER_stack_sp0 + \offset(%esp), %esp - CFI_DEF_CFA esp, 0 - CFI_UNDEFINED eip - pushfl_cfi ---- head-2011-01-30.orig/arch/x86/kernel/machine_kexec_32.c 2010-01-19 14:51:07.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/machine_kexec_32.c 2011-01-31 14:54:00.000000000 +0100 -@@ -27,6 +27,10 @@ - #include - #include - -+#ifdef CONFIG_XEN -+#include -+#endif -+ - static void machine_kexec_free_page_tables(struct kimage *image) - { - free_page((unsigned long)image->arch.pgd); -@@ -97,6 +101,55 @@ static void machine_kexec_prepare_page_t - __pa(control_page), __pa(control_page)); - } - -+#ifdef CONFIG_XEN -+ -+#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT) -+ -+#if PAGES_NR > KEXEC_XEN_NO_PAGES -+#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break -+#endif -+ -+#if PA_CONTROL_PAGE != 0 -+#error PA_CONTROL_PAGE is non zero - Xen support will break -+#endif -+ -+void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image) -+{ -+ void *control_page; -+ -+ memset(xki->page_list, 0, sizeof(xki->page_list)); -+ -+ control_page = page_address(image->control_code_page); -+ memcpy(control_page, relocate_kernel, PAGE_SIZE); -+ -+ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page); -+ xki->page_list[PA_PGD] = __ma(kexec_pgd); -+#ifdef CONFIG_X86_PAE -+ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0); -+ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1); -+#endif -+ xki->page_list[PA_PTE_0] = __ma(kexec_pte0); -+ xki->page_list[PA_PTE_1] = __ma(kexec_pte1); -+ -+} -+ -+int __init machine_kexec_setup_resources(struct resource *hypervisor, -+ struct resource *phys_cpus, -+ int nr_phys_cpus) -+{ -+ int k; -+ -+ /* The per-cpu crash note resources belong to the hypervisor resource */ -+ for (k = 0; k < nr_phys_cpus; k++) -+ request_resource(hypervisor, phys_cpus + k); -+ -+ 
return 0; -+} -+ -+void machine_kexec_register_resources(struct resource *res) { ; } -+ -+#endif /* CONFIG_XEN */ -+ - /* - * A architecture hook called to validate the - * proposed image and prepare the control pages -@@ -134,6 +187,7 @@ void machine_kexec_cleanup(struct kimage - machine_kexec_free_page_tables(image); - } - -+#ifndef CONFIG_XEN - /* - * Do not allocate memory (or fail in any way) in machine_kexec(). - * We are past the point of no return, committed to rebooting now. -@@ -199,6 +253,7 @@ void machine_kexec(struct kimage *image) - - __ftrace_enabled_restore(save_ftrace_enabled); - } -+#endif - - void arch_crash_save_vmcoreinfo(void) - { ---- head-2011-01-30.orig/arch/x86/kernel/vm86_32.c 2011-02-01 13:47:44.000000000 +0100 -+++ head-2011-01-30/arch/x86/kernel/vm86_32.c 2011-01-31 14:54:00.000000000 +0100 -@@ -125,7 +125,9 @@ static int copy_vm86_regs_from_user(stru - - struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) - { -+#ifndef CONFIG_X86_NO_TSS - struct tss_struct *tss; -+#endif - struct pt_regs *ret; - unsigned long tmp; - -@@ -148,12 +150,16 @@ struct pt_regs *save_v86_state(struct ke - do_exit(SIGSEGV); - } - -+#ifndef CONFIG_X86_NO_TSS - tss = &per_cpu(init_tss, get_cpu()); -+#endif - current->thread.sp0 = current->thread.saved_sp0; - current->thread.sysenter_cs = __KERNEL_CS; - load_sp0(tss, ¤t->thread); - current->thread.saved_sp0 = 0; -+#ifndef CONFIG_X86_NO_TSS - put_cpu(); -+#endif - - ret = KVM86->regs32; - -@@ -280,7 +286,9 @@ out: - - static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk) - { -+#ifndef CONFIG_X86_NO_TSS - struct tss_struct *tss; -+#endif - /* - * make sure the vm86() system call doesn't try to do anything silly - */ -@@ -324,12 +332,16 @@ static void do_sys_vm86(struct kernel_vm - tsk->thread.saved_fs = info->regs32->fs; - tsk->thread.saved_gs = get_user_gs(info->regs32); - -+#ifndef CONFIG_X86_NO_TSS - tss = &per_cpu(init_tss, get_cpu()); -+#endif - tsk->thread.sp0 = (unsigned 
long) &info->VM86_TSS_ESP0; - if (cpu_has_sep) - tsk->thread.sysenter_cs = 0; - load_sp0(tss, &tsk->thread); -+#ifndef CONFIG_X86_NO_TSS - put_cpu(); -+#endif - - tsk->thread.screen_bitmap = info->screen_bitmap; - if (info->flags & VM86_SCREEN_BITMAP) diff --git a/patches.xen/xen3-auto-arch-x86.diff b/patches.xen/xen3-auto-arch-x86.diff deleted file mode 100644 index dc067b0..0000000 --- a/patches.xen/xen3-auto-arch-x86.diff +++ /dev/null @@ -1,516 +0,0 @@ -Subject: xen3 arch-x86 -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) -Patch-mainline: n/a -Acked-by: jbeulich@novell.com - -List of files that don't require modification anymore (and hence -removed from this patch), for reference and in case upstream wants to -take the forward porting patches: -2.6.26/arch/x86/kernel/crash.c -2.6.30/arch/x86/kernel/acpi/boot.c - ---- - arch/x86/Makefile | 24 +++++++++++++++++++++++- - arch/x86/boot/Makefile | 9 +++++++++ - arch/x86/include/asm/acpi.h | 27 +++++++++++++++++++++++++++ - arch/x86/include/asm/apic.h | 2 ++ - arch/x86/include/asm/kexec.h | 13 +++++++++++++ - arch/x86/include/asm/topology.h | 2 +- - arch/x86/include/asm/types.h | 2 +- - arch/x86/kernel/Makefile | 9 +++++++++ - arch/x86/kernel/acpi/Makefile | 4 ++++ - arch/x86/kernel/cpu/mcheck/Makefile | 1 + - arch/x86/kernel/cpu/mcheck/mce.c | 21 +++++++++++++++++++++ - arch/x86/kernel/cpu/mtrr/Makefile | 1 + - arch/x86/lib/Makefile | 2 ++ - arch/x86/mm/Makefile | 2 ++ - arch/x86/oprofile/Makefile | 7 +++++++ - arch/x86/pci/Makefile | 3 +++ - arch/x86/power/cpu.c | 4 ++++ - arch/x86/vdso/Makefile | 2 ++ - arch/x86/vdso/vdso32-setup.c | 34 ++++++++++++++++++++++++++++++++++ - 19 files changed, 166 insertions(+), 3 deletions(-) - -Index: linux-2.6.38-master/arch/x86/Makefile -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/Makefile -+++ linux-2.6.38-master/arch/x86/Makefile -@@ -116,6 +116,10 @@ endif - # prevent gcc from 
generating any FP code by mistake - KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) - -+# Xen subarch support -+mflags-$(CONFIG_X86_XEN) := -Iinclude/asm-x86/mach-xen -+mcore-$(CONFIG_X86_XEN) := arch/x86/mach-xen/ -+ - KBUILD_CFLAGS += $(mflags-y) - KBUILD_AFLAGS += $(mflags-y) - -@@ -151,9 +155,26 @@ boot := arch/x86/boot - - BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage - --PHONY += bzImage $(BOOT_TARGETS) -+PHONY += bzImage vmlinuz $(BOOT_TARGETS) -+ -+ifdef CONFIG_XEN -+CPPFLAGS := -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION) \ -+ -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS) -+ -+ifdef CONFIG_X86_64 -+LDFLAGS_vmlinux := -e startup_64 -+endif - - # Default kernel to build -+all: vmlinuz -+ -+# KBUILD_IMAGE specifies the target image being built -+KBUILD_IMAGE := $(boot)/vmlinuz -+ -+vmlinuz: vmlinux -+ $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE) -+else -+# Default kernel to build - all: bzImage - - # KBUILD_IMAGE specify target image being built -@@ -169,6 +190,7 @@ endif - - $(BOOT_TARGETS): vmlinux - $(Q)$(MAKE) $(build)=$(boot) $@ -+endif - - PHONY += install - install: -Index: linux-2.6.38-master/arch/x86/boot/Makefile -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/boot/Makefile -+++ linux-2.6.38-master/arch/x86/boot/Makefile -@@ -23,6 +23,7 @@ ROOT_DEV := CURRENT - SVGA_MODE := -DSVGA_MODE=NORMAL_VGA - - targets := vmlinux.bin setup.bin setup.elf bzImage -+targets += vmlinuz vmlinux-stripped - targets += fdimage fdimage144 fdimage288 image.iso mtools.conf - subdir- := compressed - -@@ -195,6 +196,14 @@ bzlilo: $(obj)/bzImage - cp System.map $(INSTALL_PATH)/ - if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi - -+$(obj)/vmlinuz: $(obj)/vmlinux-stripped FORCE -+ $(call if_changed,gzip) -+ @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' -+ -+$(obj)/vmlinux-stripped: OBJCOPYFLAGS := -g --strip-unneeded 
-+$(obj)/vmlinux-stripped: vmlinux FORCE -+ $(call if_changed,objcopy) -+ - install: - sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/bzImage \ - System.map "$(INSTALL_PATH)" -Index: linux-2.6.38-master/arch/x86/kernel/Makefile -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/kernel/Makefile -+++ linux-2.6.38-master/arch/x86/kernel/Makefile -@@ -110,9 +110,12 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) - - obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o - -+obj-$(CONFIG_X86_XEN) += fixup.o -+ - ### - # 64 bit specific files - ifeq ($(CONFIG_X86_64),y) -+ obj-$(CONFIG_X86_XEN_GENAPIC) += genapic_xen_64.o - obj-$(CONFIG_AUDIT) += audit_64.o - - obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o -@@ -121,4 +124,10 @@ ifeq ($(CONFIG_X86_64),y) - - obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o - obj-y += vsmp_64.o -+ -+ time_64-$(CONFIG_XEN) += time_32.o -+ pci-dma_64-$(CONFIG_XEN) += pci-dma_32.o - endif -+ -+disabled-obj-$(CONFIG_XEN) := i8259_$(BITS).o reboot.o smpboot_$(BITS).o -+%/head_$(BITS).o %/head_$(BITS).s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) := -Index: linux-2.6.38-master/arch/x86/kernel/acpi/Makefile -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/kernel/acpi/Makefile -+++ linux-2.6.38-master/arch/x86/kernel/acpi/Makefile -@@ -5,6 +5,9 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wake - - ifneq ($(CONFIG_ACPI_PROCESSOR),) - obj-y += cstate.o -+ifneq ($(CONFIG_PROCESSOR_EXTERNAL_CONTROL),) -+obj-$(CONFIG_XEN) += processor_extcntl_xen.o -+endif - endif - - $(obj)/wakeup_rm.o: $(obj)/realmode/wakeup.bin -@@ -12,3 +15,4 @@ $(obj)/wakeup_rm.o: $(obj)/realmode/w - $(obj)/realmode/wakeup.bin: FORCE - $(Q)$(MAKE) $(build)=$(obj)/realmode - -+disabled-obj-$(CONFIG_XEN) := cstate.o wakeup_$(BITS).o -Index: linux-2.6.38-master/arch/x86/kernel/cpu/mcheck/Makefile -=================================================================== ---- 
linux-2.6.38-master.orig/arch/x86/kernel/cpu/mcheck/Makefile -+++ linux-2.6.38-master/arch/x86/kernel/cpu/mcheck/Makefile -@@ -3,6 +3,7 @@ obj-y = mce.o mce-severity.o - obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o - obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o - obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o -+obj-$(CONFIG_X86_XEN_MCE) += mce_dom0.o - obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o - obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o - -Index: linux-2.6.38-master/arch/x86/kernel/cpu/mcheck/mce.c -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/kernel/cpu/mcheck/mce.c -+++ linux-2.6.38-master/arch/x86/kernel/cpu/mcheck/mce.c -@@ -1146,8 +1146,15 @@ void mce_log_therm_throt_event(__u64 sta - * Periodic polling timer for "silent" machine check errors. If the - * poller finds an MCE, poll 2x faster. When the poller finds no more - * errors, poll 2x slower (up to check_interval seconds). -+ * -+ * We will disable polling in DOM0 since all CMCI/Polling -+ * mechanism will be done in XEN for Intel CPUs - */ -+#if defined (CONFIG_X86_XEN_MCE) -+static int check_interval = 0; /* disable polling */ -+#else - static int check_interval = 5 * 60; /* 5 minutes */ -+#endif - - static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */ - static DEFINE_PER_CPU(struct timer_list, mce_timer); -@@ -1312,6 +1319,7 @@ static int __cpuinit __mcheck_cpu_apply_ - - /* This should be disabled by the BIOS, but isn't always */ - if (c->x86_vendor == X86_VENDOR_AMD) { -+#ifndef CONFIG_XEN - if (c->x86 == 15 && banks > 4) { - /* - * disable GART TBL walk error reporting, which -@@ -1320,6 +1328,7 @@ static int __cpuinit __mcheck_cpu_apply_ - */ - clear_bit(10, (unsigned long *)&mce_banks[4].ctl); - } -+#endif - if (c->x86 <= 17 && mce_bootlog < 0) { - /* - * Lots of broken BIOS around that don't clear them -@@ -1387,6 +1396,7 @@ static void __cpuinit __mcheck_cpu_ancie - - static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 
*c) - { -+#ifndef CONFIG_X86_64_XEN - switch (c->x86_vendor) { - case X86_VENDOR_INTEL: - mce_intel_feature_init(c); -@@ -1397,6 +1407,7 @@ static void __mcheck_cpu_init_vendor(str - default: - break; - } -+#endif - } - - static void __mcheck_cpu_init_timer(void) -@@ -2142,6 +2153,16 @@ static __init int mcheck_init_device(voi - register_hotcpu_notifier(&mce_cpu_notifier); - misc_register(&mce_log_device); - -+#ifdef CONFIG_X86_XEN_MCE -+ if (is_initial_xendomain()) { -+ /* Register vIRQ handler for MCE LOG processing */ -+ extern void bind_virq_for_mce(void); -+ -+ printk(KERN_DEBUG "MCE: bind virq for DOM0 logging\n"); -+ bind_virq_for_mce(); -+ } -+#endif -+ - return err; - } - -Index: linux-2.6.38-master/arch/x86/kernel/cpu/mtrr/Makefile -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/kernel/cpu/mtrr/Makefile -+++ linux-2.6.38-master/arch/x86/kernel/cpu/mtrr/Makefile -@@ -1,3 +1,4 @@ - obj-y := main.o if.o generic.o cleanup.o - obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o - -+obj-$(CONFIG_XEN) := main.o if.o -Index: linux-2.6.38-master/arch/x86/lib/Makefile -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/lib/Makefile -+++ linux-2.6.38-master/arch/x86/lib/Makefile -@@ -43,3 +43,5 @@ else - lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o - lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o - endif -+ -+lib-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o -Index: linux-2.6.38-master/arch/x86/mm/Makefile -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/mm/Makefile -+++ linux-2.6.38-master/arch/x86/mm/Makefile -@@ -26,6 +26,8 @@ obj-$(CONFIG_NUMA) += numa.o numa_$(BIT - obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o - obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o - -+obj-$(CONFIG_XEN) += hypervisor.o -+ - obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o - - obj-$(CONFIG_MEMTEST) += memtest.o -Index: 
linux-2.6.38-master/arch/x86/oprofile/Makefile -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/oprofile/Makefile -+++ linux-2.6.38-master/arch/x86/oprofile/Makefile -@@ -6,7 +6,14 @@ DRIVER_OBJS = $(addprefix ../../../drive - oprofilefs.o oprofile_stats.o \ - timer_int.o ) - -+ifdef CONFIG_XEN -+XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \ -+ xenoprofile.o) -+oprofile-y := $(DRIVER_OBJS) \ -+ $(XENOPROF_COMMON_OBJS) xenoprof.o -+else - oprofile-y := $(DRIVER_OBJS) init.o backtrace.o - oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \ - op_model_ppro.o op_model_p4.o - oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o -+endif -Index: linux-2.6.38-master/arch/x86/pci/Makefile -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/pci/Makefile -+++ linux-2.6.38-master/arch/x86/pci/Makefile -@@ -5,6 +5,9 @@ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$ - obj-$(CONFIG_PCI_DIRECT) += direct.o - obj-$(CONFIG_PCI_OLPC) += olpc.o - obj-$(CONFIG_PCI_XEN) += xen.o -+# pcifront should be after mmconfig.o and direct.o as it should only -+# take over if direct access to the PCI bus is unavailable -+obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o - - obj-y += fixup.o - obj-$(CONFIG_X86_INTEL_CE) += ce4100.o -Index: linux-2.6.38-master/arch/x86/power/cpu.c -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/power/cpu.c -+++ linux-2.6.38-master/arch/x86/power/cpu.c -@@ -129,6 +129,7 @@ static void do_fpu_end(void) - - static void fix_processor_context(void) - { -+#ifndef CONFIG_X86_NO_TSS - int cpu = smp_processor_id(); - struct tss_struct *t = &per_cpu(init_tss, cpu); - -@@ -141,7 +142,10 @@ static void fix_processor_context(void) - - #ifdef CONFIG_X86_64 - get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; -+#endif -+#endif - -+#ifdef CONFIG_X86_64 - syscall_init(); /* This sets 
MSR_*STAR and related */ - #endif - load_TR_desc(); /* This does ltr */ -Index: linux-2.6.38-master/arch/x86/include/asm/acpi.h -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/include/asm/acpi.h -+++ linux-2.6.38-master/arch/x86/include/asm/acpi.h -@@ -30,6 +30,10 @@ - #include - #include - -+#ifdef CONFIG_XEN -+#include -+#endif -+ - #define COMPILER_DEPENDENT_INT64 long long - #define COMPILER_DEPENDENT_UINT64 unsigned long long - -@@ -122,6 +126,27 @@ extern unsigned long acpi_wakeup_address - /* early initialization routine */ - extern void acpi_reserve_wakeup_memory(void); - -+#ifdef CONFIG_XEN -+static inline int acpi_notify_hypervisor_state(u8 sleep_state, -+ u32 pm1a_cnt_val, -+ u32 pm1b_cnt_val) -+{ -+ struct xen_platform_op op = { -+ .cmd = XENPF_enter_acpi_sleep, -+ .interface_version = XENPF_INTERFACE_VERSION, -+ .u = { -+ .enter_acpi_sleep = { -+ .pm1a_cnt_val = pm1a_cnt_val, -+ .pm1b_cnt_val = pm1b_cnt_val, -+ .sleep_state = sleep_state, -+ }, -+ }, -+ }; -+ -+ return HYPERVISOR_platform_op(&op); -+} -+#endif /* CONFIG_XEN */ -+ - /* - * Check if the CPU can handle C2 and deeper - */ -@@ -180,7 +205,9 @@ static inline void disable_acpi(void) { - - #endif /* !CONFIG_ACPI */ - -+#ifndef CONFIG_XEN - #define ARCH_HAS_POWER_INIT 1 -+#endif - - struct bootnode; - -Index: linux-2.6.38-master/arch/x86/include/asm/apic.h -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/include/asm/apic.h -+++ linux-2.6.38-master/arch/x86/include/asm/apic.h -@@ -15,7 +15,9 @@ - #include - #include - -+#ifndef CONFIG_XEN - #define ARCH_APICTIMER_STOPS_ON_C3 1 -+#endif - - /* - * Debugging macros -Index: linux-2.6.38-master/arch/x86/include/asm/kexec.h -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/include/asm/kexec.h -+++ linux-2.6.38-master/arch/x86/include/asm/kexec.h -@@ -163,6 +163,19 @@ struct 
kimage_arch { - }; - #endif - -+/* Under Xen we need to work with machine addresses. These macros give the -+ * machine address of a certain page to the generic kexec code instead of -+ * the pseudo physical address which would be given by the default macros. -+ */ -+ -+#ifdef CONFIG_XEN -+#define KEXEC_ARCH_HAS_PAGE_MACROS -+#define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page)) -+#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn)) -+#define kexec_virt_to_phys(addr) virt_to_machine(addr) -+#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr)) -+#endif -+ - #endif /* __ASSEMBLY__ */ - - #endif /* _ASM_X86_KEXEC_H */ -Index: linux-2.6.38-master/arch/x86/include/asm/topology.h -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/include/asm/topology.h -+++ linux-2.6.38-master/arch/x86/include/asm/topology.h -@@ -30,7 +30,7 @@ - # define ENABLE_TOPO_DEFINES - # endif - #else --# ifdef CONFIG_SMP -+# if defined(CONFIG_SMP) && !defined(CONFIG_XEN) - # define ENABLE_TOPO_DEFINES - # endif - #endif -Index: linux-2.6.38-master/arch/x86/include/asm/types.h -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/include/asm/types.h -+++ linux-2.6.38-master/arch/x86/include/asm/types.h -@@ -9,7 +9,7 @@ - #ifndef __ASSEMBLY__ - - typedef u64 dma64_addr_t; --#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G) -+#if defined(CONFIG_X86_64) || defined(CONFIG_XEN) || defined(CONFIG_HIGHMEM64G) - /* DMA addresses come in 32-bit and 64-bit flavours. 
*/ - typedef u64 dma_addr_t; - #else -Index: linux-2.6.38-master/arch/x86/vdso/Makefile -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/vdso/Makefile -+++ linux-2.6.38-master/arch/x86/vdso/Makefile -@@ -65,6 +65,8 @@ obj-$(VDSO32-y) += vdso32-syms.lds - vdso32.so-$(VDSO32-y) += int80 - vdso32.so-$(CONFIG_COMPAT) += syscall - vdso32.so-$(VDSO32-y) += sysenter -+xen-vdso32-$(subst 1,$(CONFIG_COMPAT),$(shell expr $(CONFIG_XEN_COMPAT)0 '<' 0x0302000)) += int80 -+vdso32.so-$(CONFIG_XEN) += $(xen-vdso32-y) - - vdso32-images = $(vdso32.so-y:%=vdso32-%.so) - -Index: linux-2.6.38-master/arch/x86/vdso/vdso32-setup.c -=================================================================== ---- linux-2.6.38-master.orig/arch/x86/vdso/vdso32-setup.c -+++ linux-2.6.38-master/arch/x86/vdso/vdso32-setup.c -@@ -26,6 +26,10 @@ - #include - #include - -+#ifdef CONFIG_XEN -+#include -+#endif -+ - enum { - VDSO_DISABLED = 0, - VDSO_ENABLED = 1, -@@ -225,6 +229,7 @@ static inline void map_compat_vdso(int m - - void enable_sep_cpu(void) - { -+#ifndef CONFIG_XEN - int cpu = get_cpu(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); - -@@ -239,6 +244,35 @@ void enable_sep_cpu(void) - wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0); - wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0); - put_cpu(); -+#else -+ extern asmlinkage void ia32pv_sysenter_target(void); -+ static struct callback_register sysenter = { -+ .type = CALLBACKTYPE_sysenter, -+ .address = { __KERNEL_CS, (unsigned long)ia32pv_sysenter_target }, -+ }; -+ -+ if (!boot_cpu_has(X86_FEATURE_SEP)) -+ return; -+ -+ get_cpu(); -+ -+ if (xen_feature(XENFEAT_supervisor_mode_kernel)) -+ sysenter.address.eip = (unsigned long)ia32_sysenter_target; -+ -+ switch (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter)) { -+ case 0: -+ break; -+#if CONFIG_XEN_COMPAT < 0x030200 -+ case -ENOSYS: -+ sysenter.type = CALLBACKTYPE_sysenter_deprecated; -+ if 
(HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) == 0) -+ break; -+#endif -+ default: -+ clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability); -+ break; -+ } -+#endif - } - - static struct vm_area_struct gate_vma; diff --git a/patches.xen/xen3-auto-arch-x86_64.diff b/patches.xen/xen3-auto-arch-x86_64.diff deleted file mode 100644 index bc6dfa4..0000000 --- a/patches.xen/xen3-auto-arch-x86_64.diff +++ /dev/null @@ -1,211 +0,0 @@ -Subject: xen3 arch-x86_64 -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) -Patch-mainline: n/a -Acked-by: jbeulich@novell.com - ---- head-2010-04-15.orig/arch/x86/kernel/asm-offsets_64.c 2010-04-15 09:37:46.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/asm-offsets_64.c 2010-01-19 16:00:48.000000000 +0100 -@@ -115,8 +115,10 @@ int main(void) - ENTRY(cr8); - BLANK(); - #undef ENTRY -+#ifndef CONFIG_X86_NO_TSS - DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist)); - BLANK(); -+#endif - DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); - BLANK(); - DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); ---- head-2010-04-15.orig/arch/x86/kernel/machine_kexec_64.c 2010-04-15 09:38:56.000000000 +0200 -+++ head-2010-04-15/arch/x86/kernel/machine_kexec_64.c 2010-04-15 09:44:51.000000000 +0200 -@@ -21,6 +21,119 @@ - #include - #include - -+#ifdef CONFIG_XEN -+ -+/* In the case of Xen, override hypervisor functions to be able to create -+ * a regular identity mapping page table... 
-+ */ -+ -+#include -+#include -+ -+#define x__pmd(x) ((pmd_t) { (x) } ) -+#define x__pud(x) ((pud_t) { (x) } ) -+#define x__pgd(x) ((pgd_t) { (x) } ) -+ -+#define x_pmd_val(x) ((x).pmd) -+#define x_pud_val(x) ((x).pud) -+#define x_pgd_val(x) ((x).pgd) -+ -+static inline void x_set_pmd(pmd_t *dst, pmd_t val) -+{ -+ x_pmd_val(*dst) = x_pmd_val(val); -+} -+ -+static inline void x_set_pud(pud_t *dst, pud_t val) -+{ -+ x_pud_val(*dst) = phys_to_machine(x_pud_val(val)); -+} -+ -+static inline void x_pud_clear (pud_t *pud) -+{ -+ x_pud_val(*pud) = 0; -+} -+ -+static inline void x_set_pgd(pgd_t *dst, pgd_t val) -+{ -+ x_pgd_val(*dst) = phys_to_machine(x_pgd_val(val)); -+} -+ -+static inline void x_pgd_clear (pgd_t * pgd) -+{ -+ x_pgd_val(*pgd) = 0; -+} -+ -+#define X__PAGE_KERNEL_LARGE_EXEC \ -+ _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_PSE -+#define X_KERNPG_TABLE _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY -+ -+#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT) -+ -+#if PAGES_NR > KEXEC_XEN_NO_PAGES -+#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break -+#endif -+ -+#if PA_CONTROL_PAGE != 0 -+#error PA_CONTROL_PAGE is non zero - Xen support will break -+#endif -+ -+void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image) -+{ -+ void *control_page; -+ void *table_page; -+ -+ memset(xki->page_list, 0, sizeof(xki->page_list)); -+ -+ control_page = page_address(image->control_code_page) + PAGE_SIZE; -+ memcpy(control_page, relocate_kernel, PAGE_SIZE); -+ -+ table_page = page_address(image->control_code_page); -+ -+ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page); -+ xki->page_list[PA_TABLE_PAGE] = __ma(table_page); -+ -+ xki->page_list[PA_PGD] = __ma(kexec_pgd); -+ xki->page_list[PA_PUD_0] = __ma(kexec_pud0); -+ xki->page_list[PA_PUD_1] = __ma(kexec_pud1); -+ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0); -+ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1); -+ 
xki->page_list[PA_PTE_0] = __ma(kexec_pte0); -+ xki->page_list[PA_PTE_1] = __ma(kexec_pte1); -+} -+ -+int __init machine_kexec_setup_resources(struct resource *hypervisor, -+ struct resource *phys_cpus, -+ int nr_phys_cpus) -+{ -+ int k; -+ -+ /* The per-cpu crash note resources belong to the hypervisor resource */ -+ for (k = 0; k < nr_phys_cpus; k++) -+ request_resource(hypervisor, phys_cpus + k); -+ -+ return 0; -+} -+ -+void machine_kexec_register_resources(struct resource *res) { ; } -+ -+#else /* CONFIG_XEN */ -+ -+#define x__pmd(x) __pmd(x) -+#define x__pud(x) __pud(x) -+#define x__pgd(x) __pgd(x) -+ -+#define x_set_pmd(x, y) set_pmd(x, y) -+#define x_set_pud(x, y) set_pud(x, y) -+#define x_set_pgd(x, y) set_pgd(x, y) -+ -+#define x_pud_clear(x) pud_clear(x) -+#define x_pgd_clear(x) pgd_clear(x) -+ -+#define X__PAGE_KERNEL_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC -+#define X_KERNPG_TABLE _KERNPG_TABLE -+ -+#endif /* CONFIG_XEN */ -+ - static int init_one_level2_page(struct kimage *image, pgd_t *pgd, - unsigned long addr) - { -@@ -63,7 +176,7 @@ static void init_level2_page(pmd_t *leve - addr &= PAGE_MASK; - end_addr = addr + PUD_SIZE; - while (addr < end_addr) { -- set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC)); -+ x_set_pmd(level2p++, x__pmd(addr | X__PAGE_KERNEL_LARGE_EXEC)); - addr += PMD_SIZE; - } - } -@@ -88,12 +201,12 @@ static int init_level3_page(struct kimag - } - level2p = (pmd_t *)page_address(page); - init_level2_page(level2p, addr); -- set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE)); -+ x_set_pud(level3p++, x__pud(__pa(level2p) | X_KERNPG_TABLE)); - addr += PUD_SIZE; - } - /* clear the unused entries */ - while (addr < end_addr) { -- pud_clear(level3p++); -+ x_pud_clear(level3p++); - addr += PUD_SIZE; - } - out: -@@ -123,12 +236,12 @@ static int init_level4_page(struct kimag - result = init_level3_page(image, level3p, addr, last_addr); - if (result) - goto out; -- set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE)); -+ 
x_set_pgd(level4p++, x__pgd(__pa(level3p) | X_KERNPG_TABLE)); - addr += PGDIR_SIZE; - } - /* clear the unused entries */ - while (addr < end_addr) { -- pgd_clear(level4p++); -+ x_pgd_clear(level4p++); - addr += PGDIR_SIZE; - } - out: -@@ -189,8 +302,14 @@ static int init_pgtable(struct kimage *i - { - pgd_t *level4p; - int result; -+ unsigned long x_max_pfn = max_pfn; -+ -+#ifdef CONFIG_XEN -+ x_max_pfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL); -+#endif -+ - level4p = (pgd_t *)__va(start_pgtable); -- result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT); -+ result = init_level4_page(image, level4p, 0, x_max_pfn << PAGE_SHIFT); - if (result) - return result; - /* -@@ -224,6 +343,7 @@ void machine_kexec_cleanup(struct kimage - free_transition_pgtable(image); - } - -+#ifndef CONFIG_XEN - /* - * Do not allocate memory (or fail in any way) in machine_kexec(). - * We are past the point of no return, committed to rebooting now. -@@ -283,6 +403,7 @@ void machine_kexec(struct kimage *image) - - __ftrace_enabled_restore(save_ftrace_enabled); - } -+#endif - - void arch_crash_save_vmcoreinfo(void) - { diff --git a/patches.xen/xen3-auto-blktap2-pvops.diff b/patches.xen/xen3-auto-blktap2-pvops.diff deleted file mode 100644 index ee89650..0000000 --- a/patches.xen/xen3-auto-blktap2-pvops.diff +++ /dev/null @@ -1,2373 +0,0 @@ -Subject: pv-ops blktap2 -From: https://git.kernel.org/?p=linux/kernel/git/jeremy/xen.git (commit 892d2f052e979cf1916647c752b94cf62ec1c6dc) -Patch-mainline: n/a -Acked-by: jbeulich@novell.com - ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/xen/blktap2-new/Makefile 2011-02-24 13:49:49.000000000 +0100 -@@ -0,0 +1,3 @@ -+obj-$(CONFIG_XEN_BLKDEV_TAP) := blktap.o -+ -+blktap-objs := control.o ring.o device.o request.o sysfs.o ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/xen/blktap2-new/blktap.h 2011-02-24 13:49:49.000000000 +0100 -@@ -0,0 +1,209 @@ -+#ifndef _BLKTAP_H_ -+#define _BLKTAP_H_ -+ 
-+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern int blktap_debug_level; -+extern int blktap_ring_major; -+extern int blktap_device_major; -+ -+#define BTPRINTK(level, tag, force, _f, _a...) \ -+ do { \ -+ if (blktap_debug_level > level && \ -+ (force || printk_ratelimit())) \ -+ printk(tag "%s: " _f, __func__, ##_a); \ -+ } while (0) -+ -+#define BTDBG(_f, _a...) BTPRINTK(8, KERN_DEBUG, 1, _f, ##_a) -+#define BTINFO(_f, _a...) BTPRINTK(0, KERN_INFO, 0, _f, ##_a) -+#define BTWARN(_f, _a...) BTPRINTK(0, KERN_WARNING, 0, _f, ##_a) -+#define BTERR(_f, _a...) BTPRINTK(0, KERN_ERR, 0, _f, ##_a) -+ -+#define MAX_BLKTAP_DEVICE 1024 -+ -+#define BLKTAP_DEVICE 4 -+#define BLKTAP_DEVICE_CLOSED 5 -+#define BLKTAP_SHUTDOWN_REQUESTED 8 -+ -+/* blktap IOCTLs: */ -+#define BLKTAP2_IOCTL_KICK_FE 1 -+#define BLKTAP2_IOCTL_ALLOC_TAP 200 -+#define BLKTAP2_IOCTL_FREE_TAP 201 -+#define BLKTAP2_IOCTL_CREATE_DEVICE 202 -+#define BLKTAP2_IOCTL_REMOVE_DEVICE 207 -+ -+#define BLKTAP2_MAX_MESSAGE_LEN 256 -+ -+#define BLKTAP2_RING_MESSAGE_CLOSE 3 -+ -+#define BLKTAP_REQUEST_FREE 0 -+#define BLKTAP_REQUEST_PENDING 1 -+ -+/* -+ * The maximum number of requests that can be outstanding at any time -+ * is determined by -+ * -+ * [mmap_alloc * MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST] -+ * -+ * where mmap_alloc < MAX_DYNAMIC_MEM. -+ * -+ * TODO: -+ * mmap_alloc is initialised to 2 and should be adjustable on the fly via -+ * sysfs. 
-+ */ -+#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) -+#define MAX_DYNAMIC_MEM BLK_RING_SIZE -+#define MAX_PENDING_REQS BLK_RING_SIZE -+#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST) -+#define MMAP_VADDR(_start, _req, _seg) \ -+ (_start + \ -+ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \ -+ ((_seg) * PAGE_SIZE)) -+ -+struct grant_handle_pair { -+ grant_handle_t kernel; -+ grant_handle_t user; -+}; -+#define INVALID_GRANT_HANDLE 0xFFFF -+ -+struct blktap_handle { -+ unsigned int ring; -+ unsigned int device; -+ unsigned int minor; -+}; -+ -+struct blktap_params { -+ char name[BLKTAP2_MAX_MESSAGE_LEN]; -+ unsigned long long capacity; -+ unsigned long sector_size; -+}; -+ -+struct blktap_device { -+ spinlock_t lock; -+ struct gendisk *gd; -+}; -+ -+struct blktap_ring { -+ struct task_struct *task; -+ -+ struct vm_area_struct *vma; -+ struct blkif_front_ring ring; -+ unsigned long ring_vstart; -+ unsigned long user_vstart; -+ -+ int n_pending; -+ struct blktap_request *pending[MAX_PENDING_REQS]; -+ -+ wait_queue_head_t poll_wait; -+ -+ dev_t devno; -+ struct device *dev; -+}; -+ -+struct blktap_statistics { -+ unsigned long st_print; -+ int st_rd_req; -+ int st_wr_req; -+ int st_oo_req; -+ int st_rd_sect; -+ int st_wr_sect; -+ s64 st_rd_cnt; -+ s64 st_rd_sum_usecs; -+ s64 st_rd_max_usecs; -+ s64 st_wr_cnt; -+ s64 st_wr_sum_usecs; -+ s64 st_wr_max_usecs; -+}; -+ -+struct blktap_request { -+ struct blktap *tap; -+ struct request *rq; -+ int usr_idx; -+ -+ int operation; -+ struct timeval time; -+ -+ struct scatterlist sg_table[BLKIF_MAX_SEGMENTS_PER_REQUEST]; -+ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; -+ int nr_pages; -+}; -+ -+#define blktap_for_each_sg(_sg, _req, _i) \ -+ for (_sg = (_req)->sg_table, _i = 0; \ -+ _i < (_req)->nr_pages; \ -+ (_sg)++, (_i)++) -+ -+struct blktap { -+ int minor; -+ unsigned long dev_inuse; -+ -+ struct blktap_ring ring; -+ struct blktap_device device; -+ struct 
blktap_page_pool *pool; -+ -+ wait_queue_head_t remove_wait; -+ struct work_struct remove_work; -+ char name[BLKTAP2_MAX_MESSAGE_LEN]; -+ -+ struct blktap_statistics stats; -+}; -+ -+struct blktap_page_pool { -+ struct mempool_s *bufs; -+ spinlock_t lock; -+ struct kobject kobj; -+ wait_queue_head_t wait; -+}; -+ -+extern struct mutex blktap_lock; -+extern struct blktap **blktaps; -+extern int blktap_max_minor; -+ -+int blktap_control_destroy_tap(struct blktap *); -+size_t blktap_control_debug(struct blktap *, char *, size_t); -+ -+int blktap_ring_init(void); -+void blktap_ring_exit(void); -+size_t blktap_ring_debug(struct blktap *, char *, size_t); -+int blktap_ring_create(struct blktap *); -+int blktap_ring_destroy(struct blktap *); -+struct blktap_request *blktap_ring_make_request(struct blktap *); -+void blktap_ring_free_request(struct blktap *,struct blktap_request *); -+void blktap_ring_submit_request(struct blktap *, struct blktap_request *); -+int blktap_ring_map_request_segment(struct blktap *, struct blktap_request *, int); -+int blktap_ring_map_request(struct blktap *, struct blktap_request *); -+void blktap_ring_unmap_request(struct blktap *, struct blktap_request *); -+void blktap_ring_set_message(struct blktap *, int); -+void blktap_ring_kick_user(struct blktap *); -+ -+int blktap_sysfs_init(void); -+void blktap_sysfs_exit(void); -+int blktap_sysfs_create(struct blktap *); -+void blktap_sysfs_destroy(struct blktap *); -+ -+int blktap_device_init(void); -+void blktap_device_exit(void); -+size_t blktap_device_debug(struct blktap *, char *, size_t); -+int blktap_device_create(struct blktap *, struct blktap_params *); -+int blktap_device_destroy(struct blktap *); -+void blktap_device_destroy_sync(struct blktap *); -+void blktap_device_run_queue(struct blktap *); -+void blktap_device_end_request(struct blktap *, struct blktap_request *, int); -+ -+int blktap_page_pool_init(struct kobject *); -+void blktap_page_pool_exit(void); -+struct blktap_page_pool 
*blktap_page_pool_get(const char *); -+ -+size_t blktap_request_debug(struct blktap *, char *, size_t); -+struct blktap_request *blktap_request_alloc(struct blktap *); -+int blktap_request_get_pages(struct blktap *, struct blktap_request *, int); -+void blktap_request_free(struct blktap *, struct blktap_request *); -+void blktap_request_bounce(struct blktap *, struct blktap_request *, int, int); -+ -+ -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/xen/blktap2-new/control.c 2011-02-24 13:49:49.000000000 +0100 -@@ -0,0 +1,315 @@ -+#include -+#include -+#include -+#include -+#include -+ -+#include "blktap.h" -+ -+DEFINE_MUTEX(blktap_lock); -+ -+struct blktap **blktaps; -+int blktap_max_minor; -+static struct blktap_page_pool *default_pool; -+ -+static struct blktap * -+blktap_control_get_minor(void) -+{ -+ int minor; -+ struct blktap *tap; -+ -+ tap = kzalloc(sizeof(*tap), GFP_KERNEL); -+ if (unlikely(!tap)) -+ return NULL; -+ -+ mutex_lock(&blktap_lock); -+ -+ for (minor = 0; minor < blktap_max_minor; minor++) -+ if (!blktaps[minor]) -+ break; -+ -+ if (minor == MAX_BLKTAP_DEVICE) -+ goto fail; -+ -+ if (minor == blktap_max_minor) { -+ void *p; -+ int n; -+ -+ n = min(2 * blktap_max_minor, MAX_BLKTAP_DEVICE); -+ p = krealloc(blktaps, n * sizeof(blktaps[0]), GFP_KERNEL); -+ if (!p) -+ goto fail; -+ -+ blktaps = p; -+ minor = blktap_max_minor; -+ blktap_max_minor = n; -+ -+ memset(&blktaps[minor], 0, (n - minor) * sizeof(blktaps[0])); -+ } -+ -+ tap->minor = minor; -+ blktaps[minor] = tap; -+ -+ __module_get(THIS_MODULE); -+out: -+ mutex_unlock(&blktap_lock); -+ return tap; -+ -+fail: -+ mutex_unlock(&blktap_lock); -+ kfree(tap); -+ tap = NULL; -+ goto out; -+} -+ -+static void -+blktap_control_put_minor(struct blktap* tap) -+{ -+ blktaps[tap->minor] = NULL; -+ kfree(tap); -+ -+ module_put(THIS_MODULE); -+} -+ -+static struct blktap* -+blktap_control_create_tap(void) -+{ -+ struct blktap *tap; -+ int err; -+ -+ tap = 
blktap_control_get_minor(); -+ if (!tap) -+ return NULL; -+ -+ kobject_get(&default_pool->kobj); -+ tap->pool = default_pool; -+ -+ err = blktap_ring_create(tap); -+ if (err) -+ goto fail_tap; -+ -+ err = blktap_sysfs_create(tap); -+ if (err) -+ goto fail_ring; -+ -+ return tap; -+ -+fail_ring: -+ blktap_ring_destroy(tap); -+fail_tap: -+ blktap_control_put_minor(tap); -+ -+ return NULL; -+} -+ -+int -+blktap_control_destroy_tap(struct blktap *tap) -+{ -+ int err; -+ -+ err = blktap_ring_destroy(tap); -+ if (err) -+ return err; -+ -+ kobject_put(&tap->pool->kobj); -+ -+ blktap_sysfs_destroy(tap); -+ -+ blktap_control_put_minor(tap); -+ -+ return 0; -+} -+ -+static int -+blktap_control_ioctl(struct inode *inode, struct file *filp, -+ unsigned int cmd, unsigned long arg) -+{ -+ struct blktap *tap; -+ -+ switch (cmd) { -+ case BLKTAP2_IOCTL_ALLOC_TAP: { -+ struct blktap_handle h; -+ void __user *ptr = (void __user*)arg; -+ -+ tap = blktap_control_create_tap(); -+ if (!tap) -+ return -ENOMEM; -+ -+ h.ring = blktap_ring_major; -+ h.device = blktap_device_major; -+ h.minor = tap->minor; -+ -+ if (copy_to_user(ptr, &h, sizeof(h))) { -+ blktap_control_destroy_tap(tap); -+ return -EFAULT; -+ } -+ -+ return 0; -+ } -+ -+ case BLKTAP2_IOCTL_FREE_TAP: { -+ int minor = arg; -+ -+ if (minor > MAX_BLKTAP_DEVICE) -+ return -EINVAL; -+ -+ tap = blktaps[minor]; -+ if (!tap) -+ return -ENODEV; -+ -+ return blktap_control_destroy_tap(tap); -+ } -+ } -+ -+ return -ENOIOCTLCMD; -+} -+ -+static struct file_operations blktap_control_file_operations = { -+ .owner = THIS_MODULE, -+ .ioctl = blktap_control_ioctl, -+}; -+ -+static struct miscdevice blktap_control = { -+ .minor = MISC_DYNAMIC_MINOR, -+ .name = "blktap-control", -+ .fops = &blktap_control_file_operations, -+}; -+ -+static struct device *control_device; -+ -+static ssize_t -+blktap_control_show_default_pool(struct device *device, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ return sprintf(buf, "%s", 
kobject_name(&default_pool->kobj)); -+} -+ -+static ssize_t -+blktap_control_store_default_pool(struct device *device, -+ struct device_attribute *attr, -+ const char *buf, size_t size) -+{ -+ struct blktap_page_pool *pool, *tmp = default_pool; -+ -+ pool = blktap_page_pool_get(buf); -+ if (IS_ERR(pool)) -+ return PTR_ERR(pool); -+ -+ default_pool = pool; -+ kobject_put(&tmp->kobj); -+ -+ return size; -+} -+ -+static DEVICE_ATTR(default_pool, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH, -+ blktap_control_show_default_pool, -+ blktap_control_store_default_pool); -+ -+size_t -+blktap_control_debug(struct blktap *tap, char *buf, size_t size) -+{ -+ char *s = buf, *end = buf + size; -+ -+ s += snprintf(s, end - s, -+ "tap %u:%u name:'%s' flags:%#08lx\n", -+ MAJOR(tap->ring.devno), MINOR(tap->ring.devno), -+ tap->name, tap->dev_inuse); -+ -+ return s - buf; -+} -+ -+static int __init -+blktap_control_init(void) -+{ -+ int err; -+ -+ err = misc_register(&blktap_control); -+ if (err) -+ return err; -+ -+ control_device = blktap_control.this_device; -+ -+ blktap_max_minor = min(64, MAX_BLKTAP_DEVICE); -+ blktaps = kzalloc(blktap_max_minor * sizeof(blktaps[0]), GFP_KERNEL); -+ if (!blktaps) { -+ BTERR("failed to allocate blktap minor map"); -+ return -ENOMEM; -+ } -+ -+ err = blktap_page_pool_init(&control_device->kobj); -+ if (err) -+ return err; -+ -+ default_pool = blktap_page_pool_get("default"); -+ if (!default_pool) -+ return -ENOMEM; -+ -+ err = device_create_file(control_device, &dev_attr_default_pool); -+ if (err) -+ return err; -+ -+ return 0; -+} -+ -+static void -+blktap_control_exit(void) -+{ -+ if (default_pool) { -+ kobject_put(&default_pool->kobj); -+ default_pool = NULL; -+ } -+ -+ blktap_page_pool_exit(); -+ -+ if (blktaps) { -+ kfree(blktaps); -+ blktaps = NULL; -+ } -+ -+ if (control_device) { -+ misc_deregister(&blktap_control); -+ control_device = NULL; -+ } -+} -+ -+static void -+blktap_exit(void) -+{ -+ blktap_control_exit(); -+ blktap_ring_exit(); -+ 
blktap_sysfs_exit(); -+ blktap_device_exit(); -+} -+ -+static int __init -+blktap_init(void) -+{ -+ int err; -+ -+ err = blktap_device_init(); -+ if (err) -+ goto fail; -+ -+ err = blktap_ring_init(); -+ if (err) -+ goto fail; -+ -+ err = blktap_sysfs_init(); -+ if (err) -+ goto fail; -+ -+ err = blktap_control_init(); -+ if (err) -+ goto fail; -+ -+ return 0; -+ -+fail: -+ blktap_exit(); -+ return err; -+} -+ -+module_init(blktap_init); -+module_exit(blktap_exit); -+MODULE_LICENSE("Dual BSD/GPL"); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/xen/blktap2-new/device.c 2011-02-24 13:49:49.000000000 +0100 -@@ -0,0 +1,564 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "blktap.h" -+ -+int blktap_device_major; -+ -+#define dev_to_blktap(_dev) container_of(_dev, struct blktap, device) -+ -+static int -+blktap_device_open(struct block_device *bdev, fmode_t mode) -+{ -+ struct gendisk *disk = bdev->bd_disk; -+ struct blktap_device *tapdev = disk->private_data; -+ -+ if (!tapdev) -+ return -ENXIO; -+ -+ /* NB. we might have bounced a bd trylock by tapdisk. when -+ * failing for reasons not !tapdev, make sure to kick tapdisk -+ * out of destroy wait state again. 
*/ -+ -+ return 0; -+} -+ -+static int -+blktap_device_release(struct gendisk *disk, fmode_t mode) -+{ -+ struct blktap_device *tapdev = disk->private_data; -+ struct block_device *bdev = bdget_disk(disk, 0); -+ struct blktap *tap = dev_to_blktap(tapdev); -+ -+ bdput(bdev); -+ -+ if (!bdev->bd_openers) { -+ set_bit(BLKTAP_DEVICE_CLOSED, &tap->dev_inuse); -+ blktap_ring_kick_user(tap); -+ } -+ -+ return 0; -+} -+ -+static int -+blktap_device_getgeo(struct block_device *bd, struct hd_geometry *hg) -+{ -+ /* We don't have real geometry info, but let's at least return -+ values consistent with the size of the device */ -+ sector_t nsect = get_capacity(bd->bd_disk); -+ sector_t cylinders = nsect; -+ -+ hg->heads = 0xff; -+ hg->sectors = 0x3f; -+ sector_div(cylinders, hg->heads * hg->sectors); -+ hg->cylinders = cylinders; -+ if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) -+ hg->cylinders = 0xffff; -+ return 0; -+} -+ -+static int -+blktap_device_ioctl(struct block_device *bd, fmode_t mode, -+ unsigned command, unsigned long argument) -+{ -+ int i; -+ -+ switch (command) { -+ case CDROMMULTISESSION: -+ BTDBG("FIXME: support multisession CDs later\n"); -+ for (i = 0; i < sizeof(struct cdrom_multisession); i++) -+ if (put_user(0, (char __user *)(argument + i))) -+ return -EFAULT; -+ return 0; -+ -+ case SCSI_IOCTL_GET_IDLUN: -+ if (!access_ok(VERIFY_WRITE, argument, -+ sizeof(struct scsi_idlun))) -+ return -EFAULT; -+ -+ /* return 0 for now. 
*/ -+ __put_user(0, &((struct scsi_idlun __user *)argument)->dev_id); -+ __put_user(0, -+ &((struct scsi_idlun __user *)argument)->host_unique_id); -+ return 0; -+ -+ default: -+ /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", -+ command);*/ -+ return -EINVAL; /* same return as native Linux */ -+ } -+ -+ return 0; -+} -+ -+static struct block_device_operations blktap_device_file_operations = { -+ .owner = THIS_MODULE, -+ .open = blktap_device_open, -+ .release = blktap_device_release, -+ .ioctl = blktap_device_ioctl, -+ .getgeo = blktap_device_getgeo -+}; -+ -+/* NB. __blktap holding the queue lock; blktap where unlocked */ -+ -+static inline struct request* -+__blktap_next_queued_rq(struct request_queue *q) -+{ -+ return blk_peek_request(q); -+} -+ -+static inline void -+__blktap_dequeue_rq(struct request *rq) -+{ -+ blk_start_request(rq); -+} -+ -+/* NB. err == 0 indicates success, failures < 0 */ -+ -+static inline void -+__blktap_end_queued_rq(struct request *rq, int err) -+{ -+ blk_start_request(rq); -+ __blk_end_request(rq, err, blk_rq_bytes(rq)); -+} -+ -+static inline void -+__blktap_end_rq(struct request *rq, int err) -+{ -+ __blk_end_request(rq, err, blk_rq_bytes(rq)); -+} -+ -+static inline void -+blktap_end_rq(struct request *rq, int err) -+{ -+ spin_lock_irq(rq->q->queue_lock); -+ __blktap_end_rq(rq, err); -+ spin_unlock_irq(rq->q->queue_lock); -+} -+ -+void -+blktap_device_end_request(struct blktap *tap, -+ struct blktap_request *request, -+ int error) -+{ -+ struct blktap_device *tapdev = &tap->device; -+ struct request *rq = request->rq; -+ -+ blktap_ring_unmap_request(tap, request); -+ -+ blktap_ring_free_request(tap, request); -+ -+ dev_dbg(disk_to_dev(tapdev->gd), -+ "end_request: op=%d error=%d bytes=%d\n", -+ rq_data_dir(rq), error, blk_rq_bytes(rq)); -+ -+ blktap_end_rq(rq, error); -+} -+ -+int -+blktap_device_make_request(struct blktap *tap, struct request *rq) -+{ -+ struct blktap_device *tapdev = &tap->device; -+ struct 
blktap_request *request; -+ int write, nsegs; -+ int err; -+ -+ request = blktap_ring_make_request(tap); -+ if (IS_ERR(request)) { -+ err = PTR_ERR(request); -+ request = NULL; -+ -+ if (err == -ENOSPC || err == -ENOMEM) -+ goto stop; -+ -+ goto fail; -+ } -+ -+ write = rq_data_dir(rq) == WRITE; -+ nsegs = blk_rq_map_sg(rq->q, rq, request->sg_table); -+ -+ dev_dbg(disk_to_dev(tapdev->gd), -+ "make_request: op=%c bytes=%d nsegs=%d\n", -+ write ? 'w' : 'r', blk_rq_bytes(rq), nsegs); -+ -+ request->rq = rq; -+ request->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ; -+ -+ err = blktap_request_get_pages(tap, request, nsegs); -+ if (err) -+ goto stop; -+ -+ err = blktap_ring_map_request(tap, request); -+ if (err) -+ goto fail; -+ -+ blktap_ring_submit_request(tap, request); -+ -+ return 0; -+ -+stop: -+ tap->stats.st_oo_req++; -+ err = -EBUSY; -+ -+_out: -+ if (request) -+ blktap_ring_free_request(tap, request); -+ -+ return err; -+fail: -+ if (printk_ratelimit()) -+ dev_warn(disk_to_dev(tapdev->gd), -+ "make request: %d, failing\n", err); -+ goto _out; -+} -+ -+/* -+ * called from tapdisk context -+ */ -+void -+blktap_device_run_queue(struct blktap *tap) -+{ -+ struct blktap_device *tapdev = &tap->device; -+ struct request_queue *q; -+ struct request *rq; -+ int err; -+ -+ if (!tapdev->gd) -+ return; -+ -+ q = tapdev->gd->queue; -+ -+ spin_lock_irq(&tapdev->lock); -+ queue_flag_clear(QUEUE_FLAG_STOPPED, q); -+ -+ do { -+ rq = __blktap_next_queued_rq(q); -+ if (!rq) -+ break; -+ -+ if (!blk_fs_request(rq)) { -+ __blktap_end_queued_rq(rq, -EOPNOTSUPP); -+ continue; -+ } -+ -+ spin_unlock_irq(&tapdev->lock); -+ -+ err = blktap_device_make_request(tap, rq); -+ -+ spin_lock_irq(&tapdev->lock); -+ -+ if (err == -EBUSY) { -+ blk_stop_queue(q); -+ break; -+ } -+ -+ __blktap_dequeue_rq(rq); -+ -+ if (unlikely(err)) -+ __blktap_end_rq(rq, err); -+ } while (1); -+ -+ spin_unlock_irq(&tapdev->lock); -+} -+ -+static void -+blktap_device_do_request(struct request_queue *rq) -+{ 
-+ struct blktap_device *tapdev = rq->queuedata; -+ struct blktap *tap = dev_to_blktap(tapdev); -+ -+ blktap_ring_kick_user(tap); -+} -+ -+static void -+blktap_device_configure(struct blktap *tap, -+ struct blktap_params *params) -+{ -+ struct request_queue *rq; -+ struct blktap_device *dev = &tap->device; -+ -+ dev = &tap->device; -+ rq = dev->gd->queue; -+ -+ spin_lock_irq(&dev->lock); -+ -+ set_capacity(dev->gd, params->capacity); -+ -+ /* Hard sector size and max sectors impersonate the equiv. hardware. */ -+ blk_queue_logical_block_size(rq, params->sector_size); -+ blk_queue_max_sectors(rq, 512); -+ -+ /* Each segment in a request is up to an aligned page in size. */ -+ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); -+ blk_queue_max_segment_size(rq, PAGE_SIZE); -+ -+ /* Ensure a merged request will fit in a single I/O ring slot. */ -+ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); -+ blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); -+ -+ /* Make sure buffer addresses are sector-aligned. */ -+ blk_queue_dma_alignment(rq, 511); -+ -+ /* We are reordering, but cacheless. 
*/ -+ blk_queue_ordered(rq, QUEUE_ORDERED_DRAIN, NULL); -+ -+ spin_unlock_irq(&dev->lock); -+} -+ -+static int -+blktap_device_validate_params(struct blktap *tap, -+ struct blktap_params *params) -+{ -+ struct device *dev = tap->ring.dev; -+ int sector_order, name_sz; -+ -+ sector_order = ffs(params->sector_size) - 1; -+ -+ if (sector_order < 9 || -+ sector_order > 12 || -+ params->sector_size != 1U<capacity || -+ (params->capacity > ULLONG_MAX >> sector_order)) -+ goto fail; -+ -+ name_sz = min(sizeof(params->name), sizeof(tap->name)); -+ if (strnlen(params->name, name_sz) >= name_sz) -+ goto fail; -+ -+ return 0; -+ -+fail: -+ params->name[name_sz-1] = 0; -+ dev_err(dev, "capacity: %llu, sector-size: %lu, name: %s\n", -+ params->capacity, params->sector_size, params->name); -+ return -EINVAL; -+} -+ -+int -+blktap_device_destroy(struct blktap *tap) -+{ -+ struct blktap_device *tapdev = &tap->device; -+ struct block_device *bdev; -+ struct gendisk *gd; -+ int err; -+ -+ gd = tapdev->gd; -+ if (!gd) -+ return 0; -+ -+ bdev = bdget_disk(gd, 0); -+ -+ err = !mutex_trylock(&bdev->bd_mutex); -+ if (err) { -+ /* NB. avoid a deadlock. the last opener syncs the -+ * bdev holding bd_mutex. 
*/ -+ err = -EBUSY; -+ goto out_nolock; -+ } -+ -+ if (bdev->bd_openers) { -+ err = -EBUSY; -+ goto out; -+ } -+ -+ del_gendisk(gd); -+ gd->private_data = NULL; -+ -+ blk_cleanup_queue(gd->queue); -+ -+ put_disk(gd); -+ tapdev->gd = NULL; -+ -+ clear_bit(BLKTAP_DEVICE, &tap->dev_inuse); -+ err = 0; -+out: -+ mutex_unlock(&bdev->bd_mutex); -+out_nolock: -+ bdput(bdev); -+ -+ return err; -+} -+ -+static void -+blktap_device_fail_queue(struct blktap *tap) -+{ -+ struct blktap_device *tapdev = &tap->device; -+ struct request_queue *q = tapdev->gd->queue; -+ -+ spin_lock_irq(&tapdev->lock); -+ queue_flag_clear(QUEUE_FLAG_STOPPED, q); -+ -+ do { -+ struct request *rq = __blktap_next_queued_rq(q); -+ if (!rq) -+ break; -+ -+ __blktap_end_queued_rq(rq, -EIO); -+ } while (1); -+ -+ spin_unlock_irq(&tapdev->lock); -+} -+ -+static int -+blktap_device_try_destroy(struct blktap *tap) -+{ -+ int err; -+ -+ err = blktap_device_destroy(tap); -+ if (err) -+ blktap_device_fail_queue(tap); -+ -+ return err; -+} -+ -+void -+blktap_device_destroy_sync(struct blktap *tap) -+{ -+ wait_event(tap->ring.poll_wait, -+ !blktap_device_try_destroy(tap)); -+} -+ -+int -+blktap_device_create(struct blktap *tap, struct blktap_params *params) -+{ -+ int minor, err; -+ struct gendisk *gd; -+ struct request_queue *rq; -+ struct blktap_device *tapdev; -+ -+ gd = NULL; -+ rq = NULL; -+ tapdev = &tap->device; -+ minor = tap->minor; -+ -+ if (test_bit(BLKTAP_DEVICE, &tap->dev_inuse)) -+ return -EEXIST; -+ -+ if (blktap_device_validate_params(tap, params)) -+ return -EINVAL; -+ -+ gd = alloc_disk(1); -+ if (!gd) { -+ err = -ENOMEM; -+ goto fail; -+ } -+ -+ if (minor < 26) { -+ sprintf(gd->disk_name, "td%c", 'a' + minor % 26); -+ } else if (minor < (26 + 1) * 26) { -+ sprintf(gd->disk_name, "td%c%c", -+ 'a' + minor / 26 - 1,'a' + minor % 26); -+ } else { -+ const unsigned int m1 = (minor / 26 - 1) / 26 - 1; -+ const unsigned int m2 = (minor / 26 - 1) % 26; -+ const unsigned int m3 = minor % 26; -+ 
sprintf(gd->disk_name, "td%c%c%c", -+ 'a' + m1, 'a' + m2, 'a' + m3); -+ } -+ -+ gd->major = blktap_device_major; -+ gd->first_minor = minor; -+ gd->fops = &blktap_device_file_operations; -+ gd->private_data = tapdev; -+ -+ spin_lock_init(&tapdev->lock); -+ rq = blk_init_queue(blktap_device_do_request, &tapdev->lock); -+ if (!rq) { -+ err = -ENOMEM; -+ goto fail; -+ } -+ elevator_init(rq, "noop"); -+ -+ gd->queue = rq; -+ rq->queuedata = tapdev; -+ tapdev->gd = gd; -+ -+ blktap_device_configure(tap, params); -+ add_disk(gd); -+ -+ if (params->name[0]) -+ strncpy(tap->name, params->name, sizeof(tap->name)-1); -+ -+ set_bit(BLKTAP_DEVICE, &tap->dev_inuse); -+ -+ dev_info(disk_to_dev(gd), "sector-size: %u capacity: %llu\n", -+ queue_logical_block_size(rq), -+ (unsigned long long)get_capacity(gd)); -+ -+ return 0; -+ -+fail: -+ if (gd) -+ del_gendisk(gd); -+ if (rq) -+ blk_cleanup_queue(rq); -+ -+ return err; -+} -+ -+size_t -+blktap_device_debug(struct blktap *tap, char *buf, size_t size) -+{ -+ struct gendisk *disk = tap->device.gd; -+ struct request_queue *q; -+ struct block_device *bdev; -+ char *s = buf, *end = buf + size; -+ -+ if (!disk) -+ return 0; -+ -+ q = disk->queue; -+ -+ s += snprintf(s, end - s, -+ "disk capacity:%llu sector size:%u\n", -+ (unsigned long long)get_capacity(disk), -+ queue_logical_block_size(q)); -+ -+ s += snprintf(s, end - s, -+ "queue flags:%#lx plugged:%d stopped:%d empty:%d\n", -+ q->queue_flags, -+ blk_queue_plugged(q), blk_queue_stopped(q), -+ elv_queue_empty(q)); -+ -+ bdev = bdget_disk(disk, 0); -+ if (bdev) { -+ s += snprintf(s, end - s, -+ "bdev openers:%d closed:%d\n", -+ bdev->bd_openers, -+ test_bit(BLKTAP_DEVICE_CLOSED, &tap->dev_inuse)); -+ bdput(bdev); -+ } -+ -+ return s - buf; -+} -+ -+int __init -+blktap_device_init() -+{ -+ int major; -+ -+ /* Dynamically allocate a major for this device */ -+ major = register_blkdev(0, "tapdev"); -+ if (major < 0) { -+ BTERR("Couldn't register blktap device\n"); -+ return -ENOMEM; -+ 
} -+ -+ blktap_device_major = major; -+ BTINFO("blktap device major %d\n", major); -+ -+ return 0; -+} -+ -+void -+blktap_device_exit(void) -+{ -+ if (blktap_device_major) -+ unregister_blkdev(blktap_device_major, "tapdev"); -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/xen/blktap2-new/request.c 2011-02-24 13:49:49.000000000 +0100 -@@ -0,0 +1,418 @@ -+#include -+#include -+#include -+#include -+#include -+ -+#include "blktap.h" -+ -+/* max pages per shared pool. just to prevent accidental dos. */ -+#define POOL_MAX_PAGES (256*BLKIF_MAX_SEGMENTS_PER_REQUEST) -+ -+/* default page pool size. when considering to shrink a shared pool, -+ * note that paused tapdisks may grab a whole lot of pages for a long -+ * time. */ -+#define POOL_DEFAULT_PAGES (2 * MMAP_PAGES) -+ -+/* max number of pages allocatable per request. */ -+#define POOL_MAX_REQUEST_PAGES BLKIF_MAX_SEGMENTS_PER_REQUEST -+ -+/* min request structs per pool. These grow dynamically. */ -+#define POOL_MIN_REQS BLK_RING_SIZE -+ -+static struct kset *pool_set; -+ -+#define kobj_to_pool(_kobj) \ -+ container_of(_kobj, struct blktap_page_pool, kobj) -+ -+static struct kmem_cache *request_cache; -+static mempool_t *request_pool; -+ -+static void -+__page_pool_wake(struct blktap_page_pool *pool) -+{ -+ mempool_t *mem = pool->bufs; -+ -+ /* -+ NB. slightly wasteful to always wait for a full segment -+ set. but this ensures the next disk makes -+ progress. presently, the repeated request struct -+ alloc/release cycles would otherwise keep everyone spinning. -+ */ -+ -+ if (mem->curr_nr >= POOL_MAX_REQUEST_PAGES) -+ wake_up(&pool->wait); -+} -+ -+int -+blktap_request_get_pages(struct blktap *tap, -+ struct blktap_request *request, int nr_pages) -+{ -+ struct blktap_page_pool *pool = tap->pool; -+ mempool_t *mem = pool->bufs; -+ struct page *page; -+ -+ BUG_ON(request->nr_pages != 0); -+ BUG_ON(nr_pages > POOL_MAX_REQUEST_PAGES); -+ -+ if (mem->curr_nr < nr_pages) -+ return -ENOMEM; -+ -+ /* NB. 
avoid thundering herds of tapdisks colliding. */ -+ spin_lock(&pool->lock); -+ -+ if (mem->curr_nr < nr_pages) { -+ spin_unlock(&pool->lock); -+ return -ENOMEM; -+ } -+ -+ while (request->nr_pages < nr_pages) { -+ page = mempool_alloc(mem, GFP_NOWAIT); -+ BUG_ON(!page); -+ request->pages[request->nr_pages++] = page; -+ } -+ -+ spin_unlock(&pool->lock); -+ -+ return 0; -+} -+ -+static void -+blktap_request_put_pages(struct blktap *tap, -+ struct blktap_request *request) -+{ -+ struct blktap_page_pool *pool = tap->pool; -+ struct page *page; -+ -+ while (request->nr_pages) { -+ page = request->pages[--request->nr_pages]; -+ mempool_free(page, pool->bufs); -+ } -+} -+ -+size_t -+blktap_request_debug(struct blktap *tap, char *buf, size_t size) -+{ -+ struct blktap_page_pool *pool = tap->pool; -+ mempool_t *mem = pool->bufs; -+ char *s = buf, *end = buf + size; -+ -+ s += snprintf(buf, end - s, -+ "pool:%s pages:%d free:%d\n", -+ kobject_name(&pool->kobj), -+ mem->min_nr, mem->curr_nr); -+ -+ return s - buf; -+} -+ -+struct blktap_request* -+blktap_request_alloc(struct blktap *tap) -+{ -+ struct blktap_request *request; -+ -+ request = mempool_alloc(request_pool, GFP_NOWAIT); -+ if (request) -+ request->tap = tap; -+ -+ return request; -+} -+ -+void -+blktap_request_free(struct blktap *tap, -+ struct blktap_request *request) -+{ -+ blktap_request_put_pages(tap, request); -+ -+ mempool_free(request, request_pool); -+ -+ __page_pool_wake(tap->pool); -+} -+ -+void -+blktap_request_bounce(struct blktap *tap, -+ struct blktap_request *request, -+ int seg, int write) -+{ -+ struct scatterlist *sg = &request->sg_table[seg]; -+ void *s, *p; -+ -+ BUG_ON(seg >= request->nr_pages); -+ -+ s = sg_virt(sg); -+ p = page_address(request->pages[seg]) + sg->offset; -+ -+ if (write) -+ memcpy(p, s, sg->length); -+ else -+ memcpy(s, p, sg->length); -+} -+ -+static void -+blktap_request_ctor(void *obj) -+{ -+ struct blktap_request *request = obj; -+ -+ memset(request, 0, sizeof(*request)); 
-+ sg_init_table(request->sg_table, ARRAY_SIZE(request->sg_table)); -+} -+ -+static int -+blktap_page_pool_resize(struct blktap_page_pool *pool, int target) -+{ -+ mempool_t *bufs = pool->bufs; -+ int err; -+ -+ /* NB. mempool asserts min_nr >= 1 */ -+ target = max(1, target); -+ -+ err = mempool_resize(bufs, target, GFP_KERNEL); -+ if (err) -+ return err; -+ -+ __page_pool_wake(pool); -+ -+ return 0; -+} -+ -+struct pool_attribute { -+ struct attribute attr; -+ -+ ssize_t (*show)(struct blktap_page_pool *pool, -+ char *buf); -+ -+ ssize_t (*store)(struct blktap_page_pool *pool, -+ const char *buf, size_t count); -+}; -+ -+#define kattr_to_pool_attr(_kattr) \ -+ container_of(_kattr, struct pool_attribute, attr) -+ -+static ssize_t -+blktap_page_pool_show_size(struct blktap_page_pool *pool, -+ char *buf) -+{ -+ mempool_t *mem = pool->bufs; -+ return sprintf(buf, "%d", mem->min_nr); -+} -+ -+static ssize_t -+blktap_page_pool_store_size(struct blktap_page_pool *pool, -+ const char *buf, size_t size) -+{ -+ int target; -+ -+ /* -+ * NB. target fixup to avoid undesired results. less than a -+ * full segment set can wedge the disk. much more than a -+ * couple times the physical queue depth is rarely useful. -+ */ -+ -+ target = simple_strtoul(buf, NULL, 0); -+ target = max(POOL_MAX_REQUEST_PAGES, target); -+ target = min(target, POOL_MAX_PAGES); -+ -+ return blktap_page_pool_resize(pool, target) ? 
: size; -+} -+ -+static struct pool_attribute blktap_page_pool_attr_size = -+ __ATTR(size, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH, -+ blktap_page_pool_show_size, -+ blktap_page_pool_store_size); -+ -+static ssize_t -+blktap_page_pool_show_free(struct blktap_page_pool *pool, -+ char *buf) -+{ -+ mempool_t *mem = pool->bufs; -+ return sprintf(buf, "%d", mem->curr_nr); -+} -+ -+static struct pool_attribute blktap_page_pool_attr_free = -+ __ATTR(free, S_IRUSR|S_IRGRP|S_IROTH, -+ blktap_page_pool_show_free, -+ NULL); -+ -+static struct attribute *blktap_page_pool_attrs[] = { -+ &blktap_page_pool_attr_size.attr, -+ &blktap_page_pool_attr_free.attr, -+ NULL, -+}; -+ -+static inline struct kobject* -+__blktap_kset_find_obj(struct kset *kset, const char *name) -+{ -+ struct kobject *k; -+ struct kobject *ret = NULL; -+ -+ spin_lock(&kset->list_lock); -+ list_for_each_entry(k, &kset->list, entry) { -+ if (kobject_name(k) && !strcmp(kobject_name(k), name)) { -+ ret = kobject_get(k); -+ break; -+ } -+ } -+ spin_unlock(&kset->list_lock); -+ return ret; -+} -+ -+static ssize_t -+blktap_page_pool_show_attr(struct kobject *kobj, struct attribute *kattr, -+ char *buf) -+{ -+ struct blktap_page_pool *pool = kobj_to_pool(kobj); -+ struct pool_attribute *attr = kattr_to_pool_attr(kattr); -+ -+ if (attr->show) -+ return attr->show(pool, buf); -+ -+ return -EIO; -+} -+ -+static ssize_t -+blktap_page_pool_store_attr(struct kobject *kobj, struct attribute *kattr, -+ const char *buf, size_t size) -+{ -+ struct blktap_page_pool *pool = kobj_to_pool(kobj); -+ struct pool_attribute *attr = kattr_to_pool_attr(kattr); -+ -+ if (attr->show) -+ return attr->store(pool, buf, size); -+ -+ return -EIO; -+} -+ -+static struct sysfs_ops blktap_page_pool_sysfs_ops = { -+ .show = blktap_page_pool_show_attr, -+ .store = blktap_page_pool_store_attr, -+}; -+ -+static void -+blktap_page_pool_release(struct kobject *kobj) -+{ -+ struct blktap_page_pool *pool = kobj_to_pool(kobj); -+ mempool_destroy(pool->bufs); -+ 
kfree(pool); -+} -+ -+struct kobj_type blktap_page_pool_ktype = { -+ .release = blktap_page_pool_release, -+ .sysfs_ops = &blktap_page_pool_sysfs_ops, -+ .default_attrs = blktap_page_pool_attrs, -+}; -+ -+static void* -+__mempool_page_alloc(gfp_t gfp_mask, void *pool_data) -+{ -+ struct page *page; -+ -+ if (!(gfp_mask & __GFP_WAIT)) -+ return NULL; -+ -+ page = alloc_page(gfp_mask); -+ if (page) -+ SetPageReserved(page); -+ -+ return page; -+} -+ -+static void -+__mempool_page_free(void *element, void *pool_data) -+{ -+ struct page *page = element; -+ -+ ClearPageReserved(page); -+ put_page(page); -+} -+ -+static struct kobject* -+blktap_page_pool_create(const char *name, int nr_pages) -+{ -+ struct blktap_page_pool *pool; -+ int err; -+ -+ pool = kzalloc(sizeof(*pool), GFP_KERNEL); -+ if (!pool) -+ goto fail; -+ -+ spin_lock_init(&pool->lock); -+ init_waitqueue_head(&pool->wait); -+ -+ pool->bufs = mempool_create(nr_pages, -+ __mempool_page_alloc, __mempool_page_free, -+ pool); -+ if (!pool->bufs) -+ goto fail_pool; -+ -+ kobject_init(&pool->kobj, &blktap_page_pool_ktype); -+ pool->kobj.kset = pool_set; -+ err = kobject_add(&pool->kobj, &pool_set->kobj, "%s", name); -+ if (err) -+ goto fail_bufs; -+ -+ return &pool->kobj; -+ -+ kobject_del(&pool->kobj); -+fail_bufs: -+ mempool_destroy(pool->bufs); -+fail_pool: -+ kfree(pool); -+fail: -+ return NULL; -+} -+ -+struct blktap_page_pool* -+blktap_page_pool_get(const char *name) -+{ -+ struct kobject *kobj; -+ -+ kobj = __blktap_kset_find_obj(pool_set, name); -+ if (!kobj) -+ kobj = blktap_page_pool_create(name, -+ POOL_DEFAULT_PAGES); -+ if (!kobj) -+ return ERR_PTR(-ENOMEM); -+ -+ return kobj_to_pool(kobj); -+} -+ -+int __init -+blktap_page_pool_init(struct kobject *parent) -+{ -+ request_cache = -+ kmem_cache_create("blktap-request", -+ sizeof(struct blktap_request), 0, -+ 0, blktap_request_ctor); -+ if (!request_cache) -+ return -ENOMEM; -+ -+ request_pool = -+ mempool_create_slab_pool(POOL_MIN_REQS, 
request_cache); -+ if (!request_pool) -+ return -ENOMEM; -+ -+ pool_set = kset_create_and_add("pools", NULL, parent); -+ if (!pool_set) -+ return -ENOMEM; -+ -+ return 0; -+} -+ -+void -+blktap_page_pool_exit(void) -+{ -+ if (pool_set) { -+ BUG_ON(!list_empty(&pool_set->list)); -+ kset_unregister(pool_set); -+ pool_set = NULL; -+ } -+ -+ if (request_pool) { -+ mempool_destroy(request_pool); -+ request_pool = NULL; -+ } -+ -+ if (request_cache) { -+ kmem_cache_destroy(request_cache); -+ request_cache = NULL; -+ } -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/xen/blktap2-new/ring.c 2011-02-24 13:49:49.000000000 +0100 -@@ -0,0 +1,550 @@ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "blktap.h" -+ -+int blktap_ring_major; -+static struct cdev blktap_ring_cdev; -+ -+ /* -+ * BLKTAP - immediately before the mmap area, -+ * we have a bunch of pages reserved for shared memory rings. -+ */ -+#define RING_PAGES 1 -+ -+static void -+blktap_ring_read_response(struct blktap *tap, -+ const struct blkif_response *rsp) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ struct blktap_request *request; -+ int usr_idx, err; -+ -+ request = NULL; -+ -+ usr_idx = rsp->id; -+ if (usr_idx < 0 || usr_idx >= MAX_PENDING_REQS) { -+ err = -ERANGE; -+ goto invalid; -+ } -+ -+ request = ring->pending[usr_idx]; -+ -+ if (!request) { -+ err = -ESRCH; -+ goto invalid; -+ } -+ -+ if (rsp->operation != request->operation) { -+ err = -EINVAL; -+ goto invalid; -+ } -+ -+ dev_dbg(ring->dev, -+ "request %d [%p] response: %d\n", -+ request->usr_idx, request, rsp->status); -+ -+ err = rsp->status == BLKIF_RSP_OKAY ? 
0 : -EIO; -+end_request: -+ blktap_device_end_request(tap, request, err); -+ return; -+ -+invalid: -+ dev_warn(ring->dev, -+ "invalid response, idx:%d status:%d op:%d/%d: err %d\n", -+ usr_idx, rsp->status, -+ rsp->operation, request->operation, -+ err); -+ if (request) -+ goto end_request; -+} -+ -+static void -+blktap_read_ring(struct blktap *tap) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ struct blkif_response rsp; -+ RING_IDX rc, rp; -+ -+ down_read(¤t->mm->mmap_sem); -+ if (!ring->vma) { -+ up_read(¤t->mm->mmap_sem); -+ return; -+ } -+ -+ /* for each outstanding message on the ring */ -+ rp = ring->ring.sring->rsp_prod; -+ rmb(); -+ -+ for (rc = ring->ring.rsp_cons; rc != rp; rc++) { -+ memcpy(&rsp, RING_GET_RESPONSE(&ring->ring, rc), sizeof(rsp)); -+ blktap_ring_read_response(tap, &rsp); -+ } -+ -+ ring->ring.rsp_cons = rc; -+ -+ up_read(¤t->mm->mmap_sem); -+} -+ -+static int blktap_ring_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -+{ -+ return VM_FAULT_SIGBUS; -+} -+ -+static void -+blktap_ring_fail_pending(struct blktap *tap) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ struct blktap_request *request; -+ int usr_idx; -+ -+ for (usr_idx = 0; usr_idx < MAX_PENDING_REQS; usr_idx++) { -+ request = ring->pending[usr_idx]; -+ if (!request) -+ continue; -+ -+ blktap_device_end_request(tap, request, -EIO); -+ } -+} -+ -+static void -+blktap_ring_vm_close(struct vm_area_struct *vma) -+{ -+ struct blktap *tap = vma->vm_private_data; -+ struct blktap_ring *ring = &tap->ring; -+ struct page *page = virt_to_page(ring->ring.sring); -+ -+ blktap_ring_fail_pending(tap); -+ -+ zap_page_range(vma, vma->vm_start, PAGE_SIZE, NULL); -+ ClearPageReserved(page); -+ __free_page(page); -+ -+ ring->vma = NULL; -+ -+ if (test_bit(BLKTAP_SHUTDOWN_REQUESTED, &tap->dev_inuse)) -+ blktap_control_destroy_tap(tap); -+} -+ -+static struct vm_operations_struct blktap_ring_vm_operations = { -+ .close = blktap_ring_vm_close, -+ .fault = blktap_ring_fault, -+}; -+ -+int 
-+blktap_ring_map_segment(struct blktap *tap, -+ struct blktap_request *request, -+ int seg) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ unsigned long uaddr; -+ -+ uaddr = MMAP_VADDR(ring->user_vstart, request->usr_idx, seg); -+ return vm_insert_page(ring->vma, uaddr, request->pages[seg]); -+} -+ -+int -+blktap_ring_map_request(struct blktap *tap, -+ struct blktap_request *request) -+{ -+ int seg, err = 0; -+ int write; -+ -+ write = request->operation == BLKIF_OP_WRITE; -+ -+ for (seg = 0; seg < request->nr_pages; seg++) { -+ if (write) -+ blktap_request_bounce(tap, request, seg, write); -+ -+ err = blktap_ring_map_segment(tap, request, seg); -+ if (err) -+ break; -+ } -+ -+ if (err) -+ blktap_ring_unmap_request(tap, request); -+ -+ return err; -+} -+ -+void -+blktap_ring_unmap_request(struct blktap *tap, -+ struct blktap_request *request) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ unsigned long uaddr; -+ unsigned size; -+ int seg, read; -+ -+ uaddr = MMAP_VADDR(ring->user_vstart, request->usr_idx, 0); -+ size = request->nr_pages << PAGE_SHIFT; -+ read = request->operation == BLKIF_OP_READ; -+ -+ if (read) -+ for (seg = 0; seg < request->nr_pages; seg++) -+ blktap_request_bounce(tap, request, seg, !read); -+ -+ zap_page_range(ring->vma, uaddr, size, NULL); -+} -+ -+void -+blktap_ring_free_request(struct blktap *tap, -+ struct blktap_request *request) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ -+ ring->pending[request->usr_idx] = NULL; -+ ring->n_pending--; -+ -+ blktap_request_free(tap, request); -+} -+ -+struct blktap_request* -+blktap_ring_make_request(struct blktap *tap) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ struct blktap_request *request; -+ int usr_idx; -+ -+ if (RING_FULL(&ring->ring)) -+ return ERR_PTR(-ENOSPC); -+ -+ request = blktap_request_alloc(tap); -+ if (!request) -+ return ERR_PTR(-ENOMEM); -+ -+ for (usr_idx = 0; usr_idx < BLK_RING_SIZE; usr_idx++) -+ if (!ring->pending[usr_idx]) -+ break; -+ -+ BUG_ON(usr_idx >= 
BLK_RING_SIZE); -+ -+ request->tap = tap; -+ request->usr_idx = usr_idx; -+ -+ ring->pending[usr_idx] = request; -+ ring->n_pending++; -+ -+ return request; -+} -+ -+void -+blktap_ring_submit_request(struct blktap *tap, -+ struct blktap_request *request) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ struct blkif_request *breq; -+ struct scatterlist *sg; -+ int i, nsecs = 0; -+ -+ dev_dbg(ring->dev, -+ "request %d [%p] submit\n", request->usr_idx, request); -+ -+ breq = RING_GET_REQUEST(&ring->ring, ring->ring.req_prod_pvt); -+ -+ breq->id = request->usr_idx; -+ breq->sector_number = blk_rq_pos(request->rq); -+ breq->handle = 0; -+ breq->operation = request->operation; -+ breq->nr_segments = request->nr_pages; -+ -+ blktap_for_each_sg(sg, request, i) { -+ struct blkif_request_segment *seg = &breq->seg[i]; -+ int first, count; -+ -+ count = sg->length >> 9; -+ first = sg->offset >> 9; -+ -+ seg->first_sect = first; -+ seg->last_sect = first + count - 1; -+ -+ nsecs += count; -+ } -+ -+ ring->ring.req_prod_pvt++; -+ -+ do_gettimeofday(&request->time); -+ -+ -+ if (request->operation == BLKIF_OP_WRITE) { -+ tap->stats.st_wr_sect += nsecs; -+ tap->stats.st_wr_req++; -+ } -+ -+ if (request->operation == BLKIF_OP_READ) { -+ tap->stats.st_rd_sect += nsecs; -+ tap->stats.st_rd_req++; -+ } -+} -+ -+static int -+blktap_ring_open(struct inode *inode, struct file *filp) -+{ -+ struct blktap *tap = NULL; -+ int minor; -+ -+ minor = iminor(inode); -+ -+ if (minor < blktap_max_minor) -+ tap = blktaps[minor]; -+ -+ if (!tap) -+ return -ENXIO; -+ -+ if (test_bit(BLKTAP_SHUTDOWN_REQUESTED, &tap->dev_inuse)) -+ return -ENXIO; -+ -+ if (tap->ring.task) -+ return -EBUSY; -+ -+ filp->private_data = tap; -+ tap->ring.task = current; -+ -+ return 0; -+} -+ -+static int -+blktap_ring_release(struct inode *inode, struct file *filp) -+{ -+ struct blktap *tap = filp->private_data; -+ -+ blktap_device_destroy_sync(tap); -+ -+ tap->ring.task = NULL; -+ -+ if 
(test_bit(BLKTAP_SHUTDOWN_REQUESTED, &tap->dev_inuse)) -+ blktap_control_destroy_tap(tap); -+ -+ return 0; -+} -+ -+static int -+blktap_ring_mmap(struct file *filp, struct vm_area_struct *vma) -+{ -+ struct blktap *tap = filp->private_data; -+ struct blktap_ring *ring = &tap->ring; -+ struct blkif_sring *sring; -+ struct page *page = NULL; -+ int err; -+ -+ if (ring->vma) -+ return -EBUSY; -+ -+ page = alloc_page(GFP_KERNEL|__GFP_ZERO); -+ if (!page) -+ return -ENOMEM; -+ -+ SetPageReserved(page); -+ -+ err = vm_insert_page(vma, vma->vm_start, page); -+ if (err) -+ goto fail; -+ -+ sring = page_address(page); -+ SHARED_RING_INIT(sring); -+ FRONT_RING_INIT(&ring->ring, sring, PAGE_SIZE); -+ -+ ring->ring_vstart = vma->vm_start; -+ ring->user_vstart = ring->ring_vstart + PAGE_SIZE; -+ -+ vma->vm_private_data = tap; -+ -+ vma->vm_flags |= VM_DONTCOPY; -+ vma->vm_flags |= VM_RESERVED; -+ -+ vma->vm_ops = &blktap_ring_vm_operations; -+ -+ ring->vma = vma; -+ return 0; -+ -+fail: -+ if (page) { -+ zap_page_range(vma, vma->vm_start, PAGE_SIZE, NULL); -+ ClearPageReserved(page); -+ __free_page(page); -+ } -+ -+ return err; -+} -+ -+static int -+blktap_ring_ioctl(struct inode *inode, struct file *filp, -+ unsigned int cmd, unsigned long arg) -+{ -+ struct blktap *tap = filp->private_data; -+ struct blktap_ring *ring = &tap->ring; -+ -+ BTDBG("%d: cmd: %u, arg: %lu\n", tap->minor, cmd, arg); -+ -+ if (!ring->vma || ring->vma->vm_mm != current->mm) -+ return -EACCES; -+ -+ switch(cmd) { -+ case BLKTAP2_IOCTL_KICK_FE: -+ -+ blktap_read_ring(tap); -+ return 0; -+ -+ case BLKTAP2_IOCTL_CREATE_DEVICE: { -+ struct blktap_params params; -+ void __user *ptr = (void *)arg; -+ -+ if (!arg) -+ return -EINVAL; -+ -+ if (copy_from_user(¶ms, ptr, sizeof(params))) -+ return -EFAULT; -+ -+ return blktap_device_create(tap, ¶ms); -+ } -+ -+ case BLKTAP2_IOCTL_REMOVE_DEVICE: -+ -+ return blktap_device_destroy(tap); -+ } -+ -+ return -ENOIOCTLCMD; -+} -+ -+static unsigned int 
blktap_ring_poll(struct file *filp, poll_table *wait) -+{ -+ struct blktap *tap = filp->private_data; -+ struct blktap_ring *ring = &tap->ring; -+ int work; -+ -+ poll_wait(filp, &tap->pool->wait, wait); -+ poll_wait(filp, &ring->poll_wait, wait); -+ -+ down_read(¤t->mm->mmap_sem); -+ if (ring->vma && tap->device.gd) -+ blktap_device_run_queue(tap); -+ up_read(¤t->mm->mmap_sem); -+ -+ work = ring->ring.req_prod_pvt - ring->ring.sring->req_prod; -+ RING_PUSH_REQUESTS(&ring->ring); -+ -+ if (work || -+ ring->ring.sring->private.tapif_user.msg || -+ test_and_clear_bit(BLKTAP_DEVICE_CLOSED, &tap->dev_inuse)) -+ return POLLIN | POLLRDNORM; -+ -+ return 0; -+} -+ -+static struct file_operations blktap_ring_file_operations = { -+ .owner = THIS_MODULE, -+ .open = blktap_ring_open, -+ .release = blktap_ring_release, -+ .ioctl = blktap_ring_ioctl, -+ .mmap = blktap_ring_mmap, -+ .poll = blktap_ring_poll, -+}; -+ -+void -+blktap_ring_kick_user(struct blktap *tap) -+{ -+ wake_up(&tap->ring.poll_wait); -+} -+ -+int -+blktap_ring_destroy(struct blktap *tap) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ -+ if (ring->task || ring->vma) -+ return -EBUSY; -+ -+ return 0; -+} -+ -+int -+blktap_ring_create(struct blktap *tap) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ -+ init_waitqueue_head(&ring->poll_wait); -+ ring->devno = MKDEV(blktap_ring_major, tap->minor); -+ -+ return 0; -+} -+ -+size_t -+blktap_ring_debug(struct blktap *tap, char *buf, size_t size) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ char *s = buf, *end = buf + size; -+ int usr_idx; -+ -+ s += snprintf(s, end - s, -+ "begin pending:%d\n", ring->n_pending); -+ -+ for (usr_idx = 0; usr_idx < MAX_PENDING_REQS; usr_idx++) { -+ struct blktap_request *request; -+ struct timeval *time; -+ int write; -+ -+ request = ring->pending[usr_idx]; -+ if (!request) -+ continue; -+ -+ write = request->operation == BLKIF_OP_WRITE; -+ time = &request->time; -+ -+ s += snprintf(s, end - s, -+ "%02d: usr_idx:%02d " -+ "op:%c 
nr_pages:%02d time:%lu.%09lu\n", -+ usr_idx, request->usr_idx, -+ write ? 'W' : 'R', request->nr_pages, -+ time->tv_sec, time->tv_usec); -+ } -+ -+ s += snprintf(s, end - s, "end pending\n"); -+ -+ return s - buf; -+} -+ -+ -+int __init -+blktap_ring_init(void) -+{ -+ dev_t dev = 0; -+ int err; -+ -+ cdev_init(&blktap_ring_cdev, &blktap_ring_file_operations); -+ blktap_ring_cdev.owner = THIS_MODULE; -+ -+ err = alloc_chrdev_region(&dev, 0, MAX_BLKTAP_DEVICE, "blktap2"); -+ if (err < 0) { -+ BTERR("error registering ring devices: %d\n", err); -+ return err; -+ } -+ -+ err = cdev_add(&blktap_ring_cdev, dev, MAX_BLKTAP_DEVICE); -+ if (err) { -+ BTERR("error adding ring device: %d\n", err); -+ unregister_chrdev_region(dev, MAX_BLKTAP_DEVICE); -+ return err; -+ } -+ -+ blktap_ring_major = MAJOR(dev); -+ BTINFO("blktap ring major: %d\n", blktap_ring_major); -+ -+ return 0; -+} -+ -+void -+blktap_ring_exit(void) -+{ -+ if (!blktap_ring_major) -+ return; -+ -+ cdev_del(&blktap_ring_cdev); -+ unregister_chrdev_region(MKDEV(blktap_ring_major, 0), -+ MAX_BLKTAP_DEVICE); -+ -+ blktap_ring_major = 0; -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/xen/blktap2-new/sysfs.c 2011-02-24 13:49:49.000000000 +0100 -@@ -0,0 +1,288 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "blktap.h" -+ -+int blktap_debug_level = 1; -+ -+static struct class *class; -+ -+static ssize_t -+blktap_sysfs_set_name(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) -+{ -+ struct blktap *tap; -+ -+ tap = dev_get_drvdata(dev); -+ if (!tap) -+ return 0; -+ -+ if (size >= BLKTAP2_MAX_MESSAGE_LEN) -+ return -ENAMETOOLONG; -+ -+ if (strnlen(buf, size) != size) -+ return -EINVAL; -+ -+ strcpy(tap->name, buf); -+ -+ return size; -+} -+ -+static ssize_t -+blktap_sysfs_get_name(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ struct blktap *tap; -+ ssize_t size; -+ -+ tap = dev_get_drvdata(dev); -+ if (!tap) 
-+ return 0; -+ -+ if (tap->name[0]) -+ size = sprintf(buf, "%s\n", tap->name); -+ else -+ size = sprintf(buf, "%d\n", tap->minor); -+ -+ return size; -+} -+static DEVICE_ATTR(name, S_IRUGO|S_IWUSR, -+ blktap_sysfs_get_name, blktap_sysfs_set_name); -+ -+static void -+blktap_sysfs_remove_work(struct work_struct *work) -+{ -+ struct blktap *tap -+ = container_of(work, struct blktap, remove_work); -+ blktap_control_destroy_tap(tap); -+} -+ -+static ssize_t -+blktap_sysfs_remove_device(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t size) -+{ -+ struct blktap *tap; -+ int err; -+ -+ tap = dev_get_drvdata(dev); -+ if (!tap) -+ return size; -+ -+ if (test_and_set_bit(BLKTAP_SHUTDOWN_REQUESTED, &tap->dev_inuse)) -+ goto wait; -+ -+ if (tap->ring.vma) { -+ struct blkif_sring *sring = tap->ring.ring.sring; -+ sring->private.tapif_user.msg = BLKTAP2_RING_MESSAGE_CLOSE; -+ blktap_ring_kick_user(tap); -+ } else { -+ INIT_WORK(&tap->remove_work, blktap_sysfs_remove_work); -+ schedule_work(&tap->remove_work); -+ } -+wait: -+ err = wait_event_interruptible(tap->remove_wait, -+ !dev_get_drvdata(dev)); -+ if (err) -+ return err; -+ -+ return size; -+} -+static DEVICE_ATTR(remove, S_IWUSR, NULL, blktap_sysfs_remove_device); -+ -+static ssize_t -+blktap_sysfs_debug_device(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ struct blktap *tap; -+ char *s = buf, *end = buf + PAGE_SIZE; -+ -+ tap = dev_get_drvdata(dev); -+ if (!tap) -+ return 0; -+ -+ s += blktap_control_debug(tap, s, end - s); -+ -+ s += blktap_request_debug(tap, s, end - s); -+ -+ s += blktap_device_debug(tap, s, end - s); -+ -+ s += blktap_ring_debug(tap, s, end - s); -+ -+ return s - buf; -+} -+static DEVICE_ATTR(debug, S_IRUGO, blktap_sysfs_debug_device, NULL); -+ -+static ssize_t -+blktap_sysfs_show_task(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ struct blktap *tap; -+ ssize_t rv = 0; -+ -+ tap = dev_get_drvdata(dev); -+ if (!tap) -+ 
return 0; -+ -+ if (tap->ring.task) -+ rv = sprintf(buf, "%d\n", tap->ring.task->pid); -+ -+ return rv; -+} -+static DEVICE_ATTR(task, S_IRUGO, blktap_sysfs_show_task, NULL); -+ -+static ssize_t -+blktap_sysfs_show_pool(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct blktap *tap = dev_get_drvdata(dev); -+ return sprintf(buf, "%s", kobject_name(&tap->pool->kobj)); -+} -+ -+static ssize_t -+blktap_sysfs_store_pool(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t size) -+{ -+ struct blktap *tap = dev_get_drvdata(dev); -+ struct blktap_page_pool *pool, *tmp = tap->pool; -+ -+ if (tap->device.gd) -+ return -EBUSY; -+ -+ pool = blktap_page_pool_get(buf); -+ if (IS_ERR(pool)) -+ return PTR_ERR(pool); -+ -+ tap->pool = pool; -+ kobject_put(&tmp->kobj); -+ -+ return size; -+} -+DEVICE_ATTR(pool, S_IRUSR|S_IWUSR, -+ blktap_sysfs_show_pool, blktap_sysfs_store_pool); -+ -+int -+blktap_sysfs_create(struct blktap *tap) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ struct device *dev; -+ int err = 0; -+ -+ init_waitqueue_head(&tap->remove_wait); -+ -+ dev = device_create(class, NULL, ring->devno, -+ tap, "blktap%d", tap->minor); -+ if (IS_ERR(dev)) -+ err = PTR_ERR(dev); -+ if (!err) -+ err = device_create_file(dev, &dev_attr_name); -+ if (!err) -+ err = device_create_file(dev, &dev_attr_remove); -+ if (!err) -+ err = device_create_file(dev, &dev_attr_debug); -+ if (!err) -+ err = device_create_file(dev, &dev_attr_task); -+ if (!err) -+ err = device_create_file(dev, &dev_attr_pool); -+ if (!err) -+ ring->dev = dev; -+ else -+ device_unregister(dev); -+ -+ return err; -+} -+ -+void -+blktap_sysfs_destroy(struct blktap *tap) -+{ -+ struct blktap_ring *ring = &tap->ring; -+ struct device *dev; -+ -+ dev = ring->dev; -+ -+ if (!dev) -+ return; -+ -+ dev_set_drvdata(dev, NULL); -+ wake_up(&tap->remove_wait); -+ -+ device_unregister(dev); -+ ring->dev = NULL; -+} -+ -+static ssize_t 
-+blktap_sysfs_show_verbosity(struct class *class, char *buf) -+{ -+ return sprintf(buf, "%d\n", blktap_debug_level); -+} -+ -+static ssize_t -+blktap_sysfs_set_verbosity(struct class *class, const char *buf, size_t size) -+{ -+ int level; -+ -+ if (sscanf(buf, "%d", &level) == 1) { -+ blktap_debug_level = level; -+ return size; -+ } -+ -+ return -EINVAL; -+} -+static CLASS_ATTR(verbosity, S_IRUGO|S_IWUSR, -+ blktap_sysfs_show_verbosity, blktap_sysfs_set_verbosity); -+ -+static ssize_t -+blktap_sysfs_show_devices(struct class *class, char *buf) -+{ -+ int i, ret; -+ struct blktap *tap; -+ -+ mutex_lock(&blktap_lock); -+ -+ ret = 0; -+ for (i = 0; i < blktap_max_minor; i++) { -+ tap = blktaps[i]; -+ if (!tap) -+ continue; -+ -+ if (!test_bit(BLKTAP_DEVICE, &tap->dev_inuse)) -+ continue; -+ -+ ret += sprintf(buf + ret, "%d %s\n", tap->minor, tap->name); -+ } -+ -+ mutex_unlock(&blktap_lock); -+ -+ return ret; -+} -+static CLASS_ATTR(devices, S_IRUGO, blktap_sysfs_show_devices, NULL); -+ -+void -+blktap_sysfs_exit(void) -+{ -+ if (class) -+ class_destroy(class); -+} -+ -+int __init -+blktap_sysfs_init(void) -+{ -+ struct class *cls; -+ int err = 0; -+ -+ cls = class_create(THIS_MODULE, "blktap2"); -+ if (IS_ERR(cls)) -+ err = PTR_ERR(cls); -+ if (!err) -+ err = class_create_file(cls, &class_attr_verbosity); -+ if (!err) -+ err = class_create_file(cls, &class_attr_devices); -+ if (!err) -+ class = cls; -+ else -+ class_destroy(cls); -+ -+ return err; -+} diff --git a/patches.xen/xen3-auto-common.diff b/patches.xen/xen3-auto-common.diff deleted file mode 100644 index abfdd0d..0000000 --- a/patches.xen/xen3-auto-common.diff +++ /dev/null @@ -1,3992 +0,0 @@ -Subject: xen3 common -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) -Patch-mainline: n/a -Acked-by: jbeulich@novell.com - -List of files that don't require modification anymore (and hence got -removed from this patch), for reference and in case upstream wants to -take the forward 
porting patches: -2.6.19/include/linux/skbuff.h -2.6.19/net/core/dev.c -2.6.19/net/core/skbuff.c -2.6.19/net/ipv4/netfilter/nf_nat_proto_tcp.c -2.6.19/net/ipv4/netfilter/nf_nat_proto_udp.c -2.6.19/net/ipv4/xfrm4_output.c -2.6.22/include/linux/sched.h -2.6.22/kernel/softlockup.c -2.6.22/kernel/timer.c -2.6.25/mm/highmem.c -2.6.30/include/linux/pci_regs.h - ---- head-2011-03-11.orig/drivers/Makefile 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/Makefile 2011-01-31 14:53:38.000000000 +0100 -@@ -35,6 +35,7 @@ obj-$(CONFIG_PARPORT) += parport/ - obj-y += base/ block/ misc/ mfd/ nfc/ - obj-$(CONFIG_NUBUS) += nubus/ - obj-y += macintosh/ -+obj-$(CONFIG_XEN) += xen/ - obj-$(CONFIG_IDE) += ide/ - obj-$(CONFIG_SCSI) += scsi/ - obj-$(CONFIG_ATA) += ata/ ---- head-2011-03-11.orig/drivers/acpi/Makefile 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/acpi/Makefile 2011-01-31 14:53:38.000000000 +0100 -@@ -67,6 +67,9 @@ obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys. - processor-y := processor_driver.o processor_throttling.o - processor-y += processor_idle.o processor_thermal.o - processor-$(CONFIG_CPU_FREQ) += processor_perflib.o -+ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL -+processor-objs += processor_perflib.o processor_extcntl.o -+endif - - obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o - obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o ---- head-2011-03-11.orig/drivers/acpi/acpica/hwsleep.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/acpi/acpica/hwsleep.c 2011-01-31 14:53:38.000000000 +0100 -@@ -236,7 +236,11 @@ acpi_status asmlinkage acpi_enter_sleep_ - u32 pm1b_control; - struct acpi_bit_register_info *sleep_type_reg_info; - struct acpi_bit_register_info *sleep_enable_reg_info; -+#if !(defined(CONFIG_XEN) && defined(CONFIG_X86)) - u32 in_value; -+#else -+ int err; -+#endif - struct acpi_object_list arg_list; - union acpi_object arg; - acpi_status status; -@@ -347,6 +351,7 @@ acpi_status asmlinkage acpi_enter_sleep_ - - /* Write 
#2: Write both SLP_TYP + SLP_EN */ - -+#if !(defined(CONFIG_XEN) && defined(CONFIG_X86)) - status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); -@@ -386,6 +391,16 @@ acpi_status asmlinkage acpi_enter_sleep_ - /* Spin until we wake */ - - } while (!in_value); -+#else -+ /* PV ACPI just need check hypercall return value */ -+ err = acpi_notify_hypervisor_state(sleep_state, -+ PM1Acontrol, PM1Bcontrol); -+ if (err) { -+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, -+ "Hypervisor failure [%d]\n", err)); -+ return_ACPI_STATUS(AE_ERROR); -+ } -+#endif - - return_ACPI_STATUS(AE_OK); - } ---- head-2011-03-11.orig/drivers/acpi/processor_driver.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/acpi/processor_driver.c 2011-01-31 14:53:38.000000000 +0100 -@@ -325,7 +325,8 @@ static int acpi_processor_get_info(struc - */ - if (pr->id == -1) { - if (ACPI_FAILURE -- (acpi_processor_hotadd_init(pr->handle, &pr->id))) { -+ (acpi_processor_hotadd_init(pr->handle, &pr->id)) && -+ !processor_cntl_external()) { - return -ENODEV; - } - } -@@ -376,7 +377,11 @@ static int acpi_processor_get_info(struc - return 0; - } - -+#ifndef CONFIG_XEN - static DEFINE_PER_CPU(void *, processor_device_array); -+#else -+static void *processor_device_array[NR_ACPI_CPUS]; -+#endif - - static void acpi_processor_notify(struct acpi_device *device, u32 event) - { -@@ -462,8 +467,11 @@ static int __cpuinit acpi_processor_add( - strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); - device->driver_data = pr; - -+ processor_extcntl_init(); -+ - result = acpi_processor_get_info(device); -- if (result) { -+ if (result || -+ ((pr->id == -1) && !processor_cntl_external())) { - /* Processor is physically not present */ - return 0; - } -@@ -473,23 +481,36 @@ static int __cpuinit acpi_processor_add( - return 0; - #endif - -- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); -+ BUG_ON(!processor_cntl_external() && -+ ((pr->id >= nr_cpu_ids) 
|| (pr->id < 0))); - - /* - * Buggy BIOS check - * ACPI id of processors can be reported wrongly by the BIOS. - * Don't trust it blindly - */ -+#ifndef CONFIG_XEN - if (per_cpu(processor_device_array, pr->id) != NULL && - per_cpu(processor_device_array, pr->id) != device) { -+#else -+ BUG_ON(pr->acpi_id >= NR_ACPI_CPUS); -+ if (processor_device_array[pr->acpi_id] != NULL && -+ processor_device_array[pr->acpi_id] != device) { -+#endif - printk(KERN_WARNING "BIOS reported wrong ACPI id " - "for the processor\n"); - result = -ENODEV; - goto err_free_cpumask; - } -+#ifndef CONFIG_XEN - per_cpu(processor_device_array, pr->id) = device; - - per_cpu(processors, pr->id) = pr; -+#else -+ processor_device_array[pr->acpi_id] = device; -+ if (pr->id != -1) -+ per_cpu(processors, pr->id) = pr; -+#endif - - sysdev = get_cpu_sysdev(pr->id); - if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { -@@ -497,16 +518,28 @@ static int __cpuinit acpi_processor_add( - goto err_free_cpumask; - } - --#ifdef CONFIG_CPU_FREQ -+#if defined(CONFIG_CPU_FREQ) || defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL) - acpi_processor_ppc_has_changed(pr, 0); - #endif -- acpi_processor_get_throttling_info(pr); -- acpi_processor_get_limit_info(pr); - -+ /* -+ * pr->id may equal to -1 while processor_cntl_external enabled. -+ * throttle and thermal module don't support this case. -+ * Tx only works when dom0 vcpu == pcpu num by far, as we give -+ * control to dom0. 
-+ */ -+ if (pr->id != -1) { -+ acpi_processor_get_throttling_info(pr); -+ acpi_processor_get_limit_info(pr); -+ } - - if (cpuidle_get_driver() == &acpi_idle_driver) - acpi_processor_power_init(pr, device); - -+ result = processor_extcntl_prepare(pr); -+ if (result) -+ goto end; -+ - pr->cdev = thermal_cooling_device_register("Processor", device, - &processor_cooling_ops); - if (IS_ERR(pr->cdev)) { -@@ -556,7 +589,7 @@ static int acpi_processor_remove(struct - - pr = acpi_driver_data(device); - -- if (pr->id >= nr_cpu_ids) -+ if (!processor_cntl_external() && pr->id >= nr_cpu_ids) - goto free; - - if (type == ACPI_BUS_REMOVAL_EJECT) { -@@ -575,8 +608,14 @@ static int acpi_processor_remove(struct - pr->cdev = NULL; - } - -+#ifndef CONFIG_XEN - per_cpu(processors, pr->id) = NULL; - per_cpu(processor_device_array, pr->id) = NULL; -+#else -+ if (pr->id != -1) -+ per_cpu(processors, pr->id) = NULL; -+ processor_device_array[pr->acpi_id] = NULL; -+#endif - - free: - free_cpumask_var(pr->throttling.shared_cpu_map); -@@ -632,6 +671,10 @@ int acpi_processor_device_add(acpi_handl - return -ENODEV; - } - -+ if (processor_cntl_external() && acpi_driver_data(*device)) -+ processor_notify_external(acpi_driver_data(*device), -+ PROCESSOR_HOTPLUG, HOTPLUG_TYPE_ADD); -+ - return 0; - } - -@@ -661,6 +704,10 @@ static void __ref acpi_processor_hotplug - "Unable to add the device\n"); - break; - } -+ pr = acpi_driver_data(device); -+ if (processor_cntl_external() && pr) -+ processor_notify_external(pr, -+ PROCESSOR_HOTPLUG, HOTPLUG_TYPE_ADD); - break; - case ACPI_NOTIFY_EJECT_REQUEST: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, -@@ -677,6 +724,9 @@ static void __ref acpi_processor_hotplug - "Driver data is NULL, dropping EJECT\n"); - return; - } -+ if (processor_cntl_external()) -+ processor_notify_external(pr, PROCESSOR_HOTPLUG, -+ HOTPLUG_TYPE_REMOVE); - break; - default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, -@@ -741,6 +791,11 @@ static acpi_status acpi_processor_hotadd - - static int 
acpi_processor_handle_eject(struct acpi_processor *pr) - { -+#ifdef CONFIG_XEN -+ if (pr->id == -1) -+ return (0); -+#endif -+ - if (cpu_online(pr->id)) - cpu_down(pr->id); - ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/drivers/acpi/processor_extcntl.c 2011-01-31 14:53:38.000000000 +0100 -@@ -0,0 +1,241 @@ -+/* -+ * processor_extcntl.c - channel to external control logic -+ * -+ * Copyright (C) 2008, Intel corporation -+ * -+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or (at -+ * your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#define ACPI_PROCESSOR_COMPONENT 0x01000000 -+#define ACPI_PROCESSOR_CLASS "processor" -+#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" -+#define _COMPONENT ACPI_PROCESSOR_COMPONENT -+ACPI_MODULE_NAME("acpi_processor") -+ -+static int processor_extcntl_parse_csd(struct acpi_processor *pr); -+static int processor_extcntl_get_performance(struct acpi_processor *pr); -+/* -+ * External processor control logic may register with its own set of -+ * ops to get ACPI related notification. One example is like VMM. 
-+ */ -+const struct processor_extcntl_ops *processor_extcntl_ops; -+EXPORT_SYMBOL(processor_extcntl_ops); -+ -+static int processor_notify_smm(void) -+{ -+ acpi_status status; -+ static int is_done = 0; -+ -+ /* only need successfully notify BIOS once */ -+ /* avoid double notification which may lead to unexpected result */ -+ if (is_done) -+ return 0; -+ -+ /* Can't write pstate_cnt to smi_cmd if either value is zero */ -+ if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) { -+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,"No SMI port or pstate_cnt\n")); -+ return 0; -+ } -+ -+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, -+ "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n", -+ acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd)); -+ -+ /* FADT v1 doesn't support pstate_cnt, many BIOS vendors use -+ * it anyway, so we need to support it... */ -+ if (acpi_fadt_is_v1) { -+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, -+ "Using v1.0 FADT reserved value for pstate_cnt\n")); -+ } -+ -+ status = acpi_os_write_port(acpi_fadt.smi_cmd, -+ (u32) acpi_fadt.pstate_cnt, 8); -+ if (ACPI_FAILURE(status)) -+ return status; -+ -+ is_done = 1; -+ -+ return 0; -+} -+ -+int processor_notify_external(struct acpi_processor *pr, int event, int type) -+{ -+ int ret = -EINVAL; -+ -+ if (!processor_cntl_external()) -+ return -EINVAL; -+ -+ switch (event) { -+ case PROCESSOR_PM_INIT: -+ case PROCESSOR_PM_CHANGE: -+ if ((type >= PM_TYPE_MAX) || -+ !processor_extcntl_ops->pm_ops[type]) -+ break; -+ -+ ret = processor_extcntl_ops->pm_ops[type](pr, event); -+ break; -+ case PROCESSOR_HOTPLUG: -+ if (processor_extcntl_ops->hotplug) -+ ret = processor_extcntl_ops->hotplug(pr, type); -+ break; -+ default: -+ printk(KERN_ERR "Unsupport processor events %d.\n", event); -+ break; -+ } -+ -+ return ret; -+} -+ -+/* -+ * External control logic can decide to grab full or part of physical -+ * processor control bits. 
Take a VMM for example, physical processors -+ * are owned by VMM and thus existence information like hotplug is -+ * always required to be notified to VMM. Similar is processor idle -+ * state which is also necessarily controlled by VMM. But for other -+ * control bits like performance/throttle states, VMM may choose to -+ * control or not upon its own policy. -+ */ -+void processor_extcntl_init(void) -+{ -+ if (!processor_extcntl_ops) -+ arch_acpi_processor_init_extcntl(&processor_extcntl_ops); -+} -+ -+/* -+ * This is called from ACPI processor init, and targeted to hold -+ * some tricky housekeeping jobs to satisfy external control model. -+ * For example, we may put dependency parse stub here for idle -+ * and performance state. Those information may be not available -+ * if splitting from dom0 control logic like cpufreq driver. -+ */ -+int processor_extcntl_prepare(struct acpi_processor *pr) -+{ -+ /* parse cstate dependency information */ -+ if (processor_pm_external()) -+ processor_extcntl_parse_csd(pr); -+ -+ /* Initialize performance states */ -+ if (processor_pmperf_external()) -+ processor_extcntl_get_performance(pr); -+ -+ return 0; -+} -+ -+/* -+ * Currently no _CSD is implemented which is why existing ACPI code -+ * doesn't parse _CSD at all. But to keep interface complete with -+ * external control logic, we put a placeholder here for future -+ * compatibility. -+ */ -+static int processor_extcntl_parse_csd(struct acpi_processor *pr) -+{ -+ int i; -+ -+ for (i = 0; i < pr->power.count; i++) { -+ if (!pr->power.states[i].valid) -+ continue; -+ -+ /* No dependency by default */ -+ pr->power.states[i].domain_info = NULL; -+ pr->power.states[i].csd_count = 0; -+ } -+ -+ return 0; -+} -+ -+/* -+ * Existing ACPI module does parse performance states at some point, -+ * when acpi-cpufreq driver is loaded which however is something -+ * we'd like to disable to avoid confliction with external control -+ * logic. 
So we have to collect raw performance information here -+ * when ACPI processor object is found and started. -+ */ -+static int processor_extcntl_get_performance(struct acpi_processor *pr) -+{ -+ int ret; -+ struct acpi_processor_performance *perf; -+ struct acpi_psd_package *pdomain; -+ -+ if (pr->performance) -+ return -EBUSY; -+ -+ perf = kzalloc(sizeof(struct acpi_processor_performance), GFP_KERNEL); -+ if (!perf) -+ return -ENOMEM; -+ -+ pr->performance = perf; -+ /* Get basic performance state information */ -+ ret = acpi_processor_get_performance_info(pr); -+ if (ret < 0) -+ goto err_out; -+ -+ /* -+ * Well, here we need retrieve performance dependency information -+ * from _PSD object. The reason why existing interface is not used -+ * is due to the reason that existing interface sticks to Linux cpu -+ * id to construct some bitmap, however we want to split ACPI -+ * processor objects from Linux cpu id logic. For example, even -+ * when Linux is configured as UP, we still want to parse all ACPI -+ * processor objects to external logic. In this case, it's preferred -+ * to use ACPI ID instead. -+ */ -+ pdomain = &pr->performance->domain_info; -+ pdomain->num_processors = 0; -+ ret = acpi_processor_get_psd(pr); -+ if (ret < 0) { -+ /* -+ * _PSD is optional - assume no coordination if absent (or -+ * broken), matching native kernels' behavior. 
-+ */ -+ pdomain->num_entries = ACPI_PSD_REV0_ENTRIES; -+ pdomain->revision = ACPI_PSD_REV0_REVISION; -+ pdomain->domain = pr->acpi_id; -+ pdomain->coord_type = DOMAIN_COORD_TYPE_SW_ALL; -+ pdomain->num_processors = 1; -+ } -+ -+ /* Some sanity check */ -+ if ((pdomain->revision != ACPI_PSD_REV0_REVISION) || -+ (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) || -+ ((pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL) && -+ (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY) && -+ (pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL))) { -+ ret = -EINVAL; -+ goto err_out; -+ } -+ -+ /* Last step is to notify BIOS that external logic exists */ -+ processor_notify_smm(); -+ -+ processor_notify_external(pr, PROCESSOR_PM_INIT, PM_TYPE_PERF); -+ -+ return 0; -+err_out: -+ pr->performance = NULL; -+ kfree(perf); -+ return ret; -+} ---- head-2011-03-11.orig/drivers/acpi/processor_idle.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/acpi/processor_idle.c 2011-01-31 14:53:38.000000000 +0100 -@@ -458,7 +458,8 @@ static int acpi_processor_get_power_info - */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); -- } else { -+ /* This doesn't apply to external control case */ -+ } else if (!processor_pm_external()) { - continue; - } - if (cx.type == ACPI_STATE_C1 && -@@ -497,6 +498,12 @@ static int acpi_processor_get_power_info - - cx.power = obj->integer.value; - -+#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL -+ /* cache control methods to notify external logic */ -+ if (processor_pm_external()) -+ memcpy(&cx.reg, reg, sizeof(*reg)); -+#endif -+ - current_count++; - memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); - -@@ -1130,6 +1137,11 @@ int __cpuinit acpi_processor_power_init( - if (cpuidle_register_device(&pr->power.dev)) - return -EIO; - } -+ -+ if (processor_pm_external()) -+ processor_notify_external(pr, -+ PROCESSOR_PM_INIT, PM_TYPE_IDLE); -+ - return 0; - } - ---- 
head-2011-03-11.orig/drivers/acpi/processor_perflib.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/acpi/processor_perflib.c 2011-01-31 14:53:38.000000000 +0100 -@@ -79,6 +79,7 @@ MODULE_PARM_DESC(ignore_ppc, "If the fre - - static int acpi_processor_ppc_status; - -+#ifdef CONFIG_CPU_FREQ - static int acpi_processor_ppc_notifier(struct notifier_block *nb, - unsigned long event, void *data) - { -@@ -121,6 +122,7 @@ static int acpi_processor_ppc_notifier(s - static struct notifier_block acpi_ppc_notifier_block = { - .notifier_call = acpi_processor_ppc_notifier, - }; -+#endif /* CONFIG_CPU_FREQ */ - - static int acpi_processor_get_platform_limit(struct acpi_processor *pr) - { -@@ -209,7 +211,12 @@ int acpi_processor_ppc_has_changed(struc - if (ret < 0) - return (ret); - else -+#ifdef CONFIG_CPU_FREQ - return cpufreq_update_policy(pr->id); -+#elif defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL) -+ return processor_notify_external(pr, -+ PROCESSOR_PM_CHANGE, PM_TYPE_PERF); -+#endif - } - - int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) -@@ -225,6 +232,7 @@ int acpi_processor_get_bios_limit(int cp - } - EXPORT_SYMBOL(acpi_processor_get_bios_limit); - -+#ifdef CONFIG_CPU_FREQ - void acpi_processor_ppc_init(void) - { - if (!cpufreq_register_notifier -@@ -243,6 +251,7 @@ void acpi_processor_ppc_exit(void) - - acpi_processor_ppc_status &= ~PPC_REGISTERED; - } -+#endif /* CONFIG_CPU_FREQ */ - - static int acpi_processor_get_performance_control(struct acpi_processor *pr) - { -@@ -390,7 +399,10 @@ static int acpi_processor_get_performanc - return result; - } - --static int acpi_processor_get_performance_info(struct acpi_processor *pr) -+#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL -+static -+#endif -+int acpi_processor_get_performance_info(struct acpi_processor *pr) - { - int result = 0; - acpi_status status = AE_OK; -@@ -435,6 +447,7 @@ static int acpi_processor_get_performanc - return result; - } - -+#ifdef CONFIG_CPU_FREQ - int 
acpi_processor_notify_smm(struct module *calling_module) - { - acpi_status status; -@@ -495,8 +508,12 @@ int acpi_processor_notify_smm(struct mod - } - - EXPORT_SYMBOL(acpi_processor_notify_smm); -+#endif /* CONFIG_CPU_FREQ */ - --static int acpi_processor_get_psd(struct acpi_processor *pr) -+#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL -+static -+#endif -+int acpi_processor_get_psd(struct acpi_processor *pr) - { - int result = 0; - acpi_status status = AE_OK; ---- head-2011-03-11.orig/drivers/acpi/sleep.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/acpi/sleep.c 2011-01-31 14:53:38.000000000 +0100 -@@ -60,6 +60,7 @@ static struct notifier_block tts_notifie - static int acpi_sleep_prepare(u32 acpi_state) - { - #ifdef CONFIG_ACPI_SLEEP -+#ifndef CONFIG_ACPI_PV_SLEEP - /* do we have a wakeup address for S2 and S3? */ - if (acpi_state == ACPI_STATE_S3) { - if (!acpi_wakeup_address) { -@@ -69,6 +70,7 @@ static int acpi_sleep_prepare(u32 acpi_s - (acpi_physical_address)acpi_wakeup_address); - - } -+#endif - ACPI_FLUSH_CPU_CACHE(); - #endif - printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n", -@@ -264,7 +266,14 @@ static int acpi_suspend_enter(suspend_st - break; - - case ACPI_STATE_S3: -+#ifdef CONFIG_ACPI_PV_SLEEP -+ /* Hyperviosr will save and restore CPU context -+ * and then we can skip low level housekeeping here. 
-+ */ -+ acpi_enter_sleep_state(acpi_state); -+#else - do_suspend_lowlevel(); -+#endif - break; - } - ---- head-2011-03-11.orig/drivers/char/agp/intel-gtt.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/char/agp/intel-gtt.c 2011-03-11 10:51:50.000000000 +0100 -@@ -147,6 +147,13 @@ static struct page *i8xx_alloc_pages(voi - if (page == NULL) - return NULL; - -+#ifdef CONFIG_XEN -+ if (xen_create_contiguous_region((unsigned long)page_address(page), 2, 32)) { -+ __free_pages(page, 2); -+ return NULL; -+ } -+#endif -+ - if (set_pages_uc(page, 4) < 0) { - set_pages_wb(page, 4); - __free_pages(page, 2); -@@ -163,6 +170,9 @@ static void i8xx_destroy_pages(struct pa - return; - - set_pages_wb(page, 4); -+#ifdef CONFIG_XEN -+ xen_destroy_contiguous_region((unsigned long)page_address(page), 2); -+#endif - put_page(page); - __free_pages(page, 2); - atomic_dec(&agp_bridge->current_memory_agp); ---- head-2011-03-11.orig/drivers/char/mem.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/char/mem.c 2011-01-31 14:53:38.000000000 +0100 -@@ -89,6 +89,7 @@ void __weak unxlate_dev_mem_ptr(unsigned - { - } - -+#ifndef ARCH_HAS_DEV_MEM - /* - * This funcion reads the *physical* memory. The f_pos points directly to the - * memory location. 
-@@ -211,6 +212,7 @@ static ssize_t write_mem(struct file *fi - *ppos += written; - return written; - } -+#endif - - int __weak phys_mem_access_prot_allowed(struct file *file, - unsigned long pfn, unsigned long size, pgprot_t *vma_prot) -@@ -337,6 +339,9 @@ static int mmap_mem(struct file *file, s - static int mmap_kmem(struct file *file, struct vm_area_struct *vma) - { - unsigned long pfn; -+#ifdef CONFIG_XEN -+ unsigned long i, count; -+#endif - - /* Turn a kernel-virtual address into a physical page frame */ - pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT; -@@ -351,6 +356,13 @@ static int mmap_kmem(struct file *file, - if (!pfn_valid(pfn)) - return -EIO; - -+#ifdef CONFIG_XEN -+ count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; -+ for (i = 0; i < count; i++) -+ if ((pfn + i) != mfn_to_local_pfn(pfn_to_mfn(pfn + i))) -+ return -EIO; -+#endif -+ - vma->vm_pgoff = pfn; - return mmap_mem(file, vma); - } -@@ -845,6 +857,7 @@ static int open_port(struct inode * inod - #define open_kmem open_mem - #define open_oldmem open_mem - -+#ifndef ARCH_HAS_DEV_MEM - static const struct file_operations mem_fops = { - .llseek = memory_lseek, - .read = read_mem, -@@ -853,6 +866,9 @@ static const struct file_operations mem_ - .open = open_mem, - .get_unmapped_area = get_unmapped_area_mem, - }; -+#else -+extern const struct file_operations mem_fops; -+#endif - - #ifdef CONFIG_DEVKMEM - static const struct file_operations kmem_fops = { ---- head-2011-03-11.orig/drivers/char/tpm/Makefile 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/char/tpm/Makefile 2011-01-31 14:53:38.000000000 +0100 -@@ -9,3 +9,5 @@ obj-$(CONFIG_TCG_TIS) += tpm_tis.o - obj-$(CONFIG_TCG_NSC) += tpm_nsc.o - obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o - obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o -+obj-$(CONFIG_TCG_XEN) += tpm_xenu.o -+tpm_xenu-y = tpm_xen.o tpm_vtpm.o ---- head-2011-03-11.orig/drivers/char/tpm/tpm.h 2011-03-11 10:41:54.000000000 +0100 -+++ 
head-2011-03-11/drivers/char/tpm/tpm.h 2011-03-11 10:51:58.000000000 +0100 -@@ -108,6 +108,9 @@ struct tpm_chip { - struct dentry **bios_dir; - - struct list_head list; -+#ifdef CONFIG_XEN -+ void *priv; -+#endif - void (*release) (struct device *); - }; - -@@ -272,6 +275,18 @@ struct tpm_cmd_t { - - ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *); - -+#ifdef CONFIG_XEN -+static inline void *chip_get_private(const struct tpm_chip *chip) -+{ -+ return chip->priv; -+} -+ -+static inline void chip_set_private(struct tpm_chip *chip, void *priv) -+{ -+ chip->priv = priv; -+} -+#endif -+ - extern void tpm_get_timeouts(struct tpm_chip *); - extern void tpm_gen_interrupt(struct tpm_chip *); - extern void tpm_continue_selftest(struct tpm_chip *); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/drivers/char/tpm/tpm_vtpm.c 2011-01-31 14:53:38.000000000 +0100 -@@ -0,0 +1,542 @@ -+/* -+ * Copyright (C) 2006 IBM Corporation -+ * -+ * Authors: -+ * Stefan Berger -+ * -+ * Generic device driver part for device drivers in a virtualized -+ * environment. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation, version 2 of the -+ * License. 
-+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include "tpm.h" -+#include "tpm_vtpm.h" -+ -+/* read status bits */ -+enum { -+ STATUS_BUSY = 0x01, -+ STATUS_DATA_AVAIL = 0x02, -+ STATUS_READY = 0x04 -+}; -+ -+struct transmission { -+ struct list_head next; -+ -+ unsigned char *request; -+ size_t request_len; -+ size_t request_buflen; -+ -+ unsigned char *response; -+ size_t response_len; -+ size_t response_buflen; -+ -+ unsigned int flags; -+}; -+ -+enum { -+ TRANSMISSION_FLAG_WAS_QUEUED = 0x1 -+}; -+ -+ -+enum { -+ DATAEX_FLAG_QUEUED_ONLY = 0x1 -+}; -+ -+ -+/* local variables */ -+ -+/* local function prototypes */ -+static int _vtpm_send_queued(struct tpm_chip *chip); -+ -+ -+/* ============================================================= -+ * Some utility functions -+ * ============================================================= -+ */ -+static void vtpm_state_init(struct vtpm_state *vtpms) -+{ -+ vtpms->current_request = NULL; -+ spin_lock_init(&vtpms->req_list_lock); -+ init_waitqueue_head(&vtpms->req_wait_queue); -+ INIT_LIST_HEAD(&vtpms->queued_requests); -+ -+ vtpms->current_response = NULL; -+ spin_lock_init(&vtpms->resp_list_lock); -+ init_waitqueue_head(&vtpms->resp_wait_queue); -+ -+ vtpms->disconnect_time = jiffies; -+} -+ -+ -+static inline struct transmission *transmission_alloc(void) -+{ -+ return kzalloc(sizeof(struct transmission), GFP_ATOMIC); -+} -+ -+static unsigned char * -+transmission_set_req_buffer(struct transmission *t, -+ unsigned char *buffer, size_t len) -+{ -+ if (t->request_buflen < len) { -+ kfree(t->request); -+ t->request = kmalloc(len, GFP_KERNEL); -+ if (!t->request) { -+ t->request_buflen = 0; -+ return NULL; -+ } -+ t->request_buflen = len; -+ } -+ -+ memcpy(t->request, buffer, len); -+ t->request_len = len; -+ -+ return t->request; -+} -+ -+static unsigned char * -+transmission_set_res_buffer(struct transmission *t, -+ const unsigned char *buffer, size_t len) -+{ -+ if (t->response_buflen < len) { 
-+ kfree(t->response); -+ t->response = kmalloc(len, GFP_ATOMIC); -+ if (!t->response) { -+ t->response_buflen = 0; -+ return NULL; -+ } -+ t->response_buflen = len; -+ } -+ -+ memcpy(t->response, buffer, len); -+ t->response_len = len; -+ -+ return t->response; -+} -+ -+static inline void transmission_free(struct transmission *t) -+{ -+ kfree(t->request); -+ kfree(t->response); -+ kfree(t); -+} -+ -+/* ============================================================= -+ * Interface with the lower layer driver -+ * ============================================================= -+ */ -+/* -+ * Lower layer uses this function to make a response available. -+ */ -+int vtpm_vd_recv(const struct tpm_chip *chip, -+ const unsigned char *buffer, size_t count, -+ void *ptr) -+{ -+ unsigned long flags; -+ int ret_size = 0; -+ struct transmission *t; -+ struct vtpm_state *vtpms; -+ -+ vtpms = (struct vtpm_state *)chip_get_private(chip); -+ -+ /* -+ * The list with requests must contain one request -+ * only and the element there must be the one that -+ * was passed to me from the front-end. 
-+ */ -+ spin_lock_irqsave(&vtpms->resp_list_lock, flags); -+ if (vtpms->current_request != ptr) { -+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags); -+ return 0; -+ } -+ -+ if ((t = vtpms->current_request)) { -+ transmission_free(t); -+ vtpms->current_request = NULL; -+ } -+ -+ t = transmission_alloc(); -+ if (t) { -+ if (!transmission_set_res_buffer(t, buffer, count)) { -+ transmission_free(t); -+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags); -+ return -ENOMEM; -+ } -+ ret_size = count; -+ vtpms->current_response = t; -+ wake_up_interruptible(&vtpms->resp_wait_queue); -+ } -+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags); -+ -+ return ret_size; -+} -+ -+ -+/* -+ * Lower layer indicates its status (connected/disconnected) -+ */ -+void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status) -+{ -+ struct vtpm_state *vtpms; -+ -+ vtpms = (struct vtpm_state *)chip_get_private(chip); -+ -+ vtpms->vd_status = vd_status; -+ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) { -+ vtpms->disconnect_time = jiffies; -+ } -+} -+ -+/* ============================================================= -+ * Interface with the generic TPM driver -+ * ============================================================= -+ */ -+static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) -+{ -+ int rc = 0; -+ unsigned long flags; -+ struct vtpm_state *vtpms; -+ -+ vtpms = (struct vtpm_state *)chip_get_private(chip); -+ -+ /* -+ * Check if the previous operation only queued the command -+ * In this case there won't be a response, so I just -+ * return from here and reset that flag. In any other -+ * case I should receive a response from the back-end. 
-+ */ -+ spin_lock_irqsave(&vtpms->resp_list_lock, flags); -+ if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) { -+ vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY; -+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags); -+ /* -+ * The first few commands (measurements) must be -+ * queued since it might not be possible to talk to the -+ * TPM, yet. -+ * Return a response of up to 30 '0's. -+ */ -+ -+ count = min_t(size_t, count, 30); -+ memset(buf, 0x0, count); -+ return count; -+ } -+ /* -+ * Check whether something is in the responselist and if -+ * there's nothing in the list wait for something to appear. -+ */ -+ -+ if (!vtpms->current_response) { -+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags); -+ interruptible_sleep_on_timeout(&vtpms->resp_wait_queue, -+ 1000); -+ spin_lock_irqsave(&vtpms->resp_list_lock ,flags); -+ } -+ -+ if (vtpms->current_response) { -+ struct transmission *t = vtpms->current_response; -+ vtpms->current_response = NULL; -+ rc = min(count, t->response_len); -+ memcpy(buf, t->response, rc); -+ transmission_free(t); -+ } -+ -+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags); -+ return rc; -+} -+ -+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) -+{ -+ int rc = 0; -+ unsigned long flags; -+ struct transmission *t = transmission_alloc(); -+ struct vtpm_state *vtpms; -+ -+ vtpms = (struct vtpm_state *)chip_get_private(chip); -+ -+ if (!t) -+ return -ENOMEM; -+ /* -+ * If there's a current request, it must be the -+ * previous request that has timed out. 
-+ */ -+ spin_lock_irqsave(&vtpms->req_list_lock, flags); -+ if (vtpms->current_request != NULL) { -+ printk("WARNING: Sending although there is a request outstanding.\n" -+ " Previous request must have timed out.\n"); -+ transmission_free(vtpms->current_request); -+ vtpms->current_request = NULL; -+ } -+ spin_unlock_irqrestore(&vtpms->req_list_lock, flags); -+ -+ /* -+ * Queue the packet if the driver below is not -+ * ready, yet, or there is any packet already -+ * in the queue. -+ * If the driver below is ready, unqueue all -+ * packets first before sending our current -+ * packet. -+ * For each unqueued packet, except for the -+ * last (=current) packet, call the function -+ * tpm_xen_recv to wait for the response to come -+ * back. -+ */ -+ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) { -+ if (time_after(jiffies, -+ vtpms->disconnect_time + HZ * 10)) { -+ rc = -ENOENT; -+ } else { -+ goto queue_it; -+ } -+ } else { -+ /* -+ * Send all queued packets. -+ */ -+ if (_vtpm_send_queued(chip) == 0) { -+ -+ vtpms->current_request = t; -+ -+ rc = vtpm_vd_send(vtpms->tpm_private, -+ buf, -+ count, -+ t); -+ /* -+ * The generic TPM driver will call -+ * the function to receive the response. -+ */ -+ if (rc < 0) { -+ vtpms->current_request = NULL; -+ goto queue_it; -+ } -+ } else { -+queue_it: -+ if (!transmission_set_req_buffer(t, buf, count)) { -+ transmission_free(t); -+ rc = -ENOMEM; -+ goto exit; -+ } -+ /* -+ * An error occurred. Don't event try -+ * to send the current request. Just -+ * queue it. -+ */ -+ spin_lock_irqsave(&vtpms->req_list_lock, flags); -+ vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY; -+ list_add_tail(&t->next, &vtpms->queued_requests); -+ spin_unlock_irqrestore(&vtpms->req_list_lock, flags); -+ } -+ } -+ -+exit: -+ return rc; -+} -+ -+ -+/* -+ * Send all queued requests. 
-+ */ -+static int _vtpm_send_queued(struct tpm_chip *chip) -+{ -+ int rc; -+ int error = 0; -+ long flags; -+ unsigned char buffer[1]; -+ struct vtpm_state *vtpms; -+ vtpms = (struct vtpm_state *)chip_get_private(chip); -+ -+ spin_lock_irqsave(&vtpms->req_list_lock, flags); -+ -+ while (!list_empty(&vtpms->queued_requests)) { -+ /* -+ * Need to dequeue them. -+ * Read the result into a dummy buffer. -+ */ -+ struct transmission *qt = (struct transmission *) -+ vtpms->queued_requests.next; -+ list_del(&qt->next); -+ vtpms->current_request = qt; -+ spin_unlock_irqrestore(&vtpms->req_list_lock, flags); -+ -+ rc = vtpm_vd_send(vtpms->tpm_private, -+ qt->request, -+ qt->request_len, -+ qt); -+ -+ if (rc < 0) { -+ spin_lock_irqsave(&vtpms->req_list_lock, flags); -+ if ((qt = vtpms->current_request) != NULL) { -+ /* -+ * requeue it at the beginning -+ * of the list -+ */ -+ list_add(&qt->next, -+ &vtpms->queued_requests); -+ } -+ vtpms->current_request = NULL; -+ error = 1; -+ break; -+ } -+ /* -+ * After this point qt is not valid anymore! 
-+ * It is freed when the front-end is delivering -+ * the data by calling tpm_recv -+ */ -+ /* -+ * Receive response into provided dummy buffer -+ */ -+ rc = vtpm_recv(chip, buffer, sizeof(buffer)); -+ spin_lock_irqsave(&vtpms->req_list_lock, flags); -+ } -+ -+ spin_unlock_irqrestore(&vtpms->req_list_lock, flags); -+ -+ return error; -+} -+ -+static void vtpm_cancel(struct tpm_chip *chip) -+{ -+ unsigned long flags; -+ struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip); -+ -+ spin_lock_irqsave(&vtpms->resp_list_lock,flags); -+ -+ if (!vtpms->current_response && vtpms->current_request) { -+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags); -+ interruptible_sleep_on(&vtpms->resp_wait_queue); -+ spin_lock_irqsave(&vtpms->resp_list_lock,flags); -+ } -+ -+ if (vtpms->current_response) { -+ struct transmission *t = vtpms->current_response; -+ vtpms->current_response = NULL; -+ transmission_free(t); -+ } -+ -+ spin_unlock_irqrestore(&vtpms->resp_list_lock,flags); -+} -+ -+static u8 vtpm_status(struct tpm_chip *chip) -+{ -+ u8 rc = 0; -+ unsigned long flags; -+ struct vtpm_state *vtpms; -+ -+ vtpms = (struct vtpm_state *)chip_get_private(chip); -+ -+ spin_lock_irqsave(&vtpms->resp_list_lock, flags); -+ /* -+ * Data are available if: -+ * - there's a current response -+ * - the last packet was queued only (this is fake, but necessary to -+ * get the generic TPM layer to call the receive function.) 
-+ */ -+ if (vtpms->current_response || -+ 0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) { -+ rc = STATUS_DATA_AVAIL; -+ } else if (!vtpms->current_response && !vtpms->current_request) { -+ rc = STATUS_READY; -+ } -+ -+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags); -+ return rc; -+} -+ -+static struct file_operations vtpm_ops = { -+ .owner = THIS_MODULE, -+ .llseek = no_llseek, -+ .open = tpm_open, -+ .read = tpm_read, -+ .write = tpm_write, -+ .release = tpm_release, -+}; -+ -+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); -+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); -+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); -+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); -+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); -+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, -+ NULL); -+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); -+static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel); -+ -+static struct attribute *vtpm_attrs[] = { -+ &dev_attr_pubek.attr, -+ &dev_attr_pcrs.attr, -+ &dev_attr_enabled.attr, -+ &dev_attr_active.attr, -+ &dev_attr_owned.attr, -+ &dev_attr_temp_deactivated.attr, -+ &dev_attr_caps.attr, -+ &dev_attr_cancel.attr, -+ NULL, -+}; -+ -+static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs }; -+ -+#define TPM_LONG_TIMEOUT (10 * 60 * HZ) -+ -+static struct tpm_vendor_specific tpm_vtpm = { -+ .recv = vtpm_recv, -+ .send = vtpm_send, -+ .cancel = vtpm_cancel, -+ .status = vtpm_status, -+ .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL, -+ .req_complete_val = STATUS_DATA_AVAIL, -+ .req_canceled = STATUS_READY, -+ .attr_group = &vtpm_attr_grp, -+ .miscdev = { -+ .fops = &vtpm_ops, -+ }, -+ .duration = { -+ TPM_LONG_TIMEOUT, -+ TPM_LONG_TIMEOUT, -+ TPM_LONG_TIMEOUT, -+ }, -+}; -+ -+struct tpm_chip *init_vtpm(struct device *dev, -+ struct tpm_private *tp) -+{ -+ long rc; -+ struct tpm_chip *chip; -+ struct vtpm_state 
*vtpms; -+ -+ vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL); -+ if (!vtpms) -+ return ERR_PTR(-ENOMEM); -+ -+ vtpm_state_init(vtpms); -+ vtpms->tpm_private = tp; -+ -+ chip = tpm_register_hardware(dev, &tpm_vtpm); -+ if (!chip) { -+ rc = -ENODEV; -+ goto err_free_mem; -+ } -+ -+ chip_set_private(chip, vtpms); -+ -+ return chip; -+ -+err_free_mem: -+ kfree(vtpms); -+ -+ return ERR_PTR(rc); -+} -+ -+void cleanup_vtpm(struct device *dev) -+{ -+ struct tpm_chip *chip = dev_get_drvdata(dev); -+ struct vtpm_state *vtpms = (struct vtpm_state*)chip_get_private(chip); -+ tpm_remove_hardware(dev); -+ kfree(vtpms); -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/drivers/char/tpm/tpm_vtpm.h 2011-01-31 14:53:38.000000000 +0100 -@@ -0,0 +1,55 @@ -+#ifndef TPM_VTPM_H -+#define TPM_VTPM_H -+ -+struct tpm_chip; -+struct tpm_private; -+ -+struct vtpm_state { -+ struct transmission *current_request; -+ spinlock_t req_list_lock; -+ wait_queue_head_t req_wait_queue; -+ -+ struct list_head queued_requests; -+ -+ struct transmission *current_response; -+ spinlock_t resp_list_lock; -+ wait_queue_head_t resp_wait_queue; // processes waiting for responses -+ -+ u8 vd_status; -+ u8 flags; -+ -+ unsigned long disconnect_time; -+ -+ /* -+ * The following is a private structure of the underlying -+ * driver. It is passed as parameter in the send function. 
-+ */ -+ struct tpm_private *tpm_private; -+}; -+ -+ -+enum vdev_status { -+ TPM_VD_STATUS_DISCONNECTED = 0x0, -+ TPM_VD_STATUS_CONNECTED = 0x1 -+}; -+ -+/* this function is called from tpm_vtpm.c */ -+int vtpm_vd_send(struct tpm_private * tp, -+ const u8 * buf, size_t count, void *ptr); -+ -+/* these functions are offered by tpm_vtpm.c */ -+struct tpm_chip *init_vtpm(struct device *, -+ struct tpm_private *); -+void cleanup_vtpm(struct device *); -+int vtpm_vd_recv(const struct tpm_chip* chip, -+ const unsigned char *buffer, size_t count, void *ptr); -+void vtpm_vd_status(const struct tpm_chip *, u8 status); -+ -+static inline struct tpm_private *tpm_private_from_dev(struct device *dev) -+{ -+ struct tpm_chip *chip = dev_get_drvdata(dev); -+ struct vtpm_state *vtpms = chip_get_private(chip); -+ return vtpms->tpm_private; -+} -+ -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ head-2011-03-11/drivers/char/tpm/tpm_xen.c 2011-01-31 14:53:38.000000000 +0100 -@@ -0,0 +1,722 @@ -+/* -+ * Copyright (c) 2005, IBM Corporation -+ * -+ * Author: Stefan Berger, stefanb@us.ibm.com -+ * Grant table support: Mahadevan Gomathisankaran -+ * -+ * This code has been derived from drivers/xen/netfront/netfront.c -+ * -+ * Copyright (c) 2002-2004, K A Fraser -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License version 2 -+ * as published by the Free Software Foundation; or, when distributed -+ * separately from the Linux kernel or incorporated into other -+ * software packages, subject to the following license: -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this source file (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, copy, modify, -+ * merge, publish, distribute, sublicense, and/or sell copies of the Software, -+ * and to permit persons to whom the Software is furnished 
to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "tpm.h" -+#include "tpm_vtpm.h" -+ -+#undef DEBUG -+ -+/* local structures */ -+struct tpm_private { -+ struct tpm_chip *chip; -+ -+ tpmif_tx_interface_t *tx; -+ atomic_t refcnt; -+ unsigned int irq; -+ u8 is_connected; -+ u8 is_suspended; -+ -+ spinlock_t tx_lock; -+ -+ struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE]; -+ -+ atomic_t tx_busy; -+ void *tx_remember; -+ -+ domid_t backend_id; -+ wait_queue_head_t wait_q; -+ -+ struct xenbus_device *dev; -+ int ring_ref; -+}; -+ -+struct tx_buffer { -+ unsigned int size; // available space in data -+ unsigned int len; // used space in data -+ unsigned char *data; // pointer to a page -+}; -+ -+ -+/* locally visible variables */ -+static grant_ref_t gref_head; -+static struct tpm_private *my_priv; -+ -+/* local function prototypes */ -+static irqreturn_t tpmif_int(int irq, -+ void *tpm_priv, -+ struct pt_regs *ptregs); -+static void tpmif_rx_action(unsigned long unused); -+static int tpmif_connect(struct xenbus_device *dev, -+ struct tpm_private *tp, -+ domid_t domid); -+static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0); -+static int tpmif_allocate_tx_buffers(struct tpm_private *tp); -+static void 
tpmif_free_tx_buffers(struct tpm_private *tp); -+static void tpmif_set_connected_state(struct tpm_private *tp, -+ u8 newstate); -+static int tpm_xmit(struct tpm_private *tp, -+ const u8 * buf, size_t count, int userbuffer, -+ void *remember); -+static void destroy_tpmring(struct tpm_private *tp); -+void __exit tpmif_exit(void); -+ -+#define DPRINTK(fmt, args...) \ -+ pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args) -+#define IPRINTK(fmt, args...) \ -+ printk(KERN_INFO "xen_tpm_fr: " fmt, ##args) -+#define WPRINTK(fmt, args...) \ -+ printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args) -+ -+#define GRANT_INVALID_REF 0 -+ -+ -+static inline int -+tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len, -+ int isuserbuffer) -+{ -+ int copied = len; -+ -+ if (len > txb->size) -+ copied = txb->size; -+ if (isuserbuffer) { -+ if (copy_from_user(txb->data, src, copied)) -+ return -EFAULT; -+ } else { -+ memcpy(txb->data, src, copied); -+ } -+ txb->len = len; -+ return copied; -+} -+ -+static inline struct tx_buffer *tx_buffer_alloc(void) -+{ -+ struct tx_buffer *txb; -+ -+ txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL); -+ if (!txb) -+ return NULL; -+ -+ txb->len = 0; -+ txb->size = PAGE_SIZE; -+ txb->data = (unsigned char *)__get_free_page(GFP_KERNEL); -+ if (txb->data == NULL) { -+ kfree(txb); -+ txb = NULL; -+ } -+ -+ return txb; -+} -+ -+ -+static inline void tx_buffer_free(struct tx_buffer *txb) -+{ -+ if (txb) { -+ free_page((long)txb->data); -+ kfree(txb); -+ } -+} -+ -+/************************************************************** -+ Utility function for the tpm_private structure -+**************************************************************/ -+static void tpm_private_init(struct tpm_private *tp) -+{ -+ spin_lock_init(&tp->tx_lock); -+ init_waitqueue_head(&tp->wait_q); -+ atomic_set(&tp->refcnt, 1); -+} -+ -+static void tpm_private_put(void) -+{ -+ if (!atomic_dec_and_test(&my_priv->refcnt)) -+ return; -+ -+ 
tpmif_free_tx_buffers(my_priv); -+ kfree(my_priv); -+ my_priv = NULL; -+} -+ -+static struct tpm_private *tpm_private_get(void) -+{ -+ int err; -+ -+ if (my_priv) { -+ atomic_inc(&my_priv->refcnt); -+ return my_priv; -+ } -+ -+ my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL); -+ if (!my_priv) -+ return NULL; -+ -+ tpm_private_init(my_priv); -+ err = tpmif_allocate_tx_buffers(my_priv); -+ if (err < 0) -+ tpm_private_put(); -+ -+ return my_priv; -+} -+ -+/************************************************************** -+ -+ The interface to let the tpm plugin register its callback -+ function and send data to another partition using this module -+ -+**************************************************************/ -+ -+static DEFINE_MUTEX(suspend_lock); -+/* -+ * Send data via this module by calling this function -+ */ -+int vtpm_vd_send(struct tpm_private *tp, -+ const u8 * buf, size_t count, void *ptr) -+{ -+ int sent; -+ -+ mutex_lock(&suspend_lock); -+ sent = tpm_xmit(tp, buf, count, 0, ptr); -+ mutex_unlock(&suspend_lock); -+ -+ return sent; -+} -+ -+/************************************************************** -+ XENBUS support code -+**************************************************************/ -+ -+static int setup_tpmring(struct xenbus_device *dev, -+ struct tpm_private *tp) -+{ -+ tpmif_tx_interface_t *sring; -+ int err; -+ -+ tp->ring_ref = GRANT_INVALID_REF; -+ -+ sring = (void *)__get_free_page(GFP_KERNEL); -+ if (!sring) { -+ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); -+ return -ENOMEM; -+ } -+ tp->tx = sring; -+ -+ err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx)); -+ if (err < 0) { -+ free_page((unsigned long)sring); -+ tp->tx = NULL; -+ xenbus_dev_fatal(dev, err, "allocating grant reference"); -+ goto fail; -+ } -+ tp->ring_ref = err; -+ -+ err = tpmif_connect(dev, tp, dev->otherend_id); -+ if (err) -+ goto fail; -+ -+ return 0; -+fail: -+ destroy_tpmring(tp); -+ return err; -+} -+ -+ -+static void destroy_tpmring(struct 
tpm_private *tp) -+{ -+ tpmif_set_connected_state(tp, 0); -+ -+ if (tp->ring_ref != GRANT_INVALID_REF) { -+ gnttab_end_foreign_access(tp->ring_ref, (unsigned long)tp->tx); -+ tp->ring_ref = GRANT_INVALID_REF; -+ tp->tx = NULL; -+ } -+ -+ if (tp->irq) -+ unbind_from_irqhandler(tp->irq, tp); -+ -+ tp->irq = 0; -+} -+ -+ -+static int talk_to_backend(struct xenbus_device *dev, -+ struct tpm_private *tp) -+{ -+ const char *message = NULL; -+ int err; -+ struct xenbus_transaction xbt; -+ -+ err = setup_tpmring(dev, tp); -+ if (err) { -+ xenbus_dev_fatal(dev, err, "setting up ring"); -+ goto out; -+ } -+ -+again: -+ err = xenbus_transaction_start(&xbt); -+ if (err) { -+ xenbus_dev_fatal(dev, err, "starting transaction"); -+ goto destroy_tpmring; -+ } -+ -+ err = xenbus_printf(xbt, dev->nodename, -+ "ring-ref","%u", tp->ring_ref); -+ if (err) { -+ message = "writing ring-ref"; -+ goto abort_transaction; -+ } -+ -+ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", -+ irq_to_evtchn_port(tp->irq)); -+ if (err) { -+ message = "writing event-channel"; -+ goto abort_transaction; -+ } -+ -+ err = xenbus_transaction_end(xbt, 0); -+ if (err == -EAGAIN) -+ goto again; -+ if (err) { -+ xenbus_dev_fatal(dev, err, "completing transaction"); -+ goto destroy_tpmring; -+ } -+ -+ xenbus_switch_state(dev, XenbusStateConnected); -+ -+ return 0; -+ -+abort_transaction: -+ xenbus_transaction_end(xbt, 1); -+ if (message) -+ xenbus_dev_error(dev, err, "%s", message); -+destroy_tpmring: -+ destroy_tpmring(tp); -+out: -+ return err; -+} -+ -+/** -+ * Callback received when the backend's state changes. 
-+ */ -+static void backend_changed(struct xenbus_device *dev, -+ enum xenbus_state backend_state) -+{ -+ struct tpm_private *tp = tpm_private_from_dev(&dev->dev); -+ DPRINTK("\n"); -+ -+ switch (backend_state) { -+ case XenbusStateInitialising: -+ case XenbusStateInitWait: -+ case XenbusStateInitialised: -+ case XenbusStateReconfiguring: -+ case XenbusStateReconfigured: -+ case XenbusStateUnknown: -+ break; -+ -+ case XenbusStateConnected: -+ tpmif_set_connected_state(tp, 1); -+ break; -+ -+ case XenbusStateClosing: -+ tpmif_set_connected_state(tp, 0); -+ xenbus_frontend_closed(dev); -+ break; -+ -+ case XenbusStateClosed: -+ tpmif_set_connected_state(tp, 0); -+ if (tp->is_suspended == 0) -+ device_unregister(&dev->dev); -+ xenbus_frontend_closed(dev); -+ break; -+ } -+} -+ -+static int tpmfront_probe(struct xenbus_device *dev, -+ const struct xenbus_device_id *id) -+{ -+ int err; -+ int handle; -+ struct tpm_private *tp = tpm_private_get(); -+ -+ if (!tp) -+ return -ENOMEM; -+ -+ tp->chip = init_vtpm(&dev->dev, tp); -+ if (IS_ERR(tp->chip)) -+ return PTR_ERR(tp->chip); -+ -+ err = xenbus_scanf(XBT_NIL, dev->nodename, -+ "handle", "%i", &handle); -+ if (XENBUS_EXIST_ERR(err)) -+ return err; -+ -+ if (err < 0) { -+ xenbus_dev_fatal(dev,err,"reading virtual-device"); -+ return err; -+ } -+ -+ tp->dev = dev; -+ -+ err = talk_to_backend(dev, tp); -+ if (err) { -+ tpm_private_put(); -+ return err; -+ } -+ -+ return 0; -+} -+ -+ -+static int tpmfront_remove(struct xenbus_device *dev) -+{ -+ struct tpm_private *tp = tpm_private_from_dev(&dev->dev); -+ destroy_tpmring(tp); -+ cleanup_vtpm(&dev->dev); -+ return 0; -+} -+ -+static int tpmfront_suspend(struct xenbus_device *dev) -+{ -+ struct tpm_private *tp = tpm_private_from_dev(&dev->dev); -+ u32 ctr; -+ -+ /* Take the lock, preventing any application from sending. 
*/ -+ mutex_lock(&suspend_lock); -+ tp->is_suspended = 1; -+ -+ for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) { -+ if ((ctr % 10) == 0) -+ printk("TPM-FE [INFO]: Waiting for outstanding " -+ "request.\n"); -+ /* Wait for a request to be responded to. */ -+ interruptible_sleep_on_timeout(&tp->wait_q, 100); -+ } -+ -+ return 0; -+} -+ -+static int tpmfront_suspend_finish(struct tpm_private *tp) -+{ -+ tp->is_suspended = 0; -+ /* Allow applications to send again. */ -+ mutex_unlock(&suspend_lock); -+ return 0; -+} -+ -+static int tpmfront_suspend_cancel(struct xenbus_device *dev) -+{ -+ struct tpm_private *tp = tpm_private_from_dev(&dev->dev); -+ return tpmfront_suspend_finish(tp); -+} -+ -+static int tpmfront_resume(struct xenbus_device *dev) -+{ -+ struct tpm_private *tp = tpm_private_from_dev(&dev->dev); -+ destroy_tpmring(tp); -+ return talk_to_backend(dev, tp); -+} -+ -+static int tpmif_connect(struct xenbus_device *dev, -+ struct tpm_private *tp, -+ domid_t domid) -+{ -+ int err; -+ -+ tp->backend_id = domid; -+ -+ err = bind_listening_port_to_irqhandler( -+ domid, tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp); -+ if (err <= 0) { -+ WPRINTK("bind_listening_port_to_irqhandler failed " -+ "(err=%d)\n", err); -+ return err; -+ } -+ tp->irq = err; -+ -+ return 0; -+} -+ -+static struct xenbus_device_id tpmfront_ids[] = { -+ { "vtpm" }, -+ { "" } -+}; -+ -+static struct xenbus_driver tpmfront = { -+ .name = "vtpm", -+ .owner = THIS_MODULE, -+ .ids = tpmfront_ids, -+ .probe = tpmfront_probe, -+ .remove = tpmfront_remove, -+ .resume = tpmfront_resume, -+ .otherend_changed = backend_changed, -+ .suspend = tpmfront_suspend, -+ .suspend_cancel = tpmfront_suspend_cancel, -+}; -+ -+static void __init init_tpm_xenbus(void) -+{ -+ xenbus_register_frontend(&tpmfront); -+} -+ -+static int tpmif_allocate_tx_buffers(struct tpm_private *tp) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < TPMIF_TX_RING_SIZE; i++) { -+ tp->tx_buffers[i] = tx_buffer_alloc(); -+ if (!tp->tx_buffers[i]) { 
-+ tpmif_free_tx_buffers(tp); -+ return -ENOMEM; -+ } -+ } -+ return 0; -+} -+ -+static void tpmif_free_tx_buffers(struct tpm_private *tp) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < TPMIF_TX_RING_SIZE; i++) -+ tx_buffer_free(tp->tx_buffers[i]); -+} -+ -+static void tpmif_rx_action(unsigned long priv) -+{ -+ struct tpm_private *tp = (struct tpm_private *)priv; -+ int i = 0; -+ unsigned int received; -+ unsigned int offset = 0; -+ u8 *buffer; -+ tpmif_tx_request_t *tx = &tp->tx->ring[i].req; -+ -+ atomic_set(&tp->tx_busy, 0); -+ wake_up_interruptible(&tp->wait_q); -+ -+ received = tx->size; -+ -+ buffer = kmalloc(received, GFP_ATOMIC); -+ if (!buffer) -+ return; -+ -+ for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) { -+ struct tx_buffer *txb = tp->tx_buffers[i]; -+ tpmif_tx_request_t *tx; -+ unsigned int tocopy; -+ -+ tx = &tp->tx->ring[i].req; -+ tocopy = tx->size; -+ if (tocopy > PAGE_SIZE) -+ tocopy = PAGE_SIZE; -+ -+ memcpy(&buffer[offset], txb->data, tocopy); -+ -+ gnttab_release_grant_reference(&gref_head, tx->ref); -+ -+ offset += tocopy; -+ } -+ -+ vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember); -+ kfree(buffer); -+} -+ -+ -+static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs) -+{ -+ struct tpm_private *tp = tpm_priv; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&tp->tx_lock, flags); -+ tpmif_rx_tasklet.data = (unsigned long)tp; -+ tasklet_schedule(&tpmif_rx_tasklet); -+ spin_unlock_irqrestore(&tp->tx_lock, flags); -+ -+ return IRQ_HANDLED; -+} -+ -+ -+static int tpm_xmit(struct tpm_private *tp, -+ const u8 * buf, size_t count, int isuserbuffer, -+ void *remember) -+{ -+ tpmif_tx_request_t *tx; -+ TPMIF_RING_IDX i; -+ unsigned int offset = 0; -+ -+ spin_lock_irq(&tp->tx_lock); -+ -+ if (unlikely(atomic_read(&tp->tx_busy))) { -+ printk("tpm_xmit: There's an outstanding request/response " -+ "on the way!\n"); -+ spin_unlock_irq(&tp->tx_lock); -+ return -EBUSY; -+ } -+ -+ if (tp->is_connected != 1) { -+ 
spin_unlock_irq(&tp->tx_lock); -+ return -EIO; -+ } -+ -+ for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) { -+ struct tx_buffer *txb = tp->tx_buffers[i]; -+ int copied; -+ -+ if (!txb) { -+ DPRINTK("txb (i=%d) is NULL. buffers initilized?\n" -+ "Not transmitting anything!\n", i); -+ spin_unlock_irq(&tp->tx_lock); -+ return -EFAULT; -+ } -+ -+ copied = tx_buffer_copy(txb, &buf[offset], count, -+ isuserbuffer); -+ if (copied < 0) { -+ /* An error occurred */ -+ spin_unlock_irq(&tp->tx_lock); -+ return copied; -+ } -+ count -= copied; -+ offset += copied; -+ -+ tx = &tp->tx->ring[i].req; -+ tx->addr = virt_to_machine(txb->data); -+ tx->size = txb->len; -+ tx->unused = 0; -+ -+ DPRINTK("First 4 characters sent by TPM-FE are " -+ "0x%02x 0x%02x 0x%02x 0x%02x\n", -+ txb->data[0],txb->data[1],txb->data[2],txb->data[3]); -+ -+ /* Get the granttable reference for this page. */ -+ tx->ref = gnttab_claim_grant_reference(&gref_head); -+ if (tx->ref == -ENOSPC) { -+ spin_unlock_irq(&tp->tx_lock); -+ DPRINTK("Grant table claim reference failed in " -+ "func:%s line:%d file:%s\n", -+ __FUNCTION__, __LINE__, __FILE__); -+ return -ENOSPC; -+ } -+ gnttab_grant_foreign_access_ref(tx->ref, -+ tp->backend_id, -+ virt_to_mfn(txb->data), -+ 0 /*RW*/); -+ wmb(); -+ } -+ -+ atomic_set(&tp->tx_busy, 1); -+ tp->tx_remember = remember; -+ -+ mb(); -+ -+ notify_remote_via_irq(tp->irq); -+ -+ spin_unlock_irq(&tp->tx_lock); -+ return offset; -+} -+ -+ -+static void tpmif_notify_upperlayer(struct tpm_private *tp) -+{ -+ /* Notify upper layer about the state of the connection to the BE. */ -+ vtpm_vd_status(tp->chip, (tp->is_connected -+ ? TPM_VD_STATUS_CONNECTED -+ : TPM_VD_STATUS_DISCONNECTED)); -+} -+ -+ -+static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected) -+{ -+ /* -+ * Don't notify upper layer if we are in suspend mode and -+ * should disconnect - assumption is that we will resume -+ * The mutex keeps apps from sending. 
-+ */ -+ if (is_connected == 0 && tp->is_suspended == 1) -+ return; -+ -+ /* -+ * Unlock the mutex if we are connected again -+ * after being suspended - now resuming. -+ * This also removes the suspend state. -+ */ -+ if (is_connected == 1 && tp->is_suspended == 1) -+ tpmfront_suspend_finish(tp); -+ -+ if (is_connected != tp->is_connected) { -+ tp->is_connected = is_connected; -+ tpmif_notify_upperlayer(tp); -+ } -+} -+ -+ -+ -+/* ================================================================= -+ * Initialization function. -+ * ================================================================= -+ */ -+ -+ -+static int __init tpmif_init(void) -+{ -+ struct tpm_private *tp; -+ -+ if (is_initial_xendomain()) -+ return -EPERM; -+ -+ tp = tpm_private_get(); -+ if (!tp) -+ return -ENOMEM; -+ -+ IPRINTK("Initialising the vTPM driver.\n"); -+ if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE, -+ &gref_head) < 0) { -+ tpm_private_put(); -+ return -EFAULT; -+ } -+ -+ init_tpm_xenbus(); -+ return 0; -+} -+ -+ -+module_init(tpmif_init); -+ -+MODULE_LICENSE("Dual BSD/GPL"); ---- head-2011-03-11.orig/drivers/edac/edac_mc.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/edac/edac_mc.c 2011-01-31 14:53:38.000000000 +0100 -@@ -615,6 +615,10 @@ static void edac_mc_scrub_block(unsigned - - debugf3("%s()\n", __func__); - -+#ifdef CONFIG_XEN -+ page = mfn_to_local_pfn(page); -+#endif -+ - /* ECC error page was not in our memory. Ignore it. 
*/ - if (!pfn_valid(page)) - return; ---- head-2011-03-11.orig/drivers/firmware/dell_rbu.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/firmware/dell_rbu.c 2011-01-31 14:53:38.000000000 +0100 -@@ -170,9 +170,28 @@ static int create_packet(void *data, siz - spin_lock(&rbu_data.lock); - goto out_alloc_packet_array; - } -+#ifdef CONFIG_XEN -+ if (ordernum && xen_create_contiguous_region( -+ (unsigned long)packet_data_temp_buf, ordernum, 0)) { -+ free_pages((unsigned long)packet_data_temp_buf, -+ ordernum); -+ printk(KERN_WARNING -+ "dell_rbu:%s: failed to adjust new " -+ "packet\n", __func__); -+ retval = -ENOMEM; -+ spin_lock(&rbu_data.lock); -+ goto out_alloc_packet_array; -+ } -+#endif - -- if ((unsigned long)virt_to_phys(packet_data_temp_buf) -+ if ((unsigned long)virt_to_bus(packet_data_temp_buf) - < allocation_floor) { -+#ifdef CONFIG_XEN -+ if (ordernum) -+ xen_destroy_contiguous_region( -+ (unsigned long)packet_data_temp_buf, -+ ordernum); -+#endif - pr_debug("packet 0x%lx below floor at 0x%lx.\n", - (unsigned long)virt_to_phys( - packet_data_temp_buf), -@@ -186,7 +205,7 @@ static int create_packet(void *data, siz - newpacket->data = packet_data_temp_buf; - - pr_debug("create_packet: newpacket at physical addr %lx\n", -- (unsigned long)virt_to_phys(newpacket->data)); -+ (unsigned long)virt_to_bus(newpacket->data)); - - /* packets may not have fixed size */ - newpacket->length = length; -@@ -205,7 +224,7 @@ out_alloc_packet_array: - /* always free packet array */ - for (;idx>0;idx--) { - pr_debug("freeing unused packet below floor 0x%lx.\n", -- (unsigned long)virt_to_phys( -+ (unsigned long)virt_to_bus( - invalid_addr_packet_array[idx-1])); - free_pages((unsigned long)invalid_addr_packet_array[idx-1], - ordernum); -@@ -349,6 +368,13 @@ static void packet_empty_list(void) - * to make sure there are no stale RBU packets left in memory - */ - memset(newpacket->data, 0, rbu_data.packetsize); -+#ifdef CONFIG_XEN -+ if (newpacket->ordernum) -+ 
xen_destroy_contiguous_region( -+ (unsigned long)newpacket->data, -+ newpacket->ordernum); -+#endif -+ - free_pages((unsigned long) newpacket->data, - newpacket->ordernum); - kfree(newpacket); -@@ -403,7 +429,9 @@ static int img_update_realloc(unsigned l - { - unsigned char *image_update_buffer = NULL; - unsigned long rc; -+#ifndef CONFIG_XEN - unsigned long img_buf_phys_addr; -+#endif - int ordernum; - int dma_alloc = 0; - -@@ -434,15 +462,19 @@ static int img_update_realloc(unsigned l - - spin_unlock(&rbu_data.lock); - -+#ifndef CONFIG_XEN - ordernum = get_order(size); - image_update_buffer = - (unsigned char *) __get_free_pages(GFP_KERNEL, ordernum); - - img_buf_phys_addr = -- (unsigned long) virt_to_phys(image_update_buffer); -+ (unsigned long) virt_to_bus(image_update_buffer); - - if (img_buf_phys_addr > BIOS_SCAN_LIMIT) { - free_pages((unsigned long) image_update_buffer, ordernum); -+#else -+ { -+#endif - ordernum = -1; - image_update_buffer = dma_alloc_coherent(NULL, size, - &dell_rbu_dmaaddr, GFP_KERNEL); -@@ -695,6 +727,12 @@ static struct bin_attribute rbu_packet_s - static int __init dcdrbu_init(void) - { - int rc; -+ -+#ifdef CONFIG_XEN -+ if (!is_initial_xendomain()) -+ return -ENODEV; -+#endif -+ - spin_lock_init(&rbu_data.lock); - - init_packet_head(); ---- head-2011-03-11.orig/drivers/ide/ide-lib.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/ide/ide-lib.c 2011-01-31 14:53:38.000000000 +0100 -@@ -18,12 +18,12 @@ void ide_toggle_bounce(ide_drive_t *driv - { - u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */ - -- if (!PCI_DMA_BUS_IS_PHYS) { -- addr = BLK_BOUNCE_ANY; -- } else if (on && drive->media == ide_disk) { -+ if (on && drive->media == ide_disk) { - struct device *dev = drive->hwif->dev; - -- if (dev && dev->dma_mask) -+ if (!PCI_DMA_BUS_IS_PHYS) -+ addr = BLK_BOUNCE_ANY; -+ else if (dev && dev->dma_mask) - addr = *dev->dma_mask; - } - ---- head-2011-03-11.orig/drivers/oprofile/buffer_sync.c 2011-03-11 10:41:54.000000000 +0100 
-+++ head-2011-03-11/drivers/oprofile/buffer_sync.c 2011-01-31 14:53:38.000000000 +0100 -@@ -8,6 +8,10 @@ - * @author Barry Kasindorf - * @author Robert Richter - * -+ * Modified by Aravind Menon for Xen -+ * These modifications are: -+ * Copyright (C) 2005 Hewlett-Packard Co. -+ * - * This is the core of the buffer management. Each - * CPU buffer is processed and entered into the - * global event buffer. Such processing is necessary -@@ -43,6 +47,8 @@ static cpumask_var_t marked_cpus; - static DEFINE_SPINLOCK(task_mortuary); - static void process_task_mortuary(void); - -+static int cpu_current_domain[NR_CPUS]; -+ - /* Take ownership of the task struct and place it on the - * list for processing. Only after two full buffer syncs - * does the task eventually get freed, because by then -@@ -61,7 +67,6 @@ task_free_notify(struct notifier_block * - return NOTIFY_OK; - } - -- - /* The task is on its way out. A sync of the buffer means we can catch - * any remaining samples for this task. - */ -@@ -144,6 +149,11 @@ static struct notifier_block module_load - int sync_start(void) - { - int err; -+ int i; -+ -+ for (i = 0; i < NR_CPUS; i++) { -+ cpu_current_domain[i] = COORDINATOR_DOMAIN; -+ } - - if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) - return -ENOMEM; -@@ -286,13 +296,29 @@ static void add_cpu_switch(int i) - last_cookie = INVALID_COOKIE; - } - --static void add_kernel_ctx_switch(unsigned int in_kernel) -+static void add_cpu_mode_switch(unsigned int cpu_mode) - { - add_event_entry(ESCAPE_CODE); -- if (in_kernel) -+ switch (cpu_mode) { -+ case CPU_MODE_USER: -+ add_event_entry(USER_ENTER_SWITCH_CODE); -+ break; -+ case CPU_MODE_KERNEL: - add_event_entry(KERNEL_ENTER_SWITCH_CODE); -- else -- add_event_entry(KERNEL_EXIT_SWITCH_CODE); -+ break; -+ case CPU_MODE_XEN: -+ add_event_entry(XEN_ENTER_SWITCH_CODE); -+ break; -+ default: -+ break; -+ } -+} -+ -+static void add_domain_switch(unsigned long domain_id) -+{ -+ add_event_entry(ESCAPE_CODE); -+ 
add_event_entry(DOMAIN_SWITCH_CODE); -+ add_event_entry(domain_id); - } - - static void -@@ -373,12 +399,12 @@ static inline void add_sample_entry(unsi - * for later lookup from userspace. Return 0 on failure. - */ - static int --add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) -+add_sample(struct mm_struct *mm, struct op_sample *s, int cpu_mode) - { - unsigned long cookie; - off_t offset; - -- if (in_kernel) { -+ if (cpu_mode >= CPU_MODE_KERNEL) { - add_sample_entry(s->eip, s->event); - return 1; - } -@@ -503,9 +529,10 @@ void sync_buffer(int cpu) - unsigned long val; - struct task_struct *new; - unsigned long cookie = 0; -- int in_kernel = 1; -+ int cpu_mode = CPU_MODE_KERNEL; - sync_buffer_state state = sb_buffer_start; - unsigned int i; -+ int domain_switch = 0; - unsigned long available; - unsigned long flags; - struct op_entry entry; -@@ -515,6 +542,11 @@ void sync_buffer(int cpu) - - add_cpu_switch(cpu); - -+ /* We need to assign the first samples in this CPU buffer to the -+ same domain that we were processing at the last sync_buffer */ -+ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) -+ add_domain_switch(cpu_current_domain[cpu]); -+ - op_cpu_buffer_reset(cpu); - available = op_cpu_buffer_entries(cpu); - -@@ -523,6 +555,13 @@ void sync_buffer(int cpu) - if (!sample) - break; - -+ if (domain_switch) { -+ cpu_current_domain[cpu] = sample->eip; -+ add_domain_switch(sample->eip); -+ domain_switch = 0; -+ continue; -+ } -+ - if (is_code(sample->eip)) { - flags = sample->event; - if (flags & TRACE_BEGIN) { -@@ -531,10 +570,10 @@ void sync_buffer(int cpu) - } - if (flags & KERNEL_CTX_SWITCH) { - /* kernel/userspace switch */ -- in_kernel = flags & IS_KERNEL; -+ cpu_mode = flags & CPU_MODE_MASK; - if (state == sb_buffer_start) - state = sb_sample_start; -- add_kernel_ctx_switch(flags & IS_KERNEL); -+ add_cpu_mode_switch(cpu_mode); - } - if (flags & USER_CTX_SWITCH - && op_cpu_buffer_get_data(&entry, &val)) { -@@ -547,16 +586,23 @@ void 
sync_buffer(int cpu) - cookie = get_exec_dcookie(mm); - add_user_ctx_switch(new, cookie); - } -+ if (flags & DOMAIN_SWITCH) -+ domain_switch = 1; - if (op_cpu_buffer_get_size(&entry)) - add_data(&entry, mm); - continue; - } - -+ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) { -+ add_sample_entry(sample->eip, sample->event); -+ continue; -+ } -+ - if (state < sb_bt_start) - /* ignore sample */ - continue; - -- if (add_sample(mm, sample, in_kernel)) -+ if (add_sample(mm, sample, cpu_mode)) - continue; - - /* ignore backtraces if failed to add a sample */ -@@ -567,6 +613,10 @@ void sync_buffer(int cpu) - } - release_mm(mm); - -+ /* We reset domain to COORDINATOR at each CPU switch */ -+ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) -+ add_domain_switch(COORDINATOR_DOMAIN); -+ - mark_done(cpu); - - mutex_unlock(&buffer_mutex); ---- head-2011-03-11.orig/drivers/oprofile/cpu_buffer.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/oprofile/cpu_buffer.c 2011-01-31 14:53:38.000000000 +0100 -@@ -8,6 +8,10 @@ - * @author Barry Kasindorf - * @author Robert Richter - * -+ * Modified by Aravind Menon for Xen -+ * These modifications are: -+ * Copyright (C) 2005 Hewlett-Packard Co. -+ * - * Each CPU has a local buffer that stores PC value/event - * pairs. We also log context switches when we notice them. 
- * Eventually each CPU's buffer is processed into the global -@@ -38,6 +42,8 @@ static void wq_sync_buffer(struct work_s - #define DEFAULT_TIMER_EXPIRE (HZ / 10) - static int work_enabled; - -+static int32_t current_domain = COORDINATOR_DOMAIN; -+ - unsigned long oprofile_get_cpu_buffer_size(void) - { - return oprofile_cpu_buffer_size; -@@ -75,7 +81,7 @@ int alloc_cpu_buffers(void) - struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); - - b->last_task = NULL; -- b->last_is_kernel = -1; -+ b->last_cpu_mode = -1; - b->tracing = 0; - b->buffer_size = buffer_size; - b->sample_received = 0; -@@ -180,7 +186,7 @@ unsigned long op_cpu_buffer_entries(int - - static int - op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace, -- int is_kernel, struct task_struct *task) -+ int cpu_mode, struct task_struct *task) - { - struct op_entry entry; - struct op_sample *sample; -@@ -193,16 +199,15 @@ op_add_code(struct oprofile_cpu_buffer * - flags |= TRACE_BEGIN; - - /* notice a switch from user->kernel or vice versa */ -- is_kernel = !!is_kernel; -- if (cpu_buf->last_is_kernel != is_kernel) { -- cpu_buf->last_is_kernel = is_kernel; -- flags |= KERNEL_CTX_SWITCH; -- if (is_kernel) -- flags |= IS_KERNEL; -+ if (cpu_buf->last_cpu_mode != cpu_mode) { -+ cpu_buf->last_cpu_mode = cpu_mode; -+ flags |= KERNEL_CTX_SWITCH | cpu_mode; - } - - /* notice a task switch */ -- if (cpu_buf->last_task != task) { -+ /* if not processing other domain samples */ -+ if (cpu_buf->last_task != task && -+ current_domain == COORDINATOR_DOMAIN) { - cpu_buf->last_task = task; - flags |= USER_CTX_SWITCH; - } -@@ -251,14 +256,14 @@ op_add_sample(struct oprofile_cpu_buffer - /* - * This must be safe from any context. - * -- * is_kernel is needed because on some architectures you cannot -+ * cpu_mode is needed because on some architectures you cannot - * tell if you are in kernel or user space simply by looking at -- * pc. 
We tag this in the buffer by generating kernel enter/exit -- * events whenever is_kernel changes -+ * pc. We tag this in the buffer by generating kernel/user (and -+ * xen) enter events whenever cpu_mode changes - */ - static int - log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, -- unsigned long backtrace, int is_kernel, unsigned long event) -+ unsigned long backtrace, int cpu_mode, unsigned long event) - { - cpu_buf->sample_received++; - -@@ -267,7 +272,7 @@ log_sample(struct oprofile_cpu_buffer *c - return 0; - } - -- if (op_add_code(cpu_buf, backtrace, is_kernel, current)) -+ if (op_add_code(cpu_buf, backtrace, cpu_mode, current)) - goto fail; - - if (op_add_sample(cpu_buf, pc, event)) -@@ -430,6 +435,25 @@ fail: - return; - } - -+int oprofile_add_domain_switch(int32_t domain_id) -+{ -+ struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()]; -+ -+ /* should have space for switching into and out of domain -+ (2 slots each) plus one sample and one cpu mode switch */ -+ if (((nr_available_slots(cpu_buf) < 6) && -+ (domain_id != COORDINATOR_DOMAIN)) || -+ (nr_available_slots(cpu_buf) < 2)) -+ return 0; -+ -+ add_code(cpu_buf, DOMAIN_SWITCH); -+ add_sample(cpu_buf, domain_id, 0); -+ -+ current_domain = domain_id; -+ -+ return 1; -+} -+ - /* - * This serves to avoid cpu buffer overflow, and makes sure - * the task mortuary progresses ---- head-2011-03-11.orig/drivers/oprofile/cpu_buffer.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/oprofile/cpu_buffer.h 2011-01-31 14:53:38.000000000 +0100 -@@ -41,7 +41,7 @@ struct op_entry; - struct oprofile_cpu_buffer { - unsigned long buffer_size; - struct task_struct *last_task; -- int last_is_kernel; -+ int last_cpu_mode; - int tracing; - unsigned long sample_received; - unsigned long sample_lost_overflow; -@@ -63,7 +63,7 @@ static inline void op_cpu_buffer_reset(i - { - struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu); - -- cpu_buf->last_is_kernel = -1; -+ 
cpu_buf->last_cpu_mode = -1; - cpu_buf->last_task = NULL; - } - -@@ -113,9 +113,13 @@ int op_cpu_buffer_get_data(struct op_ent - } - - /* extra data flags */ --#define KERNEL_CTX_SWITCH (1UL << 0) --#define IS_KERNEL (1UL << 1) -+#define CPU_MODE_USER 0 -+#define CPU_MODE_KERNEL 1 -+#define CPU_MODE_XEN 2 -+#define CPU_MODE_MASK 3 - #define TRACE_BEGIN (1UL << 2) - #define USER_CTX_SWITCH (1UL << 3) -+#define KERNEL_CTX_SWITCH (1UL << 4) -+#define DOMAIN_SWITCH (1UL << 5) - - #endif /* OPROFILE_CPU_BUFFER_H */ ---- head-2011-03-11.orig/drivers/oprofile/event_buffer.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/oprofile/event_buffer.h 2011-01-31 14:53:38.000000000 +0100 -@@ -30,6 +30,9 @@ void wake_up_buffer_waiter(void); - #define INVALID_COOKIE ~0UL - #define NO_COOKIE 0UL - -+/* Constant used to refer to coordinator domain (Xen) */ -+#define COORDINATOR_DOMAIN -1 -+ - extern const struct file_operations event_buffer_fops; - - /* mutex between sync_cpu_buffers() and the ---- head-2011-03-11.orig/drivers/oprofile/oprof.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/oprofile/oprof.c 2011-01-31 14:53:38.000000000 +0100 -@@ -5,6 +5,10 @@ - * @remark Read the file COPYING - * - * @author John Levon -+ * -+ * Modified by Aravind Menon for Xen -+ * These modifications are: -+ * Copyright (C) 2005 Hewlett-Packard Co. 
- */ - - #include -@@ -35,6 +39,32 @@ static DEFINE_MUTEX(start_mutex); - */ - static int timer = 0; - -+int oprofile_set_active(int active_domains[], unsigned int adomains) -+{ -+ int err; -+ -+ if (!oprofile_ops.set_active) -+ return -EINVAL; -+ -+ mutex_lock(&start_mutex); -+ err = oprofile_ops.set_active(active_domains, adomains); -+ mutex_unlock(&start_mutex); -+ return err; -+} -+ -+int oprofile_set_passive(int passive_domains[], unsigned int pdomains) -+{ -+ int err; -+ -+ if (!oprofile_ops.set_passive) -+ return -EINVAL; -+ -+ mutex_lock(&start_mutex); -+ err = oprofile_ops.set_passive(passive_domains, pdomains); -+ mutex_unlock(&start_mutex); -+ return err; -+} -+ - int oprofile_setup(void) - { - int err; ---- head-2011-03-11.orig/drivers/oprofile/oprof.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/oprofile/oprof.h 2011-01-31 14:53:38.000000000 +0100 -@@ -40,4 +40,7 @@ void oprofile_timer_exit(void); - int oprofile_set_ulong(unsigned long *addr, unsigned long val); - int oprofile_set_timeout(unsigned long time); - -+int oprofile_set_active(int active_domains[], unsigned int adomains); -+int oprofile_set_passive(int passive_domains[], unsigned int pdomains); -+ - #endif /* OPROF_H */ ---- head-2011-03-11.orig/drivers/oprofile/oprofile_files.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/drivers/oprofile/oprofile_files.c 2011-01-31 14:53:38.000000000 +0100 -@@ -5,11 +5,17 @@ - * @remark Read the file COPYING - * - * @author John Levon -+ * -+ * Modified by Aravind Menon for Xen -+ * These modifications are: -+ * Copyright (C) 2005 Hewlett-Packard Co. 
- */ - - #include - #include - #include -+#include -+#include - - #include "event_buffer.h" - #include "oprofile_stats.h" -@@ -174,6 +180,195 @@ static const struct file_operations dump - .llseek = noop_llseek, - }; - -+#define TMPBUFSIZE 512 -+ -+static unsigned int adomains = 0; -+static int active_domains[MAX_OPROF_DOMAINS + 1]; -+static DEFINE_MUTEX(adom_mutex); -+ -+static ssize_t adomain_write(struct file * file, char const __user * buf, -+ size_t count, loff_t * offset) -+{ -+ char *tmpbuf; -+ char *startp, *endp; -+ int i; -+ unsigned long val; -+ ssize_t retval = count; -+ -+ if (*offset) -+ return -EINVAL; -+ if (count > TMPBUFSIZE - 1) -+ return -EINVAL; -+ -+ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL))) -+ return -ENOMEM; -+ -+ if (copy_from_user(tmpbuf, buf, count)) { -+ kfree(tmpbuf); -+ return -EFAULT; -+ } -+ tmpbuf[count] = 0; -+ -+ mutex_lock(&adom_mutex); -+ -+ startp = tmpbuf; -+ /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */ -+ for (i = 0; i <= MAX_OPROF_DOMAINS; i++) { -+ val = simple_strtoul(startp, &endp, 0); -+ if (endp == startp) -+ break; -+ while (ispunct(*endp) || isspace(*endp)) -+ endp++; -+ active_domains[i] = val; -+ if (active_domains[i] != val) -+ /* Overflow, force error below */ -+ i = MAX_OPROF_DOMAINS + 1; -+ startp = endp; -+ } -+ /* Force error on trailing junk */ -+ adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i; -+ -+ kfree(tmpbuf); -+ -+ if (adomains > MAX_OPROF_DOMAINS -+ || oprofile_set_active(active_domains, adomains)) { -+ adomains = 0; -+ retval = -EINVAL; -+ } -+ -+ mutex_unlock(&adom_mutex); -+ return retval; -+} -+ -+static ssize_t adomain_read(struct file * file, char __user * buf, -+ size_t count, loff_t * offset) -+{ -+ char * tmpbuf; -+ size_t len; -+ int i; -+ ssize_t retval; -+ -+ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL))) -+ return -ENOMEM; -+ -+ mutex_lock(&adom_mutex); -+ -+ len = 0; -+ for (i = 0; i < adomains; i++) -+ len += snprintf(tmpbuf + len, -+ len < TMPBUFSIZE ? 
TMPBUFSIZE - len : 0, -+ "%u ", active_domains[i]); -+ WARN_ON(len > TMPBUFSIZE); -+ if (len != 0 && len <= TMPBUFSIZE) -+ tmpbuf[len-1] = '\n'; -+ -+ mutex_unlock(&adom_mutex); -+ -+ retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len); -+ -+ kfree(tmpbuf); -+ return retval; -+} -+ -+ -+static const struct file_operations active_domain_ops = { -+ .read = adomain_read, -+ .write = adomain_write, -+}; -+ -+static unsigned int pdomains = 0; -+static int passive_domains[MAX_OPROF_DOMAINS]; -+static DEFINE_MUTEX(pdom_mutex); -+ -+static ssize_t pdomain_write(struct file * file, char const __user * buf, -+ size_t count, loff_t * offset) -+{ -+ char *tmpbuf; -+ char *startp, *endp; -+ int i; -+ unsigned long val; -+ ssize_t retval = count; -+ -+ if (*offset) -+ return -EINVAL; -+ if (count > TMPBUFSIZE - 1) -+ return -EINVAL; -+ -+ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL))) -+ return -ENOMEM; -+ -+ if (copy_from_user(tmpbuf, buf, count)) { -+ kfree(tmpbuf); -+ return -EFAULT; -+ } -+ tmpbuf[count] = 0; -+ -+ mutex_lock(&pdom_mutex); -+ -+ startp = tmpbuf; -+ /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */ -+ for (i = 0; i <= MAX_OPROF_DOMAINS; i++) { -+ val = simple_strtoul(startp, &endp, 0); -+ if (endp == startp) -+ break; -+ while (ispunct(*endp) || isspace(*endp)) -+ endp++; -+ passive_domains[i] = val; -+ if (passive_domains[i] != val) -+ /* Overflow, force error below */ -+ i = MAX_OPROF_DOMAINS + 1; -+ startp = endp; -+ } -+ /* Force error on trailing junk */ -+ pdomains = *startp ? 
MAX_OPROF_DOMAINS + 1 : i; -+ -+ kfree(tmpbuf); -+ -+ if (pdomains > MAX_OPROF_DOMAINS -+ || oprofile_set_passive(passive_domains, pdomains)) { -+ pdomains = 0; -+ retval = -EINVAL; -+ } -+ -+ mutex_unlock(&pdom_mutex); -+ return retval; -+} -+ -+static ssize_t pdomain_read(struct file * file, char __user * buf, -+ size_t count, loff_t * offset) -+{ -+ char * tmpbuf; -+ size_t len; -+ int i; -+ ssize_t retval; -+ -+ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL))) -+ return -ENOMEM; -+ -+ mutex_lock(&pdom_mutex); -+ -+ len = 0; -+ for (i = 0; i < pdomains; i++) -+ len += snprintf(tmpbuf + len, -+ len < TMPBUFSIZE ? TMPBUFSIZE - len : 0, -+ "%u ", passive_domains[i]); -+ WARN_ON(len > TMPBUFSIZE); -+ if (len != 0 && len <= TMPBUFSIZE) -+ tmpbuf[len-1] = '\n'; -+ -+ mutex_unlock(&pdom_mutex); -+ -+ retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len); -+ -+ kfree(tmpbuf); -+ return retval; -+} -+ -+static const struct file_operations passive_domain_ops = { -+ .read = pdomain_read, -+ .write = pdomain_write, -+}; -+ - void oprofile_create_files(struct super_block *sb, struct dentry *root) - { - /* reinitialize default values */ -@@ -184,6 +379,8 @@ void oprofile_create_files(struct super_ - - oprofilefs_create_file(sb, root, "enable", &enable_fops); - oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); -+ oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops); -+ oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops); - oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops); - oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size); - oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed); ---- head-2011-03-11.orig/fs/aio.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/fs/aio.c 2011-03-11 10:52:11.000000000 +0100 -@@ -41,6 +41,11 @@ - #include - #include - -+#ifdef CONFIG_EPOLL -+#include -+#include -+#endif -+ - #if DEBUG > 1 - #define dprintk 
printk - #else -@@ -1002,6 +1007,11 @@ put_rq: - if (waitqueue_active(&ctx->wait)) - wake_up(&ctx->wait); - -+#ifdef CONFIG_EPOLL -+ if (ctx->file && waitqueue_active(&ctx->poll_wait)) -+ wake_up(&ctx->poll_wait); -+#endif -+ - spin_unlock_irqrestore(&ctx->ctx_lock, flags); - return ret; - } -@@ -1010,6 +1020,8 @@ EXPORT_SYMBOL(aio_complete); - /* aio_read_evt - * Pull an event off of the ioctx's event ring. Returns the number of - * events fetched (0 or 1 ;-) -+ * If ent parameter is 0, just returns the number of events that would -+ * be fetched. - * FIXME: make this use cmpxchg. - * TODO: make the ringbuffer user mmap()able (requires FIXME). - */ -@@ -1032,13 +1044,18 @@ static int aio_read_evt(struct kioctx *i - - head = ring->head % info->nr; - if (head != ring->tail) { -- struct io_event *evp = aio_ring_event(info, head, KM_USER1); -- *ent = *evp; -- head = (head + 1) % info->nr; -- smp_mb(); /* finish reading the event before updatng the head */ -- ring->head = head; -- ret = 1; -- put_aio_ring_event(evp, KM_USER1); -+ if (ent) { /* event requested */ -+ struct io_event *evp = -+ aio_ring_event(info, head, KM_USER1); -+ *ent = *evp; -+ head = (head + 1) % info->nr; -+ /* finish reading the event before updatng the head */ -+ smp_mb(); -+ ring->head = head; -+ ret = 1; -+ put_aio_ring_event(evp, KM_USER1); -+ } else /* only need to know availability */ -+ ret = 1; - } - spin_unlock(&info->ring_lock); - -@@ -1223,6 +1240,13 @@ static void io_destroy(struct kioctx *io - - aio_cancel_all(ioctx); - wait_for_all_aios(ioctx); -+#ifdef CONFIG_EPOLL -+ /* forget the poll file, but it's up to the user to close it */ -+ if (ioctx->file) { -+ ioctx->file->private_data = 0; -+ ioctx->file = 0; -+ } -+#endif - - /* - * Wake up any waiters. 
The setting of ctx->dead must be seen -@@ -1233,6 +1257,67 @@ static void io_destroy(struct kioctx *io - put_ioctx(ioctx); /* once for the lookup */ - } - -+#ifdef CONFIG_EPOLL -+ -+static int aio_queue_fd_close(struct inode *inode, struct file *file) -+{ -+ struct kioctx *ioctx = file->private_data; -+ if (ioctx) { -+ file->private_data = 0; -+ spin_lock_irq(&ioctx->ctx_lock); -+ ioctx->file = 0; -+ spin_unlock_irq(&ioctx->ctx_lock); -+ } -+ return 0; -+} -+ -+static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait) -+{ unsigned int pollflags = 0; -+ struct kioctx *ioctx = file->private_data; -+ -+ if (ioctx) { -+ -+ spin_lock_irq(&ioctx->ctx_lock); -+ /* Insert inside our poll wait queue */ -+ poll_wait(file, &ioctx->poll_wait, wait); -+ -+ /* Check our condition */ -+ if (aio_read_evt(ioctx, 0)) -+ pollflags = POLLIN | POLLRDNORM; -+ spin_unlock_irq(&ioctx->ctx_lock); -+ } -+ -+ return pollflags; -+} -+ -+static const struct file_operations aioq_fops = { -+ .release = aio_queue_fd_close, -+ .poll = aio_queue_fd_poll -+}; -+ -+/* make_aio_fd: -+ * Create a file descriptor that can be used to poll the event queue. -+ * Based and piggybacked on the excellent epoll code. -+ */ -+ -+static int make_aio_fd(struct kioctx *ioctx) -+{ -+ int error, fd; -+ struct inode *inode; -+ struct file *file; -+ -+ error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops); -+ if (error) -+ return error; -+ -+ /* associate the file with the IO context */ -+ file->private_data = ioctx; -+ ioctx->file = file; -+ init_waitqueue_head(&ioctx->poll_wait); -+ return fd; -+} -+#endif -+ - /* sys_io_setup: - * Create an aio_context capable of receiving at least nr_events. - * ctxp must not point to an aio_context that already exists, and -@@ -1245,18 +1330,30 @@ static void io_destroy(struct kioctx *io - * resources are available. May fail with -EFAULT if an invalid - * pointer is passed for ctxp. Will fail with -ENOSYS if not - * implemented. 
-+ * -+ * To request a selectable fd, the user context has to be initialized -+ * to 1, instead of 0, and the return value is the fd. -+ * This keeps the system call compatible, since a non-zero value -+ * was not allowed so far. - */ - SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) - { - struct kioctx *ioctx = NULL; - unsigned long ctx; - long ret; -+ int make_fd = 0; - - ret = get_user(ctx, ctxp); - if (unlikely(ret)) - goto out; - - ret = -EINVAL; -+#ifdef CONFIG_EPOLL -+ if (ctx == 1) { -+ make_fd = 1; -+ ctx = 0; -+ } -+#endif - if (unlikely(ctx || nr_events == 0)) { - pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", - ctx, nr_events); -@@ -1267,8 +1364,12 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_e - ret = PTR_ERR(ioctx); - if (!IS_ERR(ioctx)) { - ret = put_user(ioctx->user_id, ctxp); -- if (!ret) -- return 0; -+#ifdef CONFIG_EPOLL -+ if (make_fd && ret >= 0) -+ ret = make_aio_fd(ioctx); -+#endif -+ if (ret >= 0) -+ return ret; - - get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */ - io_destroy(ioctx); ---- head-2011-03-11.orig/fs/compat_ioctl.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/fs/compat_ioctl.c 2011-01-31 14:53:38.000000000 +0100 -@@ -114,6 +114,13 @@ - #include - #endif - -+#ifdef CONFIG_XEN -+#include -+#include -+#include -+#include -+#endif -+ - static int w_long(unsigned int fd, unsigned int cmd, - compat_ulong_t __user *argp) - { -@@ -1408,6 +1415,19 @@ IGNORE_IOCTL(FBIOGETCMAP32) - IGNORE_IOCTL(FBIOSCURSOR32) - IGNORE_IOCTL(FBIOGCURSOR32) - #endif -+ -+#ifdef CONFIG_XEN -+HANDLE_IOCTL(IOCTL_PRIVCMD_MMAP_32, privcmd_ioctl_32) -+HANDLE_IOCTL(IOCTL_PRIVCMD_MMAPBATCH_32, privcmd_ioctl_32) -+HANDLE_IOCTL(IOCTL_PRIVCMD_MMAPBATCH_V2_32, privcmd_ioctl_32) -+COMPATIBLE_IOCTL(IOCTL_PRIVCMD_HYPERCALL) -+COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_VIRQ) -+COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_INTERDOMAIN) -+COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_UNBOUND_PORT) -+COMPATIBLE_IOCTL(IOCTL_EVTCHN_UNBIND) 
-+COMPATIBLE_IOCTL(IOCTL_EVTCHN_NOTIFY) -+COMPATIBLE_IOCTL(IOCTL_EVTCHN_RESET) -+#endif - }; - - /* ---- head-2011-03-11.orig/include/acpi/processor.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/acpi/processor.h 2011-01-31 14:53:38.000000000 +0100 -@@ -17,6 +17,12 @@ - #define ACPI_PROCESSOR_MAX_THROTTLE 250 /* 25% */ - #define ACPI_PROCESSOR_MAX_DUTY_WIDTH 4 - -+#ifdef CONFIG_XEN -+#define NR_ACPI_CPUS (NR_CPUS < 256 ? 256 : NR_CPUS) -+#else -+#define NR_ACPI_CPUS NR_CPUS -+#endif /* CONFIG_XEN */ -+ - #define ACPI_PDC_REVISION_ID 0x1 - - #define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */ -@@ -42,6 +48,17 @@ - - struct acpi_processor_cx; - -+#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL -+struct acpi_csd_package { -+ acpi_integer num_entries; -+ acpi_integer revision; -+ acpi_integer domain; -+ acpi_integer coord_type; -+ acpi_integer num_processors; -+ acpi_integer index; -+} __attribute__ ((packed)); -+#endif -+ - struct acpi_power_register { - u8 descriptor; - u16 length; -@@ -64,6 +81,12 @@ struct acpi_processor_cx { - u32 usage; - u64 time; - u8 bm_sts_skip; -+#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL -+ /* Require raw information for external control logic */ -+ struct acpi_power_register reg; -+ u32 csd_count; -+ struct acpi_csd_package *domain_info; -+#endif - char desc[ACPI_CX_DESC_LEN]; - }; - -@@ -288,6 +311,9 @@ static inline void acpi_processor_ppc_ex - { - return; - } -+#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL -+int acpi_processor_ppc_has_changed(struct acpi_processor *pr); -+#else - static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr, - int event_flag) - { -@@ -301,6 +327,7 @@ static inline int acpi_processor_ppc_has - } - return 0; - } -+#endif /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */ - static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) - { - return -ENODEV; -@@ -359,4 +386,120 @@ static inline void acpi_thermal_cpufreq_ - } - #endif - -+/* -+ * Following are 
interfaces geared to external processor PM control -+ * logic like a VMM -+ */ -+/* Events notified to external control logic */ -+#define PROCESSOR_PM_INIT 1 -+#define PROCESSOR_PM_CHANGE 2 -+#define PROCESSOR_HOTPLUG 3 -+ -+/* Objects for the PM events */ -+#define PM_TYPE_IDLE 0 -+#define PM_TYPE_PERF 1 -+#define PM_TYPE_THR 2 -+#define PM_TYPE_MAX 3 -+ -+/* Processor hotplug events */ -+#define HOTPLUG_TYPE_ADD 0 -+#define HOTPLUG_TYPE_REMOVE 1 -+ -+#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL -+struct processor_extcntl_ops { -+ /* Transfer processor PM events to external control logic */ -+ int (*pm_ops[PM_TYPE_MAX])(struct acpi_processor *pr, int event); -+ /* Notify physical processor status to external control logic */ -+ int (*hotplug)(struct acpi_processor *pr, int type); -+}; -+extern const struct processor_extcntl_ops *processor_extcntl_ops; -+ -+static inline int processor_cntl_external(void) -+{ -+ return (processor_extcntl_ops != NULL); -+} -+ -+static inline int processor_pm_external(void) -+{ -+ return processor_cntl_external() && -+ (processor_extcntl_ops->pm_ops[PM_TYPE_IDLE] != NULL); -+} -+ -+static inline int processor_pmperf_external(void) -+{ -+ return processor_cntl_external() && -+ (processor_extcntl_ops->pm_ops[PM_TYPE_PERF] != NULL); -+} -+ -+static inline int processor_pmthr_external(void) -+{ -+ return processor_cntl_external() && -+ (processor_extcntl_ops->pm_ops[PM_TYPE_THR] != NULL); -+} -+ -+extern int processor_notify_external(struct acpi_processor *pr, -+ int event, int type); -+extern void processor_extcntl_init(void); -+extern int processor_extcntl_prepare(struct acpi_processor *pr); -+extern int acpi_processor_get_performance_info(struct acpi_processor *pr); -+extern int acpi_processor_get_psd(struct acpi_processor *pr); -+void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **); -+#else -+static inline int processor_cntl_external(void) {return 0;} -+static inline int processor_pm_external(void) {return 0;} 
-+static inline int processor_pmperf_external(void) {return 0;} -+static inline int processor_pmthr_external(void) {return 0;} -+static inline int processor_notify_external(struct acpi_processor *pr, -+ int event, int type) -+{ -+ return 0; -+} -+static inline void processor_extcntl_init(void) {} -+static inline int processor_extcntl_prepare(struct acpi_processor *pr) -+{ -+ return 0; -+} -+#endif /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */ -+ -+#ifdef CONFIG_XEN -+static inline void xen_convert_pct_reg(struct xen_pct_register *xpct, -+ struct acpi_pct_register *apct) -+{ -+ xpct->descriptor = apct->descriptor; -+ xpct->length = apct->length; -+ xpct->space_id = apct->space_id; -+ xpct->bit_width = apct->bit_width; -+ xpct->bit_offset = apct->bit_offset; -+ xpct->reserved = apct->reserved; -+ xpct->address = apct->address; -+} -+ -+static inline void xen_convert_pss_states(struct xen_processor_px *xpss, -+ struct acpi_processor_px *apss, int state_count) -+{ -+ int i; -+ for(i=0; icore_frequency = apss->core_frequency; -+ xpss->power = apss->power; -+ xpss->transition_latency = apss->transition_latency; -+ xpss->bus_master_latency = apss->bus_master_latency; -+ xpss->control = apss->control; -+ xpss->status = apss->status; -+ xpss++; -+ apss++; -+ } -+} -+ -+static inline void xen_convert_psd_pack(struct xen_psd_package *xpsd, -+ struct acpi_psd_package *apsd) -+{ -+ xpsd->num_entries = apsd->num_entries; -+ xpsd->revision = apsd->revision; -+ xpsd->domain = apsd->domain; -+ xpsd->coord_type = apsd->coord_type; -+ xpsd->num_processors = apsd->num_processors; -+} -+ -+#endif /* CONFIG_XEN */ -+ - #endif ---- head-2011-03-11.orig/include/asm-generic/pgtable.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/asm-generic/pgtable.h 2011-03-11 10:52:21.000000000 +0100 -@@ -156,6 +156,10 @@ static inline void pmdp_set_wrprotect(st - #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - #endif - -+#ifndef arch_change_pte_range -+#define arch_change_pte_range(mm, pmd, 
addr, end, newprot) 0 -+#endif -+ - #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH - extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, - unsigned long address, ---- head-2011-03-11.orig/include/linux/aio.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/linux/aio.h 2011-01-31 14:53:38.000000000 +0100 -@@ -199,6 +199,12 @@ struct kioctx { - - struct delayed_work wq; - -+#ifdef CONFIG_EPOLL -+ /* poll integration */ -+ wait_queue_head_t poll_wait; -+ struct file *file; -+#endif -+ - struct rcu_head rcu_head; - }; - ---- head-2011-03-11.orig/include/linux/highmem.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/linux/highmem.h 2011-01-31 14:53:38.000000000 +0100 -@@ -178,12 +178,14 @@ alloc_zeroed_user_highpage_movable(struc - return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); - } - -+#ifndef __HAVE_ARCH_CLEAR_HIGHPAGE - static inline void clear_highpage(struct page *page) - { - void *kaddr = kmap_atomic(page, KM_USER0); - clear_page(kaddr); - kunmap_atomic(kaddr, KM_USER0); - } -+#endif - - static inline void zero_user_segments(struct page *page, - unsigned start1, unsigned end1, -@@ -237,6 +239,8 @@ static inline void copy_user_highpage(st - - #endif - -+#ifndef __HAVE_ARCH_COPY_HIGHPAGE -+ - static inline void copy_highpage(struct page *to, struct page *from) - { - char *vfrom, *vto; -@@ -248,4 +252,6 @@ static inline void copy_highpage(struct - kunmap_atomic(vfrom, KM_USER0); - } - -+#endif -+ - #endif /* _LINUX_HIGHMEM_H */ ---- head-2011-03-11.orig/include/linux/interrupt.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/linux/interrupt.h 2011-01-31 14:53:38.000000000 +0100 -@@ -353,6 +353,12 @@ static inline int disable_irq_wake(unsig - } - #endif /* CONFIG_GENERIC_HARDIRQS */ - -+#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED -+int irq_ignore_unhandled(unsigned int irq); -+#else -+#define irq_ignore_unhandled(irq) 0 -+#endif -+ - #ifndef __ARCH_SET_SOFTIRQ_PENDING - #define set_softirq_pending(x) 
(local_softirq_pending() = (x)) - #define or_softirq_pending(x) (local_softirq_pending() |= (x)) ---- head-2011-03-11.orig/include/linux/kexec.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/linux/kexec.h 2011-01-31 14:53:38.000000000 +0100 -@@ -46,6 +46,13 @@ - KEXEC_CORE_NOTE_NAME_BYTES + \ - KEXEC_CORE_NOTE_DESC_BYTES ) - -+#ifndef KEXEC_ARCH_HAS_PAGE_MACROS -+#define kexec_page_to_pfn(page) page_to_pfn(page) -+#define kexec_pfn_to_page(pfn) pfn_to_page(pfn) -+#define kexec_virt_to_phys(addr) virt_to_phys(addr) -+#define kexec_phys_to_virt(addr) phys_to_virt(addr) -+#endif -+ - /* - * This structure is used to hold the arguments that are used when loading - * kernel binaries. -@@ -112,6 +119,12 @@ struct kimage { - extern void machine_kexec(struct kimage *image); - extern int machine_kexec_prepare(struct kimage *image); - extern void machine_kexec_cleanup(struct kimage *image); -+#ifdef CONFIG_XEN -+extern int xen_machine_kexec_load(struct kimage *image); -+extern void xen_machine_kexec_unload(struct kimage *image); -+extern void xen_machine_kexec_setup_resources(void); -+extern void xen_machine_kexec_register_resources(struct resource *res); -+#endif - extern asmlinkage long sys_kexec_load(unsigned long entry, - unsigned long nr_segments, - struct kexec_segment __user *segments, ---- head-2011-03-11.orig/include/linux/mm.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/linux/mm.h 2011-01-31 14:53:38.000000000 +0100 -@@ -113,7 +113,12 @@ extern unsigned int kobjsize(const void - - #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ - #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ -+#ifndef CONFIG_XEN - #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ -+#else -+#define VM_SAO 0 -+#define VM_FOREIGN 0x20000000 /* Has pages belonging to another VM */ -+#endif - #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ - #define 
VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ - -@@ -141,6 +146,12 @@ extern unsigned int kobjsize(const void - */ - #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP) - -+#ifdef CONFIG_XEN -+struct vm_foreign_map { -+ struct page **map; -+}; -+#endif -+ - /* - * mapping from the currently active vm_flags protection bits (the - * low four bits) to a page protection mask.. -@@ -210,6 +221,15 @@ struct vm_operations_struct { - */ - int (*access)(struct vm_area_struct *vma, unsigned long addr, - void *buf, int len, int write); -+ -+ /* Area-specific function for clearing the PTE at @ptep. Returns the -+ * original value of @ptep. */ -+ pte_t (*zap_pte)(struct vm_area_struct *vma, -+ unsigned long addr, pte_t *ptep, int is_fullmm); -+ -+ /* called before close() to indicate no more pages should be mapped */ -+ void (*unmap)(struct vm_area_struct *area); -+ - #ifdef CONFIG_NUMA - /* - * set_policy() op must add a reference to any non-NULL @new mempolicy ---- head-2011-03-11.orig/include/linux/oprofile.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/linux/oprofile.h 2011-02-17 10:05:41.000000000 +0100 -@@ -19,6 +19,8 @@ - #include - #include - #include -+ -+#include - - /* Each escaped entry is prefixed by ESCAPE_CODE - * then one of the following codes, then the -@@ -31,14 +33,18 @@ - #define CPU_SWITCH_CODE 2 - #define COOKIE_SWITCH_CODE 3 - #define KERNEL_ENTER_SWITCH_CODE 4 --#define KERNEL_EXIT_SWITCH_CODE 5 -+#define USER_ENTER_SWITCH_CODE 5 - #define MODULE_LOADED_CODE 6 - #define CTX_TGID_CODE 7 - #define TRACE_BEGIN_CODE 8 - #define TRACE_END_CODE 9 - #define XEN_ENTER_SWITCH_CODE 10 -+#ifndef CONFIG_XEN - #define SPU_PROFILING_CODE 11 - #define SPU_CTX_SWITCH_CODE 12 -+#else -+#define DOMAIN_SWITCH_CODE 11 -+#endif - #define IBS_FETCH_CODE 13 - #define IBS_OP_CODE 14 - -@@ -52,6 +58,11 @@ struct oprofile_operations { - /* create any necessary configuration files in the oprofile fs. - * Optional. 
*/ - int (*create_files)(struct super_block * sb, struct dentry * root); -+ /* setup active domains with Xen */ -+ int (*set_active)(int *active_domains, unsigned int adomains); -+ /* setup passive domains with Xen */ -+ int (*set_passive)(int *passive_domains, unsigned int pdomains); -+ - /* Do any necessary interrupt setup. Optional. */ - int (*setup)(void); - /* Do any necessary interrupt shutdown. Optional. */ -@@ -113,6 +124,9 @@ void oprofile_add_pc(unsigned long pc, i - /* add a backtrace entry, to be called from the ->backtrace callback */ - void oprofile_add_trace(unsigned long eip); - -+/* add a domain switch entry */ -+int oprofile_add_domain_switch(int32_t domain_id); -+ - - /** - * Create a file of the given name as a child of the given root, with ---- head-2011-03-11.orig/include/linux/page-flags.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/linux/page-flags.h 2011-01-31 14:53:38.000000000 +0100 -@@ -108,6 +108,11 @@ enum pageflags { - #ifdef CONFIG_TRANSPARENT_HUGEPAGE - PG_compound_lock, - #endif -+#ifdef CONFIG_XEN -+ PG_foreign, /* Page is owned by foreign allocator. 
*/ -+ PG_netback, /* Page is owned by netback */ -+ PG_blkback, /* Page is owned by blkback */ -+#endif - __NR_PAGEFLAGS, - - /* Filesystems */ -@@ -333,6 +338,27 @@ static inline void SetPageUptodate(struc - - CLEARPAGEFLAG(Uptodate, uptodate) - -+#define PageForeign(page) test_bit(PG_foreign, &(page)->flags) -+#define SetPageForeign(_page, dtor) do { \ -+ set_bit(PG_foreign, &(_page)->flags); \ -+ BUG_ON((dtor) == (void (*)(struct page *, unsigned int))0); \ -+ (_page)->index = (long)(dtor); \ -+} while (0) -+#define ClearPageForeign(page) do { \ -+ clear_bit(PG_foreign, &(page)->flags); \ -+ (page)->index = 0; \ -+} while (0) -+#define PageForeignDestructor(_page, order) \ -+ ((void (*)(struct page *, unsigned int))(_page)->index)(_page, order) -+ -+#define PageNetback(page) test_bit(PG_netback, &(page)->flags) -+#define SetPageNetback(page) set_bit(PG_netback, &(page)->flags) -+#define ClearPageNetback(page) clear_bit(PG_netback, &(page)->flags) -+ -+#define PageBlkback(page) test_bit(PG_blkback, &(page)->flags) -+#define SetPageBlkback(page) set_bit(PG_blkback, &(page)->flags) -+#define ClearPageBlkback(page) clear_bit(PG_blkback, &(page)->flags) -+ - extern void cancel_dirty_page(struct page *page, unsigned int account_size); - - int test_clear_page_writeback(struct page *page); -@@ -463,6 +489,14 @@ static inline int PageTransCompound(stru - #define __PG_COMPOUND_LOCK 0 - #endif - -+#if !defined(CONFIG_XEN) -+# define __PG_XEN 0 -+#elif defined(CONFIG_X86) -+# define __PG_XEN ((1 << PG_pinned) | (1 << PG_foreign)) -+#else -+# define __PG_XEN (1 << PG_foreign) -+#endif -+ - /* - * Flags checked when a page is freed. Pages being freed should not have - * these flags set. It they are, there is a problem. 
-@@ -473,7 +507,7 @@ static inline int PageTransCompound(stru - 1 << PG_writeback | 1 << PG_reserved | \ - 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ -- __PG_COMPOUND_LOCK) -+ __PG_COMPOUND_LOCK | __PG_XEN) - - /* - * Flags checked when a page is prepped for return by the page allocator. ---- head-2011-03-11.orig/include/linux/pci.h 2011-01-31 14:31:28.000000000 +0100 -+++ head-2011-03-11/include/linux/pci.h 2011-01-31 14:53:38.000000000 +0100 -@@ -979,6 +979,11 @@ static inline int pci_msi_enabled(void) - { - return 0; - } -+ -+#ifdef CONFIG_XEN -+#define register_msi_get_owner(func) 0 -+#define unregister_msi_get_owner(func) 0 -+#endif - #else - extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec); - extern void pci_msi_shutdown(struct pci_dev *dev); -@@ -991,6 +996,10 @@ extern void pci_disable_msix(struct pci_ - extern void msi_remove_pci_irq_vectors(struct pci_dev *dev); - extern void pci_restore_msi_state(struct pci_dev *dev); - extern int pci_msi_enabled(void); -+#ifdef CONFIG_XEN -+extern int register_msi_get_owner(int (*func)(struct pci_dev *dev)); -+extern int unregister_msi_get_owner(int (*func)(struct pci_dev *dev)); -+#endif - #endif - - #ifdef CONFIG_PCIEPORTBUS ---- head-2011-03-11.orig/include/linux/vermagic.h 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/include/linux/vermagic.h 2011-01-31 14:53:38.000000000 +0100 -@@ -22,6 +22,11 @@ - #else - #define MODULE_VERMAGIC_MODVERSIONS "" - #endif -+#ifdef CONFIG_XEN -+#define MODULE_VERMAGIC_XEN "Xen " -+#else -+#define MODULE_VERMAGIC_XEN -+#endif - #ifndef MODULE_ARCH_VERMAGIC - #define MODULE_ARCH_VERMAGIC "" - #endif -@@ -30,5 +35,5 @@ - UTS_RELEASE " " \ - MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ - MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ -- MODULE_ARCH_VERMAGIC -+ MODULE_VERMAGIC_XEN MODULE_ARCH_VERMAGIC - ---- head-2011-03-11.orig/kernel/irq/spurious.c 2011-03-11 
10:41:54.000000000 +0100 -+++ head-2011-03-11/kernel/irq/spurious.c 2011-01-31 14:53:38.000000000 +0100 -@@ -227,7 +227,7 @@ void note_interrupt(unsigned int irq, st - */ - if (time_after(jiffies, desc->last_unhandled + HZ/10)) - desc->irqs_unhandled = 1; -- else -+ else if (!irq_ignore_unhandled(irq)) - desc->irqs_unhandled++; - desc->last_unhandled = jiffies; - if (unlikely(action_ret != IRQ_NONE)) ---- head-2011-03-11.orig/kernel/kexec.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/kernel/kexec.c 2011-01-31 14:53:38.000000000 +0100 -@@ -356,13 +356,26 @@ static int kimage_is_destination_range(s - return 0; - } - --static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) -+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit) - { - struct page *pages; - - pages = alloc_pages(gfp_mask, order); - if (pages) { - unsigned int count, i; -+#ifdef CONFIG_XEN -+ int address_bits; -+ -+ if (limit == ~0UL) -+ address_bits = BITS_PER_LONG; -+ else -+ address_bits = long_log2(limit); -+ -+ if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) { -+ __free_pages(pages, order); -+ return NULL; -+ } -+#endif - pages->mapping = NULL; - set_page_private(pages, order); - count = 1 << order; -@@ -426,10 +439,10 @@ static struct page *kimage_alloc_normal_ - do { - unsigned long pfn, epfn, addr, eaddr; - -- pages = kimage_alloc_pages(GFP_KERNEL, order); -+ pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT); - if (!pages) - break; -- pfn = page_to_pfn(pages); -+ pfn = kexec_page_to_pfn(pages); - epfn = pfn + count; - addr = pfn << PAGE_SHIFT; - eaddr = epfn << PAGE_SHIFT; -@@ -463,6 +476,7 @@ static struct page *kimage_alloc_normal_ - return pages; - } - -+#ifndef CONFIG_XEN - static struct page *kimage_alloc_crash_control_pages(struct kimage *image, - unsigned int order) - { -@@ -516,7 +530,7 @@ static struct page *kimage_alloc_crash_c - } - /* If I don't overlap any segments I 
have found my hole! */ - if (i == image->nr_segments) { -- pages = pfn_to_page(hole_start >> PAGE_SHIFT); -+ pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT); - break; - } - } -@@ -543,6 +557,13 @@ struct page *kimage_alloc_control_pages( - - return pages; - } -+#else /* !CONFIG_XEN */ -+struct page *kimage_alloc_control_pages(struct kimage *image, -+ unsigned int order) -+{ -+ return kimage_alloc_normal_control_pages(image, order); -+} -+#endif - - static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) - { -@@ -558,7 +579,7 @@ static int kimage_add_entry(struct kimag - return -ENOMEM; - - ind_page = page_address(page); -- *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; -+ *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION; - image->entry = ind_page; - image->last_entry = ind_page + - ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); -@@ -617,13 +638,13 @@ static void kimage_terminate(struct kima - #define for_each_kimage_entry(image, ptr, entry) \ - for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ - ptr = (entry & IND_INDIRECTION)? \ -- phys_to_virt((entry & PAGE_MASK)): ptr +1) -+ kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1) - - static void kimage_free_entry(kimage_entry_t entry) - { - struct page *page; - -- page = pfn_to_page(entry >> PAGE_SHIFT); -+ page = kexec_pfn_to_page(entry >> PAGE_SHIFT); - kimage_free_pages(page); - } - -@@ -635,6 +656,10 @@ static void kimage_free(struct kimage *i - if (!image) - return; - -+#ifdef CONFIG_XEN -+ xen_machine_kexec_unload(image); -+#endif -+ - kimage_free_extra_pages(image); - for_each_kimage_entry(image, ptr, entry) { - if (entry & IND_INDIRECTION) { -@@ -710,7 +735,7 @@ static struct page *kimage_alloc_page(st - * have a match. 
- */ - list_for_each_entry(page, &image->dest_pages, lru) { -- addr = page_to_pfn(page) << PAGE_SHIFT; -+ addr = kexec_page_to_pfn(page) << PAGE_SHIFT; - if (addr == destination) { - list_del(&page->lru); - return page; -@@ -721,16 +746,16 @@ static struct page *kimage_alloc_page(st - kimage_entry_t *old; - - /* Allocate a page, if we run out of memory give up */ -- page = kimage_alloc_pages(gfp_mask, 0); -+ page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT); - if (!page) - return NULL; - /* If the page cannot be used file it away */ -- if (page_to_pfn(page) > -+ if (kexec_page_to_pfn(page) > - (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { - list_add(&page->lru, &image->unuseable_pages); - continue; - } -- addr = page_to_pfn(page) << PAGE_SHIFT; -+ addr = kexec_page_to_pfn(page) << PAGE_SHIFT; - - /* If it is the destination page we want use it */ - if (addr == destination) -@@ -753,7 +778,7 @@ static struct page *kimage_alloc_page(st - struct page *old_page; - - old_addr = *old & PAGE_MASK; -- old_page = pfn_to_page(old_addr >> PAGE_SHIFT); -+ old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT); - copy_highpage(page, old_page); - *old = addr | (*old & ~PAGE_MASK); - -@@ -809,7 +834,7 @@ static int kimage_load_normal_segment(st - result = -ENOMEM; - goto out; - } -- result = kimage_add_page(image, page_to_pfn(page) -+ result = kimage_add_page(image, kexec_page_to_pfn(page) - << PAGE_SHIFT); - if (result < 0) - goto out; -@@ -841,6 +866,7 @@ out: - return result; - } - -+#ifndef CONFIG_XEN - static int kimage_load_crash_segment(struct kimage *image, - struct kexec_segment *segment) - { -@@ -863,7 +889,7 @@ static int kimage_load_crash_segment(str - char *ptr; - size_t uchunk, mchunk; - -- page = pfn_to_page(maddr >> PAGE_SHIFT); -+ page = kexec_pfn_to_page(maddr >> PAGE_SHIFT); - if (!page) { - result = -ENOMEM; - goto out; -@@ -912,6 +938,13 @@ static int kimage_load_segment(struct ki - - return result; - } -+#else /* CONFIG_XEN */ -+static int 
kimage_load_segment(struct kimage *image, -+ struct kexec_segment *segment) -+{ -+ return kimage_load_normal_segment(image, segment); -+} -+#endif - - /* - * Exec Kernel system call: for obvious reasons only root may call it. -@@ -1015,6 +1048,13 @@ SYSCALL_DEFINE4(kexec_load, unsigned lon - } - kimage_terminate(image); - } -+#ifdef CONFIG_XEN -+ if (image) { -+ result = xen_machine_kexec_load(image); -+ if (result) -+ goto out; -+ } -+#endif - /* Install the new kernel, and Uninstall the old */ - image = xchg(dest_image, image); - ---- head-2011-03-11.orig/kernel/sysctl.c 2011-02-08 10:00:13.000000000 +0100 -+++ head-2011-03-11/kernel/sysctl.c 2011-02-08 10:02:12.000000000 +0100 -@@ -846,7 +846,7 @@ static struct ctl_table kern_table[] = { - .proc_handler = proc_dointvec, - }, - #endif --#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) -+#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) && !defined(CONFIG_ACPI_PV_SLEEP) - { - .procname = "acpi_video_flags", - .data = &acpi_realmode_flags, ---- head-2011-03-11.orig/mm/memory.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/mm/memory.c 2011-01-31 14:53:38.000000000 +0100 -@@ -604,6 +604,12 @@ struct page *vm_normal_page(struct vm_ar - { - unsigned long pfn = pte_pfn(pte); - -+#if defined(CONFIG_XEN) && defined(CONFIG_X86) -+ /* XEN: Covers user-space grant mappings (even of local pages). 
*/ -+ if (unlikely(vma->vm_flags & VM_FOREIGN)) -+ return NULL; -+#endif -+ - if (HAVE_PTE_SPECIAL) { - if (likely(!pte_special(pte))) - goto check_pfn; -@@ -635,6 +641,9 @@ struct page *vm_normal_page(struct vm_ar - return NULL; - check_pfn: - if (unlikely(pfn > highest_memmap_pfn)) { -+#ifdef CONFIG_XEN -+ if (!(vma->vm_flags & VM_RESERVED)) -+#endif - print_bad_pte(vma, addr, pte, NULL); - return NULL; - } -@@ -951,8 +960,12 @@ static unsigned long zap_pte_range(struc - page->index > details->last_index)) - continue; - } -- ptent = ptep_get_and_clear_full(mm, addr, pte, -- tlb->fullmm); -+ if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte)) -+ ptent = vma->vm_ops->zap_pte(vma, addr, pte, -+ tlb->fullmm); -+ else -+ ptent = ptep_get_and_clear_full(mm, addr, pte, -+ tlb->fullmm); - tlb_remove_tlb_entry(tlb, pte, addr); - if (unlikely(!page)) - continue; -@@ -1229,6 +1242,7 @@ unsigned long zap_page_range(struct vm_a - tlb_finish_mmu(tlb, address, end); - return end; - } -+EXPORT_SYMBOL(zap_page_range); - - /** - * zap_vma_ptes - remove ptes mapping the vma -@@ -1489,6 +1503,28 @@ int __get_user_pages(struct task_struct - continue; - } - -+#ifdef CONFIG_XEN -+ if (vma && (vma->vm_flags & VM_FOREIGN)) { -+ struct vm_foreign_map *foreign_map = -+ vma->vm_private_data; -+ struct page **map = foreign_map->map; -+ int offset = (start - vma->vm_start) >> PAGE_SHIFT; -+ if (map[offset] != NULL) { -+ if (pages) { -+ struct page *page = map[offset]; -+ -+ pages[i] = page; -+ get_page(page); -+ } -+ if (vmas) -+ vmas[i] = vma; -+ i++; -+ start += PAGE_SIZE; -+ len--; -+ continue; -+ } -+ } -+#endif - if (!vma || - (vma->vm_flags & (VM_IO | VM_PFNMAP)) || - !(vm_flags & vma->vm_flags)) ---- head-2011-03-11.orig/mm/mmap.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/mm/mmap.c 2011-01-31 14:53:38.000000000 +0100 -@@ -1926,6 +1926,12 @@ static void unmap_region(struct mm_struc - tlb_finish_mmu(tlb, start, end); - } - -+static inline void unmap_vma(struct 
vm_area_struct *vma) -+{ -+ if (unlikely(vma->vm_ops && vma->vm_ops->unmap)) -+ vma->vm_ops->unmap(vma); -+} -+ - /* - * Create a list of vma's touched by the unmap, removing them from the mm's - * vma list as we go.. -@@ -1942,6 +1948,7 @@ detach_vmas_to_be_unmapped(struct mm_str - vma->vm_prev = NULL; - do { - rb_erase(&vma->vm_rb, &mm->mm_rb); -+ unmap_vma(vma); - mm->map_count--; - tail_vma = vma; - vma = vma->vm_next; -@@ -2284,6 +2291,9 @@ void exit_mmap(struct mm_struct *mm) - - arch_exit_mmap(mm); - -+ for (vma = mm->mmap; vma; vma = vma->vm_next) -+ unmap_vma(vma); -+ - vma = mm->mmap; - if (!vma) /* Can happen if dup_mmap() received an OOM */ - return; ---- head-2011-03-11.orig/mm/mprotect.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/mm/mprotect.c 2011-01-31 14:53:38.000000000 +0100 -@@ -97,6 +97,8 @@ static inline void change_pmd_range(stru - } - if (pmd_none_or_clear_bad(pmd)) - continue; -+ if (arch_change_pte_range(mm, pmd, addr, next, newprot)) -+ continue; - change_pte_range(vma->vm_mm, pmd, addr, next, newprot, - dirty_accountable); - } while (pmd++, addr = next, addr != end); ---- head-2011-03-11.orig/mm/page_alloc.c 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/mm/page_alloc.c 2011-01-31 14:53:38.000000000 +0100 -@@ -647,6 +647,13 @@ static bool free_pages_prepare(struct pa - int i; - int bad = 0; - -+#ifdef CONFIG_XEN -+ if (PageForeign(page)) { -+ PageForeignDestructor(page, order); -+ return; -+ } -+#endif -+ - trace_mm_page_free_direct(page, order); - kmemcheck_free_shadow(page, order); - ---- head-2011-03-11.orig/scripts/Makefile.build 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/scripts/Makefile.build 2011-01-31 14:53:38.000000000 +0100 -@@ -76,6 +76,21 @@ ifndef obj - $(warning kbuild: Makefile.build is included improperly) - endif - -+ifeq ($(CONFIG_XEN),y) -+Makefile.xen := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD),$(objtree)/scripts)/Makefile.xen -+$(Makefile.xen): $(srctree)/scripts/Makefile.xen.awk 
$(srctree)/scripts/Makefile.build -+ @echo ' Updating $@' -+ $(if $(shell echo a | $(AWK) '{ print gensub(/a/, "AA", "g"); }'),\ -+ ,$(error 'Your awk program does not define gensub. Use gawk or another awk with gensub')) -+ @$(AWK) -f $< $(filter-out $<,$^) >$@ -+ -+xen-src-single-used-m := $(patsubst $(srctree)/%,%,$(wildcard $(addprefix $(srctree)/,$(single-used-m:.o=-xen.c)))) -+xen-single-used-m := $(xen-src-single-used-m:-xen.c=.o) -+single-used-m := $(filter-out $(xen-single-used-m),$(single-used-m)) -+ -+-include $(Makefile.xen) -+endif -+ - # =========================================================================== - - ifneq ($(strip $(lib-y) $(lib-m) $(lib-n) $(lib-)),) ---- head-2011-03-11.orig/scripts/Makefile.lib 2011-03-11 10:41:54.000000000 +0100 -+++ head-2011-03-11/scripts/Makefile.lib 2011-01-31 14:53:38.000000000 +0100 -@@ -22,6 +22,12 @@ obj-m := $(filter-out $(obj-y),$(obj-m)) - - lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m))) - -+# Remove objects forcibly disabled -+ -+obj-y := $(filter-out $(disabled-obj-y),$(obj-y)) -+obj-m := $(filter-out $(disabled-obj-y),$(obj-m)) -+lib-y := $(filter-out $(disabled-obj-y),$(lib-y)) -+ - - # Handle objects in subdirs - # --------------------------------------------------------------------------- diff --git a/patches.xen/xen3-auto-include-xen-interface.diff b/patches.xen/xen3-auto-include-xen-interface.diff deleted file mode 100644 index fd4e673..0000000 --- a/patches.xen/xen3-auto-include-xen-interface.diff +++ /dev/null @@ -1,6063 +0,0 @@ -Subject: xen3 include-xen-interface -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) -Patch-mainline: n/a -Acked-by: jbeulich@novell.com - ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/COPYING 2007-06-12 13:14:19.000000000 +0200 -@@ -0,0 +1,38 @@ -+XEN NOTICE -+========== -+ -+This copyright applies to all files within this subdirectory and its -+subdirectories: -+ include/public/*.h -+ 
include/public/hvm/*.h -+ include/public/io/*.h -+ -+The intention is that these files can be freely copied into the source -+tree of an operating system when porting that OS to run on Xen. Doing -+so does *not* cause the OS to become subject to the terms of the GPL. -+ -+All other files in the Xen source distribution are covered by version -+2 of the GNU General Public License except where explicitly stated -+otherwise within individual source files. -+ -+ -- Keir Fraser (on behalf of the Xen team) -+ -+===================================================================== -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to -+deal in the Software without restriction, including without limitation the -+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+sell copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+DEALINGS IN THE SOFTWARE. ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/arch-x86/cpuid.h 2011-03-17 13:50:24.000000000 +0100 -@@ -0,0 +1,68 @@ -+/****************************************************************************** -+ * arch-x86/cpuid.h -+ * -+ * CPUID interface to Xen. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2007 Citrix Systems, Inc. -+ * -+ * Authors: -+ * Keir Fraser -+ */ -+ -+#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__ -+#define __XEN_PUBLIC_ARCH_X86_CPUID_H__ -+ -+/* Xen identification leaves start at 0x40000000. */ -+#define XEN_CPUID_FIRST_LEAF 0x40000000 -+#define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i)) -+ -+/* -+ * Leaf 1 (0x40000000) -+ * EAX: Largest Xen-information leaf. All leaves up to an including @EAX -+ * are supported by the Xen host. -+ * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification -+ * of a Xen host. -+ */ -+#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */ -+#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */ -+#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */ -+ -+/* -+ * Leaf 2 (0x40000001) -+ * EAX[31:16]: Xen major version. -+ * EAX[15: 0]: Xen minor version. 
-+ * EBX-EDX: Reserved (currently all zeroes). -+ */ -+ -+/* -+ * Leaf 3 (0x40000002) -+ * EAX: Number of hypercall transfer pages. This register is always guaranteed -+ * to specify one hypercall page. -+ * EBX: Base address of Xen-specific MSRs. -+ * ECX: Features 1. Unused bits are set to zero. -+ * EDX: Features 2. Unused bits are set to zero. -+ */ -+ -+/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */ -+#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0 -+#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0) -+ -+#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/arch-x86/hvm/save.h 2011-03-17 13:50:24.000000000 +0100 -@@ -0,0 +1,463 @@ -+/* -+ * Structure definitions for HVM state that is held by Xen and must -+ * be saved along with the domain's memory and device-model state. -+ * -+ * Copyright (c) 2007 XenSource Ltd. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__ -+#define __XEN_PUBLIC_HVM_SAVE_X86_H__ -+ -+/* -+ * Save/restore header: general info about the save file. -+ */ -+ -+#define HVM_FILE_MAGIC 0x54381286 -+#define HVM_FILE_VERSION 0x00000001 -+ -+struct hvm_save_header { -+ uint32_t magic; /* Must be HVM_FILE_MAGIC */ -+ uint32_t version; /* File format version */ -+ uint64_t changeset; /* Version of Xen that saved this file */ -+ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */ -+ uint32_t gtsc_khz; /* Guest's TSC frequency in kHz */ -+}; -+ -+DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); -+ -+ -+/* -+ * Processor -+ */ -+ -+struct hvm_hw_cpu { -+ uint8_t fpu_regs[512]; -+ -+ uint64_t rax; -+ uint64_t rbx; -+ uint64_t rcx; -+ uint64_t rdx; -+ uint64_t rbp; -+ uint64_t rsi; -+ uint64_t rdi; -+ uint64_t rsp; -+ uint64_t r8; -+ uint64_t r9; -+ uint64_t r10; -+ uint64_t r11; -+ uint64_t r12; -+ uint64_t r13; -+ uint64_t r14; -+ uint64_t r15; -+ -+ uint64_t rip; -+ uint64_t rflags; -+ -+ uint64_t cr0; -+ uint64_t cr2; -+ uint64_t cr3; -+ uint64_t cr4; -+ -+ uint64_t dr0; -+ uint64_t dr1; -+ uint64_t dr2; -+ uint64_t dr3; -+ uint64_t dr6; -+ uint64_t dr7; -+ -+ uint32_t cs_sel; -+ uint32_t ds_sel; -+ uint32_t es_sel; -+ uint32_t fs_sel; -+ uint32_t gs_sel; -+ uint32_t ss_sel; -+ uint32_t tr_sel; -+ uint32_t ldtr_sel; -+ -+ uint32_t cs_limit; -+ uint32_t ds_limit; -+ uint32_t es_limit; -+ uint32_t fs_limit; -+ uint32_t gs_limit; -+ uint32_t ss_limit; -+ uint32_t tr_limit; -+ uint32_t ldtr_limit; -+ uint32_t idtr_limit; -+ uint32_t gdtr_limit; -+ -+ uint64_t cs_base; -+ uint64_t ds_base; -+ uint64_t es_base; -+ uint64_t fs_base; -+ uint64_t gs_base; -+ uint64_t ss_base; -+ 
uint64_t tr_base; -+ uint64_t ldtr_base; -+ uint64_t idtr_base; -+ uint64_t gdtr_base; -+ -+ uint32_t cs_arbytes; -+ uint32_t ds_arbytes; -+ uint32_t es_arbytes; -+ uint32_t fs_arbytes; -+ uint32_t gs_arbytes; -+ uint32_t ss_arbytes; -+ uint32_t tr_arbytes; -+ uint32_t ldtr_arbytes; -+ -+ uint64_t sysenter_cs; -+ uint64_t sysenter_esp; -+ uint64_t sysenter_eip; -+ -+ /* msr for em64t */ -+ uint64_t shadow_gs; -+ -+ /* msr content saved/restored. */ -+ uint64_t msr_flags; -+ uint64_t msr_lstar; -+ uint64_t msr_star; -+ uint64_t msr_cstar; -+ uint64_t msr_syscall_mask; -+ uint64_t msr_efer; -+ uint64_t msr_tsc_aux; -+ -+ /* guest's idea of what rdtsc() would return */ -+ uint64_t tsc; -+ -+ /* pending event, if any */ -+ union { -+ uint32_t pending_event; -+ struct { -+ uint8_t pending_vector:8; -+ uint8_t pending_type:3; -+ uint8_t pending_error_valid:1; -+ uint32_t pending_reserved:19; -+ uint8_t pending_valid:1; -+ }; -+ }; -+ /* error code for pending event */ -+ uint32_t error_code; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu); -+ -+ -+/* -+ * PIC -+ */ -+ -+struct hvm_hw_vpic { -+ /* IR line bitmasks. */ -+ uint8_t irr; -+ uint8_t imr; -+ uint8_t isr; -+ -+ /* Line IRx maps to IRQ irq_base+x */ -+ uint8_t irq_base; -+ -+ /* -+ * Where are we in ICW2-4 initialisation (0 means no init in progress)? -+ * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1). -+ * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence) -+ * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence) -+ */ -+ uint8_t init_state:4; -+ -+ /* IR line with highest priority. */ -+ uint8_t priority_add:4; -+ -+ /* Reads from A=0 obtain ISR or IRR? */ -+ uint8_t readsel_isr:1; -+ -+ /* Reads perform a polling read? */ -+ uint8_t poll:1; -+ -+ /* Automatically clear IRQs from the ISR during INTA? */ -+ uint8_t auto_eoi:1; -+ -+ /* Automatically rotate IRQ priorities during AEOI? */ -+ uint8_t rotate_on_auto_eoi:1; -+ -+ /* Exclude slave inputs when considering in-service IRQs? 
*/ -+ uint8_t special_fully_nested_mode:1; -+ -+ /* Special mask mode excludes masked IRs from AEOI and priority checks. */ -+ uint8_t special_mask_mode:1; -+ -+ /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */ -+ uint8_t is_master:1; -+ -+ /* Edge/trigger selection. */ -+ uint8_t elcr; -+ -+ /* Virtual INT output. */ -+ uint8_t int_output; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic); -+ -+ -+/* -+ * IO-APIC -+ */ -+ -+#ifdef __ia64__ -+#define VIOAPIC_IS_IOSAPIC 1 -+#define VIOAPIC_NUM_PINS 24 -+#else -+#define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */ -+#endif -+ -+struct hvm_hw_vioapic { -+ uint64_t base_address; -+ uint32_t ioregsel; -+ uint32_t id; -+ union vioapic_redir_entry -+ { -+ uint64_t bits; -+ struct { -+ uint8_t vector; -+ uint8_t delivery_mode:3; -+ uint8_t dest_mode:1; -+ uint8_t delivery_status:1; -+ uint8_t polarity:1; -+ uint8_t remote_irr:1; -+ uint8_t trig_mode:1; -+ uint8_t mask:1; -+ uint8_t reserve:7; -+#if !VIOAPIC_IS_IOSAPIC -+ uint8_t reserved[4]; -+ uint8_t dest_id; -+#else -+ uint8_t reserved[3]; -+ uint16_t dest_id; -+#endif -+ } fields; -+ } redirtbl[VIOAPIC_NUM_PINS]; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic); -+ -+ -+/* -+ * LAPIC -+ */ -+ -+struct hvm_hw_lapic { -+ uint64_t apic_base_msr; -+ uint32_t disabled; /* VLAPIC_xx_DISABLED */ -+ uint32_t timer_divisor; -+ uint64_t tdt_msr; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic); -+ -+struct hvm_hw_lapic_regs { -+ uint8_t data[1024]; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs); -+ -+ -+/* -+ * IRQs -+ */ -+ -+struct hvm_hw_pci_irqs { -+ /* -+ * Virtual interrupt wires for a single PCI bus. -+ * Indexed by: device*4 + INTx#. 
-+ */ -+ union { -+ unsigned long i[16 / sizeof (unsigned long)]; /* DECLARE_BITMAP(i, 32*4); */ -+ uint64_t pad[2]; -+ }; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs); -+ -+struct hvm_hw_isa_irqs { -+ /* -+ * Virtual interrupt wires for ISA devices. -+ * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). -+ */ -+ union { -+ unsigned long i[1]; /* DECLARE_BITMAP(i, 16); */ -+ uint64_t pad[1]; -+ }; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs); -+ -+struct hvm_hw_pci_link { -+ /* -+ * PCI-ISA interrupt router. -+ * Each PCI is 'wire-ORed' into one of four links using -+ * the traditional 'barber's pole' mapping ((device + INTx#) & 3). -+ * The router provides a programmable mapping from each link to a GSI. -+ */ -+ uint8_t route[4]; -+ uint8_t pad0[4]; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link); -+ -+/* -+ * PIT -+ */ -+ -+struct hvm_hw_pit { -+ struct hvm_hw_pit_channel { -+ uint32_t count; /* can be 65536 */ -+ uint16_t latched_count; -+ uint8_t count_latched; -+ uint8_t status_latched; -+ uint8_t status; -+ uint8_t read_state; -+ uint8_t write_state; -+ uint8_t write_latch; -+ uint8_t rw_mode; -+ uint8_t mode; -+ uint8_t bcd; /* not supported */ -+ uint8_t gate; /* timer start */ -+ } channels[3]; /* 3 x 16 bytes */ -+ uint32_t speaker_data_on; -+ uint32_t pad0; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit); -+ -+ -+/* -+ * RTC -+ */ -+ -+#define RTC_CMOS_SIZE 14 -+struct hvm_hw_rtc { -+ /* CMOS bytes */ -+ uint8_t cmos_data[RTC_CMOS_SIZE]; -+ /* Index register for 2-part operations */ -+ uint8_t cmos_index; -+ uint8_t pad0; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc); -+ -+ -+/* -+ * HPET -+ */ -+ -+#define HPET_TIMER_NUM 3 /* 3 timers supported now */ -+struct hvm_hw_hpet { -+ /* Memory-mapped, software visible registers */ -+ uint64_t capability; /* capabilities */ -+ uint64_t res0; /* reserved */ -+ uint64_t config; /* configuration */ -+ uint64_t res1; 
/* reserved */ -+ uint64_t isr; /* interrupt status reg */ -+ uint64_t res2[25]; /* reserved */ -+ uint64_t mc64; /* main counter */ -+ uint64_t res3; /* reserved */ -+ struct { /* timers */ -+ uint64_t config; /* configuration/cap */ -+ uint64_t cmp; /* comparator */ -+ uint64_t fsb; /* FSB route, not supported now */ -+ uint64_t res4; /* reserved */ -+ } timers[HPET_TIMER_NUM]; -+ uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */ -+ -+ /* Hidden register state */ -+ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */ -+}; -+ -+DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet); -+ -+ -+/* -+ * PM timer -+ */ -+ -+struct hvm_hw_pmtimer { -+ uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */ -+ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */ -+ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */ -+}; -+ -+DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer); -+ -+/* -+ * MTRR MSRs -+ */ -+ -+struct hvm_hw_mtrr { -+#define MTRR_VCNT 8 -+#define NUM_FIXED_MSR 11 -+ uint64_t msr_pat_cr; -+ /* mtrr physbase & physmask msr pair*/ -+ uint64_t msr_mtrr_var[MTRR_VCNT*2]; -+ uint64_t msr_mtrr_fixed[NUM_FIXED_MSR]; -+ uint64_t msr_mtrr_cap; -+ uint64_t msr_mtrr_def_type; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr); -+ -+/* -+ * Viridian hypervisor context. -+ */ -+ -+struct hvm_viridian_context { -+ uint64_t hypercall_gpa; -+ uint64_t guest_os_id; -+}; -+ -+DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context); -+ -+ -+/* -+ * The save area of XSAVE/XRSTOR. 
-+ */ -+ -+struct hvm_hw_cpu_xsave { -+ uint64_t xfeature_mask; -+ uint64_t xcr0; /* Updated by XSETBV */ -+ uint64_t xcr0_accum; /* Updated by XSETBV */ -+ struct { -+ struct { char x[512]; } fpu_sse; -+ -+ struct { -+ uint64_t xstate_bv; /* Updated by XRSTOR */ -+ uint64_t reserved[7]; -+ } xsave_hdr; /* The 64-byte header */ -+ -+ struct { char x[0]; } ymm; /* YMM */ -+ } save_area; -+} __attribute__((packed)); -+ -+#define CPU_XSAVE_CODE 16 -+ -+/* -+ * Largest type-code in use -+ */ -+#define HVM_SAVE_CODE_MAX 16 -+ -+#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/arch-x86/xen-mca.h 2010-08-31 09:24:21.000000000 +0200 -@@ -0,0 +1,440 @@ -+/****************************************************************************** -+ * arch-x86/mca.h -+ * -+ * Contributed by Advanced Micro Devices, Inc. -+ * Author: Christoph Egger -+ * -+ * Guest OS machine check interface to x86 Xen. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ */ -+ -+/* Full MCA functionality has the following Usecases from the guest side: -+ * -+ * Must have's: -+ * 1. Dom0 and DomU register machine check trap callback handlers -+ * (already done via "set_trap_table" hypercall) -+ * 2. Dom0 registers machine check event callback handler -+ * (doable via EVTCHNOP_bind_virq) -+ * 3. Dom0 and DomU fetches machine check data -+ * 4. Dom0 wants Xen to notify a DomU -+ * 5. Dom0 gets DomU ID from physical address -+ * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy") -+ * -+ * Nice to have's: -+ * 7. Dom0 wants Xen to deactivate a physical CPU -+ * This is better done as separate task, physical CPU hotplugging, -+ * and hypercall(s) should be sysctl's -+ * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to -+ * move a DomU (or Dom0 itself) away from a malicious page -+ * producing correctable errors. -+ * 9. offlining physical page: -+ * Xen free's and never re-uses a certain physical page. -+ * 10. Testfacility: Allow Dom0 to write values into machine check MSR's -+ * and tell Xen to trigger a machine check -+ */ -+ -+#ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__ -+#define __XEN_PUBLIC_ARCH_X86_MCA_H__ -+ -+/* Hypercall */ -+#define __HYPERVISOR_mca __HYPERVISOR_arch_0 -+ -+/* -+ * The xen-unstable repo has interface version 0x03000001; out interface -+ * is incompatible with that and any future minor revisions, so we -+ * choose a different version number range that is numerically less -+ * than that used in xen-unstable. 
-+ */ -+#define XEN_MCA_INTERFACE_VERSION 0x01ecc003 -+ -+/* IN: Dom0 calls hypercall to retrieve nonurgent telemetry */ -+#define XEN_MC_NONURGENT 0x0001 -+/* IN: Dom0/DomU calls hypercall to retrieve urgent telemetry */ -+#define XEN_MC_URGENT 0x0002 -+/* IN: Dom0 acknowledges previosly-fetched telemetry */ -+#define XEN_MC_ACK 0x0004 -+ -+/* OUT: All is ok */ -+#define XEN_MC_OK 0x0 -+/* OUT: Domain could not fetch data. */ -+#define XEN_MC_FETCHFAILED 0x1 -+/* OUT: There was no machine check data to fetch. */ -+#define XEN_MC_NODATA 0x2 -+/* OUT: Between notification time and this hypercall an other -+ * (most likely) correctable error happened. The fetched data, -+ * does not match the original machine check data. */ -+#define XEN_MC_NOMATCH 0x4 -+ -+/* OUT: DomU did not register MC NMI handler. Try something else. */ -+#define XEN_MC_CANNOTHANDLE 0x8 -+/* OUT: Notifying DomU failed. Retry later or try something else. */ -+#define XEN_MC_NOTDELIVERED 0x10 -+/* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */ -+ -+ -+#ifndef __ASSEMBLY__ -+ -+#define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */ -+ -+/* -+ * Machine Check Architecure: -+ * structs are read-only and used to report all kinds of -+ * correctable and uncorrectable errors detected by the HW. -+ * Dom0 and DomU: register a handler to get notified. 
-+ * Dom0 only: Correctable errors are reported via VIRQ_MCA -+ * Dom0 and DomU: Uncorrectable errors are reported via nmi handlers -+ */ -+#define MC_TYPE_GLOBAL 0 -+#define MC_TYPE_BANK 1 -+#define MC_TYPE_EXTENDED 2 -+#define MC_TYPE_RECOVERY 3 -+ -+struct mcinfo_common { -+ uint16_t type; /* structure type */ -+ uint16_t size; /* size of this struct in bytes */ -+}; -+ -+ -+#define MC_FLAG_CORRECTABLE (1 << 0) -+#define MC_FLAG_UNCORRECTABLE (1 << 1) -+#define MC_FLAG_RECOVERABLE (1 << 2) -+#define MC_FLAG_POLLED (1 << 3) -+#define MC_FLAG_RESET (1 << 4) -+#define MC_FLAG_CMCI (1 << 5) -+#define MC_FLAG_MCE (1 << 6) -+/* contains global x86 mc information */ -+struct mcinfo_global { -+ struct mcinfo_common common; -+ -+ /* running domain at the time in error (most likely the impacted one) */ -+ uint16_t mc_domid; -+ uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */ -+ uint32_t mc_socketid; /* physical socket of the physical core */ -+ uint16_t mc_coreid; /* physical impacted core */ -+ uint16_t mc_core_threadid; /* core thread of physical core */ -+ uint32_t mc_apicid; -+ uint32_t mc_flags; -+ uint64_t mc_gstatus; /* global status */ -+}; -+ -+/* contains bank local x86 mc information */ -+struct mcinfo_bank { -+ struct mcinfo_common common; -+ -+ uint16_t mc_bank; /* bank nr */ -+ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr on dom0 -+ * and if mc_addr is valid. Never valid on DomU. */ -+ uint64_t mc_status; /* bank status */ -+ uint64_t mc_addr; /* bank address, only valid -+ * if addr bit is set in mc_status */ -+ uint64_t mc_misc; -+ uint64_t mc_ctrl2; -+ uint64_t mc_tsc; -+}; -+ -+ -+struct mcinfo_msr { -+ uint64_t reg; /* MSR */ -+ uint64_t value; /* MSR value */ -+}; -+ -+/* contains mc information from other -+ * or additional mc MSRs */ -+struct mcinfo_extended { -+ struct mcinfo_common common; -+ -+ /* You can fill up to five registers. -+ * If you need more, then use this structure -+ * multiple times. 
*/ -+ -+ uint32_t mc_msrs; /* Number of msr with valid values. */ -+ /* -+ * Currently Intel extended MSR (32/64) include all gp registers -+ * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be -+ * useful at present. So expand this array to 16/32 to leave room. -+ */ -+ struct mcinfo_msr mc_msr[sizeof(void *) * 4]; -+}; -+ -+/* Recovery Action flags. Giving recovery result information to DOM0 */ -+ -+/* Xen takes successful recovery action, the error is recovered */ -+#define REC_ACTION_RECOVERED (0x1 << 0) -+/* No action is performed by XEN */ -+#define REC_ACTION_NONE (0x1 << 1) -+/* It's possible DOM0 might take action ownership in some case */ -+#define REC_ACTION_NEED_RESET (0x1 << 2) -+ -+/* Different Recovery Action types, if the action is performed successfully, -+ * REC_ACTION_RECOVERED flag will be returned. -+ */ -+ -+/* Page Offline Action */ -+#define MC_ACTION_PAGE_OFFLINE (0x1 << 0) -+/* CPU offline Action */ -+#define MC_ACTION_CPU_OFFLINE (0x1 << 1) -+/* L3 cache disable Action */ -+#define MC_ACTION_CACHE_SHRINK (0x1 << 2) -+ -+/* Below interface used between XEN/DOM0 for passing XEN's recovery action -+ * information to DOM0. -+ * usage Senario: After offlining broken page, XEN might pass its page offline -+ * recovery action result to DOM0. DOM0 will save the information in -+ * non-volatile memory for further proactive actions, such as offlining the -+ * easy broken page earlier when doing next reboot. 
-+*/ -+struct page_offline_action -+{ -+ /* Params for passing the offlined page number to DOM0 */ -+ uint64_t mfn; -+ uint64_t status; -+}; -+ -+struct cpu_offline_action -+{ -+ /* Params for passing the identity of the offlined CPU to DOM0 */ -+ uint32_t mc_socketid; -+ uint16_t mc_coreid; -+ uint16_t mc_core_threadid; -+}; -+ -+#define MAX_UNION_SIZE 16 -+struct mcinfo_recovery -+{ -+ struct mcinfo_common common; -+ uint16_t mc_bank; /* bank nr */ -+ uint8_t action_flags; -+ uint8_t action_types; -+ union { -+ struct page_offline_action page_retire; -+ struct cpu_offline_action cpu_offline; -+ uint8_t pad[MAX_UNION_SIZE]; -+ } action_info; -+}; -+ -+ -+#define MCINFO_HYPERCALLSIZE 1024 -+#define MCINFO_MAXSIZE 768 -+ -+#define MCINFO_FLAGS_UNCOMPLETE 0x1 -+struct mc_info { -+ /* Number of mcinfo_* entries in mi_data */ -+ uint32_t mi_nentries; -+ uint32_t flags; -+ uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8]; -+}; -+typedef struct mc_info mc_info_t; -+DEFINE_XEN_GUEST_HANDLE(mc_info_t); -+ -+#define __MC_MSR_ARRAYSIZE 8 -+#define __MC_NMSRS 1 -+#define MC_NCAPS 7 /* 7 CPU feature flag words */ -+#define MC_CAPS_STD_EDX 0 /* cpuid level 0x00000001 (%edx) */ -+#define MC_CAPS_AMD_EDX 1 /* cpuid level 0x80000001 (%edx) */ -+#define MC_CAPS_TM 2 /* cpuid level 0x80860001 (TransMeta) */ -+#define MC_CAPS_LINUX 3 /* Linux-defined */ -+#define MC_CAPS_STD_ECX 4 /* cpuid level 0x00000001 (%ecx) */ -+#define MC_CAPS_VIA 5 /* cpuid level 0xc0000001 */ -+#define MC_CAPS_AMD_ECX 6 /* cpuid level 0x80000001 (%ecx) */ -+ -+struct mcinfo_logical_cpu { -+ uint32_t mc_cpunr; -+ uint32_t mc_chipid; -+ uint16_t mc_coreid; -+ uint16_t mc_threadid; -+ uint32_t mc_apicid; -+ uint32_t mc_clusterid; -+ uint32_t mc_ncores; -+ uint32_t mc_ncores_active; -+ uint32_t mc_nthreads; -+ int32_t mc_cpuid_level; -+ uint32_t mc_family; -+ uint32_t mc_vendor; -+ uint32_t mc_model; -+ uint32_t mc_step; -+ char mc_vendorid[16]; -+ char mc_brandid[64]; -+ uint32_t mc_cpu_caps[MC_NCAPS]; -+ uint32_t 
mc_cache_size; -+ uint32_t mc_cache_alignment; -+ int32_t mc_nmsrvals; -+ struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE]; -+}; -+typedef struct mcinfo_logical_cpu xen_mc_logical_cpu_t; -+DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t); -+ -+ -+/* -+ * OS's should use these instead of writing their own lookup function -+ * each with its own bugs and drawbacks. -+ * We use macros instead of static inline functions to allow guests -+ * to include this header in assembly files (*.S). -+ */ -+/* Prototype: -+ * uint32_t x86_mcinfo_nentries(struct mc_info *mi); -+ */ -+#define x86_mcinfo_nentries(_mi) \ -+ (_mi)->mi_nentries -+/* Prototype: -+ * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi); -+ */ -+#define x86_mcinfo_first(_mi) \ -+ ((struct mcinfo_common *)(_mi)->mi_data) -+/* Prototype: -+ * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic); -+ */ -+#define x86_mcinfo_next(_mic) \ -+ ((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)) -+ -+/* Prototype: -+ * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type); -+ */ -+#define x86_mcinfo_lookup(_ret, _mi, _type) \ -+ do { \ -+ uint32_t found, i; \ -+ struct mcinfo_common *_mic; \ -+ \ -+ found = 0; \ -+ (_ret) = NULL; \ -+ if (_mi == NULL) break; \ -+ _mic = x86_mcinfo_first(_mi); \ -+ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \ -+ if (_mic->type == (_type)) { \ -+ found = 1; \ -+ break; \ -+ } \ -+ _mic = x86_mcinfo_next(_mic); \ -+ } \ -+ (_ret) = found ? _mic : NULL; \ -+ } while (0) -+ -+ -+/* Usecase 1 -+ * Register machine check trap callback handler -+ * (already done via "set_trap_table" hypercall) -+ */ -+ -+/* Usecase 2 -+ * Dom0 registers machine check event callback handler -+ * done by EVTCHNOP_bind_virq -+ */ -+ -+/* Usecase 3 -+ * Fetch machine check data from hypervisor. -+ * Note, this hypercall is special, because both Dom0 and DomU must use this. -+ */ -+#define XEN_MC_fetch 1 -+struct xen_mc_fetch { -+ /* IN/OUT variables. 
*/ -+ uint32_t flags; /* IN: XEN_MC_NONURGENT, XEN_MC_URGENT, -+ XEN_MC_ACK if ack'ing an earlier fetch */ -+ /* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, -+ XEN_MC_NODATA, XEN_MC_NOMATCH */ -+ uint32_t _pad0; -+ uint64_t fetch_id; /* OUT: id for ack, IN: id we are ack'ing */ -+ -+ /* OUT variables. */ -+ XEN_GUEST_HANDLE(mc_info_t) data; -+}; -+typedef struct xen_mc_fetch xen_mc_fetch_t; -+DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t); -+ -+ -+/* Usecase 4 -+ * This tells the hypervisor to notify a DomU about the machine check error -+ */ -+#define XEN_MC_notifydomain 2 -+struct xen_mc_notifydomain { -+ /* IN variables. */ -+ uint16_t mc_domid; /* The unprivileged domain to notify. */ -+ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify. -+ * Usually echo'd value from the fetch hypercall. */ -+ -+ /* IN/OUT variables. */ -+ uint32_t flags; -+ -+/* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */ -+/* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */ -+}; -+typedef struct xen_mc_notifydomain xen_mc_notifydomain_t; -+DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t); -+ -+#define XEN_MC_physcpuinfo 3 -+struct xen_mc_physcpuinfo { -+ /* IN/OUT */ -+ uint32_t ncpus; -+ uint32_t _pad0; -+ /* OUT */ -+ XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info; -+}; -+ -+#define XEN_MC_msrinject 4 -+#define MC_MSRINJ_MAXMSRS 8 -+struct xen_mc_msrinject { -+ /* IN */ -+ uint32_t mcinj_cpunr; /* target processor id */ -+ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */ -+ uint32_t mcinj_count; /* 0 .. 
count-1 in array are valid */ -+ uint32_t _pad0; -+ struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS]; -+}; -+ -+/* Flags for mcinj_flags above; bits 16-31 are reserved */ -+#define MC_MSRINJ_F_INTERPOSE 0x1 -+ -+#define XEN_MC_mceinject 5 -+struct xen_mc_mceinject { -+ unsigned int mceinj_cpunr; /* target processor id */ -+}; -+ -+#if defined(__XEN__) || defined(__XEN_TOOLS__) -+#define XEN_MC_inject_v2 6 -+#define XEN_MC_INJECT_TYPE_MASK 0x7 -+#define XEN_MC_INJECT_TYPE_MCE 0x0 -+#define XEN_MC_INJECT_TYPE_CMCI 0x1 -+ -+#define XEN_MC_INJECT_CPU_BROADCAST 0x8 -+ -+struct xen_mc_inject_v2 { -+ uint32_t flags; -+ struct xenctl_cpumap cpumap; -+}; -+#endif -+ -+struct xen_mc { -+ uint32_t cmd; -+ uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ -+ union { -+ struct xen_mc_fetch mc_fetch; -+ struct xen_mc_notifydomain mc_notifydomain; -+ struct xen_mc_physcpuinfo mc_physcpuinfo; -+ struct xen_mc_msrinject mc_msrinject; -+ struct xen_mc_mceinject mc_mceinject; -+#if defined(__XEN__) || defined(__XEN_TOOLS__) -+ struct xen_mc_inject_v2 mc_inject_v2; -+#endif -+ } u; -+}; -+typedef struct xen_mc xen_mc_t; -+DEFINE_XEN_GUEST_HANDLE(xen_mc_t); -+ -+#endif /* __ASSEMBLY__ */ -+ -+#endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/arch-x86/xen-x86_32.h 2011-03-17 13:50:24.000000000 +0100 -@@ -0,0 +1,180 @@ -+/****************************************************************************** -+ * xen-x86_32.h -+ * -+ * Guest OS interface to x86 32-bit Xen. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2004-2007, K A Fraser -+ */ -+ -+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ -+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ -+ -+/* -+ * Hypercall interface: -+ * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5) -+ * Output: %eax -+ * Access is via hypercall page (set up by guest loader or via a Xen MSR): -+ * call hypercall_page + hypercall-number * 32 -+ * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) -+ */ -+ -+#if __XEN_INTERFACE_VERSION__ < 0x00030203 -+/* -+ * Legacy hypercall interface: -+ * As above, except the entry sequence to the hypervisor is: -+ * mov $hypercall-number*32,%eax ; int $0x82 -+ */ -+#define TRAP_INSTR "int $0x82" -+#endif -+ -+/* -+ * These flat segments are in the Xen-private section of every GDT. 
Since these -+ * are also present in the initial GDT, many OSes will be able to avoid -+ * installing their own GDT. -+ */ -+#define FLAT_RING1_CS 0xe019 /* GDT index 259 */ -+#define FLAT_RING1_DS 0xe021 /* GDT index 260 */ -+#define FLAT_RING1_SS 0xe021 /* GDT index 260 */ -+#define FLAT_RING3_CS 0xe02b /* GDT index 261 */ -+#define FLAT_RING3_DS 0xe033 /* GDT index 262 */ -+#define FLAT_RING3_SS 0xe033 /* GDT index 262 */ -+ -+#define FLAT_KERNEL_CS FLAT_RING1_CS -+#define FLAT_KERNEL_DS FLAT_RING1_DS -+#define FLAT_KERNEL_SS FLAT_RING1_SS -+#define FLAT_USER_CS FLAT_RING3_CS -+#define FLAT_USER_DS FLAT_RING3_DS -+#define FLAT_USER_SS FLAT_RING3_SS -+ -+#define __HYPERVISOR_VIRT_START_PAE 0xF5800000 -+#define __MACH2PHYS_VIRT_START_PAE 0xF5800000 -+#define __MACH2PHYS_VIRT_END_PAE 0xF6800000 -+#define HYPERVISOR_VIRT_START_PAE \ -+ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE) -+#define MACH2PHYS_VIRT_START_PAE \ -+ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE) -+#define MACH2PHYS_VIRT_END_PAE \ -+ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE) -+ -+/* Non-PAE bounds are obsolete. 
*/ -+#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 -+#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 -+#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 -+#define HYPERVISOR_VIRT_START_NONPAE \ -+ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE) -+#define MACH2PHYS_VIRT_START_NONPAE \ -+ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE) -+#define MACH2PHYS_VIRT_END_NONPAE \ -+ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE) -+ -+#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE -+#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE -+#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE -+ -+#ifndef HYPERVISOR_VIRT_START -+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) -+#endif -+ -+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) -+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) -+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) -+#ifndef machine_to_phys_mapping -+#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) -+#endif -+ -+/* 32-/64-bit invariability for control interfaces (domctl/sysctl). 
*/ -+#if defined(__XEN__) || defined(__XEN_TOOLS__) -+#undef ___DEFINE_XEN_GUEST_HANDLE -+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ -+ typedef struct { type *p; } \ -+ __guest_handle_ ## name; \ -+ typedef struct { union { type *p; uint64_aligned_t q; }; } \ -+ __guest_handle_64_ ## name -+#undef set_xen_guest_handle_raw -+#define set_xen_guest_handle_raw(hnd, val) \ -+ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ -+ (hnd).p = val; \ -+ } while ( 0 ) -+#define uint64_aligned_t uint64_t __attribute__((aligned(8))) -+#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name -+#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) -+#endif -+ -+#ifndef __ASSEMBLY__ -+ -+struct cpu_user_regs { -+ uint32_t ebx; -+ uint32_t ecx; -+ uint32_t edx; -+ uint32_t esi; -+ uint32_t edi; -+ uint32_t ebp; -+ uint32_t eax; -+ uint16_t error_code; /* private */ -+ uint16_t entry_vector; /* private */ -+ uint32_t eip; -+ uint16_t cs; -+ uint8_t saved_upcall_mask; -+ uint8_t _pad0; -+ uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ -+ uint32_t esp; -+ uint16_t ss, _pad1; -+ uint16_t es, _pad2; -+ uint16_t ds, _pad3; -+ uint16_t fs, _pad4; -+ uint16_t gs, _pad5; -+}; -+typedef struct cpu_user_regs cpu_user_regs_t; -+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); -+ -+/* -+ * Page-directory addresses above 4GB do not fit into architectural %cr3. -+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests -+ * must use the following accessor macros to pack/unpack valid MFNs. 
-+ */ -+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) -+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) -+ -+struct arch_vcpu_info { -+ unsigned long cr2; -+ unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ -+}; -+typedef struct arch_vcpu_info arch_vcpu_info_t; -+ -+struct xen_callback { -+ unsigned long cs; -+ unsigned long eip; -+}; -+typedef struct xen_callback xen_callback_t; -+ -+#endif /* !__ASSEMBLY__ */ -+ -+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/arch-x86/xen-x86_64.h 2008-04-02 12:34:02.000000000 +0200 -@@ -0,0 +1,212 @@ -+/****************************************************************************** -+ * xen-x86_64.h -+ * -+ * Guest OS interface to x86 64-bit Xen. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2004-2006, K A Fraser -+ */ -+ -+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ -+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ -+ -+/* -+ * Hypercall interface: -+ * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5) -+ * Output: %rax -+ * Access is via hypercall page (set up by guest loader or via a Xen MSR): -+ * call hypercall_page + hypercall-number * 32 -+ * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) -+ */ -+ -+#if __XEN_INTERFACE_VERSION__ < 0x00030203 -+/* -+ * Legacy hypercall interface: -+ * As above, except the entry sequence to the hypervisor is: -+ * mov $hypercall-number*32,%eax ; syscall -+ * Clobbered: %rcx, %r11, argument registers (as above) -+ */ -+#define TRAP_INSTR "syscall" -+#endif -+ -+/* -+ * 64-bit segment selectors -+ * These flat segments are in the Xen-private section of every GDT. Since these -+ * are also present in the initial GDT, many OSes will be able to avoid -+ * installing their own GDT. 
-+ */ -+ -+#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ -+#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ -+#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ -+#define FLAT_RING3_DS64 0x0000 /* NULL selector */ -+#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ -+#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ -+ -+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64 -+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32 -+#define FLAT_KERNEL_DS FLAT_KERNEL_DS64 -+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64 -+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32 -+#define FLAT_KERNEL_CS FLAT_KERNEL_CS64 -+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64 -+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32 -+#define FLAT_KERNEL_SS FLAT_KERNEL_SS64 -+ -+#define FLAT_USER_DS64 FLAT_RING3_DS64 -+#define FLAT_USER_DS32 FLAT_RING3_DS32 -+#define FLAT_USER_DS FLAT_USER_DS64 -+#define FLAT_USER_CS64 FLAT_RING3_CS64 -+#define FLAT_USER_CS32 FLAT_RING3_CS32 -+#define FLAT_USER_CS FLAT_USER_CS64 -+#define FLAT_USER_SS64 FLAT_RING3_SS64 -+#define FLAT_USER_SS32 FLAT_RING3_SS32 -+#define FLAT_USER_SS FLAT_USER_SS64 -+ -+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000 -+#define __HYPERVISOR_VIRT_END 0xFFFF880000000000 -+#define __MACH2PHYS_VIRT_START 0xFFFF800000000000 -+#define __MACH2PHYS_VIRT_END 0xFFFF804000000000 -+ -+#ifndef HYPERVISOR_VIRT_START -+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) -+#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) -+#endif -+ -+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) -+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) -+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) -+#ifndef machine_to_phys_mapping -+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) -+#endif -+ -+/* -+ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) -+ * @which == SEGBASE_* ; @base == 64-bit base address -+ * Returns 0 on success. 
-+ */ -+#define SEGBASE_FS 0 -+#define SEGBASE_GS_USER 1 -+#define SEGBASE_GS_KERNEL 2 -+#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ -+ -+/* -+ * int HYPERVISOR_iret(void) -+ * All arguments are on the kernel stack, in the following format. -+ * Never returns if successful. Current kernel context is lost. -+ * The saved CS is mapped as follows: -+ * RING0 -> RING3 kernel mode. -+ * RING1 -> RING3 kernel mode. -+ * RING2 -> RING3 kernel mode. -+ * RING3 -> RING3 user mode. -+ * However RING0 indicates that the guest kernel should return to iteself -+ * directly with -+ * orb $3,1*8(%rsp) -+ * iretq -+ * If flags contains VGCF_in_syscall: -+ * Restore RAX, RIP, RFLAGS, RSP. -+ * Discard R11, RCX, CS, SS. -+ * Otherwise: -+ * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. -+ * All other registers are saved on hypercall entry and restored to user. -+ */ -+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */ -+#define _VGCF_in_syscall 8 -+#define VGCF_in_syscall (1<<_VGCF_in_syscall) -+#define VGCF_IN_SYSCALL VGCF_in_syscall -+ -+#ifndef __ASSEMBLY__ -+ -+struct iret_context { -+ /* Top of stack (%rsp at point of hypercall). */ -+ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; -+ /* Bottom of iret stack frame. */ -+}; -+ -+#if defined(__GNUC__) && !defined(__STRICT_ANSI__) -+/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ -+#define __DECL_REG(name) union { \ -+ uint64_t r ## name, e ## name; \ -+ uint32_t _e ## name; \ -+} -+#else -+/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). 
*/ -+#define __DECL_REG(name) uint64_t r ## name -+#endif -+ -+struct cpu_user_regs { -+ uint64_t r15; -+ uint64_t r14; -+ uint64_t r13; -+ uint64_t r12; -+ __DECL_REG(bp); -+ __DECL_REG(bx); -+ uint64_t r11; -+ uint64_t r10; -+ uint64_t r9; -+ uint64_t r8; -+ __DECL_REG(ax); -+ __DECL_REG(cx); -+ __DECL_REG(dx); -+ __DECL_REG(si); -+ __DECL_REG(di); -+ uint32_t error_code; /* private */ -+ uint32_t entry_vector; /* private */ -+ __DECL_REG(ip); -+ uint16_t cs, _pad0[1]; -+ uint8_t saved_upcall_mask; -+ uint8_t _pad1[3]; -+ __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ -+ __DECL_REG(sp); -+ uint16_t ss, _pad2[3]; -+ uint16_t es, _pad3[3]; -+ uint16_t ds, _pad4[3]; -+ uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ -+ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */ -+}; -+typedef struct cpu_user_regs cpu_user_regs_t; -+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); -+ -+#undef __DECL_REG -+ -+#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) -+#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) -+ -+struct arch_vcpu_info { -+ unsigned long cr2; -+ unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ -+}; -+typedef struct arch_vcpu_info arch_vcpu_info_t; -+ -+typedef unsigned long xen_callback_t; -+ -+#endif /* !__ASSEMBLY__ */ -+ -+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/arch-x86/xen.h 2011-03-17 13:50:24.000000000 +0100 -@@ -0,0 +1,201 @@ -+/****************************************************************************** -+ * arch-x86/xen.h -+ * -+ * Guest OS interface to x86 Xen. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2004-2006, K A Fraser -+ */ -+ -+#include "../xen.h" -+ -+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ -+#define __XEN_PUBLIC_ARCH_X86_XEN_H__ -+ -+/* Structural guest handles introduced in 0x00030201. 
*/ -+#if __XEN_INTERFACE_VERSION__ >= 0x00030201 -+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ -+ typedef struct { type *p; } __guest_handle_ ## name -+#else -+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ -+ typedef type * __guest_handle_ ## name -+#endif -+ -+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \ -+ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ -+ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) -+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) -+#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name -+#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) -+#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0) -+#ifdef __XEN_TOOLS__ -+#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) -+#endif -+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val) -+ -+#if defined(__i386__) -+#include "xen-x86_32.h" -+#elif defined(__x86_64__) -+#include "xen-x86_64.h" -+#endif -+ -+#ifndef __ASSEMBLY__ -+typedef unsigned long xen_pfn_t; -+#define PRI_xen_pfn "lx" -+#endif -+ -+/* -+ * SEGMENT DESCRIPTOR TABLES -+ */ -+/* -+ * A number of GDT entries are reserved by Xen. These are not situated at the -+ * start of the GDT because some stupid OSes export hard-coded selector values -+ * in their ABI. These hard-coded values are always near the start of the GDT, -+ * so Xen places itself out of the way, at the far end of the GDT. -+ */ -+#define FIRST_RESERVED_GDT_PAGE 14 -+#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) -+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) -+ -+/* Maximum number of virtual CPUs in legacy multi-processor guests. */ -+#define XEN_LEGACY_MAX_VCPUS 32 -+ -+#ifndef __ASSEMBLY__ -+ -+typedef unsigned long xen_ulong_t; -+ -+/* -+ * Send an array of these to HYPERVISOR_set_trap_table(). -+ * The privilege level specifies which modes may enter a trap via a software -+ * interrupt. 
On x86/64, since rings 1 and 2 are unavailable, we allocate -+ * privilege levels as follows: -+ * Level == 0: Noone may enter -+ * Level == 1: Kernel may enter -+ * Level == 2: Kernel may enter -+ * Level == 3: Everyone may enter -+ */ -+#define TI_GET_DPL(_ti) ((_ti)->flags & 3) -+#define TI_GET_IF(_ti) ((_ti)->flags & 4) -+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) -+#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) -+struct trap_info { -+ uint8_t vector; /* exception vector */ -+ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ -+ uint16_t cs; /* code selector */ -+ unsigned long address; /* code offset */ -+}; -+typedef struct trap_info trap_info_t; -+DEFINE_XEN_GUEST_HANDLE(trap_info_t); -+ -+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ -+ -+/* -+ * The following is all CPU context. Note that the fpu_ctxt block is filled -+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. -+ */ -+struct vcpu_guest_context { -+ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. 
*/ -+ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ -+#define VGCF_I387_VALID (1<<0) -+#define VGCF_IN_KERNEL (1<<2) -+#define _VGCF_i387_valid 0 -+#define VGCF_i387_valid (1<<_VGCF_i387_valid) -+#define _VGCF_in_kernel 2 -+#define VGCF_in_kernel (1<<_VGCF_in_kernel) -+#define _VGCF_failsafe_disables_events 3 -+#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) -+#define _VGCF_syscall_disables_events 4 -+#define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) -+#define _VGCF_online 5 -+#define VGCF_online (1<<_VGCF_online) -+ unsigned long flags; /* VGCF_* flags */ -+ struct cpu_user_regs user_regs; /* User-level CPU registers */ -+ struct trap_info trap_ctxt[256]; /* Virtual IDT */ -+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ -+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ -+ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ -+ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ -+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ -+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ -+#ifdef __i386__ -+ unsigned long event_callback_cs; /* CS:EIP of event callback */ -+ unsigned long event_callback_eip; -+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ -+ unsigned long failsafe_callback_eip; -+#else -+ unsigned long event_callback_eip; -+ unsigned long failsafe_callback_eip; -+#ifdef __XEN__ -+ union { -+ unsigned long syscall_callback_eip; -+ struct { -+ unsigned int event_callback_cs; /* compat CS of event cb */ -+ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ -+ }; -+ }; -+#else -+ unsigned long syscall_callback_eip; -+#endif -+#endif -+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ -+#ifdef __x86_64__ -+ /* Segment base addresses. 
*/ -+ uint64_t fs_base; -+ uint64_t gs_base_kernel; -+ uint64_t gs_base_user; -+#endif -+}; -+typedef struct vcpu_guest_context vcpu_guest_context_t; -+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); -+ -+struct arch_shared_info { -+ unsigned long max_pfn; /* max pfn that appears in table */ -+ /* Frame containing list of mfns containing list of mfns containing p2m. */ -+ xen_pfn_t pfn_to_mfn_frame_list_list; -+ unsigned long nmi_reason; -+ uint64_t pad[32]; -+}; -+typedef struct arch_shared_info arch_shared_info_t; -+ -+#endif /* !__ASSEMBLY__ */ -+ -+/* -+ * Prefix forces emulation of some non-trapping instructions. -+ * Currently only CPUID. -+ */ -+#ifdef __ASSEMBLY__ -+#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; -+#define XEN_CPUID XEN_EMULATE_PREFIX cpuid -+#else -+#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " -+#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" -+#endif -+ -+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/arch-x86_32.h 2007-06-12 13:14:19.000000000 +0200 -@@ -0,0 +1,27 @@ -+/****************************************************************************** -+ * arch-x86_32.h -+ * -+ * Guest OS interface to x86 32-bit Xen. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2004-2006, K A Fraser -+ */ -+ -+#include "arch-x86/xen.h" ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/arch-x86_64.h 2007-06-12 13:14:19.000000000 +0200 -@@ -0,0 +1,27 @@ -+/****************************************************************************** -+ * arch-x86_64.h -+ * -+ * Guest OS interface to x86 64-bit Xen. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2004-2006, K A Fraser -+ */ -+ -+#include "arch-x86/xen.h" ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/dom0_ops.h 2007-06-12 13:14:19.000000000 +0200 -@@ -0,0 +1,120 @@ -+/****************************************************************************** -+ * dom0_ops.h -+ * -+ * Process command requests from domain-0 guest OS. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. 
-+ * -+ * Copyright (c) 2002-2003, B Dragovic -+ * Copyright (c) 2002-2006, K Fraser -+ */ -+ -+#ifndef __XEN_PUBLIC_DOM0_OPS_H__ -+#define __XEN_PUBLIC_DOM0_OPS_H__ -+ -+#include "xen.h" -+#include "platform.h" -+ -+#if __XEN_INTERFACE_VERSION__ >= 0x00030204 -+#error "dom0_ops.h is a compatibility interface only" -+#endif -+ -+#define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION -+ -+#define DOM0_SETTIME XENPF_settime -+#define dom0_settime xenpf_settime -+#define dom0_settime_t xenpf_settime_t -+ -+#define DOM0_ADD_MEMTYPE XENPF_add_memtype -+#define dom0_add_memtype xenpf_add_memtype -+#define dom0_add_memtype_t xenpf_add_memtype_t -+ -+#define DOM0_DEL_MEMTYPE XENPF_del_memtype -+#define dom0_del_memtype xenpf_del_memtype -+#define dom0_del_memtype_t xenpf_del_memtype_t -+ -+#define DOM0_READ_MEMTYPE XENPF_read_memtype -+#define dom0_read_memtype xenpf_read_memtype -+#define dom0_read_memtype_t xenpf_read_memtype_t -+ -+#define DOM0_MICROCODE XENPF_microcode_update -+#define dom0_microcode xenpf_microcode_update -+#define dom0_microcode_t xenpf_microcode_update_t -+ -+#define DOM0_PLATFORM_QUIRK XENPF_platform_quirk -+#define dom0_platform_quirk xenpf_platform_quirk -+#define dom0_platform_quirk_t xenpf_platform_quirk_t -+ -+typedef uint64_t cpumap_t; -+ -+/* Unsupported legacy operation -- defined for API compatibility. */ -+#define DOM0_MSR 15 -+struct dom0_msr { -+ /* IN variables. */ -+ uint32_t write; -+ cpumap_t cpu_mask; -+ uint32_t msr; -+ uint32_t in1; -+ uint32_t in2; -+ /* OUT variables. */ -+ uint32_t out1; -+ uint32_t out2; -+}; -+typedef struct dom0_msr dom0_msr_t; -+DEFINE_XEN_GUEST_HANDLE(dom0_msr_t); -+ -+/* Unsupported legacy operation -- defined for API compatibility. 
*/ -+#define DOM0_PHYSICAL_MEMORY_MAP 40 -+struct dom0_memory_map_entry { -+ uint64_t start, end; -+ uint32_t flags; /* reserved */ -+ uint8_t is_ram; -+}; -+typedef struct dom0_memory_map_entry dom0_memory_map_entry_t; -+DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t); -+ -+struct dom0_op { -+ uint32_t cmd; -+ uint32_t interface_version; /* DOM0_INTERFACE_VERSION */ -+ union { -+ struct dom0_msr msr; -+ struct dom0_settime settime; -+ struct dom0_add_memtype add_memtype; -+ struct dom0_del_memtype del_memtype; -+ struct dom0_read_memtype read_memtype; -+ struct dom0_microcode microcode; -+ struct dom0_platform_quirk platform_quirk; -+ struct dom0_memory_map_entry physical_memory_map; -+ uint8_t pad[128]; -+ } u; -+}; -+typedef struct dom0_op dom0_op_t; -+DEFINE_XEN_GUEST_HANDLE(dom0_op_t); -+ -+#endif /* __XEN_PUBLIC_DOM0_OPS_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/domctl.h 2011-03-17 13:50:24.000000000 +0100 -@@ -0,0 +1,968 @@ -+/****************************************************************************** -+ * domctl.h -+ * -+ * Domain management operations. For use by node control stack. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. 
-+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2002-2003, B Dragovic -+ * Copyright (c) 2002-2006, K Fraser -+ */ -+ -+#ifndef __XEN_PUBLIC_DOMCTL_H__ -+#define __XEN_PUBLIC_DOMCTL_H__ -+ -+#if !defined(__XEN__) && !defined(__XEN_TOOLS__) -+#error "domctl operations are intended for use by node control tools only" -+#endif -+ -+#include "xen.h" -+#include "grant_table.h" -+ -+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000007 -+ -+/* -+ * NB. xen_domctl.domain is an IN/OUT parameter for this operation. -+ * If it is specified as zero, an id is auto-allocated and returned. -+ */ -+/* XEN_DOMCTL_createdomain */ -+struct xen_domctl_createdomain { -+ /* IN parameters */ -+ uint32_t ssidref; -+ xen_domain_handle_t handle; -+ /* Is this an HVM guest (as opposed to a PV guest)? */ -+#define _XEN_DOMCTL_CDF_hvm_guest 0 -+#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest) -+ /* Use hardware-assisted paging if available? */ -+#define _XEN_DOMCTL_CDF_hap 1 -+#define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap) -+ /* Should domain memory integrity be verifed by tboot during Sx? */ -+#define _XEN_DOMCTL_CDF_s3_integrity 2 -+#define XEN_DOMCTL_CDF_s3_integrity (1U<<_XEN_DOMCTL_CDF_s3_integrity) -+ /* Disable out-of-sync shadow page tables? 
*/ -+#define _XEN_DOMCTL_CDF_oos_off 3 -+#define XEN_DOMCTL_CDF_oos_off (1U<<_XEN_DOMCTL_CDF_oos_off) -+ uint32_t flags; -+}; -+typedef struct xen_domctl_createdomain xen_domctl_createdomain_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t); -+ -+/* XEN_DOMCTL_getdomaininfo */ -+struct xen_domctl_getdomaininfo { -+ /* OUT variables. */ -+ domid_t domain; /* Also echoed in domctl.domain */ -+ /* Domain is scheduled to die. */ -+#define _XEN_DOMINF_dying 0 -+#define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying) -+ /* Domain is an HVM guest (as opposed to a PV guest). */ -+#define _XEN_DOMINF_hvm_guest 1 -+#define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest) -+ /* The guest OS has shut down. */ -+#define _XEN_DOMINF_shutdown 2 -+#define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown) -+ /* Currently paused by control software. */ -+#define _XEN_DOMINF_paused 3 -+#define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused) -+ /* Currently blocked pending an event. */ -+#define _XEN_DOMINF_blocked 4 -+#define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked) -+ /* Domain is currently running. */ -+#define _XEN_DOMINF_running 5 -+#define XEN_DOMINF_running (1U<<_XEN_DOMINF_running) -+ /* Being debugged. */ -+#define _XEN_DOMINF_debugged 6 -+#define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged) -+ /* XEN_DOMINF_shutdown guest-supplied code. */ -+#define XEN_DOMINF_shutdownmask 255 -+#define XEN_DOMINF_shutdownshift 16 -+ uint32_t flags; /* XEN_DOMINF_* */ -+ uint64_aligned_t tot_pages; -+ uint64_aligned_t max_pages; -+ uint64_aligned_t shr_pages; -+ uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */ -+ uint64_aligned_t cpu_time; -+ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */ -+ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. 
*/ -+ uint32_t ssidref; -+ xen_domain_handle_t handle; -+ uint32_t cpupool; -+}; -+typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t); -+ -+ -+/* XEN_DOMCTL_getmemlist */ -+struct xen_domctl_getmemlist { -+ /* IN variables. */ -+ /* Max entries to write to output buffer. */ -+ uint64_aligned_t max_pfns; -+ /* Start index in guest's page list. */ -+ uint64_aligned_t start_pfn; -+ XEN_GUEST_HANDLE_64(uint64) buffer; -+ /* OUT variables. */ -+ uint64_aligned_t num_pfns; -+}; -+typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t); -+ -+ -+/* XEN_DOMCTL_getpageframeinfo */ -+ -+#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28 -+#define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28) -+#define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28) -+#define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28) -+#define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28) -+#define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28) -+#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28) -+#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31) -+#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */ -+#define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28) -+#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28) -+ -+struct xen_domctl_getpageframeinfo { -+ /* IN variables. */ -+ uint64_aligned_t gmfn; /* GMFN to query */ -+ /* OUT variables. */ -+ /* Is the page PINNED to a type? */ -+ uint32_t type; /* see above type defs */ -+}; -+typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t); -+ -+ -+/* XEN_DOMCTL_getpageframeinfo2 */ -+struct xen_domctl_getpageframeinfo2 { -+ /* IN variables. */ -+ uint64_aligned_t num; -+ /* IN/OUT variables. 
*/ -+ XEN_GUEST_HANDLE_64(uint32) array; -+}; -+typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t); -+ -+/* XEN_DOMCTL_getpageframeinfo3 */ -+struct xen_domctl_getpageframeinfo3 { -+ /* IN variables. */ -+ uint64_aligned_t num; -+ /* IN/OUT variables. */ -+ XEN_GUEST_HANDLE_64(xen_pfn_t) array; -+}; -+ -+ -+/* -+ * Control shadow pagetables operation -+ */ -+/* XEN_DOMCTL_shadow_op */ -+ -+/* Disable shadow mode. */ -+#define XEN_DOMCTL_SHADOW_OP_OFF 0 -+ -+/* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */ -+#define XEN_DOMCTL_SHADOW_OP_ENABLE 32 -+ -+/* Log-dirty bitmap operations. */ -+ /* Return the bitmap and clean internal copy for next round. */ -+#define XEN_DOMCTL_SHADOW_OP_CLEAN 11 -+ /* Return the bitmap but do not modify internal copy. */ -+#define XEN_DOMCTL_SHADOW_OP_PEEK 12 -+ -+/* Memory allocation accessors. */ -+#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30 -+#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31 -+ -+/* Legacy enable operations. */ -+ /* Equiv. to ENABLE with no mode flags. */ -+#define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1 -+ /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */ -+#define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2 -+ /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */ -+#define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3 -+ -+/* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */ -+ /* -+ * Shadow pagetables are refcounted: guest does not use explicit mmu -+ * operations nor write-protect its pagetables. -+ */ -+#define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1) -+ /* -+ * Log pages in a bitmap as they are dirtied. -+ * Used for live relocation to determine which pages must be re-sent. -+ */ -+#define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2) -+ /* -+ * Automatically translate GPFNs into MFNs. 
-+ */ -+#define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3) -+ /* -+ * Xen does not steal virtual address space from the guest. -+ * Requires HVM support. -+ */ -+#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4) -+ -+struct xen_domctl_shadow_op_stats { -+ uint32_t fault_count; -+ uint32_t dirty_count; -+}; -+typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t); -+ -+struct xen_domctl_shadow_op { -+ /* IN variables. */ -+ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */ -+ -+ /* OP_ENABLE */ -+ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */ -+ -+ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */ -+ uint32_t mb; /* Shadow memory allocation in MB */ -+ -+ /* OP_PEEK / OP_CLEAN */ -+ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; -+ uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */ -+ struct xen_domctl_shadow_op_stats stats; -+}; -+typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t); -+ -+ -+/* XEN_DOMCTL_max_mem */ -+struct xen_domctl_max_mem { -+ /* IN variables. */ -+ uint64_aligned_t max_memkb; -+}; -+typedef struct xen_domctl_max_mem xen_domctl_max_mem_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t); -+ -+ -+/* XEN_DOMCTL_setvcpucontext */ -+/* XEN_DOMCTL_getvcpucontext */ -+struct xen_domctl_vcpucontext { -+ uint32_t vcpu; /* IN */ -+ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */ -+}; -+typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t); -+ -+ -+/* XEN_DOMCTL_getvcpuinfo */ -+struct xen_domctl_getvcpuinfo { -+ /* IN variables. */ -+ uint32_t vcpu; -+ /* OUT variables. */ -+ uint8_t online; /* currently online (not hotplugged)? */ -+ uint8_t blocked; /* blocked waiting for an event? */ -+ uint8_t running; /* currently scheduled on its CPU? 
*/ -+ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */ -+ uint32_t cpu; /* current mapping */ -+}; -+typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t); -+ -+ -+/* Get/set which physical cpus a vcpu can execute on. */ -+/* XEN_DOMCTL_setvcpuaffinity */ -+/* XEN_DOMCTL_getvcpuaffinity */ -+struct xen_domctl_vcpuaffinity { -+ uint32_t vcpu; /* IN */ -+ struct xenctl_cpumap cpumap; /* IN/OUT */ -+}; -+typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t); -+ -+ -+/* XEN_DOMCTL_max_vcpus */ -+struct xen_domctl_max_vcpus { -+ uint32_t max; /* maximum number of vcpus */ -+}; -+typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t); -+ -+ -+/* XEN_DOMCTL_scheduler_op */ -+/* Scheduler types. */ -+#define XEN_SCHEDULER_SEDF 4 -+#define XEN_SCHEDULER_CREDIT 5 -+#define XEN_SCHEDULER_CREDIT2 6 -+#define XEN_SCHEDULER_ARINC653 7 -+/* Set or get info? 
*/ -+#define XEN_DOMCTL_SCHEDOP_putinfo 0 -+#define XEN_DOMCTL_SCHEDOP_getinfo 1 -+struct xen_domctl_scheduler_op { -+ uint32_t sched_id; /* XEN_SCHEDULER_* */ -+ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */ -+ union { -+ struct xen_domctl_sched_sedf { -+ uint64_aligned_t period; -+ uint64_aligned_t slice; -+ uint64_aligned_t latency; -+ uint32_t extratime; -+ uint32_t weight; -+ } sedf; -+ struct xen_domctl_sched_credit { -+ uint16_t weight; -+ uint16_t cap; -+ } credit; -+ struct xen_domctl_sched_credit2 { -+ uint16_t weight; -+ } credit2; -+ } u; -+}; -+typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t); -+ -+ -+/* XEN_DOMCTL_setdomainhandle */ -+struct xen_domctl_setdomainhandle { -+ xen_domain_handle_t handle; -+}; -+typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t); -+ -+ -+/* XEN_DOMCTL_setdebugging */ -+struct xen_domctl_setdebugging { -+ uint8_t enable; -+}; -+typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t); -+ -+ -+/* XEN_DOMCTL_irq_permission */ -+struct xen_domctl_irq_permission { -+ uint8_t pirq; -+ uint8_t allow_access; /* flag to specify enable/disable of IRQ access */ -+}; -+typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t); -+ -+ -+/* XEN_DOMCTL_iomem_permission */ -+struct xen_domctl_iomem_permission { -+ uint64_aligned_t first_mfn;/* first page (physical page number) in range */ -+ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ -+ uint8_t allow_access; /* allow (!0) or deny (0) access to range? 
*/ -+}; -+typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t); -+ -+ -+/* XEN_DOMCTL_ioport_permission */ -+struct xen_domctl_ioport_permission { -+ uint32_t first_port; /* first port int range */ -+ uint32_t nr_ports; /* size of port range */ -+ uint8_t allow_access; /* allow or deny access to range? */ -+}; -+typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t); -+ -+ -+/* XEN_DOMCTL_hypercall_init */ -+struct xen_domctl_hypercall_init { -+ uint64_aligned_t gmfn; /* GMFN to be initialised */ -+}; -+typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t); -+ -+ -+/* XEN_DOMCTL_arch_setup */ -+#define _XEN_DOMAINSETUP_hvm_guest 0 -+#define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest) -+#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */ -+#define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query) -+#define _XEN_DOMAINSETUP_sioemu_guest 2 -+#define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest) -+typedef struct xen_domctl_arch_setup { -+ uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */ -+#ifdef __ia64__ -+ uint64_aligned_t bp; /* mpaddr of boot param area */ -+ uint64_aligned_t maxmem; /* Highest memory address for MDT. */ -+ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */ -+ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ -+ int8_t vhpt_size_log2; /* Log2 of VHPT size. 
*/ -+#endif -+} xen_domctl_arch_setup_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t); -+ -+ -+/* XEN_DOMCTL_settimeoffset */ -+struct xen_domctl_settimeoffset { -+ int32_t time_offset_seconds; /* applied to domain wallclock time */ -+}; -+typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t); -+ -+/* XEN_DOMCTL_gethvmcontext */ -+/* XEN_DOMCTL_sethvmcontext */ -+typedef struct xen_domctl_hvmcontext { -+ uint32_t size; /* IN/OUT: size of buffer / bytes filled */ -+ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call -+ * gethvmcontext with NULL -+ * buffer to get size req'd */ -+} xen_domctl_hvmcontext_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t); -+ -+ -+/* XEN_DOMCTL_set_address_size */ -+/* XEN_DOMCTL_get_address_size */ -+typedef struct xen_domctl_address_size { -+ uint32_t size; -+} xen_domctl_address_size_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t); -+ -+ -+/* XEN_DOMCTL_real_mode_area */ -+struct xen_domctl_real_mode_area { -+ uint32_t log; /* log2 of Real Mode Area size */ -+}; -+typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t); -+ -+ -+/* XEN_DOMCTL_sendtrigger */ -+#define XEN_DOMCTL_SENDTRIGGER_NMI 0 -+#define XEN_DOMCTL_SENDTRIGGER_RESET 1 -+#define XEN_DOMCTL_SENDTRIGGER_INIT 2 -+#define XEN_DOMCTL_SENDTRIGGER_POWER 3 -+#define XEN_DOMCTL_SENDTRIGGER_SLEEP 4 -+struct xen_domctl_sendtrigger { -+ uint32_t trigger; /* IN */ -+ uint32_t vcpu; /* IN */ -+}; -+typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t); -+ -+ -+/* Assign PCI device to HVM guest. Sets up IOMMU structures. 
*/ -+/* XEN_DOMCTL_assign_device */ -+/* XEN_DOMCTL_test_assign_device */ -+/* XEN_DOMCTL_deassign_device */ -+struct xen_domctl_assign_device { -+ uint32_t machine_bdf; /* machine PCI ID of assigned device */ -+}; -+typedef struct xen_domctl_assign_device xen_domctl_assign_device_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t); -+ -+/* Retrieve sibling devices infomation of machine_bdf */ -+/* XEN_DOMCTL_get_device_group */ -+struct xen_domctl_get_device_group { -+ uint32_t machine_bdf; /* IN */ -+ uint32_t max_sdevs; /* IN */ -+ uint32_t num_sdevs; /* OUT */ -+ XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */ -+}; -+typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t); -+ -+/* Pass-through interrupts: bind real irq -> hvm devfn. */ -+/* XEN_DOMCTL_bind_pt_irq */ -+/* XEN_DOMCTL_unbind_pt_irq */ -+typedef enum pt_irq_type_e { -+ PT_IRQ_TYPE_PCI, -+ PT_IRQ_TYPE_ISA, -+ PT_IRQ_TYPE_MSI, -+ PT_IRQ_TYPE_MSI_TRANSLATE, -+} pt_irq_type_t; -+struct xen_domctl_bind_pt_irq { -+ uint32_t machine_irq; -+ pt_irq_type_t irq_type; -+ uint32_t hvm_domid; -+ -+ union { -+ struct { -+ uint8_t isa_irq; -+ } isa; -+ struct { -+ uint8_t bus; -+ uint8_t device; -+ uint8_t intx; -+ } pci; -+ struct { -+ uint8_t gvec; -+ uint32_t gflags; -+ uint64_aligned_t gtable; -+ } msi; -+ } u; -+}; -+typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t); -+ -+ -+/* Bind machine I/O address range -> HVM address range. 
*/ -+/* XEN_DOMCTL_memory_mapping */ -+#define DPCI_ADD_MAPPING 1 -+#define DPCI_REMOVE_MAPPING 0 -+struct xen_domctl_memory_mapping { -+ uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */ -+ uint64_aligned_t first_mfn; /* first page (machine page) in range */ -+ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ -+ uint32_t add_mapping; /* add or remove mapping */ -+ uint32_t padding; /* padding for 64-bit aligned structure */ -+}; -+typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t); -+ -+ -+/* Bind machine I/O port range -> HVM I/O port range. */ -+/* XEN_DOMCTL_ioport_mapping */ -+struct xen_domctl_ioport_mapping { -+ uint32_t first_gport; /* first guest IO port*/ -+ uint32_t first_mport; /* first machine IO port */ -+ uint32_t nr_ports; /* size of port range */ -+ uint32_t add_mapping; /* add or remove mapping */ -+}; -+typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t); -+ -+ -+/* -+ * Pin caching type of RAM space for x86 HVM domU. -+ */ -+/* XEN_DOMCTL_pin_mem_cacheattr */ -+/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */ -+#define XEN_DOMCTL_MEM_CACHEATTR_UC 0 -+#define XEN_DOMCTL_MEM_CACHEATTR_WC 1 -+#define XEN_DOMCTL_MEM_CACHEATTR_WT 4 -+#define XEN_DOMCTL_MEM_CACHEATTR_WP 5 -+#define XEN_DOMCTL_MEM_CACHEATTR_WB 6 -+#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7 -+struct xen_domctl_pin_mem_cacheattr { -+ uint64_aligned_t start, end; -+ uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */ -+}; -+typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t); -+ -+ -+/* XEN_DOMCTL_set_ext_vcpucontext */ -+/* XEN_DOMCTL_get_ext_vcpucontext */ -+struct xen_domctl_ext_vcpucontext { -+ /* IN: VCPU that this call applies to. 
*/ -+ uint32_t vcpu; -+ /* -+ * SET: Size of struct (IN) -+ * GET: Size of struct (OUT) -+ */ -+ uint32_t size; -+#if defined(__i386__) || defined(__x86_64__) -+ /* SYSCALL from 32-bit mode and SYSENTER callback information. */ -+ /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */ -+ uint64_aligned_t syscall32_callback_eip; -+ uint64_aligned_t sysenter_callback_eip; -+ uint16_t syscall32_callback_cs; -+ uint16_t sysenter_callback_cs; -+ uint8_t syscall32_disables_events; -+ uint8_t sysenter_disables_events; -+#endif -+}; -+typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t); -+ -+/* -+ * Set optimizaton features for a domain -+ */ -+/* XEN_DOMCTL_set_opt_feature */ -+struct xen_domctl_set_opt_feature { -+#if defined(__ia64__) -+ struct xen_ia64_opt_feature optf; -+#else -+ /* Make struct non-empty: do not depend on this field name! */ -+ uint64_t dummy; -+#endif -+}; -+typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t); -+ -+/* -+ * Set the target domain for a domain -+ */ -+/* XEN_DOMCTL_set_target */ -+struct xen_domctl_set_target { -+ domid_t target; -+}; -+typedef struct xen_domctl_set_target xen_domctl_set_target_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t); -+ -+#if defined(__i386__) || defined(__x86_64__) -+# define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF -+/* XEN_DOMCTL_set_cpuid */ -+struct xen_domctl_cpuid { -+ uint32_t input[2]; -+ uint32_t eax; -+ uint32_t ebx; -+ uint32_t ecx; -+ uint32_t edx; -+}; -+typedef struct xen_domctl_cpuid xen_domctl_cpuid_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t); -+#endif -+ -+/* XEN_DOMCTL_subscribe */ -+struct xen_domctl_subscribe { -+ uint32_t port; /* IN */ -+}; -+typedef struct xen_domctl_subscribe xen_domctl_subscribe_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t); -+ -+/* -+ * Define the maximum machine address size which should be 
allocated -+ * to a guest. -+ */ -+/* XEN_DOMCTL_set_machine_address_size */ -+/* XEN_DOMCTL_get_machine_address_size */ -+ -+/* -+ * Do not inject spurious page faults into this domain. -+ */ -+/* XEN_DOMCTL_suppress_spurious_page_faults */ -+ -+/* XEN_DOMCTL_debug_op */ -+#define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF 0 -+#define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON 1 -+struct xen_domctl_debug_op { -+ uint32_t op; /* IN */ -+ uint32_t vcpu; /* IN */ -+}; -+typedef struct xen_domctl_debug_op xen_domctl_debug_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t); -+ -+/* -+ * Request a particular record from the HVM context -+ */ -+/* XEN_DOMCTL_gethvmcontext_partial */ -+typedef struct xen_domctl_hvmcontext_partial { -+ uint32_t type; /* IN: Type of record required */ -+ uint32_t instance; /* IN: Instance of that type */ -+ XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */ -+} xen_domctl_hvmcontext_partial_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t); -+ -+/* XEN_DOMCTL_disable_migrate */ -+typedef struct xen_domctl_disable_migrate { -+ uint32_t disable; /* IN: 1: disable migration and restore */ -+} xen_domctl_disable_migrate_t; -+ -+ -+/* XEN_DOMCTL_gettscinfo */ -+/* XEN_DOMCTL_settscinfo */ -+struct xen_guest_tsc_info { -+ uint32_t tsc_mode; -+ uint32_t gtsc_khz; -+ uint32_t incarnation; -+ uint32_t pad; -+ uint64_aligned_t elapsed_nsec; -+}; -+typedef struct xen_guest_tsc_info xen_guest_tsc_info_t; -+DEFINE_XEN_GUEST_HANDLE(xen_guest_tsc_info_t); -+typedef struct xen_domctl_tsc_info { -+ XEN_GUEST_HANDLE_64(xen_guest_tsc_info_t) out_info; /* OUT */ -+ xen_guest_tsc_info_t info; /* IN */ -+} xen_domctl_tsc_info_t; -+ -+/* XEN_DOMCTL_gdbsx_guestmemio guest mem io */ -+struct xen_domctl_gdbsx_memio { -+ /* IN */ -+ uint64_aligned_t pgd3val;/* optional: init_mm.pgd[3] value */ -+ uint64_aligned_t gva; /* guest virtual address */ -+ uint64_aligned_t uva; /* user buffer virtual address */ -+ uint32_t len; /* number of bytes to 
read/write */ -+ uint8_t gwr; /* 0 = read from guest. 1 = write to guest */ -+ /* OUT */ -+ uint32_t remain; /* bytes remaining to be copied */ -+}; -+ -+/* XEN_DOMCTL_gdbsx_pausevcpu */ -+/* XEN_DOMCTL_gdbsx_unpausevcpu */ -+struct xen_domctl_gdbsx_pauseunp_vcpu { /* pause/unpause a vcpu */ -+ uint32_t vcpu; /* which vcpu */ -+}; -+ -+/* XEN_DOMCTL_gdbsx_domstatus */ -+struct xen_domctl_gdbsx_domstatus { -+ /* OUT */ -+ uint8_t paused; /* is the domain paused */ -+ uint32_t vcpu_id; /* any vcpu in an event? */ -+ uint32_t vcpu_ev; /* if yes, what event? */ -+}; -+ -+/* -+ * Memory event operations -+ */ -+ -+/* XEN_DOMCTL_mem_event_op */ -+ -+/* Add and remove memory handlers */ -+#define XEN_DOMCTL_MEM_EVENT_OP_ENABLE 0 -+#define XEN_DOMCTL_MEM_EVENT_OP_DISABLE 1 -+ -+/* -+ * Page memory in and out. -+ */ -+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1 -+ -+/* Domain memory paging */ -+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 0 -+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT 1 -+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 2 -+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 3 -+ -+/* -+ * Access permissions. -+ * -+ * There are HVM hypercalls to set the per-page access permissions of every -+ * page in a domain. When one of these permissions--independent, read, -+ * write, and execute--is violated, the VCPU is paused and a memory event -+ * is sent with what happened. (See public/mem_event.h) The memory event -+ * handler can then resume the VCPU and redo the access with an -+ * ACCESS_RESUME mode for the following domctl. 
-+ */ -+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2 -+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME 0 -+ -+struct xen_domctl_mem_event_op { -+ uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */ -+ uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */ -+ -+ /* OP_ENABLE */ -+ uint64_aligned_t shared_addr; /* IN: Virtual address of shared page */ -+ uint64_aligned_t ring_addr; /* IN: Virtual address of ring page */ -+ -+ /* Other OPs */ -+ uint64_aligned_t gfn; /* IN: gfn of page being operated on */ -+}; -+typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t); -+ -+/* -+ * Memory sharing operations -+ */ -+/* XEN_DOMCTL_mem_sharing_op */ -+ -+#define XEN_DOMCTL_MEM_SHARING_OP_CONTROL 0 -+#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN 1 -+#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF 2 -+#define XEN_DOMCTL_MEM_SHARING_OP_SHARE 3 -+#define XEN_DOMCTL_MEM_SHARING_OP_RESUME 4 -+#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN 5 -+#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN 6 -+#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF 7 -+ -+#define XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID (-10) -+#define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID (-9) -+ -+struct xen_domctl_mem_sharing_op { -+ uint8_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */ -+ -+ union { -+ uint8_t enable; /* OP_CONTROL */ -+ -+ struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */ -+ union { -+ uint64_aligned_t gfn; /* IN: gfn to nominate */ -+ uint32_t grant_ref; /* IN: grant ref to nominate */ -+ } u; -+ uint64_aligned_t handle; /* OUT: the handle */ -+ } nominate; -+ struct mem_sharing_op_share { /* OP_SHARE */ -+ uint64_aligned_t source_handle; /* IN: handle to the source page */ -+ uint64_aligned_t client_handle; /* IN: handle to the client page */ -+ } share; -+ struct mem_sharing_op_debug { /* OP_DEBUG_xxx */ -+ union { -+ uint64_aligned_t gfn; /* IN: gfn to debug */ -+ uint64_aligned_t mfn; /* IN: mfn to debug */ -+ grant_ref_t gref; /* IN: gref to debug */ -+ } u; 
-+ } debug; -+ } u; -+}; -+typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t); -+ -+#if defined(__i386__) || defined(__x86_64__) -+/* XEN_DOMCTL_setvcpuextstate */ -+/* XEN_DOMCTL_getvcpuextstate */ -+struct xen_domctl_vcpuextstate { -+ /* IN: VCPU that this call applies to. */ -+ uint32_t vcpu; -+ /* -+ * SET: xfeature support mask of struct (IN) -+ * GET: xfeature support mask of struct (IN/OUT) -+ * xfeature mask is served as identifications of the saving format -+ * so that compatible CPUs can have a check on format to decide -+ * whether it can restore. -+ */ -+ uint64_aligned_t xfeature_mask; -+ /* -+ * SET: Size of struct (IN) -+ * GET: Size of struct (IN/OUT) -+ */ -+ uint64_aligned_t size; -+ XEN_GUEST_HANDLE_64(uint64) buffer; -+}; -+typedef struct xen_domctl_vcpuextstate xen_domctl_vcpuextstate_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuextstate_t); -+#endif -+ -+/* XEN_DOMCTL_set_access_required: sets whether a memory event listener -+ * must be present to handle page access events: if false, the page -+ * access will revert to full permissions if no one is listening; -+ * */ -+struct xen_domctl_set_access_required { -+ uint8_t access_required; -+}; -+typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t); -+ -+struct xen_domctl { -+ uint32_t cmd; -+#define XEN_DOMCTL_createdomain 1 -+#define XEN_DOMCTL_destroydomain 2 -+#define XEN_DOMCTL_pausedomain 3 -+#define XEN_DOMCTL_unpausedomain 4 -+#define XEN_DOMCTL_getdomaininfo 5 -+#define XEN_DOMCTL_getmemlist 6 -+#define XEN_DOMCTL_getpageframeinfo 7 -+#define XEN_DOMCTL_getpageframeinfo2 8 -+#define XEN_DOMCTL_setvcpuaffinity 9 -+#define XEN_DOMCTL_shadow_op 10 -+#define XEN_DOMCTL_max_mem 11 -+#define XEN_DOMCTL_setvcpucontext 12 -+#define XEN_DOMCTL_getvcpucontext 13 -+#define XEN_DOMCTL_getvcpuinfo 14 -+#define XEN_DOMCTL_max_vcpus 15 -+#define 
XEN_DOMCTL_scheduler_op 16 -+#define XEN_DOMCTL_setdomainhandle 17 -+#define XEN_DOMCTL_setdebugging 18 -+#define XEN_DOMCTL_irq_permission 19 -+#define XEN_DOMCTL_iomem_permission 20 -+#define XEN_DOMCTL_ioport_permission 21 -+#define XEN_DOMCTL_hypercall_init 22 -+#define XEN_DOMCTL_arch_setup 23 -+#define XEN_DOMCTL_settimeoffset 24 -+#define XEN_DOMCTL_getvcpuaffinity 25 -+#define XEN_DOMCTL_real_mode_area 26 -+#define XEN_DOMCTL_resumedomain 27 -+#define XEN_DOMCTL_sendtrigger 28 -+#define XEN_DOMCTL_subscribe 29 -+#define XEN_DOMCTL_gethvmcontext 33 -+#define XEN_DOMCTL_sethvmcontext 34 -+#define XEN_DOMCTL_set_address_size 35 -+#define XEN_DOMCTL_get_address_size 36 -+#define XEN_DOMCTL_assign_device 37 -+#define XEN_DOMCTL_bind_pt_irq 38 -+#define XEN_DOMCTL_memory_mapping 39 -+#define XEN_DOMCTL_ioport_mapping 40 -+#define XEN_DOMCTL_pin_mem_cacheattr 41 -+#define XEN_DOMCTL_set_ext_vcpucontext 42 -+#define XEN_DOMCTL_get_ext_vcpucontext 43 -+#define XEN_DOMCTL_set_opt_feature 44 -+#define XEN_DOMCTL_test_assign_device 45 -+#define XEN_DOMCTL_set_target 46 -+#define XEN_DOMCTL_deassign_device 47 -+#define XEN_DOMCTL_unbind_pt_irq 48 -+#define XEN_DOMCTL_set_cpuid 49 -+#define XEN_DOMCTL_get_device_group 50 -+#define XEN_DOMCTL_set_machine_address_size 51 -+#define XEN_DOMCTL_get_machine_address_size 52 -+#define XEN_DOMCTL_suppress_spurious_page_faults 53 -+#define XEN_DOMCTL_debug_op 54 -+#define XEN_DOMCTL_gethvmcontext_partial 55 -+#define XEN_DOMCTL_mem_event_op 56 -+#define XEN_DOMCTL_mem_sharing_op 57 -+#define XEN_DOMCTL_disable_migrate 58 -+#define XEN_DOMCTL_gettscinfo 59 -+#define XEN_DOMCTL_settscinfo 60 -+#define XEN_DOMCTL_getpageframeinfo3 61 -+#define XEN_DOMCTL_setvcpuextstate 62 -+#define XEN_DOMCTL_getvcpuextstate 63 -+#define XEN_DOMCTL_set_access_required 64 -+#define XEN_DOMCTL_gdbsx_guestmemio 1000 -+#define XEN_DOMCTL_gdbsx_pausevcpu 1001 -+#define XEN_DOMCTL_gdbsx_unpausevcpu 1002 -+#define XEN_DOMCTL_gdbsx_domstatus 1003 -+ 
uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */ -+ domid_t domain; -+ union { -+ struct xen_domctl_createdomain createdomain; -+ struct xen_domctl_getdomaininfo getdomaininfo; -+ struct xen_domctl_getmemlist getmemlist; -+ struct xen_domctl_getpageframeinfo getpageframeinfo; -+ struct xen_domctl_getpageframeinfo2 getpageframeinfo2; -+ struct xen_domctl_getpageframeinfo3 getpageframeinfo3; -+ struct xen_domctl_vcpuaffinity vcpuaffinity; -+ struct xen_domctl_shadow_op shadow_op; -+ struct xen_domctl_max_mem max_mem; -+ struct xen_domctl_vcpucontext vcpucontext; -+ struct xen_domctl_getvcpuinfo getvcpuinfo; -+ struct xen_domctl_max_vcpus max_vcpus; -+ struct xen_domctl_scheduler_op scheduler_op; -+ struct xen_domctl_setdomainhandle setdomainhandle; -+ struct xen_domctl_setdebugging setdebugging; -+ struct xen_domctl_irq_permission irq_permission; -+ struct xen_domctl_iomem_permission iomem_permission; -+ struct xen_domctl_ioport_permission ioport_permission; -+ struct xen_domctl_hypercall_init hypercall_init; -+ struct xen_domctl_arch_setup arch_setup; -+ struct xen_domctl_settimeoffset settimeoffset; -+ struct xen_domctl_disable_migrate disable_migrate; -+ struct xen_domctl_tsc_info tsc_info; -+ struct xen_domctl_real_mode_area real_mode_area; -+ struct xen_domctl_hvmcontext hvmcontext; -+ struct xen_domctl_hvmcontext_partial hvmcontext_partial; -+ struct xen_domctl_address_size address_size; -+ struct xen_domctl_sendtrigger sendtrigger; -+ struct xen_domctl_get_device_group get_device_group; -+ struct xen_domctl_assign_device assign_device; -+ struct xen_domctl_bind_pt_irq bind_pt_irq; -+ struct xen_domctl_memory_mapping memory_mapping; -+ struct xen_domctl_ioport_mapping ioport_mapping; -+ struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr; -+ struct xen_domctl_ext_vcpucontext ext_vcpucontext; -+ struct xen_domctl_set_opt_feature set_opt_feature; -+ struct xen_domctl_set_target set_target; -+ struct xen_domctl_subscribe subscribe; -+ struct 
xen_domctl_debug_op debug_op; -+ struct xen_domctl_mem_event_op mem_event_op; -+ struct xen_domctl_mem_sharing_op mem_sharing_op; -+#if defined(__i386__) || defined(__x86_64__) -+ struct xen_domctl_cpuid cpuid; -+ struct xen_domctl_vcpuextstate vcpuextstate; -+#endif -+ struct xen_domctl_set_access_required access_required; -+ struct xen_domctl_gdbsx_memio gdbsx_guest_memio; -+ struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu; -+ struct xen_domctl_gdbsx_domstatus gdbsx_domstatus; -+ uint8_t pad[128]; -+ } u; -+}; -+typedef struct xen_domctl xen_domctl_t; -+DEFINE_XEN_GUEST_HANDLE(xen_domctl_t); -+ -+#endif /* __XEN_PUBLIC_DOMCTL_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/hvm/e820.h 2007-06-12 13:14:19.000000000 +0200 -@@ -0,0 +1,34 @@ -+ -+/* -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef __XEN_PUBLIC_HVM_E820_H__ -+#define __XEN_PUBLIC_HVM_E820_H__ -+ -+/* E820 location in HVM virtual address space. */ -+#define HVM_E820_PAGE 0x00090000 -+#define HVM_E820_NR_OFFSET 0x000001E8 -+#define HVM_E820_OFFSET 0x000002D0 -+ -+#define HVM_BELOW_4G_RAM_END 0xF0000000 -+#define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END -+#define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START) -+ -+#endif /* __XEN_PUBLIC_HVM_E820_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/hvm/hvm_info_table.h 2010-05-07 11:10:48.000000000 +0200 -@@ -0,0 +1,75 @@ -+/****************************************************************************** -+ * hvm/hvm_info_table.h -+ * -+ * HVM parameter and information table, written into guest memory map. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ -+#define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ -+ -+#define HVM_INFO_PFN 0x09F -+#define HVM_INFO_OFFSET 0x800 -+#define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET) -+ -+/* Maximum we can support with current vLAPIC ID mapping. */ -+#define HVM_MAX_VCPUS 128 -+ -+struct hvm_info_table { -+ char signature[8]; /* "HVM INFO" */ -+ uint32_t length; -+ uint8_t checksum; -+ -+ /* Should firmware build ACPI tables? */ -+ uint8_t acpi_enabled; -+ -+ /* Should firmware build APIC descriptors (APIC MADT / MP BIOS)? */ -+ uint8_t apic_mode; -+ -+ /* How many CPUs does this domain have? */ -+ uint32_t nr_vcpus; -+ -+ /* -+ * MEMORY MAP provided by HVM domain builder. -+ * Notes: -+ * 1. page_to_phys(x) = x << 12 -+ * 2. If a field is zero, the corresponding range does not exist. -+ */ -+ /* -+ * 0x0 to page_to_phys(low_mem_pgend)-1: -+ * RAM below 4GB (except for VGA hole 0xA0000-0xBFFFF) -+ */ -+ uint32_t low_mem_pgend; -+ /* -+ * page_to_phys(reserved_mem_pgstart) to 0xFFFFFFFF: -+ * Reserved for special memory mappings -+ */ -+ uint32_t reserved_mem_pgstart; -+ /* -+ * 0x100000000 to page_to_phys(high_mem_pgend)-1: -+ * RAM above 4GB -+ */ -+ uint32_t high_mem_pgend; -+ -+ /* Bitmap of which CPUs are online at boot time. */ -+ uint8_t vcpu_online[(HVM_MAX_VCPUS + 7)/8]; -+}; -+ -+#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/hvm/ioreq.h 2011-03-17 13:50:24.000000000 +0100 -@@ -0,0 +1,140 @@ -+/* -+ * ioreq.h: I/O request definitions for device models -+ * Copyright (c) 2004, Intel Corporation. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#ifndef _IOREQ_H_ -+#define _IOREQ_H_ -+ -+#define IOREQ_READ 1 -+#define IOREQ_WRITE 0 -+ -+#define STATE_IOREQ_NONE 0 -+#define STATE_IOREQ_READY 1 -+#define STATE_IOREQ_INPROCESS 2 -+#define STATE_IORESP_READY 3 -+ -+#define IOREQ_TYPE_PIO 0 /* pio */ -+#define IOREQ_TYPE_COPY 1 /* mmio ops */ -+#define IOREQ_TYPE_TIMEOFFSET 7 -+#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ -+ -+/* -+ * VMExit dispatcher should cooperate with instruction decoder to -+ * prepare this structure and notify service OS and DM by sending -+ * virq -+ */ -+struct ioreq { -+ uint64_t addr; /* physical address */ -+ uint64_t data; /* data (or paddr of data) */ -+ uint32_t count; /* for rep prefixes */ -+ uint32_t size; /* size in bytes */ -+ uint32_t vp_eport; /* evtchn for notifications to/from device model */ -+ uint16_t _pad0; -+ uint8_t state:4; -+ uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr -+ * of the real data to use. */ -+ uint8_t dir:1; /* 1=read, 0=write */ -+ uint8_t df:1; -+ uint8_t _pad1:1; -+ uint8_t type; /* I/O type */ -+}; -+typedef struct ioreq ioreq_t; -+ -+struct shared_iopage { -+ struct ioreq vcpu_ioreq[1]; -+}; -+typedef struct shared_iopage shared_iopage_t; -+ -+struct buf_ioreq { -+ uint8_t type; /* I/O type */ -+ uint8_t pad:1; -+ uint8_t dir:1; /* 1=read, 0=write */ -+ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */ -+ uint32_t addr:20;/* physical address */ -+ uint32_t data; /* data */ -+}; -+typedef struct buf_ioreq buf_ioreq_t; -+ -+#define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */ -+struct buffered_iopage { -+ unsigned int read_pointer; -+ unsigned int write_pointer; -+ buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM]; -+}; /* NB. Size of this structure must be no greater than one page. 
*/ -+typedef struct buffered_iopage buffered_iopage_t; -+ -+#if defined(__ia64__) -+struct pio_buffer { -+ uint32_t page_offset; -+ uint32_t pointer; -+ uint32_t data_end; -+ uint32_t buf_size; -+ void *opaque; -+}; -+ -+#define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */ -+#define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */ -+#define PIO_BUFFER_ENTRY_NUM 2 -+struct buffered_piopage { -+ struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM]; -+ uint8_t buffer[1]; -+}; -+#endif /* defined(__ia64__) */ -+ -+/* -+ * ACPI Control/Event register locations. Location is controlled by a -+ * version number in HVM_PARAM_ACPI_IOPORTS_LOCATION. -+ */ -+ -+/* Version 0 (default): Traditional Xen locations. */ -+#define ACPI_PM1A_EVT_BLK_ADDRESS_V0 0x1f40 -+#define ACPI_PM1A_CNT_BLK_ADDRESS_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 0x04) -+#define ACPI_PM_TMR_BLK_ADDRESS_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 0x08) -+#define ACPI_GPE0_BLK_ADDRESS_V0 (ACPI_PM_TMR_BLK_ADDRESS_V0 + 0x20) -+#define ACPI_GPE0_BLK_LEN_V0 0x08 -+ -+/* Version 1: Locations preferred by modern Qemu. */ -+#define ACPI_PM1A_EVT_BLK_ADDRESS_V1 0xb000 -+#define ACPI_PM1A_CNT_BLK_ADDRESS_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 0x04) -+#define ACPI_PM_TMR_BLK_ADDRESS_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 0x08) -+#define ACPI_GPE0_BLK_ADDRESS_V1 0xafe0 -+#define ACPI_GPE0_BLK_LEN_V1 0x04 -+ -+/* Compatibility definitions for the default location (version 0). 
*/ -+#define ACPI_PM1A_EVT_BLK_ADDRESS ACPI_PM1A_EVT_BLK_ADDRESS_V0 -+#define ACPI_PM1A_CNT_BLK_ADDRESS ACPI_PM1A_CNT_BLK_ADDRESS_V0 -+#define ACPI_PM_TMR_BLK_ADDRESS ACPI_PM_TMR_BLK_ADDRESS_V0 -+#define ACPI_GPE0_BLK_ADDRESS ACPI_GPE0_BLK_ADDRESS_V0 -+#define ACPI_GPE0_BLK_LEN ACPI_GPE0_BLK_LEN_V0 -+ -+ -+#endif /* _IOREQ_H_ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/hvm/save.h 2008-04-02 12:34:02.000000000 +0200 -@@ -0,0 +1,88 @@ -+/* -+ * hvm/save.h -+ * -+ * Structure definitions for HVM state that is held by Xen and must -+ * be saved along with the domain's memory and device-model state. -+ * -+ * Copyright (c) 2007 XenSource Ltd. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#ifndef __XEN_PUBLIC_HVM_SAVE_H__ -+#define __XEN_PUBLIC_HVM_SAVE_H__ -+ -+/* -+ * Structures in this header *must* have the same layout in 32bit -+ * and 64bit environments: this means that all fields must be explicitly -+ * sized types and aligned to their sizes, and the structs must be -+ * a multiple of eight bytes long. -+ * -+ * Only the state necessary for saving and restoring (i.e. fields -+ * that are analogous to actual hardware state) should go in this file. -+ * Internal mechanisms should be kept in Xen-private headers. -+ */ -+ -+#if !defined(__GNUC__) || defined(__STRICT_ANSI__) -+#error "Anonymous structs/unions are a GNU extension." -+#endif -+ -+/* -+ * Each entry is preceded by a descriptor giving its type and length -+ */ -+struct hvm_save_descriptor { -+ uint16_t typecode; /* Used to demux the various types below */ -+ uint16_t instance; /* Further demux within a type */ -+ uint32_t length; /* In bytes, *not* including this descriptor */ -+}; -+ -+ -+/* -+ * Each entry has a datatype associated with it: for example, the CPU state -+ * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), -+ * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU). -+ * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system -+ * ugliness. -+ */ -+ -+#define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \ -+ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; } -+ -+#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) -+#define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x))) -+#define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c)) -+ -+ -+/* -+ * The series of save records is teminated by a zero-type, zero-length -+ * descriptor. 
-+ */ -+ -+struct hvm_save_end {}; -+DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); -+ -+#if defined(__i386__) || defined(__x86_64__) -+#include "../arch-x86/hvm/save.h" -+#elif defined(__ia64__) -+#include "../arch-ia64/hvm/save.h" -+#else -+#error "unsupported architecture" -+#endif -+ -+#endif /* __XEN_PUBLIC_HVM_SAVE_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/io/fsif.h 2009-06-23 09:28:21.000000000 +0200 -@@ -0,0 +1,192 @@ -+/****************************************************************************** -+ * fsif.h -+ * -+ * Interface to FS level split device drivers. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2007, Grzegorz Milos, . 
-+ */ -+ -+#ifndef __XEN_PUBLIC_IO_FSIF_H__ -+#define __XEN_PUBLIC_IO_FSIF_H__ -+ -+#include "ring.h" -+#include "../grant_table.h" -+ -+#define REQ_FILE_OPEN 1 -+#define REQ_FILE_CLOSE 2 -+#define REQ_FILE_READ 3 -+#define REQ_FILE_WRITE 4 -+#define REQ_STAT 5 -+#define REQ_FILE_TRUNCATE 6 -+#define REQ_REMOVE 7 -+#define REQ_RENAME 8 -+#define REQ_CREATE 9 -+#define REQ_DIR_LIST 10 -+#define REQ_CHMOD 11 -+#define REQ_FS_SPACE 12 -+#define REQ_FILE_SYNC 13 -+ -+struct fsif_open_request { -+ grant_ref_t gref; -+}; -+ -+struct fsif_close_request { -+ uint32_t fd; -+}; -+ -+struct fsif_read_request { -+ uint32_t fd; -+ int32_t pad; -+ uint64_t len; -+ uint64_t offset; -+ grant_ref_t grefs[1]; /* Variable length */ -+}; -+ -+struct fsif_write_request { -+ uint32_t fd; -+ int32_t pad; -+ uint64_t len; -+ uint64_t offset; -+ grant_ref_t grefs[1]; /* Variable length */ -+}; -+ -+struct fsif_stat_request { -+ uint32_t fd; -+}; -+ -+/* This structure is a copy of some fields from stat structure, returned -+ * via the ring. 
*/ -+struct fsif_stat_response { -+ int32_t stat_mode; -+ uint32_t stat_uid; -+ uint32_t stat_gid; -+ int32_t stat_ret; -+ int64_t stat_size; -+ int64_t stat_atime; -+ int64_t stat_mtime; -+ int64_t stat_ctime; -+}; -+ -+struct fsif_truncate_request { -+ uint32_t fd; -+ int32_t pad; -+ int64_t length; -+}; -+ -+struct fsif_remove_request { -+ grant_ref_t gref; -+}; -+ -+struct fsif_rename_request { -+ uint16_t old_name_offset; -+ uint16_t new_name_offset; -+ grant_ref_t gref; -+}; -+ -+struct fsif_create_request { -+ int8_t directory; -+ int8_t pad; -+ int16_t pad2; -+ int32_t mode; -+ grant_ref_t gref; -+}; -+ -+struct fsif_list_request { -+ uint32_t offset; -+ grant_ref_t gref; -+}; -+ -+#define NR_FILES_SHIFT 0 -+#define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */ -+#define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT) -+#define ERROR_SIZE 32 /* 32 bits for the error mask */ -+#define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT) -+#define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT) -+#define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE) -+#define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT) -+ -+struct fsif_chmod_request { -+ uint32_t fd; -+ int32_t mode; -+}; -+ -+struct fsif_space_request { -+ grant_ref_t gref; -+}; -+ -+struct fsif_sync_request { -+ uint32_t fd; -+}; -+ -+ -+/* FS operation request */ -+struct fsif_request { -+ uint8_t type; /* Type of the request */ -+ uint8_t pad; -+ uint16_t id; /* Request ID, copied to the response */ -+ uint32_t pad2; -+ union { -+ struct fsif_open_request fopen; -+ struct fsif_close_request fclose; -+ struct fsif_read_request fread; -+ struct fsif_write_request fwrite; -+ struct fsif_stat_request fstat; -+ struct fsif_truncate_request ftruncate; -+ struct fsif_remove_request fremove; -+ struct fsif_rename_request frename; -+ struct fsif_create_request fcreate; -+ struct fsif_list_request flist; -+ struct fsif_chmod_request fchmod; -+ struct fsif_space_request fspace; -+ struct 
fsif_sync_request fsync; -+ } u; -+}; -+typedef struct fsif_request fsif_request_t; -+ -+/* FS operation response */ -+struct fsif_response { -+ uint16_t id; -+ uint16_t pad1; -+ uint32_t pad2; -+ union { -+ uint64_t ret_val; -+ struct fsif_stat_response fstat; -+ } u; -+}; -+ -+typedef struct fsif_response fsif_response_t; -+ -+#define FSIF_RING_ENTRY_SIZE 64 -+ -+#define FSIF_NR_READ_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) / \ -+ sizeof(grant_ref_t) + 1) -+#define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \ -+ sizeof(grant_ref_t) + 1) -+ -+DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response); -+ -+#define STATE_INITIALISED "init" -+#define STATE_READY "ready" -+#define STATE_CLOSING "closing" -+#define STATE_CLOSED "closed" -+ -+ -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/io/tpmif.h 2007-06-12 13:14:19.000000000 +0200 -@@ -0,0 +1,77 @@ -+/****************************************************************************** -+ * tpmif.h -+ * -+ * TPM I/O interface for Xen guest OSes. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2005, IBM Corporation -+ * -+ * Author: Stefan Berger, stefanb@us.ibm.com -+ * Grant table support: Mahadevan Gomathisankaran -+ * -+ * This code has been derived from tools/libxc/xen/io/netif.h -+ * -+ * Copyright (c) 2003-2004, Keir Fraser -+ */ -+ -+#ifndef __XEN_PUBLIC_IO_TPMIF_H__ -+#define __XEN_PUBLIC_IO_TPMIF_H__ -+ -+#include "../grant_table.h" -+ -+struct tpmif_tx_request { -+ unsigned long addr; /* Machine address of packet. */ -+ grant_ref_t ref; /* grant table access reference */ -+ uint16_t unused; -+ uint16_t size; /* Packet size in bytes. */ -+}; -+typedef struct tpmif_tx_request tpmif_tx_request_t; -+ -+/* -+ * The TPMIF_TX_RING_SIZE defines the number of pages the -+ * front-end and backend can exchange (= size of array). -+ */ -+typedef uint32_t TPMIF_RING_IDX; -+ -+#define TPMIF_TX_RING_SIZE 1 -+ -+/* This structure must fit in a memory page. */ -+ -+struct tpmif_ring { -+ struct tpmif_tx_request req; -+}; -+typedef struct tpmif_ring tpmif_ring_t; -+ -+struct tpmif_tx_interface { -+ struct tpmif_ring ring[TPMIF_TX_RING_SIZE]; -+}; -+typedef struct tpmif_tx_interface tpmif_tx_interface_t; -+ -+#endif -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/io/usbif.h 2010-02-24 13:13:46.000000000 +0100 -@@ -0,0 +1,151 @@ -+/* -+ * usbif.h -+ * -+ * USB I/O interface for Xen guest OSes. -+ * -+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD. 
-+ * Author: Noboru Iwamatsu -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef __XEN_PUBLIC_IO_USBIF_H__ -+#define __XEN_PUBLIC_IO_USBIF_H__ -+ -+#include "ring.h" -+#include "../grant_table.h" -+ -+enum usb_spec_version { -+ USB_VER_UNKNOWN = 0, -+ USB_VER_USB11, -+ USB_VER_USB20, -+ USB_VER_USB30, /* not supported yet */ -+}; -+ -+/* -+ * USB pipe in usbif_request -+ * -+ * bits 0-5 are specific bits for virtual USB driver. -+ * bits 7-31 are standard urb pipe. 
-+ * -+ * - port number(NEW): bits 0-4 -+ * (USB_MAXCHILDREN is 31) -+ * -+ * - operation flag(NEW): bit 5 -+ * (0 = submit urb, -+ * 1 = unlink urb) -+ * -+ * - direction: bit 7 -+ * (0 = Host-to-Device [Out] -+ * 1 = Device-to-Host [In]) -+ * -+ * - device address: bits 8-14 -+ * -+ * - endpoint: bits 15-18 -+ * -+ * - pipe type: bits 30-31 -+ * (00 = isochronous, 01 = interrupt, -+ * 10 = control, 11 = bulk) -+ */ -+#define usbif_pipeportnum(pipe) ((pipe) & 0x1f) -+#define usbif_setportnum_pipe(pipe, portnum) \ -+ ((pipe)|(portnum)) -+ -+#define usbif_pipeunlink(pipe) ((pipe) & 0x20) -+#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe)) -+#define usbif_setunlink_pipe(pipe) ((pipe)|(0x20)) -+ -+#define USBIF_BACK_MAX_PENDING_REQS (128) -+#define USBIF_MAX_SEGMENTS_PER_REQUEST (16) -+ -+/* -+ * RING for transferring urbs. -+ */ -+struct usbif_request_segment { -+ grant_ref_t gref; -+ uint16_t offset; -+ uint16_t length; -+}; -+ -+struct usbif_urb_request { -+ uint16_t id; /* request id */ -+ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */ -+ -+ /* basic urb parameter */ -+ uint32_t pipe; -+ uint16_t transfer_flags; -+ uint16_t buffer_length; -+ union { -+ uint8_t ctrl[8]; /* setup_packet (Ctrl) */ -+ -+ struct { -+ uint16_t interval; /* maximum (1024*8) in usb core */ -+ uint16_t start_frame; /* start frame */ -+ uint16_t number_of_packets; /* number of ISO packet */ -+ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */ -+ } isoc; -+ -+ struct { -+ uint16_t interval; /* maximum (1024*8) in usb core */ -+ uint16_t pad[3]; -+ } intr; -+ -+ struct { -+ uint16_t unlink_id; /* unlink request id */ -+ uint16_t pad[3]; -+ } unlink; -+ -+ } u; -+ -+ /* urb data segments */ -+ struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST]; -+}; -+typedef struct usbif_urb_request usbif_urb_request_t; -+ -+struct usbif_urb_response { -+ uint16_t id; /* request id */ -+ uint16_t start_frame; /* start frame (ISO) */ -+ int32_t 
status; /* status (non-ISO) */ -+ int32_t actual_length; /* actual transfer length */ -+ int32_t error_count; /* number of ISO errors */ -+}; -+typedef struct usbif_urb_response usbif_urb_response_t; -+ -+DEFINE_RING_TYPES(usbif_urb, struct usbif_urb_request, struct usbif_urb_response); -+#define USB_URB_RING_SIZE __CONST_RING_SIZE(usbif_urb, PAGE_SIZE) -+ -+/* -+ * RING for notifying connect/disconnect events to frontend -+ */ -+struct usbif_conn_request { -+ uint16_t id; -+}; -+typedef struct usbif_conn_request usbif_conn_request_t; -+ -+struct usbif_conn_response { -+ uint16_t id; /* request id */ -+ uint8_t portnum; /* port number */ -+ uint8_t speed; /* usb_device_speed */ -+}; -+typedef struct usbif_conn_response usbif_conn_response_t; -+ -+DEFINE_RING_TYPES(usbif_conn, struct usbif_conn_request, struct usbif_conn_response); -+#define USB_CONN_RING_SIZE __CONST_RING_SIZE(usbif_conn, PAGE_SIZE) -+ -+#endif /* __XEN_PUBLIC_IO_USBIF_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/io/vscsiif.h 2008-07-21 11:00:33.000000000 +0200 -@@ -0,0 +1,105 @@ -+/****************************************************************************** -+ * vscsiif.h -+ * -+ * Based on the blkif.h code. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. 
-+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright(c) FUJITSU Limited 2008. -+ */ -+ -+#ifndef __XEN__PUBLIC_IO_SCSI_H__ -+#define __XEN__PUBLIC_IO_SCSI_H__ -+ -+#include "ring.h" -+#include "../grant_table.h" -+ -+/* command between backend and frontend */ -+#define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */ -+#define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/ -+#define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/ -+ -+ -+#define VSCSIIF_BACK_MAX_PENDING_REQS 128 -+ -+/* -+ * Maximum scatter/gather segments per request. -+ * -+ * Considering balance between allocating al least 16 "vscsiif_request" -+ * structures on one page (4096bytes) and number of scatter gather -+ * needed, we decided to use 26 as a magic number. -+ */ -+#define VSCSIIF_SG_TABLESIZE 26 -+ -+/* -+ * base on linux kernel 2.6.18 -+ */ -+#define VSCSIIF_MAX_COMMAND_SIZE 16 -+#define VSCSIIF_SENSE_BUFFERSIZE 96 -+ -+ -+struct vscsiif_request { -+ uint16_t rqid; /* private guest value, echoed in resp */ -+ uint8_t act; /* command between backend and frontend */ -+ uint8_t cmd_len; -+ -+ uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; -+ uint16_t timeout_per_command; /* The command is issued by twice -+ the value in Backend. 
*/ -+ uint16_t channel, id, lun; -+ uint16_t padding; -+ uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1) -+ DMA_FROM_DEVICE(2) -+ DMA_NONE(3) requests */ -+ uint8_t nr_segments; /* Number of pieces of scatter-gather */ -+ -+ struct scsiif_request_segment { -+ grant_ref_t gref; -+ uint16_t offset; -+ uint16_t length; -+ } seg[VSCSIIF_SG_TABLESIZE]; -+ uint32_t reserved[3]; -+}; -+typedef struct vscsiif_request vscsiif_request_t; -+ -+struct vscsiif_response { -+ uint16_t rqid; -+ uint8_t padding; -+ uint8_t sense_len; -+ uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; -+ int32_t rslt; -+ uint32_t residual_len; /* request bufflen - -+ return the value from physical device */ -+ uint32_t reserved[36]; -+}; -+typedef struct vscsiif_response vscsiif_response_t; -+ -+DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); -+ -+ -+#endif /*__XEN__PUBLIC_IO_SCSI_H__*/ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/kexec.h 2008-11-25 12:22:34.000000000 +0100 -@@ -0,0 +1,168 @@ -+/****************************************************************************** -+ * kexec.h - Public portion -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. 
-+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Xen port written by: -+ * - Simon 'Horms' Horman -+ * - Magnus Damm -+ */ -+ -+#ifndef _XEN_PUBLIC_KEXEC_H -+#define _XEN_PUBLIC_KEXEC_H -+ -+ -+/* This file describes the Kexec / Kdump hypercall interface for Xen. -+ * -+ * Kexec under vanilla Linux allows a user to reboot the physical machine -+ * into a new user-specified kernel. The Xen port extends this idea -+ * to allow rebooting of the machine from dom0. When kexec for dom0 -+ * is used to reboot, both the hypervisor and the domains get replaced -+ * with some other kernel. It is possible to kexec between vanilla -+ * Linux and Xen and back again. Xen to Xen works well too. -+ * -+ * The hypercall interface for kexec can be divided into three main -+ * types of hypercall operations: -+ * -+ * 1) Range information: -+ * This is used by the dom0 kernel to ask the hypervisor about various -+ * address information. This information is needed to allow kexec-tools -+ * to fill in the ELF headers for /proc/vmcore properly. -+ * -+ * 2) Load and unload of images: -+ * There are no big surprises here, the kexec binary from kexec-tools -+ * runs in userspace in dom0. The tool loads/unloads data into the -+ * dom0 kernel such as new kernel, initramfs and hypervisor. When -+ * loaded the dom0 kernel performs a load hypercall operation, and -+ * before releasing all page references the dom0 kernel calls unload. -+ * -+ * 3) Kexec operation: -+ * This is used to start a previously loaded kernel. 
-+ */ -+ -+#include "xen.h" -+ -+#if defined(__i386__) || defined(__x86_64__) -+#define KEXEC_XEN_NO_PAGES 17 -+#endif -+ -+/* -+ * Prototype for this hypercall is: -+ * int kexec_op(int cmd, void *args) -+ * @cmd == KEXEC_CMD_... -+ * KEXEC operation to perform -+ * @args == Operation-specific extra arguments (NULL if none). -+ */ -+ -+/* -+ * Kexec supports two types of operation: -+ * - kexec into a regular kernel, very similar to a standard reboot -+ * - KEXEC_TYPE_DEFAULT is used to specify this type -+ * - kexec into a special "crash kernel", aka kexec-on-panic -+ * - KEXEC_TYPE_CRASH is used to specify this type -+ * - parts of our system may be broken at kexec-on-panic time -+ * - the code should be kept as simple and self-contained as possible -+ */ -+ -+#define KEXEC_TYPE_DEFAULT 0 -+#define KEXEC_TYPE_CRASH 1 -+ -+ -+/* The kexec implementation for Xen allows the user to load two -+ * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH. -+ * All data needed for a kexec reboot is kept in one xen_kexec_image_t -+ * per "instance". The data mainly consists of machine address lists to pages -+ * together with destination addresses. The data in xen_kexec_image_t -+ * is passed to the "code page" which is one page of code that performs -+ * the final relocations before jumping to the new kernel. -+ */ -+ -+typedef struct xen_kexec_image { -+#if defined(__i386__) || defined(__x86_64__) -+ unsigned long page_list[KEXEC_XEN_NO_PAGES]; -+#endif -+#if defined(__ia64__) -+ unsigned long reboot_code_buffer; -+#endif -+ unsigned long indirection_page; -+ unsigned long start_address; -+} xen_kexec_image_t; -+ -+/* -+ * Perform kexec having previously loaded a kexec or kdump kernel -+ * as appropriate. -+ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] -+ */ -+#define KEXEC_CMD_kexec 0 -+typedef struct xen_kexec_exec { -+ int type; -+} xen_kexec_exec_t; -+ -+/* -+ * Load/Unload kernel image for kexec or kdump. 
-+ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] -+ * image == relocation information for kexec (ignored for unload) [in] -+ */ -+#define KEXEC_CMD_kexec_load 1 -+#define KEXEC_CMD_kexec_unload 2 -+typedef struct xen_kexec_load { -+ int type; -+ xen_kexec_image_t image; -+} xen_kexec_load_t; -+ -+#define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */ -+#define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */ -+#define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */ -+#define KEXEC_RANGE_MA_XENHEAP 3 /* machine address and size of xenheap -+ * Note that although this is adjacent -+ * to Xen it exists in a separate EFI -+ * region on ia64, and thus needs to be -+ * inserted into iomem_machine separately */ -+#define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of -+ * the ia64_boot_param */ -+#define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size of -+ * of the EFI Memory Map */ -+#define KEXEC_RANGE_MA_VMCOREINFO 6 /* machine address and size of vmcoreinfo */ -+ -+/* -+ * Find the address and size of certain memory areas -+ * range == KEXEC_RANGE_... [in] -+ * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in] -+ * size == number of bytes reserved in window [out] -+ * start == address of the first byte in the window [out] -+ */ -+#define KEXEC_CMD_kexec_get_range 3 -+typedef struct xen_kexec_range { -+ int range; -+ int nr; -+ unsigned long size; -+ unsigned long start; -+} xen_kexec_range_t; -+ -+#endif /* _XEN_PUBLIC_KEXEC_H */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/mem_event.h 2011-03-17 13:50:24.000000000 +0100 -@@ -0,0 +1,82 @@ -+/****************************************************************************** -+ * mem_event.h -+ * -+ * Memory event common structures. 
-+ * -+ * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp) -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef _XEN_PUBLIC_MEM_EVENT_H -+#define _XEN_PUBLIC_MEM_EVENT_H -+ -+#include "xen.h" -+#include "io/ring.h" -+ -+/* Memory event type */ -+#define MEM_EVENT_TYPE_SHARED 0 -+#define MEM_EVENT_TYPE_PAGING 1 -+#define MEM_EVENT_TYPE_ACCESS 2 -+ -+/* Memory event flags */ -+#define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0) -+#define MEM_EVENT_FLAG_DROP_PAGE (1 << 1) -+ -+/* Reasons for the memory event request */ -+#define MEM_EVENT_REASON_UNKNOWN 0 /* typical reason */ -+#define MEM_EVENT_REASON_VIOLATION 1 /* access violation, GFN is address */ -+#define MEM_EVENT_REASON_CR0 2 /* CR0 was hit: gfn is CR0 value */ -+#define MEM_EVENT_REASON_CR3 3 /* CR3 was hit: gfn is CR3 value */ -+#define MEM_EVENT_REASON_CR4 4 /* CR4 was hit: gfn is CR4 value */ -+#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */ -+ -+typedef struct mem_event_shared_page { -+ uint32_t port; -+} mem_event_shared_page_t; -+ -+typedef struct mem_event_st { -+ uint16_t type; -+ uint16_t flags; -+ uint32_t vcpu_id; -+ -+ uint64_t gfn; -+ uint64_t offset; -+ uint64_t gla; /* if gla_valid */ -+ -+ uint32_t p2mt; -+ -+ uint16_t access_r:1; -+ uint16_t access_w:1; -+ uint16_t access_x:1; 
-+ uint16_t gla_valid:1; -+ uint16_t available:12; -+ -+ uint16_t reason; -+} mem_event_request_t, mem_event_response_t; -+ -+DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t); -+ -+#endif -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/nmi.h 2009-06-23 09:28:21.000000000 +0200 -@@ -0,0 +1,80 @@ -+/****************************************************************************** -+ * nmi.h -+ * -+ * NMI callback registration and reason codes. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. 
-+ * -+ * Copyright (c) 2005, Keir Fraser -+ */ -+ -+#ifndef __XEN_PUBLIC_NMI_H__ -+#define __XEN_PUBLIC_NMI_H__ -+ -+#include "xen.h" -+ -+/* -+ * NMI reason codes: -+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. -+ */ -+ /* I/O-check error reported via ISA port 0x61, bit 6. */ -+#define _XEN_NMIREASON_io_error 0 -+#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) -+ /* Parity error reported via ISA port 0x61, bit 7. */ -+#define _XEN_NMIREASON_parity_error 1 -+#define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error) -+ /* Unknown hardware-generated NMI. */ -+#define _XEN_NMIREASON_unknown 2 -+#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) -+ -+/* -+ * long nmi_op(unsigned int cmd, void *arg) -+ * NB. All ops return zero on success, else a negative error code. -+ */ -+ -+/* -+ * Register NMI callback for this (calling) VCPU. Currently this only makes -+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. -+ * arg == pointer to xennmi_callback structure. -+ */ -+#define XENNMI_register_callback 0 -+struct xennmi_callback { -+ unsigned long handler_address; -+ unsigned long pad; -+}; -+typedef struct xennmi_callback xennmi_callback_t; -+DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t); -+ -+/* -+ * Deregister NMI callback for this (calling) VCPU. -+ * arg == NULL. -+ */ -+#define XENNMI_unregister_callback 1 -+ -+#endif /* __XEN_PUBLIC_NMI_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/platform.h 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,393 @@ -+/****************************************************************************** -+ * platform.h -+ * -+ * Hardware platform operations. Intended for use by domain-0 kernel. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2002-2006, K Fraser -+ */ -+ -+#ifndef __XEN_PUBLIC_PLATFORM_H__ -+#define __XEN_PUBLIC_PLATFORM_H__ -+ -+#include "xen.h" -+ -+#define XENPF_INTERFACE_VERSION 0x03000001 -+ -+/* -+ * Set clock such that it would read after 00:00:00 UTC, -+ * 1 January, 1970 if the current system time was . -+ */ -+#define XENPF_settime 17 -+struct xenpf_settime { -+ /* IN variables. */ -+ uint32_t secs; -+ uint32_t nsecs; -+ uint64_t system_time; -+}; -+typedef struct xenpf_settime xenpf_settime_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t); -+ -+/* -+ * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. -+ * On x86, @type is an architecture-defined MTRR memory type. -+ * On success, returns the MTRR that was used (@reg) and a handle that can -+ * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. -+ * (x86-specific). 
-+ */ -+#define XENPF_add_memtype 31 -+struct xenpf_add_memtype { -+ /* IN variables. */ -+ xen_pfn_t mfn; -+ uint64_t nr_mfns; -+ uint32_t type; -+ /* OUT variables. */ -+ uint32_t handle; -+ uint32_t reg; -+}; -+typedef struct xenpf_add_memtype xenpf_add_memtype_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t); -+ -+/* -+ * Tear down an existing memory-range type. If @handle is remembered then it -+ * should be passed in to accurately tear down the correct setting (in case -+ * of overlapping memory regions with differing types). If it is not known -+ * then @handle should be set to zero. In all cases @reg must be set. -+ * (x86-specific). -+ */ -+#define XENPF_del_memtype 32 -+struct xenpf_del_memtype { -+ /* IN variables. */ -+ uint32_t handle; -+ uint32_t reg; -+}; -+typedef struct xenpf_del_memtype xenpf_del_memtype_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t); -+ -+/* Read current type of an MTRR (x86-specific). */ -+#define XENPF_read_memtype 33 -+struct xenpf_read_memtype { -+ /* IN variables. */ -+ uint32_t reg; -+ /* OUT variables. */ -+ xen_pfn_t mfn; -+ uint64_t nr_mfns; -+ uint32_t type; -+}; -+typedef struct xenpf_read_memtype xenpf_read_memtype_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t); -+ -+#define XENPF_microcode_update 35 -+struct xenpf_microcode_update { -+ /* IN variables. */ -+ XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */ -+ uint32_t length; /* Length of microcode data. */ -+}; -+typedef struct xenpf_microcode_update xenpf_microcode_update_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t); -+ -+#define XENPF_platform_quirk 39 -+#define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */ -+#define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */ -+#define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */ -+struct xenpf_platform_quirk { -+ /* IN variables. 
*/ -+ uint32_t quirk_id; -+}; -+typedef struct xenpf_platform_quirk xenpf_platform_quirk_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t); -+ -+#define XENPF_firmware_info 50 -+#define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */ -+#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */ -+#define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */ -+struct xenpf_firmware_info { -+ /* IN variables. */ -+ uint32_t type; -+ uint32_t index; -+ /* OUT variables. */ -+ union { -+ struct { -+ /* Int13, Fn48: Check Extensions Present. */ -+ uint8_t device; /* %dl: bios device number */ -+ uint8_t version; /* %ah: major version */ -+ uint16_t interface_support; /* %cx: support bitmap */ -+ /* Int13, Fn08: Legacy Get Device Parameters. */ -+ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */ -+ uint8_t legacy_max_head; /* %dh: max head # */ -+ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */ -+ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */ -+ /* NB. First uint16_t of buffer must be set to buffer size. */ -+ XEN_GUEST_HANDLE(void) edd_params; -+ } disk_info; /* XEN_FW_DISK_INFO */ -+ struct { -+ uint8_t device; /* bios device number */ -+ uint32_t mbr_signature; /* offset 0x1b8 in mbr */ -+ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */ -+ struct { -+ /* Int10, AX=4F15: Get EDID info. */ -+ uint8_t capabilities; -+ uint8_t edid_transfer_time; -+ /* must refer to 128-byte buffer */ -+ XEN_GUEST_HANDLE(uint8) edid; -+ } vbeddc_info; /* XEN_FW_VBEDDC_INFO */ -+ } u; -+}; -+typedef struct xenpf_firmware_info xenpf_firmware_info_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t); -+ -+#define XENPF_enter_acpi_sleep 51 -+struct xenpf_enter_acpi_sleep { -+ /* IN variables */ -+ uint16_t pm1a_cnt_val; /* PM1a control value. */ -+ uint16_t pm1b_cnt_val; /* PM1b control value. */ -+ uint32_t sleep_state; /* Which state to enter (Sn). */ -+ uint32_t flags; /* Must be zero. 
*/ -+}; -+typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t); -+ -+#define XENPF_change_freq 52 -+struct xenpf_change_freq { -+ /* IN variables */ -+ uint32_t flags; /* Must be zero. */ -+ uint32_t cpu; /* Physical cpu. */ -+ uint64_t freq; /* New frequency (Hz). */ -+}; -+typedef struct xenpf_change_freq xenpf_change_freq_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t); -+ -+/* -+ * Get idle times (nanoseconds since boot) for physical CPUs specified in the -+ * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is -+ * indexed by CPU number; only entries with the corresponding @cpumap_bitmap -+ * bit set are written to. On return, @cpumap_bitmap is modified so that any -+ * non-existent CPUs are cleared. Such CPUs have their @idletime array entry -+ * cleared. -+ */ -+#define XENPF_getidletime 53 -+struct xenpf_getidletime { -+ /* IN/OUT variables */ -+ /* IN: CPUs to interrogate; OUT: subset of IN which are present */ -+ XEN_GUEST_HANDLE(uint8) cpumap_bitmap; -+ /* IN variables */ -+ /* Size of cpumap bitmap. */ -+ uint32_t cpumap_nr_cpus; -+ /* Must be indexable for every cpu in cpumap_bitmap. */ -+ XEN_GUEST_HANDLE(uint64) idletime; -+ /* OUT variables */ -+ /* System time when the idletime snapshots were taken. 
*/ -+ uint64_t now; -+}; -+typedef struct xenpf_getidletime xenpf_getidletime_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t); -+ -+#define XENPF_set_processor_pminfo 54 -+ -+/* ability bits */ -+#define XEN_PROCESSOR_PM_CX 1 -+#define XEN_PROCESSOR_PM_PX 2 -+#define XEN_PROCESSOR_PM_TX 4 -+ -+/* cmd type */ -+#define XEN_PM_CX 0 -+#define XEN_PM_PX 1 -+#define XEN_PM_TX 2 -+ -+/* Px sub info type */ -+#define XEN_PX_PCT 1 -+#define XEN_PX_PSS 2 -+#define XEN_PX_PPC 4 -+#define XEN_PX_PSD 8 -+ -+struct xen_power_register { -+ uint32_t space_id; -+ uint32_t bit_width; -+ uint32_t bit_offset; -+ uint32_t access_size; -+ uint64_t address; -+}; -+ -+struct xen_processor_csd { -+ uint32_t domain; /* domain number of one dependent group */ -+ uint32_t coord_type; /* coordination type */ -+ uint32_t num; /* number of processors in same domain */ -+}; -+typedef struct xen_processor_csd xen_processor_csd_t; -+DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t); -+ -+struct xen_processor_cx { -+ struct xen_power_register reg; /* GAS for Cx trigger register */ -+ uint8_t type; /* cstate value, c0: 0, c1: 1, ... 
*/ -+ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */ -+ uint32_t power; /* average power consumption(mW) */ -+ uint32_t dpcnt; /* number of dependency entries */ -+ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */ -+}; -+typedef struct xen_processor_cx xen_processor_cx_t; -+DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t); -+ -+struct xen_processor_flags { -+ uint32_t bm_control:1; -+ uint32_t bm_check:1; -+ uint32_t has_cst:1; -+ uint32_t power_setup_done:1; -+ uint32_t bm_rld_set:1; -+}; -+ -+struct xen_processor_power { -+ uint32_t count; /* number of C state entries in array below */ -+ struct xen_processor_flags flags; /* global flags of this processor */ -+ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */ -+}; -+ -+struct xen_pct_register { -+ uint8_t descriptor; -+ uint16_t length; -+ uint8_t space_id; -+ uint8_t bit_width; -+ uint8_t bit_offset; -+ uint8_t reserved; -+ uint64_t address; -+}; -+ -+struct xen_processor_px { -+ uint64_t core_frequency; /* megahertz */ -+ uint64_t power; /* milliWatts */ -+ uint64_t transition_latency; /* microseconds */ -+ uint64_t bus_master_latency; /* microseconds */ -+ uint64_t control; /* control value */ -+ uint64_t status; /* success indicator */ -+}; -+typedef struct xen_processor_px xen_processor_px_t; -+DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t); -+ -+struct xen_psd_package { -+ uint64_t num_entries; -+ uint64_t revision; -+ uint64_t domain; -+ uint64_t coord_type; -+ uint64_t num_processors; -+}; -+ -+struct xen_processor_performance { -+ uint32_t flags; /* flag for Px sub info type */ -+ uint32_t platform_limit; /* Platform limitation on freq usage */ -+ struct xen_pct_register control_register; -+ struct xen_pct_register status_register; -+ uint32_t state_count; /* total available performance states */ -+ XEN_GUEST_HANDLE(xen_processor_px_t) states; -+ struct xen_psd_package domain_info; -+ uint32_t shared_type; /* coordination type of this processor */ 
-+}; -+typedef struct xen_processor_performance xen_processor_performance_t; -+DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t); -+ -+struct xenpf_set_processor_pminfo { -+ /* IN variables */ -+ uint32_t id; /* ACPI CPU ID */ -+ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */ -+ union { -+ struct xen_processor_power power;/* Cx: _CST/_CSD */ -+ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ -+ } u; -+}; -+typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t); -+ -+#define XENPF_get_cpuinfo 55 -+struct xenpf_pcpuinfo { -+ /* IN */ -+ uint32_t xen_cpuid; -+ /* OUT */ -+ /* The maxium cpu_id that is present */ -+ uint32_t max_present; -+#define XEN_PCPU_FLAGS_ONLINE 1 -+ /* Correponding xen_cpuid is not present*/ -+#define XEN_PCPU_FLAGS_INVALID 2 -+ uint32_t flags; -+ uint32_t apic_id; -+ uint32_t acpi_id; -+}; -+typedef struct xenpf_pcpuinfo xenpf_pcpuinfo_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_pcpuinfo_t); -+ -+#define XENPF_cpu_online 56 -+#define XENPF_cpu_offline 57 -+struct xenpf_cpu_ol -+{ -+ uint32_t cpuid; -+}; -+typedef struct xenpf_cpu_ol xenpf_cpu_ol_t; -+DEFINE_XEN_GUEST_HANDLE(xenpf_cpu_ol_t); -+ -+#define XENPF_cpu_hotadd 58 -+struct xenpf_cpu_hotadd -+{ -+ uint32_t apic_id; -+ uint32_t acpi_id; -+ uint32_t pxm; -+}; -+ -+#define XENPF_mem_hotadd 59 -+struct xenpf_mem_hotadd -+{ -+ uint64_t spfn; -+ uint64_t epfn; -+ uint32_t pxm; -+ uint32_t flags; -+}; -+ -+struct xen_platform_op { -+ uint32_t cmd; -+ uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ -+ union { -+ struct xenpf_settime settime; -+ struct xenpf_add_memtype add_memtype; -+ struct xenpf_del_memtype del_memtype; -+ struct xenpf_read_memtype read_memtype; -+ struct xenpf_microcode_update microcode; -+ struct xenpf_platform_quirk platform_quirk; -+ struct xenpf_firmware_info firmware_info; -+ struct xenpf_enter_acpi_sleep enter_acpi_sleep; -+ struct xenpf_change_freq change_freq; -+ struct 
xenpf_getidletime getidletime; -+ struct xenpf_set_processor_pminfo set_pminfo; -+ struct xenpf_pcpuinfo pcpu_info; -+ struct xenpf_cpu_ol cpu_ol; -+ struct xenpf_cpu_hotadd cpu_add; -+ struct xenpf_mem_hotadd mem_add; -+ uint8_t pad[128]; -+ } u; -+}; -+typedef struct xen_platform_op xen_platform_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t); -+ -+#endif /* __XEN_PUBLIC_PLATFORM_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/sysctl.h 2011-03-17 13:50:24.000000000 +0100 -@@ -0,0 +1,637 @@ -+/****************************************************************************** -+ * sysctl.h -+ * -+ * System management operations. For use by node control stack. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. 
-+ * -+ * Copyright (c) 2002-2006, K Fraser -+ */ -+ -+#ifndef __XEN_PUBLIC_SYSCTL_H__ -+#define __XEN_PUBLIC_SYSCTL_H__ -+ -+#if !defined(__XEN__) && !defined(__XEN_TOOLS__) -+#error "sysctl operations are intended for use by node control tools only" -+#endif -+ -+#include "xen.h" -+#include "domctl.h" -+ -+#define XEN_SYSCTL_INTERFACE_VERSION 0x00000008 -+ -+/* -+ * Read console content from Xen buffer ring. -+ */ -+/* XEN_SYSCTL_readconsole */ -+struct xen_sysctl_readconsole { -+ /* IN: Non-zero -> clear after reading. */ -+ uint8_t clear; -+ /* IN: Non-zero -> start index specified by @index field. */ -+ uint8_t incremental; -+ uint8_t pad0, pad1; -+ /* -+ * IN: Start index for consuming from ring buffer (if @incremental); -+ * OUT: End index after consuming from ring buffer. -+ */ -+ uint32_t index; -+ /* IN: Virtual address to write console data. */ -+ XEN_GUEST_HANDLE_64(char) buffer; -+ /* IN: Size of buffer; OUT: Bytes written to buffer. */ -+ uint32_t count; -+}; -+typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t); -+ -+/* Get trace buffers machine base address */ -+/* XEN_SYSCTL_tbuf_op */ -+struct xen_sysctl_tbuf_op { -+ /* IN variables */ -+#define XEN_SYSCTL_TBUFOP_get_info 0 -+#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1 -+#define XEN_SYSCTL_TBUFOP_set_evt_mask 2 -+#define XEN_SYSCTL_TBUFOP_set_size 3 -+#define XEN_SYSCTL_TBUFOP_enable 4 -+#define XEN_SYSCTL_TBUFOP_disable 5 -+ uint32_t cmd; -+ /* IN/OUT variables */ -+ struct xenctl_cpumap cpu_mask; -+ uint32_t evt_mask; -+ /* OUT variables */ -+ uint64_aligned_t buffer_mfn; -+ uint32_t size; /* Also an IN variable! */ -+}; -+typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t); -+ -+/* -+ * Get physical information about the host machine -+ */ -+/* XEN_SYSCTL_physinfo */ -+ /* (x86) The platform supports HVM guests. 
*/ -+#define _XEN_SYSCTL_PHYSCAP_hvm 0 -+#define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm) -+ /* (x86) The platform supports HVM-guest direct access to I/O devices. */ -+#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1 -+#define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio) -+struct xen_sysctl_physinfo { -+ uint32_t threads_per_core; -+ uint32_t cores_per_socket; -+ uint32_t nr_cpus; /* # CPUs currently online */ -+ uint32_t max_cpu_id; /* Largest possible CPU ID on this host */ -+ uint32_t nr_nodes; /* # nodes currently online */ -+ uint32_t max_node_id; /* Largest possible node ID on this host */ -+ uint32_t cpu_khz; -+ uint64_aligned_t total_pages; -+ uint64_aligned_t free_pages; -+ uint64_aligned_t scrub_pages; -+ uint32_t hw_cap[8]; -+ -+ /* XEN_SYSCTL_PHYSCAP_??? */ -+ uint32_t capabilities; -+}; -+typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t); -+ -+/* -+ * Get the ID of the current scheduler. -+ */ -+/* XEN_SYSCTL_sched_id */ -+struct xen_sysctl_sched_id { -+ /* OUT variable */ -+ uint32_t sched_id; -+}; -+typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t); -+ -+/* Interface for controlling Xen software performance counters. */ -+/* XEN_SYSCTL_perfc_op */ -+/* Sub-operations: */ -+#define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */ -+#define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */ -+struct xen_sysctl_perfc_desc { -+ char name[80]; /* name of perf counter */ -+ uint32_t nr_vals; /* number of values for this counter */ -+}; -+typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t); -+typedef uint32_t xen_sysctl_perfc_val_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t); -+ -+struct xen_sysctl_perfc_op { -+ /* IN variables. */ -+ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */ -+ /* OUT variables. 
*/ -+ uint32_t nr_counters; /* number of counters description */ -+ uint32_t nr_vals; /* number of values */ -+ /* counter information (or NULL) */ -+ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc; -+ /* counter values (or NULL) */ -+ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val; -+}; -+typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t); -+ -+/* XEN_SYSCTL_getdomaininfolist */ -+struct xen_sysctl_getdomaininfolist { -+ /* IN variables. */ -+ domid_t first_domain; -+ uint32_t max_domains; -+ XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer; -+ /* OUT variables. */ -+ uint32_t num_domains; -+}; -+typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t); -+ -+/* Inject debug keys into Xen. */ -+/* XEN_SYSCTL_debug_keys */ -+struct xen_sysctl_debug_keys { -+ /* IN variables. */ -+ XEN_GUEST_HANDLE_64(char) keys; -+ uint32_t nr_keys; -+}; -+typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t); -+ -+/* Get physical CPU information. */ -+/* XEN_SYSCTL_getcpuinfo */ -+struct xen_sysctl_cpuinfo { -+ uint64_aligned_t idletime; -+}; -+typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t); -+struct xen_sysctl_getcpuinfo { -+ /* IN variables. */ -+ uint32_t max_cpus; -+ XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info; -+ /* OUT variables. */ -+ uint32_t nr_cpus; -+}; -+typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t); -+ -+/* XEN_SYSCTL_availheap */ -+struct xen_sysctl_availheap { -+ /* IN variables. */ -+ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */ -+ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */ -+ int32_t node; /* NUMA node of interest (-1 for all nodes). */ -+ /* OUT variables. 
*/ -+ uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */ -+}; -+typedef struct xen_sysctl_availheap xen_sysctl_availheap_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t); -+ -+/* XEN_SYSCTL_get_pmstat */ -+struct pm_px_val { -+ uint64_aligned_t freq; /* Px core frequency */ -+ uint64_aligned_t residency; /* Px residency time */ -+ uint64_aligned_t count; /* Px transition count */ -+}; -+typedef struct pm_px_val pm_px_val_t; -+DEFINE_XEN_GUEST_HANDLE(pm_px_val_t); -+ -+struct pm_px_stat { -+ uint8_t total; /* total Px states */ -+ uint8_t usable; /* usable Px states */ -+ uint8_t last; /* last Px state */ -+ uint8_t cur; /* current Px state */ -+ XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */ -+ XEN_GUEST_HANDLE_64(pm_px_val_t) pt; -+}; -+typedef struct pm_px_stat pm_px_stat_t; -+DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t); -+ -+struct pm_cx_stat { -+ uint32_t nr; /* entry nr in triggers & residencies, including C0 */ -+ uint32_t last; /* last Cx state */ -+ uint64_aligned_t idle_time; /* idle time from boot */ -+ XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */ -+ XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */ -+ uint64_aligned_t pc3; -+ uint64_aligned_t pc6; -+ uint64_aligned_t pc7; -+ uint64_aligned_t cc3; -+ uint64_aligned_t cc6; -+}; -+ -+struct xen_sysctl_get_pmstat { -+#define PMSTAT_CATEGORY_MASK 0xf0 -+#define PMSTAT_PX 0x10 -+#define PMSTAT_CX 0x20 -+#define PMSTAT_get_max_px (PMSTAT_PX | 0x1) -+#define PMSTAT_get_pxstat (PMSTAT_PX | 0x2) -+#define PMSTAT_reset_pxstat (PMSTAT_PX | 0x3) -+#define PMSTAT_get_max_cx (PMSTAT_CX | 0x1) -+#define PMSTAT_get_cxstat (PMSTAT_CX | 0x2) -+#define PMSTAT_reset_cxstat (PMSTAT_CX | 0x3) -+ uint32_t type; -+ uint32_t cpuid; -+ union { -+ struct pm_px_stat getpx; -+ struct pm_cx_stat getcx; -+ /* other struct for tx, etc */ -+ } u; -+}; -+typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t); -+ 
-+/* XEN_SYSCTL_cpu_hotplug */ -+struct xen_sysctl_cpu_hotplug { -+ /* IN variables */ -+ uint32_t cpu; /* Physical cpu. */ -+#define XEN_SYSCTL_CPU_HOTPLUG_ONLINE 0 -+#define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1 -+ uint32_t op; /* hotplug opcode */ -+}; -+typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t); -+ -+/* -+ * Get/set xen power management, include -+ * 1. cpufreq governors and related parameters -+ */ -+/* XEN_SYSCTL_pm_op */ -+struct xen_userspace { -+ uint32_t scaling_setspeed; -+}; -+typedef struct xen_userspace xen_userspace_t; -+ -+struct xen_ondemand { -+ uint32_t sampling_rate_max; -+ uint32_t sampling_rate_min; -+ -+ uint32_t sampling_rate; -+ uint32_t up_threshold; -+}; -+typedef struct xen_ondemand xen_ondemand_t; -+ -+/* -+ * cpufreq para name of this structure named -+ * same as sysfs file name of native linux -+ */ -+#define CPUFREQ_NAME_LEN 16 -+struct xen_get_cpufreq_para { -+ /* IN/OUT variable */ -+ uint32_t cpu_num; -+ uint32_t freq_num; -+ uint32_t gov_num; -+ -+ /* for all governors */ -+ /* OUT variable */ -+ XEN_GUEST_HANDLE_64(uint32) affected_cpus; -+ XEN_GUEST_HANDLE_64(uint32) scaling_available_frequencies; -+ XEN_GUEST_HANDLE_64(char) scaling_available_governors; -+ char scaling_driver[CPUFREQ_NAME_LEN]; -+ -+ uint32_t cpuinfo_cur_freq; -+ uint32_t cpuinfo_max_freq; -+ uint32_t cpuinfo_min_freq; -+ uint32_t scaling_cur_freq; -+ -+ char scaling_governor[CPUFREQ_NAME_LEN]; -+ uint32_t scaling_max_freq; -+ uint32_t scaling_min_freq; -+ -+ /* for specific governor */ -+ union { -+ struct xen_userspace userspace; -+ struct xen_ondemand ondemand; -+ } u; -+ -+ int32_t turbo_enabled; -+}; -+ -+struct xen_set_cpufreq_gov { -+ char scaling_governor[CPUFREQ_NAME_LEN]; -+}; -+ -+struct xen_set_cpufreq_para { -+ #define SCALING_MAX_FREQ 1 -+ #define SCALING_MIN_FREQ 2 -+ #define SCALING_SETSPEED 3 -+ #define SAMPLING_RATE 4 -+ #define UP_THRESHOLD 5 -+ -+ uint32_t ctrl_type; -+ 
uint32_t ctrl_value; -+}; -+ -+struct xen_sysctl_pm_op { -+ #define PM_PARA_CATEGORY_MASK 0xf0 -+ #define CPUFREQ_PARA 0x10 -+ -+ /* cpufreq command type */ -+ #define GET_CPUFREQ_PARA (CPUFREQ_PARA | 0x01) -+ #define SET_CPUFREQ_GOV (CPUFREQ_PARA | 0x02) -+ #define SET_CPUFREQ_PARA (CPUFREQ_PARA | 0x03) -+ #define GET_CPUFREQ_AVGFREQ (CPUFREQ_PARA | 0x04) -+ -+ /* set/reset scheduler power saving option */ -+ #define XEN_SYSCTL_pm_op_set_sched_opt_smt 0x21 -+ -+ /* cpuidle max_cstate access command */ -+ #define XEN_SYSCTL_pm_op_get_max_cstate 0x22 -+ #define XEN_SYSCTL_pm_op_set_max_cstate 0x23 -+ -+ /* set scheduler migration cost value */ -+ #define XEN_SYSCTL_pm_op_set_vcpu_migration_delay 0x24 -+ #define XEN_SYSCTL_pm_op_get_vcpu_migration_delay 0x25 -+ -+ /* enable/disable turbo mode when in dbs governor */ -+ #define XEN_SYSCTL_pm_op_enable_turbo 0x26 -+ #define XEN_SYSCTL_pm_op_disable_turbo 0x27 -+ -+ uint32_t cmd; -+ uint32_t cpuid; -+ union { -+ struct xen_get_cpufreq_para get_para; -+ struct xen_set_cpufreq_gov set_gov; -+ struct xen_set_cpufreq_para set_para; -+ uint64_aligned_t get_avgfreq; -+ uint32_t set_sched_opt_smt; -+ uint32_t get_max_cstate; -+ uint32_t set_max_cstate; -+ uint32_t get_vcpu_migration_delay; -+ uint32_t set_vcpu_migration_delay; -+ } u; -+}; -+ -+/* XEN_SYSCTL_page_offline_op */ -+struct xen_sysctl_page_offline_op { -+ /* IN: range of page to be offlined */ -+#define sysctl_page_offline 1 -+#define sysctl_page_online 2 -+#define sysctl_query_page_offline 3 -+ uint32_t cmd; -+ uint32_t start; -+ uint32_t end; -+ /* OUT: result of page offline request */ -+ /* -+ * bit 0~15: result flags -+ * bit 16~31: owner -+ */ -+ XEN_GUEST_HANDLE(uint32) status; -+}; -+ -+#define PG_OFFLINE_STATUS_MASK (0xFFUL) -+ -+/* The result is invalid, i.e. 
HV does not handle it */ -+#define PG_OFFLINE_INVALID (0x1UL << 0) -+ -+#define PG_OFFLINE_OFFLINED (0x1UL << 1) -+#define PG_OFFLINE_PENDING (0x1UL << 2) -+#define PG_OFFLINE_FAILED (0x1UL << 3) -+ -+#define PG_ONLINE_FAILED PG_OFFLINE_FAILED -+#define PG_ONLINE_ONLINED PG_OFFLINE_OFFLINED -+ -+#define PG_OFFLINE_STATUS_OFFLINED (0x1UL << 1) -+#define PG_OFFLINE_STATUS_ONLINE (0x1UL << 2) -+#define PG_OFFLINE_STATUS_OFFLINE_PENDING (0x1UL << 3) -+#define PG_OFFLINE_STATUS_BROKEN (0x1UL << 4) -+ -+#define PG_OFFLINE_MISC_MASK (0xFFUL << 4) -+ -+/* only valid when PG_OFFLINE_FAILED */ -+#define PG_OFFLINE_XENPAGE (0x1UL << 8) -+#define PG_OFFLINE_DOM0PAGE (0x1UL << 9) -+#define PG_OFFLINE_ANONYMOUS (0x1UL << 10) -+#define PG_OFFLINE_NOT_CONV_RAM (0x1UL << 11) -+#define PG_OFFLINE_OWNED (0x1UL << 12) -+ -+#define PG_OFFLINE_BROKEN (0x1UL << 13) -+#define PG_ONLINE_BROKEN PG_OFFLINE_BROKEN -+ -+#define PG_OFFLINE_OWNER_SHIFT 16 -+ -+/* XEN_SYSCTL_lockprof_op */ -+/* Sub-operations: */ -+#define XEN_SYSCTL_LOCKPROF_reset 1 /* Reset all profile data to zero. */ -+#define XEN_SYSCTL_LOCKPROF_query 2 /* Get lock profile information. */ -+/* Record-type: */ -+#define LOCKPROF_TYPE_GLOBAL 0 /* global lock, idx meaningless */ -+#define LOCKPROF_TYPE_PERDOM 1 /* per-domain lock, idx is domid */ -+#define LOCKPROF_TYPE_N 2 /* number of types */ -+struct xen_sysctl_lockprof_data { -+ char name[40]; /* lock name (may include up to 2 %d specifiers) */ -+ int32_t type; /* LOCKPROF_TYPE_??? */ -+ int32_t idx; /* index (e.g. domain id) */ -+ uint64_aligned_t lock_cnt; /* # of locking succeeded */ -+ uint64_aligned_t block_cnt; /* # of wait for lock */ -+ uint64_aligned_t lock_time; /* nsecs lock held */ -+ uint64_aligned_t block_time; /* nsecs waited for lock */ -+}; -+typedef struct xen_sysctl_lockprof_data xen_sysctl_lockprof_data_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_data_t); -+struct xen_sysctl_lockprof_op { -+ /* IN variables. 
*/ -+ uint32_t cmd; /* XEN_SYSCTL_LOCKPROF_??? */ -+ uint32_t max_elem; /* size of output buffer */ -+ /* OUT variables (query only). */ -+ uint32_t nr_elem; /* number of elements available */ -+ uint64_aligned_t time; /* nsecs of profile measurement */ -+ /* profile information (or NULL) */ -+ XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data; -+}; -+typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t); -+ -+/* XEN_SYSCTL_topologyinfo */ -+#define INVALID_TOPOLOGY_ID (~0U) -+struct xen_sysctl_topologyinfo { -+ /* -+ * IN: maximum addressable entry in the caller-provided arrays. -+ * OUT: largest cpu identifier in the system. -+ * If OUT is greater than IN then the arrays are truncated! -+ * If OUT is leass than IN then the array tails are not written by sysctl. -+ */ -+ uint32_t max_cpu_index; -+ -+ /* -+ * If not NULL, these arrays are filled with core/socket/node identifier -+ * for each cpu. -+ * If a cpu has no core/socket/node information (e.g., cpu not present) -+ * then the sentinel value ~0u is written to each array. -+ * The number of array elements written by the sysctl is: -+ * min(@max_cpu_index_IN,@max_cpu_index_OUT)+1 -+ */ -+ XEN_GUEST_HANDLE_64(uint32) cpu_to_core; -+ XEN_GUEST_HANDLE_64(uint32) cpu_to_socket; -+ XEN_GUEST_HANDLE_64(uint32) cpu_to_node; -+}; -+typedef struct xen_sysctl_topologyinfo xen_sysctl_topologyinfo_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t); -+ -+/* XEN_SYSCTL_numainfo */ -+struct xen_sysctl_numainfo { -+ /* -+ * IN: maximum addressable entry in the caller-provided arrays. -+ * OUT: largest node identifier in the system. -+ * If OUT is greater than IN then the arrays are truncated! -+ */ -+ uint32_t max_node_index; -+ -+ /* NB. Entries are 0 if node is not present. 
*/ -+ XEN_GUEST_HANDLE_64(uint64) node_to_memsize; -+ XEN_GUEST_HANDLE_64(uint64) node_to_memfree; -+ -+ /* -+ * Array, of size (max_node_index+1)^2, listing memory access distances -+ * between nodes. If an entry has no node distance information (e.g., node -+ * not present) then the value ~0u is written. -+ * -+ * Note that the array rows must be indexed by multiplying by the minimum -+ * of the caller-provided max_node_index and the returned value of -+ * max_node_index. That is, if the largest node index in the system is -+ * smaller than the caller can handle, a smaller 2-d array is constructed -+ * within the space provided by the caller. When this occurs, trailing -+ * space provided by the caller is not modified. If the largest node index -+ * in the system is larger than the caller can handle, then a 2-d array of -+ * the maximum size handleable by the caller is constructed. -+ */ -+ XEN_GUEST_HANDLE_64(uint32) node_to_node_distance; -+}; -+typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t); -+ -+/* XEN_SYSCTL_cpupool_op */ -+#define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */ -+#define XEN_SYSCTL_CPUPOOL_OP_DESTROY 2 /* D */ -+#define XEN_SYSCTL_CPUPOOL_OP_INFO 3 /* I */ -+#define XEN_SYSCTL_CPUPOOL_OP_ADDCPU 4 /* A */ -+#define XEN_SYSCTL_CPUPOOL_OP_RMCPU 5 /* R */ -+#define XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN 6 /* M */ -+#define XEN_SYSCTL_CPUPOOL_OP_FREEINFO 7 /* F */ -+#define XEN_SYSCTL_CPUPOOL_PAR_ANY 0xFFFFFFFF -+struct xen_sysctl_cpupool_op { -+ uint32_t op; /* IN */ -+ uint32_t cpupool_id; /* IN: CDIARM OUT: CI */ -+ uint32_t sched_id; /* IN: C OUT: I */ -+ uint32_t domid; /* IN: M */ -+ uint32_t cpu; /* IN: AR */ -+ uint32_t n_dom; /* OUT: I */ -+ struct xenctl_cpumap cpumap; /* OUT: IF */ -+}; -+typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t); -+ -+#define ARINC653_MAX_DOMAINS_PER_SCHEDULE 64 -+/* -+ * This structure is used to pass 
a new ARINC653 schedule from a -+ * privileged domain (ie dom0) to Xen. -+ */ -+struct xen_sysctl_arinc653_schedule { -+ /* major_frame holds the time for the new schedule's major frame -+ * in nanoseconds. */ -+ uint64_aligned_t major_frame; -+ /* num_sched_entries holds how many of the entries in the -+ * sched_entries[] array are valid. */ -+ uint8_t num_sched_entries; -+ /* The sched_entries array holds the actual schedule entries. */ -+ struct { -+ /* dom_handle must match a domain's UUID */ -+ xen_domain_handle_t dom_handle; -+ /* If a domain has multiple VCPUs, vcpu_id specifies which one -+ * this schedule entry applies to. It should be set to 0 if -+ * there is only one VCPU for the domain. */ -+ unsigned int vcpu_id; -+ /* runtime specifies the amount of time that should be allocated -+ * to this VCPU per major frame. It is specified in nanoseconds */ -+ uint64_aligned_t runtime; -+ } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE]; -+}; -+typedef struct xen_sysctl_arinc653_schedule xen_sysctl_arinc653_schedule_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_arinc653_schedule_t); -+ -+/* XEN_SYSCTL_scheduler_op */ -+/* Set or get info? */ -+#define XEN_SYSCTL_SCHEDOP_putinfo 0 -+#define XEN_SYSCTL_SCHEDOP_getinfo 1 -+struct xen_sysctl_scheduler_op { -+ uint32_t cpupool_id; /* Cpupool whose scheduler is to be targetted. 
*/ -+ uint32_t sched_id; /* XEN_SCHEDULER_* (domctl.h) */ -+ uint32_t cmd; /* XEN_SYSCTL_SCHEDOP_* */ -+ union { -+ struct xen_sysctl_sched_arinc653 { -+ XEN_GUEST_HANDLE_64(xen_sysctl_arinc653_schedule_t) schedule; -+ } sched_arinc653; -+ } u; -+}; -+typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t; -+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t); -+ -+struct xen_sysctl { -+ uint32_t cmd; -+#define XEN_SYSCTL_readconsole 1 -+#define XEN_SYSCTL_tbuf_op 2 -+#define XEN_SYSCTL_physinfo 3 -+#define XEN_SYSCTL_sched_id 4 -+#define XEN_SYSCTL_perfc_op 5 -+#define XEN_SYSCTL_getdomaininfolist 6 -+#define XEN_SYSCTL_debug_keys 7 -+#define XEN_SYSCTL_getcpuinfo 8 -+#define XEN_SYSCTL_availheap 9 -+#define XEN_SYSCTL_get_pmstat 10 -+#define XEN_SYSCTL_cpu_hotplug 11 -+#define XEN_SYSCTL_pm_op 12 -+#define XEN_SYSCTL_page_offline_op 14 -+#define XEN_SYSCTL_lockprof_op 15 -+#define XEN_SYSCTL_topologyinfo 16 -+#define XEN_SYSCTL_numainfo 17 -+#define XEN_SYSCTL_cpupool_op 18 -+#define XEN_SYSCTL_scheduler_op 19 -+ uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ -+ union { -+ struct xen_sysctl_readconsole readconsole; -+ struct xen_sysctl_tbuf_op tbuf_op; -+ struct xen_sysctl_physinfo physinfo; -+ struct xen_sysctl_topologyinfo topologyinfo; -+ struct xen_sysctl_numainfo numainfo; -+ struct xen_sysctl_sched_id sched_id; -+ struct xen_sysctl_perfc_op perfc_op; -+ struct xen_sysctl_getdomaininfolist getdomaininfolist; -+ struct xen_sysctl_debug_keys debug_keys; -+ struct xen_sysctl_getcpuinfo getcpuinfo; -+ struct xen_sysctl_availheap availheap; -+ struct xen_sysctl_get_pmstat get_pmstat; -+ struct xen_sysctl_cpu_hotplug cpu_hotplug; -+ struct xen_sysctl_pm_op pm_op; -+ struct xen_sysctl_page_offline_op page_offline; -+ struct xen_sysctl_lockprof_op lockprof_op; -+ struct xen_sysctl_cpupool_op cpupool_op; -+ struct xen_sysctl_scheduler_op scheduler_op; -+ uint8_t pad[128]; -+ } u; -+}; -+typedef struct xen_sysctl xen_sysctl_t; 
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t); -+ -+#endif /* __XEN_PUBLIC_SYSCTL_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/tmem.h 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,144 @@ -+/****************************************************************************** -+ * tmem.h -+ * -+ * Guest OS interface to Xen Transcendent Memory. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. 
-+ * -+ * Copyright (c) 2004, K A Fraser -+ */ -+ -+#ifndef __XEN_PUBLIC_TMEM_H__ -+#define __XEN_PUBLIC_TMEM_H__ -+ -+#include "xen.h" -+ -+/* Commands to HYPERVISOR_tmem_op() */ -+#define TMEM_CONTROL 0 -+#define TMEM_NEW_POOL 1 -+#define TMEM_DESTROY_POOL 2 -+#define TMEM_NEW_PAGE 3 -+#define TMEM_PUT_PAGE 4 -+#define TMEM_GET_PAGE 5 -+#define TMEM_FLUSH_PAGE 6 -+#define TMEM_FLUSH_OBJECT 7 -+#define TMEM_READ 8 -+#define TMEM_WRITE 9 -+#define TMEM_XCHG 10 -+ -+/* Privileged commands to HYPERVISOR_tmem_op() */ -+#define TMEM_AUTH 101 -+#define TMEM_RESTORE_NEW 102 -+ -+/* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */ -+#define TMEMC_THAW 0 -+#define TMEMC_FREEZE 1 -+#define TMEMC_FLUSH 2 -+#define TMEMC_DESTROY 3 -+#define TMEMC_LIST 4 -+#define TMEMC_SET_WEIGHT 5 -+#define TMEMC_SET_CAP 6 -+#define TMEMC_SET_COMPRESS 7 -+#define TMEMC_QUERY_FREEABLE_MB 8 -+#define TMEMC_SAVE_BEGIN 10 -+#define TMEMC_SAVE_GET_VERSION 11 -+#define TMEMC_SAVE_GET_MAXPOOLS 12 -+#define TMEMC_SAVE_GET_CLIENT_WEIGHT 13 -+#define TMEMC_SAVE_GET_CLIENT_CAP 14 -+#define TMEMC_SAVE_GET_CLIENT_FLAGS 15 -+#define TMEMC_SAVE_GET_POOL_FLAGS 16 -+#define TMEMC_SAVE_GET_POOL_NPAGES 17 -+#define TMEMC_SAVE_GET_POOL_UUID 18 -+#define TMEMC_SAVE_GET_NEXT_PAGE 19 -+#define TMEMC_SAVE_GET_NEXT_INV 20 -+#define TMEMC_SAVE_END 21 -+#define TMEMC_RESTORE_BEGIN 30 -+#define TMEMC_RESTORE_PUT_PAGE 32 -+#define TMEMC_RESTORE_FLUSH_PAGE 33 -+ -+/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ -+#define TMEM_POOL_PERSIST 1 -+#define TMEM_POOL_SHARED 2 -+#define TMEM_POOL_PAGESIZE_SHIFT 4 -+#define TMEM_POOL_PAGESIZE_MASK 0xf -+#define TMEM_POOL_VERSION_SHIFT 24 -+#define TMEM_POOL_VERSION_MASK 0xff -+ -+/* Bits for client flags (save/restore) */ -+#define TMEM_CLIENT_COMPRESS 1 -+#define TMEM_CLIENT_FROZEN 2 -+ -+/* Special errno values */ -+#define EFROZEN 1000 -+#define EEMPTY 1001 -+ -+ -+#ifndef __ASSEMBLY__ -+typedef xen_pfn_t tmem_cli_mfn_t; -+typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t; -+struct 
tmem_op { -+ uint32_t cmd; -+ int32_t pool_id; -+ union { -+ struct { -+ uint64_t uuid[2]; -+ uint32_t flags; -+ uint32_t arg1; -+ } new; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */ -+ struct { -+ uint32_t subop; -+ uint32_t cli_id; -+ uint32_t arg1; -+ uint32_t arg2; -+ uint64_t arg3; -+ tmem_cli_va_t buf; -+ } ctrl; /* for cmd == TMEM_CONTROL */ -+ struct { -+ -+ uint64_t object; -+ uint32_t index; -+ uint32_t tmem_offset; -+ uint32_t pfn_offset; -+ uint32_t len; -+ tmem_cli_mfn_t cmfn; /* client machine page frame */ -+ } gen; /* for all other cmd ("generic") */ -+ } u; -+}; -+typedef struct tmem_op tmem_op_t; -+DEFINE_XEN_GUEST_HANDLE(tmem_op_t); -+ -+struct tmem_handle { -+ uint32_t pool_id; -+ uint32_t index; -+ uint64_t oid; -+}; -+ -+#endif -+ -+#endif /* __XEN_PUBLIC_TMEM_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/trace.h 2010-08-31 09:24:21.000000000 +0200 -@@ -0,0 +1,230 @@ -+/****************************************************************************** -+ * include/public/trace.h -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. 
-+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Mark Williamson, (C) 2004 Intel Research Cambridge -+ * Copyright (C) 2005 Bin Ren -+ */ -+ -+#ifndef __XEN_PUBLIC_TRACE_H__ -+#define __XEN_PUBLIC_TRACE_H__ -+ -+#define TRACE_EXTRA_MAX 7 -+#define TRACE_EXTRA_SHIFT 28 -+ -+/* Trace classes */ -+#define TRC_CLS_SHIFT 16 -+#define TRC_GEN 0x0001f000 /* General trace */ -+#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */ -+#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */ -+#define TRC_HVM 0x0008f000 /* Xen HVM trace */ -+#define TRC_MEM 0x0010f000 /* Xen memory trace */ -+#define TRC_PV 0x0020f000 /* Xen PV traces */ -+#define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */ -+#define TRC_PM 0x0080f000 /* Xen power management trace */ -+#define TRC_GUEST 0x0800f000 /* Guest-generated traces */ -+#define TRC_ALL 0x0ffff000 -+#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff) -+#define TRC_HD_CYCLE_FLAG (1UL<<31) -+#define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) ) -+#define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX) -+ -+/* Trace subclasses */ -+#define TRC_SUBCLS_SHIFT 12 -+ -+/* trace subclasses for SVM */ -+#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ -+#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ -+ -+#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */ -+#define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */ -+#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */ -+ -+/* Trace events per class */ 
-+#define TRC_LOST_RECORDS (TRC_GEN + 1) -+#define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2) -+#define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3) -+#define TRC_TRACE_IRQ (TRC_GEN + 4) -+ -+#define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1) -+#define TRC_SCHED_CONTINUE_RUNNING (TRC_SCHED_MIN + 2) -+#define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1) -+#define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2) -+#define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3) -+#define TRC_SCHED_WAKE (TRC_SCHED_VERBOSE + 4) -+#define TRC_SCHED_YIELD (TRC_SCHED_VERBOSE + 5) -+#define TRC_SCHED_BLOCK (TRC_SCHED_VERBOSE + 6) -+#define TRC_SCHED_SHUTDOWN (TRC_SCHED_VERBOSE + 7) -+#define TRC_SCHED_CTL (TRC_SCHED_VERBOSE + 8) -+#define TRC_SCHED_ADJDOM (TRC_SCHED_VERBOSE + 9) -+#define TRC_SCHED_SWITCH (TRC_SCHED_VERBOSE + 10) -+#define TRC_SCHED_S_TIMER_FN (TRC_SCHED_VERBOSE + 11) -+#define TRC_SCHED_T_TIMER_FN (TRC_SCHED_VERBOSE + 12) -+#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13) -+#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14) -+#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15) -+#define TRC_SCHED_SHUTDOWN_CODE (TRC_SCHED_VERBOSE + 16) -+ -+#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1) -+#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2) -+#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3) -+#define TRC_MEM_SET_P2M_ENTRY (TRC_MEM + 4) -+#define TRC_MEM_DECREASE_RESERVATION (TRC_MEM + 5) -+#define TRC_MEM_POD_POPULATE (TRC_MEM + 16) -+#define TRC_MEM_POD_ZERO_RECLAIM (TRC_MEM + 17) -+#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18) -+ -+ -+#define TRC_PV_HYPERCALL (TRC_PV + 1) -+#define TRC_PV_TRAP (TRC_PV + 3) -+#define TRC_PV_PAGE_FAULT (TRC_PV + 4) -+#define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5) -+#define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6) -+#define TRC_PV_EMULATE_4GB (TRC_PV + 7) -+#define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8) -+#define TRC_PV_PAGING_FIXUP (TRC_PV + 9) -+#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10) -+#define TRC_PV_PTWR_EMULATION (TRC_PV + 11) 
-+#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12) -+ /* Indicates that addresses in trace record are 64 bits */ -+#define TRC_64_FLAG (0x100) -+ -+#define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1) -+#define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2) -+#define TRC_SHADOW_FAST_MMIO (TRC_SHADOW + 3) -+#define TRC_SHADOW_FALSE_FAST_PATH (TRC_SHADOW + 4) -+#define TRC_SHADOW_MMIO (TRC_SHADOW + 5) -+#define TRC_SHADOW_FIXUP (TRC_SHADOW + 6) -+#define TRC_SHADOW_DOMF_DYING (TRC_SHADOW + 7) -+#define TRC_SHADOW_EMULATE (TRC_SHADOW + 8) -+#define TRC_SHADOW_EMULATE_UNSHADOW_USER (TRC_SHADOW + 9) -+#define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ (TRC_SHADOW + 10) -+#define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11) -+#define TRC_SHADOW_WRMAP_BF (TRC_SHADOW + 12) -+#define TRC_SHADOW_PREALLOC_UNPIN (TRC_SHADOW + 13) -+#define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 14) -+#define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15) -+ -+/* trace events per subclass */ -+#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01) -+#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02) -+#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02) -+#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01) -+#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01) -+#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02) -+#define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02) -+#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03) -+#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04) -+#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05) -+#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06) -+#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07) -+#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08) -+#define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08) -+#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09) -+#define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09) -+#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A) -+#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B) -+#define TRC_HVM_MSR_READ 
(TRC_HVM_HANDLER + 0x0C) -+#define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D) -+#define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E) -+#define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F) -+#define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10) -+#define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11) -+#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12) -+#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13) -+#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14) -+#define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14) -+#define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15) -+#define TRC_HVM_IOPORT_READ (TRC_HVM_HANDLER + 0x16) -+#define TRC_HVM_IOMEM_READ (TRC_HVM_HANDLER + 0x17) -+#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18) -+#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19) -+#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19) -+#define TRC_HVM_RDTSC (TRC_HVM_HANDLER + 0x1a) -+#define TRC_HVM_INTR_WINDOW (TRC_HVM_HANDLER + 0x20) -+#define TRC_HVM_NPF (TRC_HVM_HANDLER + 0x21) -+ -+#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216) -+#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217) -+ -+/* trace subclasses for power management */ -+#define TRC_PM_FREQ 0x00801000 /* xen cpu freq events */ -+#define TRC_PM_IDLE 0x00802000 /* xen cpu idle events */ -+ -+/* trace events for per class */ -+#define TRC_PM_FREQ_CHANGE (TRC_PM_FREQ + 0x01) -+#define TRC_PM_IDLE_ENTRY (TRC_PM_IDLE + 0x01) -+#define TRC_PM_IDLE_EXIT (TRC_PM_IDLE + 0x02) -+ -+/* This structure represents a single trace buffer record. */ -+struct t_rec { -+ uint32_t event:28; -+ uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */ -+ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */ -+ union { -+ struct { -+ uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */ -+ uint32_t extra_u32[7]; /* event data items */ -+ } cycles; -+ struct { -+ uint32_t extra_u32[7]; /* event data items */ -+ } nocycles; -+ } u; -+}; -+ -+/* -+ * This structure contains the metadata for a single trace buffer. 
The head -+ * field, indexes into an array of struct t_rec's. -+ */ -+struct t_buf { -+ /* Assume the data buffer size is X. X is generally not a power of 2. -+ * CONS and PROD are incremented modulo (2*X): -+ * 0 <= cons < 2*X -+ * 0 <= prod < 2*X -+ * This is done because addition modulo X breaks at 2^32 when X is not a -+ * power of 2: -+ * (((2^32 - 1) % X) + 1) % X != (2^32) % X -+ */ -+ uint32_t cons; /* Offset of next item to be consumed by control tools. */ -+ uint32_t prod; /* Offset of next item to be produced by Xen. */ -+ /* Records follow immediately after the meta-data header. */ -+}; -+ -+/* Structure used to pass MFNs to the trace buffers back to trace consumers. -+ * Offset is an offset into the mapped structure where the mfn list will be held. -+ * MFNs will be at ((unsigned long *)(t_info))+(t_info->cpu_offset[cpu]). -+ */ -+struct t_info { -+ uint16_t tbuf_size; /* Size in pages of each trace buffer */ -+ uint16_t mfn_offset[]; /* Offset within t_info structure of the page list per cpu */ -+ /* MFN lists immediately after the header */ -+}; -+ -+#endif /* __XEN_PUBLIC_TRACE_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/xen-compat.h 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,44 @@ -+/****************************************************************************** -+ * xen-compat.h -+ * -+ * Guest OS interface to Xen. Compatibility layer. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (c) 2006, Christian Limpach -+ */ -+ -+#ifndef __XEN_PUBLIC_XEN_COMPAT_H__ -+#define __XEN_PUBLIC_XEN_COMPAT_H__ -+ -+#define __XEN_LATEST_INTERFACE_VERSION__ 0x0003020a -+ -+#if defined(__XEN__) || defined(__XEN_TOOLS__) -+/* Xen is built with matching headers and implements the latest interface. */ -+#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ -+#elif !defined(__XEN_INTERFACE_VERSION__) -+/* Guests which do not specify a version get the legacy interface. */ -+#define __XEN_INTERFACE_VERSION__ 0x00000000 -+#endif -+ -+#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ -+#error "These header files do not support the requested interface version." 
-+#endif -+ -+#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/xenoprof.h 2010-08-31 09:24:21.000000000 +0200 -@@ -0,0 +1,152 @@ -+/****************************************************************************** -+ * xenoprof.h -+ * -+ * Interface for enabling system wide profiling based on hardware performance -+ * counters -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Copyright (C) 2005 Hewlett-Packard Co. -+ * Written by Aravind Menon & Jose Renato Santos -+ */ -+ -+#ifndef __XEN_PUBLIC_XENOPROF_H__ -+#define __XEN_PUBLIC_XENOPROF_H__ -+ -+#include "xen.h" -+ -+/* -+ * Commands to HYPERVISOR_xenoprof_op(). 
-+ */ -+#define XENOPROF_init 0 -+#define XENOPROF_reset_active_list 1 -+#define XENOPROF_reset_passive_list 2 -+#define XENOPROF_set_active 3 -+#define XENOPROF_set_passive 4 -+#define XENOPROF_reserve_counters 5 -+#define XENOPROF_counter 6 -+#define XENOPROF_setup_events 7 -+#define XENOPROF_enable_virq 8 -+#define XENOPROF_start 9 -+#define XENOPROF_stop 10 -+#define XENOPROF_disable_virq 11 -+#define XENOPROF_release_counters 12 -+#define XENOPROF_shutdown 13 -+#define XENOPROF_get_buffer 14 -+#define XENOPROF_set_backtrace 15 -+ -+/* AMD IBS support */ -+#define XENOPROF_get_ibs_caps 16 -+#define XENOPROF_ibs_counter 17 -+#define XENOPROF_last_op 17 -+ -+#define MAX_OPROF_EVENTS 32 -+#define MAX_OPROF_DOMAINS 25 -+#define XENOPROF_CPU_TYPE_SIZE 64 -+ -+/* Xenoprof performance events (not Xen events) */ -+struct event_log { -+ uint64_t eip; -+ uint8_t mode; -+ uint8_t event; -+}; -+ -+/* PC value that indicates a special code */ -+#define XENOPROF_ESCAPE_CODE ~0UL -+/* Transient events for the xenoprof->oprofile cpu buf */ -+#define XENOPROF_TRACE_BEGIN 1 -+ -+/* Xenoprof buffer shared between Xen and domain - 1 per VCPU */ -+struct xenoprof_buf { -+ uint32_t event_head; -+ uint32_t event_tail; -+ uint32_t event_size; -+ uint32_t vcpu_id; -+ uint64_t xen_samples; -+ uint64_t kernel_samples; -+ uint64_t user_samples; -+ uint64_t lost_samples; -+ struct event_log event_log[1]; -+}; -+#ifndef __XEN__ -+typedef struct xenoprof_buf xenoprof_buf_t; -+DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t); -+#endif -+ -+struct xenoprof_init { -+ int32_t num_events; -+ int32_t is_primary; -+ char cpu_type[XENOPROF_CPU_TYPE_SIZE]; -+}; -+typedef struct xenoprof_init xenoprof_init_t; -+DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t); -+ -+struct xenoprof_get_buffer { -+ int32_t max_samples; -+ int32_t nbuf; -+ int32_t bufsize; -+ uint64_t buf_gmaddr; -+}; -+typedef struct xenoprof_get_buffer xenoprof_get_buffer_t; -+DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t); -+ -+struct xenoprof_counter 
{ -+ uint32_t ind; -+ uint64_t count; -+ uint32_t enabled; -+ uint32_t event; -+ uint32_t hypervisor; -+ uint32_t kernel; -+ uint32_t user; -+ uint64_t unit_mask; -+}; -+typedef struct xenoprof_counter xenoprof_counter_t; -+DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t); -+ -+typedef struct xenoprof_passive { -+ uint16_t domain_id; -+ int32_t max_samples; -+ int32_t nbuf; -+ int32_t bufsize; -+ uint64_t buf_gmaddr; -+} xenoprof_passive_t; -+DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t); -+ -+struct xenoprof_ibs_counter { -+ uint64_t op_enabled; -+ uint64_t fetch_enabled; -+ uint64_t max_cnt_fetch; -+ uint64_t max_cnt_op; -+ uint64_t rand_en; -+ uint64_t dispatched_ops; -+}; -+typedef struct xenoprof_ibs_counter xenoprof_ibs_counter_t; -+DEFINE_XEN_GUEST_HANDLE(xenoprof_ibs_counter_t); -+ -+#endif /* __XEN_PUBLIC_XENOPROF_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/xsm/acm.h 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,223 @@ -+/* -+ * acm.h: Xen access control module interface defintions -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. 
-+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Reiner Sailer -+ * Copyright (c) 2005, International Business Machines Corporation. -+ */ -+ -+#ifndef _XEN_PUBLIC_ACM_H -+#define _XEN_PUBLIC_ACM_H -+ -+#include "../xen.h" -+ -+/* default ssid reference value if not supplied */ -+#define ACM_DEFAULT_SSID 0x0 -+#define ACM_DEFAULT_LOCAL_SSID 0x0 -+ -+/* Internal ACM ERROR types */ -+#define ACM_OK 0 -+#define ACM_UNDEF -1 -+#define ACM_INIT_SSID_ERROR -2 -+#define ACM_INIT_SOID_ERROR -3 -+#define ACM_ERROR -4 -+ -+/* External ACCESS DECISIONS */ -+#define ACM_ACCESS_PERMITTED 0 -+#define ACM_ACCESS_DENIED -111 -+#define ACM_NULL_POINTER_ERROR -200 -+ -+/* -+ Error codes reported in when trying to test for a new policy -+ These error codes are reported in an array of tuples where -+ each error code is followed by a parameter describing the error -+ more closely, such as a domain id. 
-+*/ -+#define ACM_EVTCHN_SHARING_VIOLATION 0x100 -+#define ACM_GNTTAB_SHARING_VIOLATION 0x101 -+#define ACM_DOMAIN_LOOKUP 0x102 -+#define ACM_CHWALL_CONFLICT 0x103 -+#define ACM_SSIDREF_IN_USE 0x104 -+ -+ -+/* primary policy in lower 4 bits */ -+#define ACM_NULL_POLICY 0 -+#define ACM_CHINESE_WALL_POLICY 1 -+#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2 -+#define ACM_POLICY_UNDEFINED 15 -+ -+/* combinations have secondary policy component in higher 4bit */ -+#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \ -+ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY) -+ -+/* policy: */ -+#define ACM_POLICY_NAME(X) \ -+ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \ -+ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \ -+ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \ -+ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \ -+ "UNDEFINED" -+ -+/* the following policy versions must be increased -+ * whenever the interpretation of the related -+ * policy's data structure changes -+ */ -+#define ACM_POLICY_VERSION 4 -+#define ACM_CHWALL_VERSION 1 -+#define ACM_STE_VERSION 1 -+ -+/* defines a ssid reference used by xen */ -+typedef uint32_t ssidref_t; -+ -+/* hooks that are known to domains */ -+#define ACMHOOK_none 0 -+#define ACMHOOK_sharing 1 -+#define ACMHOOK_authorization 2 -+#define ACMHOOK_conflictset 3 -+ -+/* -------security policy relevant type definitions-------- */ -+ -+/* type identifier; compares to "equal" or "not equal" */ -+typedef uint16_t domaintype_t; -+ -+/* CHINESE WALL POLICY DATA STRUCTURES -+ * -+ * current accumulated conflict type set: -+ * When a domain is started and has a type that is in -+ * a conflict set, the conflicting types are incremented in -+ * the aggregate set. When a domain is destroyed, the -+ * conflicting types to its type are decremented. 
-+ * If a domain has multiple types, this procedure works over -+ * all those types. -+ * -+ * conflict_aggregate_set[i] holds the number of -+ * running domains that have a conflict with type i. -+ * -+ * running_types[i] holds the number of running domains -+ * that include type i in their ssidref-referenced type set -+ * -+ * conflict_sets[i][j] is "0" if type j has no conflict -+ * with type i and is "1" otherwise. -+ */ -+/* high-16 = version, low-16 = check magic */ -+#define ACM_MAGIC 0x0001debc -+ -+/* size of the SHA1 hash identifying the XML policy from which the -+ binary policy was created */ -+#define ACM_SHA1_HASH_SIZE 20 -+ -+/* each offset in bytes from start of the struct they -+ * are part of */ -+ -+/* V3 of the policy buffer aded a version structure */ -+struct acm_policy_version -+{ -+ uint32_t major; -+ uint32_t minor; -+}; -+ -+ -+/* each buffer consists of all policy information for -+ * the respective policy given in the policy code -+ * -+ * acm_policy_buffer, acm_chwall_policy_buffer, -+ * and acm_ste_policy_buffer need to stay 32-bit aligned -+ * because we create binary policies also with external -+ * tools that assume packed representations (e.g. 
the java tool) -+ */ -+struct acm_policy_buffer { -+ uint32_t magic; -+ uint32_t policy_version; /* ACM_POLICY_VERSION */ -+ uint32_t len; -+ uint32_t policy_reference_offset; -+ uint32_t primary_policy_code; -+ uint32_t primary_buffer_offset; -+ uint32_t secondary_policy_code; -+ uint32_t secondary_buffer_offset; -+ struct acm_policy_version xml_pol_version; /* add in V3 */ -+ uint8_t xml_policy_hash[ACM_SHA1_HASH_SIZE]; /* added in V4 */ -+}; -+ -+ -+struct acm_policy_reference_buffer { -+ uint32_t len; -+}; -+ -+struct acm_chwall_policy_buffer { -+ uint32_t policy_version; /* ACM_CHWALL_VERSION */ -+ uint32_t policy_code; -+ uint32_t chwall_max_types; -+ uint32_t chwall_max_ssidrefs; -+ uint32_t chwall_max_conflictsets; -+ uint32_t chwall_ssid_offset; -+ uint32_t chwall_conflict_sets_offset; -+ uint32_t chwall_running_types_offset; -+ uint32_t chwall_conflict_aggregate_offset; -+}; -+ -+struct acm_ste_policy_buffer { -+ uint32_t policy_version; /* ACM_STE_VERSION */ -+ uint32_t policy_code; -+ uint32_t ste_max_types; -+ uint32_t ste_max_ssidrefs; -+ uint32_t ste_ssid_offset; -+}; -+ -+struct acm_stats_buffer { -+ uint32_t magic; -+ uint32_t len; -+ uint32_t primary_policy_code; -+ uint32_t primary_stats_offset; -+ uint32_t secondary_policy_code; -+ uint32_t secondary_stats_offset; -+}; -+ -+struct acm_ste_stats_buffer { -+ uint32_t ec_eval_count; -+ uint32_t gt_eval_count; -+ uint32_t ec_denied_count; -+ uint32_t gt_denied_count; -+ uint32_t ec_cachehit_count; -+ uint32_t gt_cachehit_count; -+}; -+ -+struct acm_ssid_buffer { -+ uint32_t len; -+ ssidref_t ssidref; -+ uint32_t policy_reference_offset; -+ uint32_t primary_policy_code; -+ uint32_t primary_max_types; -+ uint32_t primary_types_offset; -+ uint32_t secondary_policy_code; -+ uint32_t secondary_max_types; -+ uint32_t secondary_types_offset; -+}; -+ -+#endif -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ 
---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/xsm/acm_ops.h 2007-10-22 13:39:15.000000000 +0200 -@@ -0,0 +1,159 @@ -+/* -+ * acm_ops.h: Xen access control module hypervisor commands -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to -+ * deal in the Software without restriction, including without limitation the -+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Reiner Sailer -+ * Copyright (c) 2005,2006 International Business Machines Corporation. -+ */ -+ -+#ifndef __XEN_PUBLIC_ACM_OPS_H__ -+#define __XEN_PUBLIC_ACM_OPS_H__ -+ -+#include "../xen.h" -+#include "acm.h" -+ -+/* -+ * Make sure you increment the interface version whenever you modify this file! -+ * This makes sure that old versions of acm tools will stop working in a -+ * well-defined way (rather than crashing the machine, for instance). 
-+ */ -+#define ACM_INTERFACE_VERSION 0xAAAA000A -+ -+/************************************************************************/ -+ -+/* -+ * Prototype for this hypercall is: -+ * int acm_op(int cmd, void *args) -+ * @cmd == ACMOP_??? (access control module operation). -+ * @args == Operation-specific extra arguments (NULL if none). -+ */ -+ -+ -+#define ACMOP_setpolicy 1 -+struct acm_setpolicy { -+ /* IN */ -+ XEN_GUEST_HANDLE_64(void) pushcache; -+ uint32_t pushcache_size; -+}; -+ -+ -+#define ACMOP_getpolicy 2 -+struct acm_getpolicy { -+ /* IN */ -+ XEN_GUEST_HANDLE_64(void) pullcache; -+ uint32_t pullcache_size; -+}; -+ -+ -+#define ACMOP_dumpstats 3 -+struct acm_dumpstats { -+ /* IN */ -+ XEN_GUEST_HANDLE_64(void) pullcache; -+ uint32_t pullcache_size; -+}; -+ -+ -+#define ACMOP_getssid 4 -+#define ACM_GETBY_ssidref 1 -+#define ACM_GETBY_domainid 2 -+struct acm_getssid { -+ /* IN */ -+ uint32_t get_ssid_by; /* ACM_GETBY_* */ -+ union { -+ domaintype_t domainid; -+ ssidref_t ssidref; -+ } id; -+ XEN_GUEST_HANDLE_64(void) ssidbuf; -+ uint32_t ssidbuf_size; -+}; -+ -+#define ACMOP_getdecision 5 -+struct acm_getdecision { -+ /* IN */ -+ uint32_t get_decision_by1; /* ACM_GETBY_* */ -+ uint32_t get_decision_by2; /* ACM_GETBY_* */ -+ union { -+ domaintype_t domainid; -+ ssidref_t ssidref; -+ } id1; -+ union { -+ domaintype_t domainid; -+ ssidref_t ssidref; -+ } id2; -+ uint32_t hook; -+ /* OUT */ -+ uint32_t acm_decision; -+}; -+ -+ -+#define ACMOP_chgpolicy 6 -+struct acm_change_policy { -+ /* IN */ -+ XEN_GUEST_HANDLE_64(void) policy_pushcache; -+ uint32_t policy_pushcache_size; -+ XEN_GUEST_HANDLE_64(void) del_array; -+ uint32_t delarray_size; -+ XEN_GUEST_HANDLE_64(void) chg_array; -+ uint32_t chgarray_size; -+ /* OUT */ -+ /* array with error code */ -+ XEN_GUEST_HANDLE_64(void) err_array; -+ uint32_t errarray_size; -+}; -+ -+#define ACMOP_relabeldoms 7 -+struct acm_relabel_doms { -+ /* IN */ -+ XEN_GUEST_HANDLE_64(void) relabel_map; -+ uint32_t 
relabel_map_size; -+ /* OUT */ -+ XEN_GUEST_HANDLE_64(void) err_array; -+ uint32_t errarray_size; -+}; -+ -+/* future interface to Xen */ -+struct xen_acmctl { -+ uint32_t cmd; -+ uint32_t interface_version; -+ union { -+ struct acm_setpolicy setpolicy; -+ struct acm_getpolicy getpolicy; -+ struct acm_dumpstats dumpstats; -+ struct acm_getssid getssid; -+ struct acm_getdecision getdecision; -+ struct acm_change_policy change_policy; -+ struct acm_relabel_doms relabel_doms; -+ } u; -+}; -+ -+typedef struct xen_acmctl xen_acmctl_t; -+DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t); -+ -+#endif /* __XEN_PUBLIC_ACM_OPS_H__ */ -+ -+/* -+ * Local variables: -+ * mode: C -+ * c-set-style: "BSD" -+ * c-basic-offset: 4 -+ * tab-width: 4 -+ * indent-tabs-mode: nil -+ * End: -+ */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/include/xen/interface/xsm/flask_op.h 2010-01-04 11:56:34.000000000 +0100 -@@ -0,0 +1,47 @@ -+/* -+ * This file contains the flask_op hypercall commands and definitions. -+ * -+ * Author: George Coker, -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2, -+ * as published by the Free Software Foundation. 
-+ */ -+ -+#ifndef __FLASK_OP_H__ -+#define __FLASK_OP_H__ -+ -+#define FLASK_LOAD 1 -+#define FLASK_GETENFORCE 2 -+#define FLASK_SETENFORCE 3 -+#define FLASK_CONTEXT_TO_SID 4 -+#define FLASK_SID_TO_CONTEXT 5 -+#define FLASK_ACCESS 6 -+#define FLASK_CREATE 7 -+#define FLASK_RELABEL 8 -+#define FLASK_USER 9 -+#define FLASK_POLICYVERS 10 -+#define FLASK_GETBOOL 11 -+#define FLASK_SETBOOL 12 -+#define FLASK_COMMITBOOLS 13 -+#define FLASK_MLS 14 -+#define FLASK_DISABLE 15 -+#define FLASK_GETAVC_THRESHOLD 16 -+#define FLASK_SETAVC_THRESHOLD 17 -+#define FLASK_AVC_HASHSTATS 18 -+#define FLASK_AVC_CACHESTATS 19 -+#define FLASK_MEMBER 20 -+#define FLASK_ADD_OCONTEXT 21 -+#define FLASK_DEL_OCONTEXT 22 -+ -+#define FLASK_LAST FLASK_DEL_OCONTEXT -+ -+typedef struct flask_op { -+ uint32_t cmd; -+ uint32_t size; -+ char *buf; -+} flask_op_t; -+ -+DEFINE_XEN_GUEST_HANDLE(flask_op_t); -+ -+#endif diff --git a/patches.xen/xen3-auto-xen-arch.diff b/patches.xen/xen3-auto-xen-arch.diff deleted file mode 100644 index ae988bd..0000000 --- a/patches.xen/xen3-auto-xen-arch.diff +++ /dev/null @@ -1,44652 +0,0 @@ -Subject: xen3 xen-arch -From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 1073:8fe973d8fb98) -Patch-mainline: n/a -Acked-by: jbeulich@novell.com - -List of files having Xen derivates (perhaps created during the merging -of newer kernel versions), for xen-port-patches.py to pick up (i.e. 
this -must be retained here until the XenSource tree has these in the right -places): -+++ linux/arch/x86/kernel/acpi/sleep-xen.c -+++ linux/arch/x86/kernel/apic/io_apic-xen.c -+++ linux/arch/x86/kernel/apic/ipi-xen.c -+++ linux/arch/x86/kernel/apic/probe_32-xen.c -+++ linux/arch/x86/kernel/apic-xen.c -+++ linux/arch/x86/kernel/cpu/common_64-xen.c -+++ linux/arch/x86/kernel/e820-xen.c -+++ linux/arch/x86/kernel/head-xen.c -+++ linux/arch/x86/kernel/head32-xen.c -+++ linux/arch/x86/kernel/ioport-xen.c -+++ linux/arch/x86/kernel/io_apic-xen.c -+++ linux/arch/x86/kernel/ipi-xen.c -+++ linux/arch/x86/kernel/irq-xen.c -+++ linux/arch/x86/kernel/irq_work-xen.c -+++ linux/arch/x86/kernel/ldt-xen.c -+++ linux/arch/x86/kernel/microcode_core-xen.c -+++ linux/arch/x86/kernel/mpparse-xen.c -+++ linux/arch/x86/kernel/msr-xen.c -+++ linux/arch/x86/kernel/pci-nommu-xen.c -+++ linux/arch/x86/kernel/process-xen.c -+++ linux/arch/x86/kernel/setup-xen.c -+++ linux/arch/x86/kernel/smp-xen.c -+++ linux/arch/x86/kernel/traps-xen.c -+++ linux/arch/x86/kernel/x86_init-xen.c -+++ linux/arch/x86/lib/cache-smp-xen.c -+++ linux/arch/x86/mm/dump_pagetables-xen.c -+++ linux/arch/x86/mm/fault-xen.c -+++ linux/arch/x86/mm/init-xen.c -+++ linux/arch/x86/mm/iomap_32-xen.c -+++ linux/arch/x86/mm/pageattr-xen.c -+++ linux/arch/x86/mm/pat-xen.c -+++ linux/arch/x86/mm/pgtable-xen.c -+++ linux/arch/x86/vdso/vdso32-setup-xen.c -+++ linux/drivers/char/mem-xen.c -+++ linux/drivers/hwmon/coretemp-xen.c -+++ linux/drivers/hwmon/pkgtemp-xen.c -+++ linux/drivers/hwmon/via-cputemp-xen.c -+++ linux/arch/x86/include/mach-xen/asm/desc.h -+++ linux/arch/x86/include/mach-xen/asm/dma-mapping.h -+++ linux/arch/x86/include/mach-xen/asm/fixmap.h -+++ linux/arch/x86/include/mach-xen/asm/io.h -+++ linux/arch/x86/include/mach-xen/asm/ipi.h -+++ linux/arch/x86/include/mach-xen/asm/irq_vectors.h -+++ linux/arch/x86/include/mach-xen/asm/irqflags.h -+++ linux/arch/x86/include/mach-xen/asm/mmu_context.h -+++ 
linux/arch/x86/include/mach-xen/asm/pci.h -+++ linux/arch/x86/include/mach-xen/asm/perf_event.h -+++ linux/arch/x86/include/mach-xen/asm/pgalloc.h -+++ linux/arch/x86/include/mach-xen/asm/pgtable.h -+++ linux/arch/x86/include/mach-xen/asm/pgtable-3level_types.h -+++ linux/arch/x86/include/mach-xen/asm/pgtable_64_types.h -+++ linux/arch/x86/include/mach-xen/asm/pgtable_types.h -+++ linux/arch/x86/include/mach-xen/asm/processor.h -+++ linux/arch/x86/include/mach-xen/asm/smp.h -+++ linux/arch/x86/include/mach-xen/asm/spinlock.h -+++ linux/arch/x86/include/mach-xen/asm/spinlock_types.h -+++ linux/arch/x86/include/mach-xen/asm/swiotlb.h -+++ linux/arch/x86/include/mach-xen/asm/system.h -+++ linux/arch/x86/include/mach-xen/asm/tlbflush.h -+++ linux/arch/x86/include/mach-xen/asm/xor.h - -List of files folded into their native counterparts (and hence removed -from this patch for xen-port-patches.py to not needlessly pick them up; -for reference, prefixed with the version the removal occured): -2.6.18/arch/x86/kernel/quirks-xen.c -2.6.18/arch/x86/include/mach-xen/asm/pgtable-2level.h -2.6.18/arch/x86/include/mach-xen/asm/pgtable-2level-defs.h -2.6.19/arch/x86/include/mach-xen/asm/ptrace.h -2.6.23/arch/x86/include/mach-xen/asm/ptrace_64.h -2.6.23/arch/x86/kernel/vsyscall-note_32-xen.S -2.6.24/arch/x86/include/mach-xen/asm/arch_hooks_64.h -2.6.24/arch/x86/include/mach-xen/asm/bootsetup_64.h -2.6.24/arch/x86/include/mach-xen/asm/mmu_32.h -2.6.24/arch/x86/include/mach-xen/asm/mmu_64.h -2.6.24/arch/x86/include/mach-xen/asm/nmi_64.h -2.6.24/arch/x86/include/mach-xen/asm/setup.h -2.6.24/arch/x86/include/mach-xen/asm/time_64.h (added in 2.6.20) -2.6.24/arch/x86/include/mach-xen/mach_timer.h -2.6.24/arch/x86/kernel/early_printk_32-xen.c -2.6.25/arch/x86/ia32/syscall32-xen.c -2.6.25/arch/x86/ia32/syscall32_syscall-xen.S -2.6.25/arch/x86/ia32/vsyscall-int80.S -2.6.25/arch/x86/include/mach-xen/asm/msr.h -2.6.25/arch/x86/include/mach-xen/asm/page_32.h 
-2.6.25/arch/x86/include/mach-xen/asm/spinlock_32.h -2.6.25/arch/x86/include/mach-xen/asm/timer.h (added in 2.6.24) -2.6.25/arch/x86/include/mach-xen/asm/timer_64.h -2.6.25/arch/x86/include/mach-xen/mach_time.h -2.6.25/arch/x86/kernel/acpi/boot-xen.c -2.6.26/arch/x86/include/mach-xen/asm/dma-mapping_32.h -2.6.26/arch/x86/include/mach-xen/asm/dma-mapping_64.h -2.6.26/arch/x86/include/mach-xen/asm/nmi.h (added in 2.6.24) -2.6.26/arch/x86/include/mach-xen/asm/scatterlist.h (added in 2.6.24) -2.6.26/arch/x86/include/mach-xen/asm/scatterlist_32.h -2.6.26/arch/x86/include/mach-xen/asm/swiotlb_32.h -2.6.26/arch/x86/kernel/pci-dma_32-xen.c -2.6.26/arch/x86/kernel/pci-swiotlb_64-xen.c -2.6.26/include/xen/xencomm.h -2.6.27/arch/x86/include/mach-xen/asm/e820.h (added in 2.6.24) -2.6.27/arch/x86/include/mach-xen/asm/e820_64.h -2.6.27/arch/x86/include/mach-xen/asm/hw_irq.h (added in 2.6.24) -2.6.27/arch/x86/include/mach-xen/asm/hw_irq_32.h -2.6.27/arch/x86/include/mach-xen/asm/hw_irq_64.h -2.6.27/arch/x86/include/mach-xen/asm/io_32.h -2.6.27/arch/x86/include/mach-xen/asm/io_64.h -2.6.27/arch/x86/include/mach-xen/asm/irq.h (added in 2.6.24) -2.6.27/arch/x86/include/mach-xen/asm/irq_64.h -2.6.27/arch/x86/kernel/e820_32-xen.c -2.6.28/arch/x86/include/mach-xen/asm/pci_64.h -2.6.28/arch/x86/include/mach-xen/asm/segment.h (added in 2.6.24) -2.6.28/arch/x86/include/mach-xen/asm/segment_32.h -2.6.30/arch/x86/include/mach-xen/asm/page.h (added in 2.6.24) -2.6.30/arch/x86/include/mach-xen/asm/page_64.h -2.6.30/arch/x86/include/mach-xen/asm/pci_32.h -2.6.30/arch/x86/kernel/apic/apic_xen_64.c -2.6.30/arch/x86/kernel/apic/probe_64-xen.c -2.6.30/arch/x86/kernel/setup_percpu-xen.c (added in 2.6.27) -2.6.31/arch/x86/kernel/init_task-xen.c -2.6.32/arch/x86/include/mach-xen/asm/setup_arch.h -2.6.33/arch/x86/kernel/irq_32-xen.c -2.6.33/arch/x86/kernel/irq_64-xen.c - ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/acpi/processor_extcntl_xen.c 2010-03-22 12:00:53.000000000 
+0100 -@@ -0,0 +1,208 @@ -+/* -+ * processor_extcntl_xen.c - interface to notify Xen -+ * -+ * Copyright (C) 2008, Intel corporation -+ * -+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or (at -+ * your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+static int xen_cx_notifier(struct acpi_processor *pr, int action) -+{ -+ int ret, count = 0, i; -+ xen_platform_op_t op = { -+ .cmd = XENPF_set_processor_pminfo, -+ .interface_version = XENPF_INTERFACE_VERSION, -+ .u.set_pminfo.id = pr->acpi_id, -+ .u.set_pminfo.type = XEN_PM_CX, -+ }; -+ struct xen_processor_cx *data, *buf; -+ struct acpi_processor_cx *cx; -+ -+ /* Convert to Xen defined structure and hypercall */ -+ buf = kzalloc(pr->power.count * sizeof(struct xen_processor_cx), -+ GFP_KERNEL); -+ if (!buf) -+ return -ENOMEM; -+ -+ data = buf; -+ for (i = 1; i <= pr->power.count; i++) { -+ cx = &pr->power.states[i]; -+ /* Skip invalid cstate entry */ -+ if (!cx->valid) -+ continue; -+ -+ data->type = cx->type; -+ data->latency = cx->latency; -+ data->power = cx->power; -+ data->reg.space_id = cx->reg.space_id; -+ data->reg.bit_width = cx->reg.bit_width; -+ data->reg.bit_offset = cx->reg.bit_offset; -+ 
data->reg.access_size = cx->reg.reserved; -+ data->reg.address = cx->reg.address; -+ -+ /* Get dependency relationships */ -+ if (cx->csd_count) { -+ printk("Wow! _CSD is found. Not support for now!\n"); -+ kfree(buf); -+ return -EINVAL; -+ } else { -+ data->dpcnt = 0; -+ set_xen_guest_handle(data->dp, NULL); -+ } -+ -+ data++; -+ count++; -+ } -+ -+ if (!count) { -+ printk("No available Cx info for cpu %d\n", pr->acpi_id); -+ kfree(buf); -+ return -EINVAL; -+ } -+ -+ op.u.set_pminfo.u.power.count = count; -+ op.u.set_pminfo.u.power.flags.bm_control = pr->flags.bm_control; -+ op.u.set_pminfo.u.power.flags.bm_check = pr->flags.bm_check; -+ op.u.set_pminfo.u.power.flags.has_cst = pr->flags.has_cst; -+ op.u.set_pminfo.u.power.flags.power_setup_done = pr->flags.power_setup_done; -+ -+ set_xen_guest_handle(op.u.set_pminfo.u.power.states, buf); -+ ret = HYPERVISOR_platform_op(&op); -+ kfree(buf); -+ return ret; -+} -+ -+static int xen_px_notifier(struct acpi_processor *pr, int action) -+{ -+ int ret = -EINVAL; -+ xen_platform_op_t op = { -+ .cmd = XENPF_set_processor_pminfo, -+ .interface_version = XENPF_INTERFACE_VERSION, -+ .u.set_pminfo.id = pr->acpi_id, -+ .u.set_pminfo.type = XEN_PM_PX, -+ }; -+ struct xen_processor_performance *perf; -+ struct xen_processor_px *states = NULL; -+ struct acpi_processor_performance *px; -+ struct acpi_psd_package *pdomain; -+ -+ if (!pr) -+ return -EINVAL; -+ -+ perf = &op.u.set_pminfo.u.perf; -+ px = pr->performance; -+ if (!px) -+ return -EINVAL; -+ -+ switch(action) { -+ case PROCESSOR_PM_CHANGE: -+ /* ppc dynamic handle */ -+ perf->flags = XEN_PX_PPC; -+ perf->platform_limit = pr->performance_platform_limit; -+ -+ ret = HYPERVISOR_platform_op(&op); -+ break; -+ -+ case PROCESSOR_PM_INIT: -+ /* px normal init */ -+ perf->flags = XEN_PX_PPC | -+ XEN_PX_PCT | -+ XEN_PX_PSS | -+ XEN_PX_PSD; -+ -+ /* ppc */ -+ perf->platform_limit = pr->performance_platform_limit; -+ -+ /* pct */ -+ xen_convert_pct_reg(&perf->control_register, 
&px->control_register); -+ xen_convert_pct_reg(&perf->status_register, &px->status_register); -+ -+ /* pss */ -+ perf->state_count = px->state_count; -+ states = kzalloc(px->state_count*sizeof(xen_processor_px_t),GFP_KERNEL); -+ if (!states) -+ return -ENOMEM; -+ xen_convert_pss_states(states, px->states, px->state_count); -+ set_xen_guest_handle(perf->states, states); -+ -+ /* psd */ -+ pdomain = &px->domain_info; -+ xen_convert_psd_pack(&perf->domain_info, pdomain); -+ if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) -+ perf->shared_type = CPUFREQ_SHARED_TYPE_ALL; -+ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) -+ perf->shared_type = CPUFREQ_SHARED_TYPE_ANY; -+ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) -+ perf->shared_type = CPUFREQ_SHARED_TYPE_HW; -+ else { -+ ret = -ENODEV; -+ kfree(states); -+ break; -+ } -+ -+ ret = HYPERVISOR_platform_op(&op); -+ kfree(states); -+ break; -+ -+ default: -+ break; -+ } -+ -+ return ret; -+} -+ -+static int xen_tx_notifier(struct acpi_processor *pr, int action) -+{ -+ return -EINVAL; -+} -+static int xen_hotplug_notifier(struct acpi_processor *pr, int event) -+{ -+ return -EINVAL; -+} -+ -+static struct processor_extcntl_ops xen_extcntl_ops = { -+ .hotplug = xen_hotplug_notifier, -+}; -+ -+void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops) -+{ -+ unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8; -+ -+ if (!pmbits) -+ return; -+ if (pmbits & XEN_PROCESSOR_PM_CX) -+ xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier; -+ if (pmbits & XEN_PROCESSOR_PM_PX) -+ xen_extcntl_ops.pm_ops[PM_TYPE_PERF] = xen_px_notifier; -+ if (pmbits & XEN_PROCESSOR_PM_TX) -+ xen_extcntl_ops.pm_ops[PM_TYPE_THR] = xen_tx_notifier; -+ -+ *ops = &xen_extcntl_ops; -+} -+EXPORT_SYMBOL(arch_acpi_processor_init_extcntl); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/acpi/sleep_32-xen.c 2008-04-15 09:29:41.000000000 +0200 -@@ -0,0 +1,113 @@ -+/* -+ * 
sleep.c - x86-specific ACPI sleep support. -+ * -+ * Copyright (C) 2001-2003 Patrick Mochel -+ * Copyright (C) 2001-2003 Pavel Machek -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include -+ -+#ifndef CONFIG_ACPI_PV_SLEEP -+/* address in low memory of the wakeup routine. */ -+unsigned long acpi_wakeup_address = 0; -+unsigned long acpi_video_flags; -+extern char wakeup_start, wakeup_end; -+ -+extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long)); -+#endif -+ -+/** -+ * acpi_save_state_mem - save kernel state -+ * -+ * Create an identity mapped page table and copy the wakeup routine to -+ * low memory. -+ */ -+int acpi_save_state_mem(void) -+{ -+#ifndef CONFIG_ACPI_PV_SLEEP -+ if (!acpi_wakeup_address) -+ return 1; -+ memcpy((void *)acpi_wakeup_address, &wakeup_start, -+ &wakeup_end - &wakeup_start); -+ acpi_copy_wakeup_routine(acpi_wakeup_address); -+#endif -+ return 0; -+} -+ -+/* -+ * acpi_restore_state - undo effects of acpi_save_state_mem -+ */ -+void acpi_restore_state_mem(void) -+{ -+} -+ -+/** -+ * acpi_reserve_bootmem - do _very_ early ACPI initialisation -+ * -+ * We allocate a page from the first 1MB of memory for the wakeup -+ * routine for when we come back from a sleep state. The -+ * runtime allocator allows specification of <16MB pages, but not -+ * <1MB pages. 
-+ */ -+void __init acpi_reserve_bootmem(void) -+{ -+#ifndef CONFIG_ACPI_PV_SLEEP -+ if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) { -+ printk(KERN_ERR -+ "ACPI: Wakeup code way too big, S3 disabled.\n"); -+ return; -+ } -+ -+ acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE); -+ if (!acpi_wakeup_address) -+ printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); -+#endif -+} -+ -+#ifndef CONFIG_ACPI_PV_SLEEP -+static int __init acpi_sleep_setup(char *str) -+{ -+ while ((str != NULL) && (*str != '\0')) { -+ if (strncmp(str, "s3_bios", 7) == 0) -+ acpi_video_flags = 1; -+ if (strncmp(str, "s3_mode", 7) == 0) -+ acpi_video_flags |= 2; -+ str = strchr(str, ','); -+ if (str != NULL) -+ str += strspn(str, ", \t"); -+ } -+ return 1; -+} -+ -+__setup("acpi_sleep=", acpi_sleep_setup); -+ -+static __init int reset_videomode_after_s3(struct dmi_system_id *d) -+{ -+ acpi_video_flags |= 2; -+ return 0; -+} -+ -+static __initdata struct dmi_system_id acpisleep_dmi_table[] = { -+ { /* Reset video mode after returning from ACPI S3 sleep */ -+ .callback = reset_videomode_after_s3, -+ .ident = "Toshiba Satellite 4030cdt", -+ .matches = { -+ DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), -+ }, -+ }, -+ {} -+}; -+ -+static int __init acpisleep_dmi_init(void) -+{ -+ dmi_check_system(acpisleep_dmi_table); -+ return 0; -+} -+ -+core_initcall(acpisleep_dmi_init); -+#endif /* CONFIG_ACPI_PV_SLEEP */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/apic/apic-xen.c 2007-06-12 13:12:48.000000000 +0200 -@@ -0,0 +1,155 @@ -+/* -+ * Local APIC handling, local APIC timers -+ * -+ * (c) 1999, 2000 Ingo Molnar -+ * -+ * Fixes -+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs; -+ * thanks to Eric Gilmore -+ * and Rolf G. Tews -+ * for testing these extensively. -+ * Maciej W. Rozycki : Various updates and fixes. -+ * Mikael Pettersson : Power Management for UP-APIC. -+ * Pavel Machek and -+ * Mikael Pettersson : PM converted to driver model. 
-+ */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "io_ports.h" -+ -+#ifndef CONFIG_XEN -+/* -+ * cpu_mask that denotes the CPUs that needs timer interrupt coming in as -+ * IPIs in place of local APIC timers -+ */ -+static cpumask_t timer_bcast_ipi; -+#endif -+ -+/* -+ * Knob to control our willingness to enable the local APIC. -+ */ -+int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */ -+ -+/* -+ * Debug level -+ */ -+int apic_verbosity; -+ -+#ifndef CONFIG_XEN -+static int modern_apic(void) -+{ -+ unsigned int lvr, version; -+ /* AMD systems use old APIC versions, so check the CPU */ -+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && -+ boot_cpu_data.x86 >= 0xf) -+ return 1; -+ lvr = apic_read(APIC_LVR); -+ version = GET_APIC_VERSION(lvr); -+ return version >= 0x14; -+} -+#endif /* !CONFIG_XEN */ -+ -+/* -+ * 'what should we do if we get a hw irq event on an illegal vector'. -+ * each architecture has to answer this themselves. -+ */ -+void ack_bad_irq(unsigned int irq) -+{ -+ printk("unexpected IRQ trap at vector %02x\n", irq); -+ /* -+ * Currently unexpected vectors happen only on SMP and APIC. -+ * We _must_ ack these because every local APIC has only N -+ * irq slots per priority level, and a 'hanging, unacked' IRQ -+ * holds up an irq slot - in excessive cases (when multiple -+ * unexpected vectors occur) that might lock up the APIC -+ * completely. -+ * But only ack when the APIC is enabled -AK -+ */ -+ if (cpu_has_apic) -+ ack_APIC_irq(); -+} -+ -+int get_physical_broadcast(void) -+{ -+ return 0xff; -+} -+ -+#ifndef CONFIG_XEN -+#ifndef CONFIG_SMP -+static void up_apic_timer_interrupt_call(struct pt_regs *regs) -+{ -+ int cpu = smp_processor_id(); -+ -+ /* -+ * the NMI deadlock-detector uses this. 
-+ */ -+ per_cpu(irq_stat, cpu).apic_timer_irqs++; -+ -+ smp_local_timer_interrupt(regs); -+} -+#endif -+ -+void smp_send_timer_broadcast_ipi(struct pt_regs *regs) -+{ -+ cpumask_t mask; -+ -+ cpus_and(mask, cpu_online_map, timer_bcast_ipi); -+ if (!cpus_empty(mask)) { -+#ifdef CONFIG_SMP -+ send_IPI_mask(mask, LOCAL_TIMER_VECTOR); -+#else -+ /* -+ * We can directly call the apic timer interrupt handler -+ * in UP case. Minus all irq related functions -+ */ -+ up_apic_timer_interrupt_call(regs); -+#endif -+ } -+} -+#endif -+ -+int setup_profiling_timer(unsigned int multiplier) -+{ -+ return -EINVAL; -+} -+ -+/* -+ * This initializes the IO-APIC and APIC hardware if this is -+ * a UP kernel. -+ */ -+int __init APIC_init_uniprocessor (void) -+{ -+#ifdef CONFIG_X86_IO_APIC -+ if (smp_found_config) -+ if (!skip_ioapic_setup && nr_ioapics) -+ setup_IO_APIC(); -+#endif -+ -+ return 0; -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/cpu/common-xen.c 2009-05-19 09:16:41.000000000 +0200 -@@ -0,0 +1,745 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#ifdef CONFIG_X86_LOCAL_APIC -+#include -+#include -+#include -+#else -+#ifdef CONFIG_XEN -+#define phys_pkg_id(a,b) a -+#endif -+#endif -+#include -+ -+#include "cpu.h" -+ -+DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr); -+EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr); -+ -+#ifndef CONFIG_XEN -+DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); -+EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); -+#endif -+ -+static int cachesize_override __cpuinitdata = -1; -+static int disable_x86_fxsr __cpuinitdata; -+static int disable_x86_serial_nr __cpuinitdata = 1; -+static int disable_x86_sep __cpuinitdata; -+ -+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; -+ -+extern int disable_pse; -+ -+static void default_init(struct cpuinfo_x86 * c) -+{ -+ /* Not much we can do here... 
*/ -+ /* Check if at least it has cpuid */ -+ if (c->cpuid_level == -1) { -+ /* No cpuid. It must be an ancient CPU */ -+ if (c->x86 == 4) -+ strcpy(c->x86_model_id, "486"); -+ else if (c->x86 == 3) -+ strcpy(c->x86_model_id, "386"); -+ } -+} -+ -+static struct cpu_dev default_cpu = { -+ .c_init = default_init, -+ .c_vendor = "Unknown", -+}; -+static struct cpu_dev * this_cpu = &default_cpu; -+ -+static int __init cachesize_setup(char *str) -+{ -+ get_option (&str, &cachesize_override); -+ return 1; -+} -+__setup("cachesize=", cachesize_setup); -+ -+int __cpuinit get_model_name(struct cpuinfo_x86 *c) -+{ -+ unsigned int *v; -+ char *p, *q; -+ -+ if (cpuid_eax(0x80000000) < 0x80000004) -+ return 0; -+ -+ v = (unsigned int *) c->x86_model_id; -+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); -+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); -+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); -+ c->x86_model_id[48] = 0; -+ -+ /* Intel chips right-justify this string for some dumb reason; -+ undo that brain damage */ -+ p = q = &c->x86_model_id[0]; -+ while ( *p == ' ' ) -+ p++; -+ if ( p != q ) { -+ while ( *p ) -+ *q++ = *p++; -+ while ( q <= &c->x86_model_id[48] ) -+ *q++ = '\0'; /* Zero-pad the rest */ -+ } -+ -+ return 1; -+} -+ -+ -+void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) -+{ -+ unsigned int n, dummy, ecx, edx, l2size; -+ -+ n = cpuid_eax(0x80000000); -+ -+ if (n >= 0x80000005) { -+ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); -+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", -+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); -+ c->x86_cache_size=(ecx>>24)+(edx>>24); -+ } -+ -+ if (n < 0x80000006) /* Some chips just has a large L1. */ -+ return; -+ -+ ecx = cpuid_ecx(0x80000006); -+ l2size = ecx >> 16; -+ -+ /* do processor-specific cache resizing */ -+ if (this_cpu->c_size_cache) -+ l2size = this_cpu->c_size_cache(c,l2size); -+ -+ /* Allow user to override all this if necessary. 
*/ -+ if (cachesize_override != -1) -+ l2size = cachesize_override; -+ -+ if ( l2size == 0 ) -+ return; /* Again, no L2 cache is possible */ -+ -+ c->x86_cache_size = l2size; -+ -+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", -+ l2size, ecx & 0xFF); -+} -+ -+/* Naming convention should be: [()] */ -+/* This table only is used unless init_() below doesn't set it; */ -+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ -+ -+/* Look up CPU names by table lookup. */ -+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) -+{ -+ struct cpu_model_info *info; -+ -+ if ( c->x86_model >= 16 ) -+ return NULL; /* Range check */ -+ -+ if (!this_cpu) -+ return NULL; -+ -+ info = this_cpu->c_models; -+ -+ while (info && info->family) { -+ if (info->family == c->x86) -+ return info->model_names[c->x86_model]; -+ info++; -+ } -+ return NULL; /* Not found */ -+} -+ -+ -+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) -+{ -+ char *v = c->x86_vendor_id; -+ int i; -+ static int printed; -+ -+ for (i = 0; i < X86_VENDOR_NUM; i++) { -+ if (cpu_devs[i]) { -+ if (!strcmp(v,cpu_devs[i]->c_ident[0]) || -+ (cpu_devs[i]->c_ident[1] && -+ !strcmp(v,cpu_devs[i]->c_ident[1]))) { -+ c->x86_vendor = i; -+ if (!early) -+ this_cpu = cpu_devs[i]; -+ return; -+ } -+ } -+ } -+ if (!printed) { -+ printed++; -+ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); -+ printk(KERN_ERR "CPU: Your system may be unstable.\n"); -+ } -+ c->x86_vendor = X86_VENDOR_UNKNOWN; -+ this_cpu = &default_cpu; -+} -+ -+ -+static int __init x86_fxsr_setup(char * s) -+{ -+ disable_x86_fxsr = 1; -+ return 1; -+} -+__setup("nofxsr", x86_fxsr_setup); -+ -+ -+static int __init x86_sep_setup(char * s) -+{ -+ disable_x86_sep = 1; -+ return 1; -+} -+__setup("nosep", x86_sep_setup); -+ -+ -+/* Standard macro to see if a specific flag is changeable */ -+static inline int flag_is_changeable_p(u32 flag) -+{ -+ u32 f1, f2; -+ -+ asm("pushfl\n\t" -+ 
"pushfl\n\t" -+ "popl %0\n\t" -+ "movl %0,%1\n\t" -+ "xorl %2,%0\n\t" -+ "pushl %0\n\t" -+ "popfl\n\t" -+ "pushfl\n\t" -+ "popl %0\n\t" -+ "popfl\n\t" -+ : "=&r" (f1), "=&r" (f2) -+ : "ir" (flag)); -+ -+ return ((f1^f2) & flag) != 0; -+} -+ -+ -+/* Probe for the CPUID instruction */ -+static int __cpuinit have_cpuid_p(void) -+{ -+ return flag_is_changeable_p(X86_EFLAGS_ID); -+} -+ -+/* Do minimum CPU detection early. -+ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment. -+ The others are not touched to avoid unwanted side effects. -+ -+ WARNING: this function is only called on the BP. Don't add code here -+ that is supposed to run on all CPUs. */ -+static void __init early_cpu_detect(void) -+{ -+ struct cpuinfo_x86 *c = &boot_cpu_data; -+ -+ c->x86_cache_alignment = 32; -+ -+ if (!have_cpuid_p()) -+ return; -+ -+ /* Get vendor name */ -+ cpuid(0x00000000, &c->cpuid_level, -+ (int *)&c->x86_vendor_id[0], -+ (int *)&c->x86_vendor_id[8], -+ (int *)&c->x86_vendor_id[4]); -+ -+ get_cpu_vendor(c, 1); -+ -+ c->x86 = 4; -+ if (c->cpuid_level >= 0x00000001) { -+ u32 junk, tfms, cap0, misc; -+ cpuid(0x00000001, &tfms, &misc, &junk, &cap0); -+ c->x86 = (tfms >> 8) & 15; -+ c->x86_model = (tfms >> 4) & 15; -+ if (c->x86 == 0xf) -+ c->x86 += (tfms >> 20) & 0xff; -+ if (c->x86 >= 0x6) -+ c->x86_model += ((tfms >> 16) & 0xF) << 4; -+ c->x86_mask = tfms & 15; -+ if (cap0 & (1<<19)) -+ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8; -+ } -+} -+ -+void __cpuinit generic_identify(struct cpuinfo_x86 * c) -+{ -+ u32 tfms, xlvl; -+ int ebx; -+ -+ if (have_cpuid_p()) { -+ /* Get vendor name */ -+ cpuid(0x00000000, &c->cpuid_level, -+ (int *)&c->x86_vendor_id[0], -+ (int *)&c->x86_vendor_id[8], -+ (int *)&c->x86_vendor_id[4]); -+ -+ get_cpu_vendor(c, 0); -+ /* Initialize the standard set of capabilities */ -+ /* Note that the vendor-specific code below might override */ -+ -+ /* Intel-defined flags: level 0x00000001 */ -+ if ( c->cpuid_level >= 0x00000001 
) { -+ u32 capability, excap; -+ cpuid(0x00000001, &tfms, &ebx, &excap, &capability); -+ c->x86_capability[0] = capability; -+ c->x86_capability[4] = excap; -+ c->x86 = (tfms >> 8) & 15; -+ c->x86_model = (tfms >> 4) & 15; -+ if (c->x86 == 0xf) -+ c->x86 += (tfms >> 20) & 0xff; -+ if (c->x86 >= 0x6) -+ c->x86_model += ((tfms >> 16) & 0xF) << 4; -+ c->x86_mask = tfms & 15; -+#ifndef CONFIG_XEN -+#ifdef CONFIG_X86_HT -+ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); -+#else -+ c->apicid = (ebx >> 24) & 0xFF; -+#endif -+#endif -+ } else { -+ /* Have CPUID level 0 only - unheard of */ -+ c->x86 = 4; -+ } -+ -+ /* AMD-defined flags: level 0x80000001 */ -+ xlvl = cpuid_eax(0x80000000); -+ if ( (xlvl & 0xffff0000) == 0x80000000 ) { -+ if ( xlvl >= 0x80000001 ) { -+ c->x86_capability[1] = cpuid_edx(0x80000001); -+ c->x86_capability[6] = cpuid_ecx(0x80000001); -+ } -+ if ( xlvl >= 0x80000004 ) -+ get_model_name(c); /* Default name */ -+ } -+ } -+ -+ early_intel_workaround(c); -+ -+#ifdef CONFIG_X86_HT -+ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; -+#endif -+} -+ -+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) -+{ -+ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { -+ /* Disable processor serial number */ -+ unsigned long lo,hi; -+ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi); -+ lo |= 0x200000; -+ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi); -+ printk(KERN_NOTICE "CPU serial number disabled.\n"); -+ clear_bit(X86_FEATURE_PN, c->x86_capability); -+ -+ /* Disabling the serial number may affect the cpuid level */ -+ c->cpuid_level = cpuid_eax(0); -+ } -+} -+ -+static int __init x86_serial_nr_setup(char *s) -+{ -+ disable_x86_serial_nr = 0; -+ return 1; -+} -+__setup("serialnumber", x86_serial_nr_setup); -+ -+ -+ -+/* -+ * This does the hard work of actually picking apart the CPU stuff... 
-+ */ -+void __cpuinit identify_cpu(struct cpuinfo_x86 *c) -+{ -+ int i; -+ -+ c->loops_per_jiffy = loops_per_jiffy; -+ c->x86_cache_size = -1; -+ c->x86_vendor = X86_VENDOR_UNKNOWN; -+ c->cpuid_level = -1; /* CPUID not detected */ -+ c->x86_model = c->x86_mask = 0; /* So far unknown... */ -+ c->x86_vendor_id[0] = '\0'; /* Unset */ -+ c->x86_model_id[0] = '\0'; /* Unset */ -+ c->x86_max_cores = 1; -+ memset(&c->x86_capability, 0, sizeof c->x86_capability); -+ -+ if (!have_cpuid_p()) { -+ /* First of all, decide if this is a 486 or higher */ -+ /* It's a 486 if we can modify the AC flag */ -+ if ( flag_is_changeable_p(X86_EFLAGS_AC) ) -+ c->x86 = 4; -+ else -+ c->x86 = 3; -+ } -+ -+ generic_identify(c); -+ -+ printk(KERN_DEBUG "CPU: After generic identify, caps:"); -+ for (i = 0; i < NCAPINTS; i++) -+ printk(" %08lx", c->x86_capability[i]); -+ printk("\n"); -+ -+ if (this_cpu->c_identify) { -+ this_cpu->c_identify(c); -+ -+ printk(KERN_DEBUG "CPU: After vendor identify, caps:"); -+ for (i = 0; i < NCAPINTS; i++) -+ printk(" %08lx", c->x86_capability[i]); -+ printk("\n"); -+ } -+ -+ /* -+ * Vendor-specific initialization. In this section we -+ * canonicalize the feature flags, meaning if there are -+ * features a certain CPU supports which CPUID doesn't -+ * tell us, CPUID claiming incorrect flags, or other bugs, -+ * we handle them here. -+ * -+ * At the end of this section, c->x86_capability better -+ * indicate the features this CPU genuinely supports! -+ */ -+ if (this_cpu->c_init) -+ this_cpu->c_init(c); -+ -+ /* Disable the PN if appropriate */ -+ squash_the_stupid_serial_number(c); -+ -+ /* -+ * The vendor-specific functions might have changed features. Now -+ * we do "generic changes." -+ */ -+ -+ /* TSC disabled? */ -+ if ( tsc_disable ) -+ clear_bit(X86_FEATURE_TSC, c->x86_capability); -+ -+ /* FXSR disabled? 
*/ -+ if (disable_x86_fxsr) { -+ clear_bit(X86_FEATURE_FXSR, c->x86_capability); -+ clear_bit(X86_FEATURE_XMM, c->x86_capability); -+ } -+ -+ /* SEP disabled? */ -+ if (disable_x86_sep) -+ clear_bit(X86_FEATURE_SEP, c->x86_capability); -+ -+ if (disable_pse) -+ clear_bit(X86_FEATURE_PSE, c->x86_capability); -+ -+ /* If the model name is still unset, do table lookup. */ -+ if ( !c->x86_model_id[0] ) { -+ char *p; -+ p = table_lookup_model(c); -+ if ( p ) -+ strcpy(c->x86_model_id, p); -+ else -+ /* Last resort... */ -+ sprintf(c->x86_model_id, "%02x/%02x", -+ c->x86, c->x86_model); -+ } -+ -+ /* Now the feature flags better reflect actual CPU features! */ -+ -+ printk(KERN_DEBUG "CPU: After all inits, caps:"); -+ for (i = 0; i < NCAPINTS; i++) -+ printk(" %08lx", c->x86_capability[i]); -+ printk("\n"); -+ -+ /* -+ * On SMP, boot_cpu_data holds the common feature set between -+ * all CPUs; so make sure that we indicate which features are -+ * common between the CPUs. The first time this routine gets -+ * executed, c == &boot_cpu_data. -+ */ -+ if ( c != &boot_cpu_data ) { -+ /* AND the already accumulated flags with these */ -+ for ( i = 0 ; i < NCAPINTS ; i++ ) -+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; -+ } -+ -+ /* Init Machine Check Exception if available. 
*/ -+ mcheck_init(c); -+ -+ if (c == &boot_cpu_data) -+ sysenter_setup(); -+ enable_sep_cpu(); -+ -+ if (c == &boot_cpu_data) -+ mtrr_bp_init(); -+ else -+ mtrr_ap_init(); -+} -+ -+#ifdef CONFIG_X86_HT -+void __cpuinit detect_ht(struct cpuinfo_x86 *c) -+{ -+ u32 eax, ebx, ecx, edx; -+ int index_msb, core_bits; -+ -+ cpuid(1, &eax, &ebx, &ecx, &edx); -+ -+ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) -+ return; -+ -+ smp_num_siblings = (ebx & 0xff0000) >> 16; -+ -+ if (smp_num_siblings == 1) { -+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); -+ } else if (smp_num_siblings > 1 ) { -+ -+ if (smp_num_siblings > NR_CPUS) { -+ printk(KERN_WARNING "CPU: Unsupported number of the " -+ "siblings %d", smp_num_siblings); -+ smp_num_siblings = 1; -+ return; -+ } -+ -+ index_msb = get_count_order(smp_num_siblings); -+ c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); -+ -+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n", -+ c->phys_proc_id); -+ -+ smp_num_siblings = smp_num_siblings / c->x86_max_cores; -+ -+ index_msb = get_count_order(smp_num_siblings) ; -+ -+ core_bits = get_count_order(c->x86_max_cores); -+ -+ c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & -+ ((1 << core_bits) - 1); -+ -+ if (c->x86_max_cores > 1) -+ printk(KERN_INFO "CPU: Processor Core ID: %d\n", -+ c->cpu_core_id); -+ } -+} -+#endif -+ -+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) -+{ -+ char *vendor = NULL; -+ -+ if (c->x86_vendor < X86_VENDOR_NUM) -+ vendor = this_cpu->c_vendor; -+ else if (c->cpuid_level >= 0) -+ vendor = c->x86_vendor_id; -+ -+ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor))) -+ printk("%s ", vendor); -+ -+ if (!c->x86_model_id[0]) -+ printk("%d86", c->x86); -+ else -+ printk("%s", c->x86_model_id); -+ -+ if (c->x86_mask || c->cpuid_level >= 0) -+ printk(" stepping %02x\n", c->x86_mask); -+ else -+ printk("\n"); -+} -+ -+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; -+ -+/* This is 
hacky. :) -+ * We're emulating future behavior. -+ * In the future, the cpu-specific init functions will be called implicitly -+ * via the magic of initcalls. -+ * They will insert themselves into the cpu_devs structure. -+ * Then, when cpu_init() is called, we can just iterate over that array. -+ */ -+ -+extern int intel_cpu_init(void); -+extern int cyrix_init_cpu(void); -+extern int nsc_init_cpu(void); -+extern int amd_init_cpu(void); -+extern int centaur_init_cpu(void); -+extern int transmeta_init_cpu(void); -+extern int rise_init_cpu(void); -+extern int nexgen_init_cpu(void); -+extern int umc_init_cpu(void); -+ -+void __init early_cpu_init(void) -+{ -+ intel_cpu_init(); -+ cyrix_init_cpu(); -+ nsc_init_cpu(); -+ amd_init_cpu(); -+ centaur_init_cpu(); -+ transmeta_init_cpu(); -+ rise_init_cpu(); -+ nexgen_init_cpu(); -+ umc_init_cpu(); -+ early_cpu_detect(); -+ -+#ifdef CONFIG_DEBUG_PAGEALLOC -+ /* pse is not compatible with on-the-fly unmapping, -+ * disable it even if the cpus claim to support it. -+ */ -+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); -+ disable_pse = 1; -+#endif -+} -+ -+static void __cpuinit cpu_gdt_init(const struct Xgt_desc_struct *gdt_descr) -+{ -+ unsigned long frames[16]; -+ unsigned long va; -+ int f; -+ -+ for (va = gdt_descr->address, f = 0; -+ va < gdt_descr->address + gdt_descr->size; -+ va += PAGE_SIZE, f++) { -+ frames[f] = virt_to_mfn(va); -+ make_lowmem_page_readonly( -+ (void *)va, XENFEAT_writable_descriptor_tables); -+ } -+ if (HYPERVISOR_set_gdt(frames, (gdt_descr->size + 1) / 8)) -+ BUG(); -+} -+ -+/* -+ * cpu_init() initializes state that is per-CPU. Some data is already -+ * initialized (naturally) in the bootstrap process, such as the GDT -+ * and IDT. We reload them nevertheless, this function acts as a -+ * 'CPU state barrier', nothing should get across. 
-+ */ -+void __cpuinit cpu_init(void) -+{ -+ int cpu = smp_processor_id(); -+#ifndef CONFIG_X86_NO_TSS -+ struct tss_struct * t = &per_cpu(init_tss, cpu); -+#endif -+ struct thread_struct *thread = ¤t->thread; -+ struct desc_struct *gdt; -+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); -+ -+ if (cpu_test_and_set(cpu, cpu_initialized)) { -+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); -+ for (;;) local_irq_enable(); -+ } -+ printk(KERN_INFO "Initializing CPU#%d\n", cpu); -+ -+ if (cpu_has_vme || cpu_has_de) -+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); -+ if (tsc_disable && cpu_has_tsc) { -+ printk(KERN_NOTICE "Disabling TSC...\n"); -+ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/ -+ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability); -+ set_in_cr4(X86_CR4_TSD); -+ } -+ -+#ifndef CONFIG_XEN -+ /* The CPU hotplug case */ -+ if (cpu_gdt_descr->address) { -+ gdt = (struct desc_struct *)cpu_gdt_descr->address; -+ memset(gdt, 0, PAGE_SIZE); -+ goto old_gdt; -+ } -+ /* -+ * This is a horrible hack to allocate the GDT. 
The problem -+ * is that cpu_init() is called really early for the boot CPU -+ * (and hence needs bootmem) but much later for the secondary -+ * CPUs, when bootmem will have gone away -+ */ -+ if (NODE_DATA(0)->bdata->node_bootmem_map) { -+ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE); -+ /* alloc_bootmem_pages panics on failure, so no check */ -+ memset(gdt, 0, PAGE_SIZE); -+ } else { -+ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL); -+ if (unlikely(!gdt)) { -+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu); -+ for (;;) -+ local_irq_enable(); -+ } -+ } -+old_gdt: -+ /* -+ * Initialize the per-CPU GDT with the boot GDT, -+ * and set up the GDT descriptor: -+ */ -+ memcpy(gdt, cpu_gdt_table, GDT_SIZE); -+ -+ /* Set up GDT entry for 16bit stack */ -+ *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |= -+ ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) | -+ ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) | -+ (CPU_16BIT_STACK_SIZE - 1); -+ -+ cpu_gdt_descr->size = GDT_SIZE - 1; -+ cpu_gdt_descr->address = (unsigned long)gdt; -+#else -+ if (cpu == 0 && cpu_gdt_descr->address == 0) { -+ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE); -+ /* alloc_bootmem_pages panics on failure, so no check */ -+ memset(gdt, 0, PAGE_SIZE); -+ -+ memcpy(gdt, cpu_gdt_table, GDT_SIZE); -+ -+ cpu_gdt_descr->size = GDT_SIZE; -+ cpu_gdt_descr->address = (unsigned long)gdt; -+ } -+#endif -+ -+ cpu_gdt_init(cpu_gdt_descr); -+ -+ /* -+ * Set up and load the per-CPU TSS and LDT -+ */ -+ atomic_inc(&init_mm.mm_count); -+ current->active_mm = &init_mm; -+ if (current->mm) -+ BUG(); -+ enter_lazy_tlb(&init_mm, current); -+ -+ load_esp0(t, thread); -+ -+ load_LDT(&init_mm.context); -+ -+#ifdef CONFIG_DOUBLEFAULT -+ /* Set up doublefault TSS pointer in the GDT */ -+ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); -+#endif -+ -+ /* Clear %fs and %gs. 
*/ -+ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs"); -+ -+ /* Clear all 6 debug registers: */ -+ set_debugreg(0, 0); -+ set_debugreg(0, 1); -+ set_debugreg(0, 2); -+ set_debugreg(0, 3); -+ set_debugreg(0, 6); -+ set_debugreg(0, 7); -+ -+ /* -+ * Force FPU initialization: -+ */ -+ current_thread_info()->status = 0; -+ clear_used_math(); -+ mxcsr_feature_mask_init(); -+} -+ -+#ifdef CONFIG_HOTPLUG_CPU -+void __cpuinit cpu_uninit(void) -+{ -+ int cpu = raw_smp_processor_id(); -+ cpu_clear(cpu, cpu_initialized); -+ -+ /* lazy TLB state */ -+ per_cpu(cpu_tlbstate, cpu).state = 0; -+ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm; -+} -+#endif ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/cpu/mcheck/mce_dom0.c 2009-10-01 11:00:47.000000000 +0200 -@@ -0,0 +1,134 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static int convert_log(struct mc_info *mi) -+{ -+ struct mcinfo_common *mic = NULL; -+ struct mcinfo_global *mc_global; -+ struct mcinfo_bank *mc_bank; -+ struct mce m; -+ -+ x86_mcinfo_lookup(mic, mi, MC_TYPE_GLOBAL); -+ if (mic == NULL) -+ { -+ printk(KERN_ERR "DOM0_MCE_LOG: global data is NULL\n"); -+ return -1; -+ } -+ -+ mc_global = (struct mcinfo_global*)mic; -+ m.mcgstatus = mc_global->mc_gstatus; -+ m.cpu = mc_global->mc_coreid;/*for test*/ -+ x86_mcinfo_lookup(mic, mi, MC_TYPE_BANK); -+ do -+ { -+ if (mic == NULL || mic->size == 0) -+ break; -+ if (mic->type == MC_TYPE_BANK) -+ { -+ mc_bank = (struct mcinfo_bank*)mic; -+ m.misc = mc_bank->mc_misc; -+ m.status = mc_bank->mc_status; -+ m.addr = mc_bank->mc_addr; -+ m.tsc = mc_bank->mc_tsc; -+ m.res1 = mc_bank->mc_ctrl2; -+ m.bank = mc_bank->mc_bank; -+ printk(KERN_DEBUG "[CPU%d, BANK%d, addr %llx, state %llx]\n", -+ m.bank, m.cpu, m.addr, m.status); -+ /*log this record*/ -+ mce_log(&m); -+ } -+ mic = x86_mcinfo_next(mic); -+ }while (1); -+ -+ return 0; -+} -+ -+static struct mc_info *g_mi; -+ -+/*dom0 mce virq 
handler, logging physical mce error info*/ -+ -+static irqreturn_t mce_dom0_interrupt(int irq, void *dev_id, -+ struct pt_regs *regs) -+{ -+ xen_mc_t mc_op; -+ int result = 0; -+ -+ printk(KERN_DEBUG "MCE_DOM0_LOG: enter dom0 mce vIRQ handler\n"); -+ mc_op.cmd = XEN_MC_fetch; -+ mc_op.interface_version = XEN_MCA_INTERFACE_VERSION; -+ set_xen_guest_handle(mc_op.u.mc_fetch.data, g_mi); -+urgent: -+ mc_op.u.mc_fetch.flags = XEN_MC_URGENT; -+ result = HYPERVISOR_mca(&mc_op); -+ if (result || mc_op.u.mc_fetch.flags & XEN_MC_NODATA || -+ mc_op.u.mc_fetch.flags & XEN_MC_FETCHFAILED) -+ { -+ printk(KERN_DEBUG "MCE_DOM0_LOG: No more urgent data\n"); -+ goto nonurgent; -+ } -+ else -+ { -+ result = convert_log(g_mi); -+ if (result) { -+ printk(KERN_ERR "MCE_DOM0_LOG: Log conversion failed\n"); -+ goto end; -+ } -+ /* After fetching the telem from DOM0, we need to dec the telem's -+ * refcnt and release the entry. The telem is reserved and inc -+ * refcnt when filling the telem. -+ */ -+ mc_op.u.mc_fetch.flags = XEN_MC_URGENT | XEN_MC_ACK; -+ result = HYPERVISOR_mca(&mc_op); -+ -+ goto urgent; -+ } -+nonurgent: -+ mc_op.u.mc_fetch.flags = XEN_MC_NONURGENT; -+ result = HYPERVISOR_mca(&mc_op); -+ if (result || mc_op.u.mc_fetch.flags & XEN_MC_NODATA || -+ mc_op.u.mc_fetch.flags & XEN_MC_FETCHFAILED) -+ { -+ printk(KERN_DEBUG "MCE_DOM0_LOG: No more nonurgent data\n"); -+ goto end; -+ } -+ else -+ { -+ result = convert_log(g_mi); -+ if (result) { -+ printk(KERN_ERR "MCE_DOM0_LOG: Log conversion failed\n"); -+ goto end; -+ } -+ /* After fetching the telem from DOM0, we need to dec the telem's -+ * refcnt and release the entry. The telem is reserved and inc -+ * refcnt when filling the telem. 
-+ */ -+ mc_op.u.mc_fetch.flags = XEN_MC_NONURGENT | XEN_MC_ACK; -+ result = HYPERVISOR_mca(&mc_op); -+ -+ goto nonurgent; -+ } -+end: -+ return IRQ_HANDLED; -+} -+ -+void bind_virq_for_mce(void) -+{ -+ int ret; -+ -+ ret = bind_virq_to_irqhandler(VIRQ_MCA, 0, -+ mce_dom0_interrupt, 0, "mce", NULL); -+ -+ g_mi = kmalloc(sizeof(struct mc_info), GFP_KERNEL); -+ if (ret < 0) -+ printk(KERN_ERR "MCE_DOM0_LOG: bind_virq for DOM0 failed\n"); -+ -+ /* Log the machine checks left over from the previous reset. */ -+ mce_dom0_interrupt(VIRQ_MCA, NULL, NULL); -+} -+ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/cpu/mtrr/main-xen.c 2008-01-28 12:24:18.000000000 +0100 -@@ -0,0 +1,198 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include "mtrr.h" -+ -+static DEFINE_MUTEX(mtrr_mutex); -+ -+void generic_get_mtrr(unsigned int reg, unsigned long *base, -+ unsigned int *size, mtrr_type * type) -+{ -+ struct xen_platform_op op; -+ -+ op.cmd = XENPF_read_memtype; -+ op.u.read_memtype.reg = reg; -+ if (unlikely(HYPERVISOR_platform_op(&op))) -+ memset(&op.u.read_memtype, 0, sizeof(op.u.read_memtype)); -+ -+ *size = op.u.read_memtype.nr_mfns; -+ *base = op.u.read_memtype.mfn; -+ *type = op.u.read_memtype.type; -+} -+ -+struct mtrr_ops generic_mtrr_ops = { -+ .use_intel_if = 1, -+ .get = generic_get_mtrr, -+}; -+ -+struct mtrr_ops *mtrr_if = &generic_mtrr_ops; -+unsigned int num_var_ranges; -+unsigned int *usage_table; -+ -+static void __init set_num_var_ranges(void) -+{ -+ struct xen_platform_op op; -+ -+ for (num_var_ranges = 0; ; num_var_ranges++) { -+ op.cmd = XENPF_read_memtype; -+ op.u.read_memtype.reg = num_var_ranges; -+ if (HYPERVISOR_platform_op(&op) != 0) -+ break; -+ } -+} -+ -+static void __init init_table(void) -+{ -+ int i, max; -+ -+ max = num_var_ranges; -+ if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL)) -+ == NULL) { -+ printk(KERN_ERR "mtrr: could not allocate\n"); -+ 
return; -+ } -+ for (i = 0; i < max; i++) -+ usage_table[i] = 0; -+} -+ -+int mtrr_add_page(unsigned long base, unsigned long size, -+ unsigned int type, char increment) -+{ -+ int error; -+ struct xen_platform_op op; -+ -+ mutex_lock(&mtrr_mutex); -+ -+ op.cmd = XENPF_add_memtype; -+ op.u.add_memtype.mfn = base; -+ op.u.add_memtype.nr_mfns = size; -+ op.u.add_memtype.type = type; -+ error = HYPERVISOR_platform_op(&op); -+ if (error) { -+ mutex_unlock(&mtrr_mutex); -+ BUG_ON(error > 0); -+ return error; -+ } -+ -+ if (increment) -+ ++usage_table[op.u.add_memtype.reg]; -+ -+ mutex_unlock(&mtrr_mutex); -+ -+ return op.u.add_memtype.reg; -+} -+ -+static int mtrr_check(unsigned long base, unsigned long size) -+{ -+ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { -+ printk(KERN_WARNING -+ "mtrr: size and base must be multiples of 4 kiB\n"); -+ printk(KERN_DEBUG -+ "mtrr: size: 0x%lx base: 0x%lx\n", size, base); -+ dump_stack(); -+ return -1; -+ } -+ return 0; -+} -+ -+int -+mtrr_add(unsigned long base, unsigned long size, unsigned int type, -+ char increment) -+{ -+ if (mtrr_check(base, size)) -+ return -EINVAL; -+ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, -+ increment); -+} -+ -+int mtrr_del_page(int reg, unsigned long base, unsigned long size) -+{ -+ unsigned i; -+ mtrr_type ltype; -+ unsigned long lbase; -+ unsigned int lsize; -+ int error = -EINVAL; -+ struct xen_platform_op op; -+ -+ mutex_lock(&mtrr_mutex); -+ -+ if (reg < 0) { -+ /* Search for existing MTRR */ -+ for (i = 0; i < num_var_ranges; ++i) { -+ mtrr_if->get(i, &lbase, &lsize, <ype); -+ if (lbase == base && lsize == size) { -+ reg = i; -+ break; -+ } -+ } -+ if (reg < 0) { -+ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base, -+ size); -+ goto out; -+ } -+ } -+ if (usage_table[reg] < 1) { -+ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg); -+ goto out; -+ } -+ if (--usage_table[reg] < 1) { -+ op.cmd = XENPF_del_memtype; -+ 
op.u.del_memtype.handle = 0; -+ op.u.del_memtype.reg = reg; -+ error = HYPERVISOR_platform_op(&op); -+ if (error) { -+ BUG_ON(error > 0); -+ goto out; -+ } -+ } -+ error = reg; -+ out: -+ mutex_unlock(&mtrr_mutex); -+ return error; -+} -+ -+int -+mtrr_del(int reg, unsigned long base, unsigned long size) -+{ -+ if (mtrr_check(base, size)) -+ return -EINVAL; -+ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); -+} -+ -+EXPORT_SYMBOL(mtrr_add); -+EXPORT_SYMBOL(mtrr_del); -+ -+void __init mtrr_bp_init(void) -+{ -+} -+ -+void mtrr_ap_init(void) -+{ -+} -+ -+static int __init mtrr_init(void) -+{ -+ struct cpuinfo_x86 *c = &boot_cpu_data; -+ -+ if (!is_initial_xendomain()) -+ return -ENODEV; -+ -+ if ((!cpu_has(c, X86_FEATURE_MTRR)) && -+ (!cpu_has(c, X86_FEATURE_K6_MTRR)) && -+ (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) && -+ (!cpu_has(c, X86_FEATURE_CENTAUR_MCR))) -+ return -ENODEV; -+ -+ set_num_var_ranges(); -+ init_table(); -+ -+ return 0; -+} -+ -+subsys_initcall(mtrr_init); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/entry_32-xen.S 2009-05-19 09:16:41.000000000 +0200 -@@ -0,0 +1,1242 @@ -+/* -+ * linux/arch/i386/entry.S -+ * -+ * Copyright (C) 1991, 1992 Linus Torvalds -+ */ -+ -+/* -+ * entry.S contains the system-call and fault low-level handling routines. -+ * This also contains the timer-interrupt handler, as well as all interrupts -+ * and faults that can result in a task-switch. -+ * -+ * NOTE: This code handles signal-recognition, which happens every time -+ * after a timer-interrupt and after each system call. -+ * -+ * I changed all the .align's to 4 (16 byte alignment), as that's faster -+ * on a 486. -+ * -+ * Stack layout in 'ret_from_system_call': -+ * ptrace needs to have all regs on the stack. 
-+ * if the order here is changed, it needs to be -+ * updated in fork.c:copy_process, signal.c:do_signal, -+ * ptrace.c and ptrace.h -+ * -+ * 0(%esp) - %ebx -+ * 4(%esp) - %ecx -+ * 8(%esp) - %edx -+ * C(%esp) - %esi -+ * 10(%esp) - %edi -+ * 14(%esp) - %ebp -+ * 18(%esp) - %eax -+ * 1C(%esp) - %ds -+ * 20(%esp) - %es -+ * 24(%esp) - orig_eax -+ * 28(%esp) - %eip -+ * 2C(%esp) - %cs -+ * 30(%esp) - %eflags -+ * 34(%esp) - %oldesp -+ * 38(%esp) - %oldss -+ * -+ * "current" is in register %ebx during any slow entries. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "irq_vectors.h" -+#include -+ -+#define nr_syscalls ((syscall_table_size)/4) -+ -+EBX = 0x00 -+ECX = 0x04 -+EDX = 0x08 -+ESI = 0x0C -+EDI = 0x10 -+EBP = 0x14 -+EAX = 0x18 -+DS = 0x1C -+ES = 0x20 -+ORIG_EAX = 0x24 -+EIP = 0x28 -+CS = 0x2C -+EFLAGS = 0x30 -+OLDESP = 0x34 -+OLDSS = 0x38 -+ -+CF_MASK = 0x00000001 -+TF_MASK = 0x00000100 -+IF_MASK = 0x00000200 -+DF_MASK = 0x00000400 -+NT_MASK = 0x00004000 -+VM_MASK = 0x00020000 -+/* Pseudo-eflags. */ -+NMI_MASK = 0x80000000 -+ -+#ifndef CONFIG_XEN -+#define DISABLE_INTERRUPTS cli -+#define ENABLE_INTERRUPTS sti -+#else -+/* Offsets into shared_info_t. 
*/ -+#define evtchn_upcall_pending /* 0 */ -+#define evtchn_upcall_mask 1 -+ -+#define sizeof_vcpu_shift 6 -+ -+#ifdef CONFIG_SMP -+#define GET_VCPU_INFO movl TI_cpu(%ebp),%esi ; \ -+ shl $sizeof_vcpu_shift,%esi ; \ -+ addl HYPERVISOR_shared_info,%esi -+#else -+#define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi -+#endif -+ -+#define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi) -+#define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi) -+#define DISABLE_INTERRUPTS GET_VCPU_INFO ; \ -+ __DISABLE_INTERRUPTS -+#define ENABLE_INTERRUPTS GET_VCPU_INFO ; \ -+ __ENABLE_INTERRUPTS -+#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi) -+#endif -+ -+#ifdef CONFIG_PREEMPT -+#define preempt_stop cli; TRACE_IRQS_OFF -+#else -+#define preempt_stop -+#define resume_kernel restore_nocheck -+#endif -+ -+.macro TRACE_IRQS_IRET -+#ifdef CONFIG_TRACE_IRQFLAGS -+ testl $IF_MASK,EFLAGS(%esp) # interrupts off? -+ jz 1f -+ TRACE_IRQS_ON -+1: -+#endif -+.endm -+ -+#ifdef CONFIG_VM86 -+#define resume_userspace_sig check_userspace -+#else -+#define resume_userspace_sig resume_userspace -+#endif -+ -+#define SAVE_ALL \ -+ cld; \ -+ pushl %es; \ -+ CFI_ADJUST_CFA_OFFSET 4;\ -+ /*CFI_REL_OFFSET es, 0;*/\ -+ pushl %ds; \ -+ CFI_ADJUST_CFA_OFFSET 4;\ -+ /*CFI_REL_OFFSET ds, 0;*/\ -+ pushl %eax; \ -+ CFI_ADJUST_CFA_OFFSET 4;\ -+ CFI_REL_OFFSET eax, 0;\ -+ pushl %ebp; \ -+ CFI_ADJUST_CFA_OFFSET 4;\ -+ CFI_REL_OFFSET ebp, 0;\ -+ pushl %edi; \ -+ CFI_ADJUST_CFA_OFFSET 4;\ -+ CFI_REL_OFFSET edi, 0;\ -+ pushl %esi; \ -+ CFI_ADJUST_CFA_OFFSET 4;\ -+ CFI_REL_OFFSET esi, 0;\ -+ pushl %edx; \ -+ CFI_ADJUST_CFA_OFFSET 4;\ -+ CFI_REL_OFFSET edx, 0;\ -+ pushl %ecx; \ -+ CFI_ADJUST_CFA_OFFSET 4;\ -+ CFI_REL_OFFSET ecx, 0;\ -+ pushl %ebx; \ -+ CFI_ADJUST_CFA_OFFSET 4;\ -+ CFI_REL_OFFSET ebx, 0;\ -+ movl $(__USER_DS), %edx; \ -+ movl %edx, %ds; \ -+ movl %edx, %es; -+ -+#define RESTORE_INT_REGS \ -+ popl %ebx; \ -+ CFI_ADJUST_CFA_OFFSET -4;\ -+ CFI_RESTORE ebx;\ -+ popl %ecx; \ -+ 
CFI_ADJUST_CFA_OFFSET -4;\ -+ CFI_RESTORE ecx;\ -+ popl %edx; \ -+ CFI_ADJUST_CFA_OFFSET -4;\ -+ CFI_RESTORE edx;\ -+ popl %esi; \ -+ CFI_ADJUST_CFA_OFFSET -4;\ -+ CFI_RESTORE esi;\ -+ popl %edi; \ -+ CFI_ADJUST_CFA_OFFSET -4;\ -+ CFI_RESTORE edi;\ -+ popl %ebp; \ -+ CFI_ADJUST_CFA_OFFSET -4;\ -+ CFI_RESTORE ebp;\ -+ popl %eax; \ -+ CFI_ADJUST_CFA_OFFSET -4;\ -+ CFI_RESTORE eax -+ -+#define RESTORE_REGS \ -+ RESTORE_INT_REGS; \ -+1: popl %ds; \ -+ CFI_ADJUST_CFA_OFFSET -4;\ -+ /*CFI_RESTORE ds;*/\ -+2: popl %es; \ -+ CFI_ADJUST_CFA_OFFSET -4;\ -+ /*CFI_RESTORE es;*/\ -+.section .fixup,"ax"; \ -+3: movl $0,(%esp); \ -+ jmp 1b; \ -+4: movl $0,(%esp); \ -+ jmp 2b; \ -+.previous; \ -+.section __ex_table,"a";\ -+ .align 4; \ -+ .long 1b,3b; \ -+ .long 2b,4b; \ -+.previous -+ -+#define RING0_INT_FRAME \ -+ CFI_STARTPROC simple;\ -+ CFI_DEF_CFA esp, 3*4;\ -+ /*CFI_OFFSET cs, -2*4;*/\ -+ CFI_OFFSET eip, -3*4 -+ -+#define RING0_EC_FRAME \ -+ CFI_STARTPROC simple;\ -+ CFI_DEF_CFA esp, 4*4;\ -+ /*CFI_OFFSET cs, -2*4;*/\ -+ CFI_OFFSET eip, -3*4 -+ -+#define RING0_PTREGS_FRAME \ -+ CFI_STARTPROC simple;\ -+ CFI_DEF_CFA esp, OLDESP-EBX;\ -+ /*CFI_OFFSET cs, CS-OLDESP;*/\ -+ CFI_OFFSET eip, EIP-OLDESP;\ -+ /*CFI_OFFSET es, ES-OLDESP;*/\ -+ /*CFI_OFFSET ds, DS-OLDESP;*/\ -+ CFI_OFFSET eax, EAX-OLDESP;\ -+ CFI_OFFSET ebp, EBP-OLDESP;\ -+ CFI_OFFSET edi, EDI-OLDESP;\ -+ CFI_OFFSET esi, ESI-OLDESP;\ -+ CFI_OFFSET edx, EDX-OLDESP;\ -+ CFI_OFFSET ecx, ECX-OLDESP;\ -+ CFI_OFFSET ebx, EBX-OLDESP -+ -+ENTRY(ret_from_fork) -+ CFI_STARTPROC -+ pushl %eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ call schedule_tail -+ GET_THREAD_INFO(%ebp) -+ popl %eax -+ CFI_ADJUST_CFA_OFFSET -4 -+ pushl $0x0202 # Reset kernel eflags -+ CFI_ADJUST_CFA_OFFSET 4 -+ popfl -+ CFI_ADJUST_CFA_OFFSET -4 -+ jmp syscall_exit -+ CFI_ENDPROC -+ -+/* -+ * Return to user mode is not as complex as all this looks, -+ * but we want the default path for a system call return to -+ * go as quickly as possible which is why some of this is 
-+ * less clear than it otherwise should be. -+ */ -+ -+ # userspace resumption stub bypassing syscall exit tracing -+ ALIGN -+ RING0_PTREGS_FRAME -+ret_from_exception: -+ preempt_stop -+ret_from_intr: -+ GET_THREAD_INFO(%ebp) -+check_userspace: -+ movl EFLAGS(%esp), %eax # mix EFLAGS and CS -+ movb CS(%esp), %al -+ testl $(VM_MASK | 2), %eax -+ jz resume_kernel -+ENTRY(resume_userspace) -+ DISABLE_INTERRUPTS # make sure we don't miss an interrupt -+ # setting need_resched or sigpending -+ # between sampling and the iret -+ movl TI_flags(%ebp), %ecx -+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on -+ # int/exception return? -+ jne work_pending -+ jmp restore_all -+ -+#ifdef CONFIG_PREEMPT -+ENTRY(resume_kernel) -+ cli -+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? -+ jnz restore_nocheck -+need_resched: -+ movl TI_flags(%ebp), %ecx # need_resched set ? -+ testb $_TIF_NEED_RESCHED, %cl -+ jz restore_all -+ testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ? -+ jz restore_all -+ call preempt_schedule_irq -+ jmp need_resched -+#endif -+ CFI_ENDPROC -+ -+/* SYSENTER_RETURN points to after the "sysenter" instruction in -+ the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ -+ -+ # sysenter call handler stub -+ENTRY(sysenter_entry) -+ CFI_STARTPROC simple -+ CFI_DEF_CFA esp, 0 -+ CFI_REGISTER esp, ebp -+ movl SYSENTER_stack_esp0(%esp),%esp -+sysenter_past_esp: -+ /* -+ * No need to follow this irqs on/off section: the syscall -+ * disabled irqs and here we enable it straight after entry: -+ */ -+ sti -+ pushl $(__USER_DS) -+ CFI_ADJUST_CFA_OFFSET 4 -+ /*CFI_REL_OFFSET ss, 0*/ -+ pushl %ebp -+ CFI_ADJUST_CFA_OFFSET 4 -+ CFI_REL_OFFSET esp, 0 -+ pushfl -+ CFI_ADJUST_CFA_OFFSET 4 -+ pushl $(__USER_CS) -+ CFI_ADJUST_CFA_OFFSET 4 -+ /*CFI_REL_OFFSET cs, 0*/ -+ /* -+ * Push current_thread_info()->sysenter_return to the stack. 
-+ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words -+ * pushed above; +8 corresponds to copy_thread's esp0 setting. -+ */ -+ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) -+ CFI_ADJUST_CFA_OFFSET 4 -+ CFI_REL_OFFSET eip, 0 -+ -+/* -+ * Load the potential sixth argument from user stack. -+ * Careful about security. -+ */ -+ cmpl $__PAGE_OFFSET-3,%ebp -+ jae syscall_fault -+1: movl (%ebp),%ebp -+.section __ex_table,"a" -+ .align 4 -+ .long 1b,syscall_fault -+.previous -+ -+ pushl %eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+ GET_THREAD_INFO(%ebp) -+ -+ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ -+ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp) -+ jnz syscall_trace_entry -+ cmpl $(nr_syscalls), %eax -+ jae syscall_badsys -+ call *sys_call_table(,%eax,4) -+ movl %eax,EAX(%esp) -+ DISABLE_INTERRUPTS -+ TRACE_IRQS_OFF -+ movl TI_flags(%ebp), %ecx -+ testw $_TIF_ALLWORK_MASK, %cx -+ jne syscall_exit_work -+/* if something modifies registers it must also disable sysexit */ -+ movl EIP(%esp), %edx -+ movl OLDESP(%esp), %ecx -+ xorl %ebp,%ebp -+#ifdef CONFIG_XEN -+ TRACE_IRQS_ON -+ __ENABLE_INTERRUPTS -+sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/ -+ __TEST_PENDING -+ jnz 14f # process more events if necessary... -+ movl ESI(%esp), %esi -+ sysexit -+14: __DISABLE_INTERRUPTS -+ TRACE_IRQS_OFF -+sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/ -+ push %esp -+ call evtchn_do_upcall -+ add $4,%esp -+ jmp ret_from_intr -+#else -+ TRACE_IRQS_ON -+ sti -+ sysexit -+#endif /* !CONFIG_XEN */ -+ CFI_ENDPROC -+ -+ # pv sysenter call handler stub -+ENTRY(sysenter_entry_pv) -+ RING0_INT_FRAME -+ movl $__USER_DS,16(%esp) -+ movl %ebp,12(%esp) -+ movl $__USER_CS,4(%esp) -+ addl $4,%esp -+ CFI_ADJUST_CFA_OFFSET -4 -+ /* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. 
*/ -+ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) -+ CFI_ADJUST_CFA_OFFSET 4 -+/* -+ * Load the potential sixth argument from user stack. -+ * Careful about security. -+ */ -+ cmpl $__PAGE_OFFSET-3,%ebp -+ jae syscall_fault -+1: movl (%ebp),%ebp -+.section __ex_table,"a" -+ .align 4 -+ .long 1b,syscall_fault -+.previous -+ /* fall through */ -+ CFI_ENDPROC -+ENDPROC(sysenter_entry_pv) -+ -+ # system call handler stub -+ENTRY(system_call) -+ RING0_INT_FRAME # can't unwind into user space anyway -+ pushl %eax # save orig_eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+ GET_THREAD_INFO(%ebp) -+ testl $TF_MASK,EFLAGS(%esp) -+ jz no_singlestep -+ orl $_TIF_SINGLESTEP,TI_flags(%ebp) -+no_singlestep: -+ # system call tracing in operation / emulation -+ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ -+ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp) -+ jnz syscall_trace_entry -+ cmpl $(nr_syscalls), %eax -+ jae syscall_badsys -+syscall_call: -+ call *sys_call_table(,%eax,4) -+ movl %eax,EAX(%esp) # store the return value -+syscall_exit: -+ DISABLE_INTERRUPTS # make sure we don't miss an interrupt -+ # setting need_resched or sigpending -+ # between sampling and the iret -+ TRACE_IRQS_OFF -+ movl TI_flags(%ebp), %ecx -+ testw $_TIF_ALLWORK_MASK, %cx # current->work -+ jne syscall_exit_work -+ -+restore_all: -+#ifndef CONFIG_XEN -+ movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS -+ # Warning: OLDSS(%esp) contains the wrong/random values if we -+ # are returning to the kernel. -+ # See comments in process.c:copy_thread() for details. 
-+ movb OLDSS(%esp), %ah -+ movb CS(%esp), %al -+ andl $(VM_MASK | (4 << 8) | 3), %eax -+ cmpl $((4 << 8) | 3), %eax -+ CFI_REMEMBER_STATE -+ je ldt_ss # returning to user-space with LDT SS -+restore_nocheck: -+#else -+restore_nocheck: -+ movl EFLAGS(%esp), %eax -+ testl $(VM_MASK|NMI_MASK), %eax -+ CFI_REMEMBER_STATE -+ jnz hypervisor_iret -+ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF -+ GET_VCPU_INFO -+ andb evtchn_upcall_mask(%esi),%al -+ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask -+ CFI_REMEMBER_STATE -+ jnz restore_all_enable_events # != 0 => enable event delivery -+#endif -+ TRACE_IRQS_IRET -+restore_nocheck_notrace: -+ RESTORE_REGS -+ addl $4, %esp -+ CFI_ADJUST_CFA_OFFSET -4 -+1: iret -+.section .fixup,"ax" -+iret_exc: -+#ifndef CONFIG_XEN -+ TRACE_IRQS_ON -+ sti -+#endif -+ pushl $0 # no error code -+ pushl $do_iret_error -+ jmp error_code -+.previous -+.section __ex_table,"a" -+ .align 4 -+ .long 1b,iret_exc -+.previous -+ -+ CFI_RESTORE_STATE -+#ifndef CONFIG_XEN -+ldt_ss: -+ larl OLDSS(%esp), %eax -+ jnz restore_nocheck -+ testl $0x00400000, %eax # returning to 32bit stack? -+ jnz restore_nocheck # allright, normal return -+ /* If returning to userspace with 16bit stack, -+ * try to fix the higher word of ESP, as the CPU -+ * won't restore it. -+ * This is an "official" bug of all the x86-compatible -+ * CPUs, which we can try to work around to make -+ * dosemu and wine happy. */ -+ subl $8, %esp # reserve space for switch16 pointer -+ CFI_ADJUST_CFA_OFFSET 8 -+ cli -+ TRACE_IRQS_OFF -+ movl %esp, %eax -+ /* Set up the 16bit stack frame with switch32 pointer on top, -+ * and a switch16 pointer on top of the current frame. 
*/ -+ call setup_x86_bogus_stack -+ CFI_ADJUST_CFA_OFFSET -8 # frame has moved -+ TRACE_IRQS_IRET -+ RESTORE_REGS -+ lss 20+4(%esp), %esp # switch to 16bit stack -+1: iret -+.section __ex_table,"a" -+ .align 4 -+ .long 1b,iret_exc -+.previous -+#else -+ ALIGN -+restore_all_enable_events: -+ TRACE_IRQS_ON -+ __ENABLE_INTERRUPTS -+scrit: /**** START OF CRITICAL REGION ****/ -+ __TEST_PENDING -+ jnz 14f # process more events if necessary... -+ RESTORE_REGS -+ addl $4, %esp -+ CFI_ADJUST_CFA_OFFSET -4 -+1: iret -+.section __ex_table,"a" -+ .align 4 -+ .long 1b,iret_exc -+.previous -+14: __DISABLE_INTERRUPTS -+ TRACE_IRQS_OFF -+ecrit: /**** END OF CRITICAL REGION ****/ -+ jmp .Ldo_upcall -+ -+ CFI_RESTORE_STATE -+hypervisor_iret: -+ andl $~NMI_MASK, EFLAGS(%esp) -+ RESTORE_REGS -+ addl $4, %esp -+ CFI_ADJUST_CFA_OFFSET -4 -+ jmp hypercall_page + (__HYPERVISOR_iret * 32) -+#endif -+ CFI_ENDPROC -+ -+ # perform work that needs to be done immediately before resumption -+ ALIGN -+ RING0_PTREGS_FRAME # can't unwind into user space anyway -+work_pending: -+ testb $_TIF_NEED_RESCHED, %cl -+ jz work_notifysig -+work_resched: -+ call schedule -+ DISABLE_INTERRUPTS # make sure we don't miss an interrupt -+ # setting need_resched or sigpending -+ # between sampling and the iret -+ TRACE_IRQS_OFF -+ movl TI_flags(%ebp), %ecx -+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other -+ # than syscall tracing? 
-+ jz restore_all -+ testb $_TIF_NEED_RESCHED, %cl -+ jnz work_resched -+ -+work_notifysig: # deal with pending signals and -+ # notify-resume requests -+ testl $VM_MASK, EFLAGS(%esp) -+ movl %esp, %eax -+ jne work_notifysig_v86 # returning to kernel-space or -+ # vm86-space -+ xorl %edx, %edx -+ call do_notify_resume -+ jmp resume_userspace_sig -+ -+ ALIGN -+work_notifysig_v86: -+#ifdef CONFIG_VM86 -+ pushl %ecx # save ti_flags for do_notify_resume -+ CFI_ADJUST_CFA_OFFSET 4 -+ call save_v86_state # %eax contains pt_regs pointer -+ popl %ecx -+ CFI_ADJUST_CFA_OFFSET -4 -+ movl %eax, %esp -+ xorl %edx, %edx -+ call do_notify_resume -+ jmp resume_userspace_sig -+#endif -+ -+ # perform syscall exit tracing -+ ALIGN -+syscall_trace_entry: -+ movl $-ENOSYS,EAX(%esp) -+ movl %esp, %eax -+ xorl %edx,%edx -+ call do_syscall_trace -+ cmpl $0, %eax -+ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU, -+ # so must skip actual syscall -+ movl ORIG_EAX(%esp), %eax -+ cmpl $(nr_syscalls), %eax -+ jnae syscall_call -+ jmp syscall_exit -+ -+ # perform syscall exit tracing -+ ALIGN -+syscall_exit_work: -+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl -+ jz work_pending -+ TRACE_IRQS_ON -+ ENABLE_INTERRUPTS # could let do_syscall_trace() call -+ # schedule() instead -+ movl %esp, %eax -+ movl $1, %edx -+ call do_syscall_trace -+ jmp resume_userspace -+ CFI_ENDPROC -+ -+ RING0_INT_FRAME # can't unwind into user space anyway -+syscall_fault: -+ pushl %eax # save orig_eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+ GET_THREAD_INFO(%ebp) -+ movl $-EFAULT,EAX(%esp) -+ jmp resume_userspace -+ -+syscall_badsys: -+ movl $-ENOSYS,EAX(%esp) -+ jmp resume_userspace -+ CFI_ENDPROC -+ -+#ifndef CONFIG_XEN -+#define FIXUP_ESPFIX_STACK \ -+ movl %esp, %eax; \ -+ /* switch to 32bit stack using the pointer on top of 16bit stack */ \ -+ lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \ -+ /* copy data from 16bit stack to 32bit stack */ \ -+ call fixup_x86_bogus_stack; \ -+ /* 
put ESP to the proper location */ \ -+ movl %eax, %esp; -+#define UNWIND_ESPFIX_STACK \ -+ pushl %eax; \ -+ CFI_ADJUST_CFA_OFFSET 4; \ -+ movl %ss, %eax; \ -+ /* see if on 16bit stack */ \ -+ cmpw $__ESPFIX_SS, %ax; \ -+ je 28f; \ -+27: popl %eax; \ -+ CFI_ADJUST_CFA_OFFSET -4; \ -+.section .fixup,"ax"; \ -+28: movl $__KERNEL_DS, %eax; \ -+ movl %eax, %ds; \ -+ movl %eax, %es; \ -+ /* switch to 32bit stack */ \ -+ FIXUP_ESPFIX_STACK; \ -+ jmp 27b; \ -+.previous -+ -+/* -+ * Build the entry stubs and pointer table with -+ * some assembler magic. -+ */ -+.data -+ENTRY(interrupt) -+.text -+ -+vector=0 -+ENTRY(irq_entries_start) -+ RING0_INT_FRAME -+.rept NR_IRQS -+ ALIGN -+ .if vector -+ CFI_ADJUST_CFA_OFFSET -4 -+ .endif -+1: pushl $~(vector) -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp common_interrupt -+.data -+ .long 1b -+.text -+vector=vector+1 -+.endr -+ -+/* -+ * the CPU automatically disables interrupts when executing an IRQ vector, -+ * so IRQ-flags tracing has to follow that: -+ */ -+ ALIGN -+common_interrupt: -+ SAVE_ALL -+ TRACE_IRQS_OFF -+ movl %esp,%eax -+ call do_IRQ -+ jmp ret_from_intr -+ CFI_ENDPROC -+ -+#define BUILD_INTERRUPT(name, nr) \ -+ENTRY(name) \ -+ RING0_INT_FRAME; \ -+ pushl $~(nr); \ -+ CFI_ADJUST_CFA_OFFSET 4; \ -+ SAVE_ALL; \ -+ TRACE_IRQS_OFF \ -+ movl %esp,%eax; \ -+ call smp_/**/name; \ -+ jmp ret_from_intr; \ -+ CFI_ENDPROC -+ -+/* The include is where all of the SMP etc. 
interrupts come from */ -+#include "entry_arch.h" -+#else -+#define UNWIND_ESPFIX_STACK -+#endif -+ -+ENTRY(divide_error) -+ RING0_INT_FRAME -+ pushl $0 # no error code -+ CFI_ADJUST_CFA_OFFSET 4 -+ pushl $do_divide_error -+ CFI_ADJUST_CFA_OFFSET 4 -+ ALIGN -+error_code: -+ pushl %ds -+ CFI_ADJUST_CFA_OFFSET 4 -+ /*CFI_REL_OFFSET ds, 0*/ -+ pushl %eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ CFI_REL_OFFSET eax, 0 -+ xorl %eax, %eax -+ pushl %ebp -+ CFI_ADJUST_CFA_OFFSET 4 -+ CFI_REL_OFFSET ebp, 0 -+ pushl %edi -+ CFI_ADJUST_CFA_OFFSET 4 -+ CFI_REL_OFFSET edi, 0 -+ pushl %esi -+ CFI_ADJUST_CFA_OFFSET 4 -+ CFI_REL_OFFSET esi, 0 -+ pushl %edx -+ CFI_ADJUST_CFA_OFFSET 4 -+ CFI_REL_OFFSET edx, 0 -+ decl %eax # eax = -1 -+ pushl %ecx -+ CFI_ADJUST_CFA_OFFSET 4 -+ CFI_REL_OFFSET ecx, 0 -+ pushl %ebx -+ CFI_ADJUST_CFA_OFFSET 4 -+ CFI_REL_OFFSET ebx, 0 -+ cld -+ pushl %es -+ CFI_ADJUST_CFA_OFFSET 4 -+ /*CFI_REL_OFFSET es, 0*/ -+ UNWIND_ESPFIX_STACK -+ popl %ecx -+ CFI_ADJUST_CFA_OFFSET -4 -+ /*CFI_REGISTER es, ecx*/ -+ movl ES(%esp), %edi # get the function address -+ movl ORIG_EAX(%esp), %edx # get the error code -+ movl %eax, ORIG_EAX(%esp) -+ movl %ecx, ES(%esp) -+ /*CFI_REL_OFFSET es, ES*/ -+ movl $(__USER_DS), %ecx -+ movl %ecx, %ds -+ movl %ecx, %es -+ movl %esp,%eax # pt_regs pointer -+ call *%edi -+ jmp ret_from_exception -+ CFI_ENDPROC -+ -+#ifdef CONFIG_XEN -+# A note on the "critical region" in our callback handler. -+# We want to avoid stacking callback handlers due to events occurring -+# during handling of the last event. To do this, we keep events disabled -+# until we've done all processing. HOWEVER, we must enable events before -+# popping the stack frame (can't be done atomically) and so it would still -+# be possible to get enough handler activations to overflow the stack. -+# Although unlikely, bugs of that kind are hard to track down, so we'd -+# like to avoid the possibility. 
-+# So, on entry to the handler we detect whether we interrupted an -+# existing activation in its critical region -- if so, we pop the current -+# activation and restart the handler using the previous one. -+# -+# The sysexit critical region is slightly different. sysexit -+# atomically removes the entire stack frame. If we interrupt in the -+# critical region we know that the entire frame is present and correct -+# so we can simply throw away the new one. -+ENTRY(hypervisor_callback) -+ RING0_INT_FRAME -+ pushl %eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+ testb $2,CS(%esp) -+ movl EIP(%esp),%eax -+ jnz .Ldo_upcall -+ cmpl $scrit,%eax -+ jb 0f -+ cmpl $ecrit,%eax -+ jb critical_region_fixup -+0: -+#ifdef CONFIG_XEN_SUPERVISOR_MODE_KERNEL -+ cmpl $sysexit_scrit,%eax -+ jb .Ldo_upcall -+ cmpl $sysexit_ecrit,%eax -+ ja .Ldo_upcall -+ addl $OLDESP,%esp # Remove eflags...ebx from stack frame. -+#endif -+.Ldo_upcall: -+ push %esp -+ CFI_ADJUST_CFA_OFFSET 4 -+ call evtchn_do_upcall -+ add $4,%esp -+ CFI_ADJUST_CFA_OFFSET -4 -+ jmp ret_from_intr -+ CFI_ENDPROC -+ -+# [How we do the fixup]. We want to merge the current stack frame with the -+# just-interrupted frame. How we do this depends on where in the critical -+# region the interrupted handler was executing, and so how many saved -+# registers are in each frame. We do this quickly using the lookup table -+# 'critical_fixup_table'. For each byte offset in the critical region, it -+# provides the number of bytes which have already been popped from the -+# interrupted stack frame. 
-+critical_region_fixup: -+ movsbl critical_fixup_table-scrit(%eax),%ecx # %ecx contains num slots popped -+ testl %ecx,%ecx -+ leal (%esp,%ecx,4),%esi # %esi points at end of src region -+ leal OLDESP(%esp),%edi # %edi points at end of dst region -+ jle 17f # skip loop if nothing to copy -+16: subl $4,%esi # pre-decrementing copy loop -+ subl $4,%edi -+ movl (%esi),%eax -+ movl %eax,(%edi) -+ loop 16b -+17: movl %edi,%esp # final %edi is top of merged stack -+ jmp .Ldo_upcall -+ -+.section .rodata,"a" -+critical_fixup_table: -+ .byte -1,-1,-1 # testb $0xff,(%esi) = __TEST_PENDING -+ .byte -1,-1 # jnz 14f -+ .byte 0 # pop %ebx -+ .byte 1 # pop %ecx -+ .byte 2 # pop %edx -+ .byte 3 # pop %esi -+ .byte 4 # pop %edi -+ .byte 5 # pop %ebp -+ .byte 6 # pop %eax -+ .byte 7 # pop %ds -+ .byte 8 # pop %es -+ .byte 9,9,9 # add $4,%esp -+ .byte 10 # iret -+ .byte -1,-1,-1,-1 # movb $1,1(%esi) = __DISABLE_INTERRUPTS -+.previous -+ -+# Hypervisor uses this for application faults while it executes. -+# We get here for two reasons: -+# 1. Fault while reloading DS, ES, FS or GS -+# 2. Fault while executing IRET -+# Category 1 we fix up by reattempting the load, and zeroing the segment -+# register if the load fails. -+# Category 2 we fix up by jumping to do_iret_error. We cannot use the -+# normal Linux return path in this case because if we use the IRET hypercall -+# to pop the stack frame we end up in an infinite loop of failsafe callbacks. -+# We distinguish between categories by maintaining a status value in EAX. 
-+ENTRY(failsafe_callback) -+ pushl %eax -+ movl $1,%eax -+1: mov 4(%esp),%ds -+2: mov 8(%esp),%es -+3: mov 12(%esp),%fs -+4: mov 16(%esp),%gs -+ testl %eax,%eax -+ popl %eax -+ jz 5f -+ addl $16,%esp # EAX != 0 => Category 2 (Bad IRET) -+ jmp iret_exc -+5: addl $16,%esp # EAX == 0 => Category 1 (Bad segment) -+ RING0_INT_FRAME -+ pushl $0 -+ SAVE_ALL -+ jmp ret_from_exception -+.section .fixup,"ax"; \ -+6: xorl %eax,%eax; \ -+ movl %eax,4(%esp); \ -+ jmp 1b; \ -+7: xorl %eax,%eax; \ -+ movl %eax,8(%esp); \ -+ jmp 2b; \ -+8: xorl %eax,%eax; \ -+ movl %eax,12(%esp); \ -+ jmp 3b; \ -+9: xorl %eax,%eax; \ -+ movl %eax,16(%esp); \ -+ jmp 4b; \ -+.previous; \ -+.section __ex_table,"a"; \ -+ .align 4; \ -+ .long 1b,6b; \ -+ .long 2b,7b; \ -+ .long 3b,8b; \ -+ .long 4b,9b; \ -+.previous -+#endif -+ CFI_ENDPROC -+ -+ENTRY(coprocessor_error) -+ RING0_INT_FRAME -+ pushl $0 -+ CFI_ADJUST_CFA_OFFSET 4 -+ pushl $do_coprocessor_error -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+ENTRY(simd_coprocessor_error) -+ RING0_INT_FRAME -+ pushl $0 -+ CFI_ADJUST_CFA_OFFSET 4 -+ pushl $do_simd_coprocessor_error -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+ENTRY(device_not_available) -+ RING0_INT_FRAME -+ pushl $-1 # mark this as an int -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+#ifndef CONFIG_XEN -+ movl %cr0, %eax -+ testl $0x4, %eax # EM (math emulation bit) -+ je device_available_emulate -+ pushl $0 # temporary storage for ORIG_EIP -+ CFI_ADJUST_CFA_OFFSET 4 -+ call math_emulate -+ addl $4, %esp -+ CFI_ADJUST_CFA_OFFSET -4 -+ jmp ret_from_exception -+device_available_emulate: -+#endif -+ preempt_stop -+ call math_state_restore -+ jmp ret_from_exception -+ CFI_ENDPROC -+ -+#ifndef CONFIG_XEN -+/* -+ * Debug traps and NMI can happen at the one SYSENTER instruction -+ * that sets up the real kernel stack. Check here, since we can't -+ * allow the wrong stack to be used. 
-+ * -+ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have -+ * already pushed 3 words if it hits on the sysenter instruction: -+ * eflags, cs and eip. -+ * -+ * We just load the right stack, and push the three (known) values -+ * by hand onto the new stack - while updating the return eip past -+ * the instruction that would have done it for sysenter. -+ */ -+#define FIX_STACK(offset, ok, label) \ -+ cmpw $__KERNEL_CS,4(%esp); \ -+ jne ok; \ -+label: \ -+ movl SYSENTER_stack_esp0+offset(%esp),%esp; \ -+ pushfl; \ -+ pushl $__KERNEL_CS; \ -+ pushl $sysenter_past_esp -+#endif /* CONFIG_XEN */ -+ -+KPROBE_ENTRY(debug) -+ RING0_INT_FRAME -+#ifndef CONFIG_XEN -+ cmpl $sysenter_entry,(%esp) -+ jne debug_stack_correct -+ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn) -+debug_stack_correct: -+#endif /* !CONFIG_XEN */ -+ pushl $-1 # mark this as an int -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+ xorl %edx,%edx # error code 0 -+ movl %esp,%eax # pt_regs pointer -+ call do_debug -+ jmp ret_from_exception -+ CFI_ENDPROC -+ .previous .text -+#ifndef CONFIG_XEN -+/* -+ * NMI is doubly nasty. It can happen _while_ we're handling -+ * a debug fault, and the debug fault hasn't yet been able to -+ * clear up the stack. So we first check whether we got an -+ * NMI on the sysenter entry path, but after that we need to -+ * check whether we got an NMI on the debug path where the debug -+ * fault happened on the sysenter path. -+ */ -+ENTRY(nmi) -+ RING0_INT_FRAME -+ pushl %eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ movl %ss, %eax -+ cmpw $__ESPFIX_SS, %ax -+ popl %eax -+ CFI_ADJUST_CFA_OFFSET -4 -+ je nmi_16bit_stack -+ cmpl $sysenter_entry,(%esp) -+ je nmi_stack_fixup -+ pushl %eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ movl %esp,%eax -+ /* Do not access memory above the end of our stack page, -+ * it might not exist. 
-+ */ -+ andl $(THREAD_SIZE-1),%eax -+ cmpl $(THREAD_SIZE-20),%eax -+ popl %eax -+ CFI_ADJUST_CFA_OFFSET -4 -+ jae nmi_stack_correct -+ cmpl $sysenter_entry,12(%esp) -+ je nmi_debug_stack_check -+nmi_stack_correct: -+ pushl %eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+ xorl %edx,%edx # zero error code -+ movl %esp,%eax # pt_regs pointer -+ call do_nmi -+ jmp restore_nocheck_notrace -+ CFI_ENDPROC -+ -+nmi_stack_fixup: -+ FIX_STACK(12,nmi_stack_correct, 1) -+ jmp nmi_stack_correct -+nmi_debug_stack_check: -+ cmpw $__KERNEL_CS,16(%esp) -+ jne nmi_stack_correct -+ cmpl $debug,(%esp) -+ jb nmi_stack_correct -+ cmpl $debug_esp_fix_insn,(%esp) -+ ja nmi_stack_correct -+ FIX_STACK(24,nmi_stack_correct, 1) -+ jmp nmi_stack_correct -+ -+nmi_16bit_stack: -+ RING0_INT_FRAME -+ /* create the pointer to lss back */ -+ pushl %ss -+ CFI_ADJUST_CFA_OFFSET 4 -+ pushl %esp -+ CFI_ADJUST_CFA_OFFSET 4 -+ movzwl %sp, %esp -+ addw $4, (%esp) -+ /* copy the iret frame of 12 bytes */ -+ .rept 3 -+ pushl 16(%esp) -+ CFI_ADJUST_CFA_OFFSET 4 -+ .endr -+ pushl %eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+ FIXUP_ESPFIX_STACK # %eax == %esp -+ CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved -+ xorl %edx,%edx # zero error code -+ call do_nmi -+ RESTORE_REGS -+ lss 12+4(%esp), %esp # back to 16bit stack -+1: iret -+ CFI_ENDPROC -+.section __ex_table,"a" -+ .align 4 -+ .long 1b,iret_exc -+.previous -+#else -+ENTRY(nmi) -+ RING0_INT_FRAME -+ pushl %eax -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+ xorl %edx,%edx # zero error code -+ movl %esp,%eax # pt_regs pointer -+ call do_nmi -+ orl $NMI_MASK, EFLAGS(%esp) -+ jmp restore_all -+ CFI_ENDPROC -+#endif -+ -+KPROBE_ENTRY(int3) -+ RING0_INT_FRAME -+ pushl $-1 # mark this as an int -+ CFI_ADJUST_CFA_OFFSET 4 -+ SAVE_ALL -+ xorl %edx,%edx # zero error code -+ movl %esp,%eax # pt_regs pointer -+ call do_int3 -+ jmp ret_from_exception -+ CFI_ENDPROC -+ .previous .text -+ -+ENTRY(overflow) -+ RING0_INT_FRAME -+ pushl $0 -+ CFI_ADJUST_CFA_OFFSET 4 -+ 
pushl $do_overflow -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+ENTRY(bounds) -+ RING0_INT_FRAME -+ pushl $0 -+ CFI_ADJUST_CFA_OFFSET 4 -+ pushl $do_bounds -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+ENTRY(invalid_op) -+ RING0_INT_FRAME -+ pushl $0 -+ CFI_ADJUST_CFA_OFFSET 4 -+ pushl $do_invalid_op -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+ENTRY(coprocessor_segment_overrun) -+ RING0_INT_FRAME -+ pushl $0 -+ CFI_ADJUST_CFA_OFFSET 4 -+ pushl $do_coprocessor_segment_overrun -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+ENTRY(invalid_TSS) -+ RING0_EC_FRAME -+ pushl $do_invalid_TSS -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+ENTRY(segment_not_present) -+ RING0_EC_FRAME -+ pushl $do_segment_not_present -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+ENTRY(stack_segment) -+ RING0_EC_FRAME -+ pushl $do_stack_segment -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+KPROBE_ENTRY(general_protection) -+ RING0_EC_FRAME -+ pushl $do_general_protection -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ .previous .text -+ -+ENTRY(alignment_check) -+ RING0_EC_FRAME -+ pushl $do_alignment_check -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+KPROBE_ENTRY(page_fault) -+ RING0_EC_FRAME -+ pushl $do_page_fault -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ .previous .text -+ -+#ifdef CONFIG_X86_MCE -+ENTRY(machine_check) -+ RING0_INT_FRAME -+ pushl $0 -+ CFI_ADJUST_CFA_OFFSET 4 -+ pushl machine_check_vector -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+#endif -+ -+#ifndef CONFIG_XEN -+ENTRY(spurious_interrupt_bug) -+ RING0_INT_FRAME -+ pushl $0 -+ CFI_ADJUST_CFA_OFFSET 4 -+ pushl $do_spurious_interrupt_bug -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+#endif /* !CONFIG_XEN */ -+ -+#ifdef CONFIG_STACK_UNWIND -+ENTRY(arch_unwind_init_running) -+ CFI_STARTPROC -+ movl 4(%esp), 
%edx -+ movl (%esp), %ecx -+ leal 4(%esp), %eax -+ movl %ebx, EBX(%edx) -+ xorl %ebx, %ebx -+ movl %ebx, ECX(%edx) -+ movl %ebx, EDX(%edx) -+ movl %esi, ESI(%edx) -+ movl %edi, EDI(%edx) -+ movl %ebp, EBP(%edx) -+ movl %ebx, EAX(%edx) -+ movl $__USER_DS, DS(%edx) -+ movl $__USER_DS, ES(%edx) -+ movl %ebx, ORIG_EAX(%edx) -+ movl %ecx, EIP(%edx) -+ movl 12(%esp), %ecx -+ movl $__KERNEL_CS, CS(%edx) -+ movl %ebx, EFLAGS(%edx) -+ movl %eax, OLDESP(%edx) -+ movl 8(%esp), %eax -+ movl %ecx, 8(%esp) -+ movl EBX(%edx), %ebx -+ movl $__KERNEL_DS, OLDSS(%edx) -+ jmpl *%eax -+ CFI_ENDPROC -+ENDPROC(arch_unwind_init_running) -+#endif -+ -+ENTRY(fixup_4gb_segment) -+ RING0_EC_FRAME -+ pushl $do_fixup_4gb_segment -+ CFI_ADJUST_CFA_OFFSET 4 -+ jmp error_code -+ CFI_ENDPROC -+ -+.section .rodata,"a" -+#include "syscall_table.S" -+ -+syscall_table_size=(.-sys_call_table) ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/fixup.c 2008-01-28 12:24:18.000000000 +0100 -@@ -0,0 +1,88 @@ -+/****************************************************************************** -+ * fixup.c -+ * -+ * Binary-rewriting of certain IA32 instructions, on notification by Xen. -+ * Used to avoid repeated slow emulation of common instructions used by the -+ * user-space TLS (Thread-Local Storage) libraries. -+ * -+ * **** NOTE **** -+ * Issues with the binary rewriting have caused it to be removed. Instead -+ * we rely on Xen's emulator to boot the kernel, and then print a banner -+ * message recommending that the user disables /lib/tls. -+ * -+ * Copyright (c) 2004, K A Fraser -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args ) -+ -+fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code) -+{ -+ static unsigned long printed = 0; -+ char info[100]; -+ int i; -+ -+ /* Ignore statically-linked init. */ -+ if (current->tgid == 1) -+ return; -+ -+ VOID(HYPERVISOR_vm_assist(VMASST_CMD_disable, -+ VMASST_TYPE_4gb_segments_notify)); -+ -+ if (test_and_set_bit(0, &printed)) -+ return; -+ -+ sprintf(info, "%s (pid=%d)", current->comm, current->tgid); -+ -+ DP(""); -+ DP("***************************************************************"); -+ DP("***************************************************************"); -+ DP("** WARNING: Currently emulating unsupported memory accesses **"); -+ DP("** in /lib/tls glibc libraries. The emulation is **"); -+ DP("** slow. To ensure full performance you should **"); -+ DP("** install a 'xen-friendly' (nosegneg) version of **"); -+ DP("** the library, or disable tls support by executing **"); -+ DP("** the following as root: **"); -+ DP("** mv /lib/tls /lib/tls.disabled **"); -+ DP("** Offending process: %-38.38s **", info); -+ DP("***************************************************************"); -+ DP("***************************************************************"); -+ DP(""); -+ -+ for (i = 5; i > 0; i--) { -+ touch_softlockup_watchdog(); -+ printk("Pausing... 
%d", i); -+ mdelay(1000); -+ printk("\b\b\b\b\b\b\b\b\b\b\b\b"); -+ } -+ -+ printk("Continuing...\n\n"); -+} -+ -+static int __init fixup_init(void) -+{ -+ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable, -+ VMASST_TYPE_4gb_segments_notify)); -+ return 0; -+} -+__initcall(fixup_init); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/head_32-xen.S 2007-06-12 13:12:48.000000000 +0200 -@@ -0,0 +1,207 @@ -+ -+ -+.text -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * References to members of the new_cpu_data structure. -+ */ -+ -+#define X86 new_cpu_data+CPUINFO_x86 -+#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor -+#define X86_MODEL new_cpu_data+CPUINFO_x86_model -+#define X86_MASK new_cpu_data+CPUINFO_x86_mask -+#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math -+#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level -+#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability -+#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id -+ -+#define VIRT_ENTRY_OFFSET 0x0 -+.org VIRT_ENTRY_OFFSET -+ENTRY(startup_32) -+ movl %esi,xen_start_info -+ cld -+ -+ /* Set up the stack pointer */ -+ movl $(init_thread_union+THREAD_SIZE),%esp -+ -+ /* get vendor info */ -+ xorl %eax,%eax # call CPUID with 0 -> return vendor ID -+ XEN_CPUID -+ movl %eax,X86_CPUID # save CPUID level -+ movl %ebx,X86_VENDOR_ID # lo 4 chars -+ movl %edx,X86_VENDOR_ID+4 # next 4 chars -+ movl %ecx,X86_VENDOR_ID+8 # last 4 chars -+ -+ movl $1,%eax # Use the CPUID instruction to get CPU type -+ XEN_CPUID -+ movb %al,%cl # save reg for future use -+ andb $0x0f,%ah # mask processor family -+ movb %ah,X86 -+ andb $0xf0,%al # mask model -+ shrb $4,%al -+ movb %al,X86_MODEL -+ andb $0x0f,%cl # mask mask revision -+ movb %cl,X86_MASK -+ movl %edx,X86_CAPABILITY -+ -+ movb $1,X86_HARD_MATH -+ -+ xorl %eax,%eax # Clear FS/GS and LDT -+ movl %eax,%fs -+ movl %eax,%gs -+ cld # gcc2 wants the direction 
flag cleared at all times -+ -+ pushl %eax # fake return address -+ jmp start_kernel -+ -+#define HYPERCALL_PAGE_OFFSET 0x1000 -+.org HYPERCALL_PAGE_OFFSET -+ENTRY(hypercall_page) -+ CFI_STARTPROC -+.skip 0x1000 -+ CFI_ENDPROC -+ -+/* -+ * Real beginning of normal "text" segment -+ */ -+ENTRY(stext) -+ENTRY(_stext) -+ -+/* -+ * BSS section -+ */ -+.section ".bss.page_aligned","w" -+ENTRY(empty_zero_page) -+ .fill 4096,1,0 -+ -+/* -+ * This starts the data section. -+ */ -+.data -+ -+/* -+ * The Global Descriptor Table contains 28 quadwords, per-CPU. -+ */ -+ .align L1_CACHE_BYTES -+ENTRY(cpu_gdt_table) -+ .quad 0x0000000000000000 /* NULL descriptor */ -+ .quad 0x0000000000000000 /* 0x0b reserved */ -+ .quad 0x0000000000000000 /* 0x13 reserved */ -+ .quad 0x0000000000000000 /* 0x1b reserved */ -+ .quad 0x0000000000000000 /* 0x20 unused */ -+ .quad 0x0000000000000000 /* 0x28 unused */ -+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */ -+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */ -+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */ -+ .quad 0x0000000000000000 /* 0x4b reserved */ -+ .quad 0x0000000000000000 /* 0x53 reserved */ -+ .quad 0x0000000000000000 /* 0x5b reserved */ -+ -+ .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */ -+ .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */ -+ .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */ -+ .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */ -+ -+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */ -+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */ -+ -+ /* -+ * Segments used for calling PnP BIOS have byte granularity. -+ * They code segments and data segments have fixed 64k limits, -+ * the transfer segment sizes are set at run time. 
-+ */ -+ .quad 0x0000000000000000 /* 0x90 32-bit code */ -+ .quad 0x0000000000000000 /* 0x98 16-bit code */ -+ .quad 0x0000000000000000 /* 0xa0 16-bit data */ -+ .quad 0x0000000000000000 /* 0xa8 16-bit data */ -+ .quad 0x0000000000000000 /* 0xb0 16-bit data */ -+ -+ /* -+ * The APM segments have byte granularity and their bases -+ * are set at run time. All have 64k limits. -+ */ -+ .quad 0x0000000000000000 /* 0xb8 APM CS code */ -+ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */ -+ .quad 0x0000000000000000 /* 0xc8 APM DS data */ -+ -+ .quad 0x0000000000000000 /* 0xd0 - ESPFIX 16-bit SS */ -+ .quad 0x0000000000000000 /* 0xd8 - unused */ -+ .quad 0x0000000000000000 /* 0xe0 - unused */ -+ .quad 0x0000000000000000 /* 0xe8 - unused */ -+ .quad 0x0000000000000000 /* 0xf0 - unused */ -+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ -+ -+#if CONFIG_XEN_COMPAT <= 0x030002 -+/* -+ * __xen_guest information -+ */ -+.macro utoa value -+ .if (\value) < 0 || (\value) >= 0x10 -+ utoa (((\value)>>4)&0x0fffffff) -+ .endif -+ .if ((\value) & 0xf) < 10 -+ .byte '0' + ((\value) & 0xf) -+ .else -+ .byte 'A' + ((\value) & 0xf) - 10 -+ .endif -+.endm -+ -+.section __xen_guest -+ .ascii "GUEST_OS=linux,GUEST_VER=2.6" -+ .ascii ",XEN_VER=xen-3.0" -+ .ascii ",VIRT_BASE=0x" -+ utoa __PAGE_OFFSET -+ .ascii ",ELF_PADDR_OFFSET=0x" -+ utoa __PAGE_OFFSET -+ .ascii ",VIRT_ENTRY=0x" -+ utoa (__PAGE_OFFSET + __PHYSICAL_START + VIRT_ENTRY_OFFSET) -+ .ascii ",HYPERCALL_PAGE=0x" -+ utoa ((__PHYSICAL_START+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT) -+ .ascii ",FEATURES=writable_page_tables" -+ .ascii "|writable_descriptor_tables" -+ .ascii "|auto_translated_physmap" -+ .ascii "|pae_pgdir_above_4gb" -+ .ascii "|supervisor_mode_kernel" -+#ifdef CONFIG_X86_PAE -+ .ascii ",PAE=yes[extended-cr3]" -+#else -+ .ascii ",PAE=no" -+#endif -+ .ascii ",LOADER=generic" -+ .byte 0 -+#endif /* CONFIG_XEN_COMPAT <= 0x030002 */ -+ -+ -+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux") -+ 
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6") -+ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0") -+ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, __PAGE_OFFSET) -+#if CONFIG_XEN_COMPAT <= 0x030002 -+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, __PAGE_OFFSET) -+#else -+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, 0) -+#endif -+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, startup_32) -+ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page) -+ ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long, HYPERVISOR_VIRT_START) -+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel") -+#ifdef CONFIG_X86_PAE -+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes") -+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT) -+#else -+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no") -+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, _PAGE_PRESENT,_PAGE_PRESENT) -+#endif -+ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic") -+ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1) ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/io_apic_32-xen.c 2009-03-18 10:39:31.000000000 +0100 -@@ -0,0 +1,2786 @@ -+/* -+ * Intel IO-APIC support for multi-Pentium hosts. -+ * -+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo -+ * -+ * Many thanks to Stig Venaas for trying out countless experimental -+ * patches and reporting/debugging problems patiently! -+ * -+ * (c) 1999, Multiple IO-APIC support, developed by -+ * Ken-ichi Yaku and -+ * Hidemi Kishimoto , -+ * further tested and cleaned up by Zach Brown -+ * and Ingo Molnar -+ * -+ * Fixes -+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs; -+ * thanks to Eric Gilmore -+ * and Rolf G. 
Tews -+ * for testing these extensively -+ * Paul Diefenbaugh : Added full ACPI support -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "io_ports.h" -+ -+#ifdef CONFIG_XEN -+ -+#include -+#include -+#include -+ -+/* Fake i8259 */ -+#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq))) -+#define disable_8259A_irq(_irq) ((void)0) -+#define i8259A_irq_pending(_irq) (0) -+ -+unsigned long io_apic_irqs; -+ -+static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg) -+{ -+ struct physdev_apic apic_op; -+ int ret; -+ -+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr; -+ apic_op.reg = reg; -+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); -+ if (ret) -+ return ret; -+ return apic_op.value; -+} -+ -+static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) -+{ -+ struct physdev_apic apic_op; -+ -+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr; -+ apic_op.reg = reg; -+ apic_op.value = value; -+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op)); -+} -+ -+#define io_apic_read(a,r) xen_io_apic_read(a,r) -+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v) -+ -+#endif /* CONFIG_XEN */ -+ -+int (*ioapic_renumber_irq)(int ioapic, int irq); -+atomic_t irq_mis_count; -+ -+#ifndef CONFIG_XEN -+/* Where if anywhere is the i8259 connect in external int mode */ -+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; -+#endif -+ -+static DEFINE_SPINLOCK(ioapic_lock); -+static DEFINE_SPINLOCK(vector_lock); -+ -+int timer_over_8254 __initdata = 1; -+ -+/* -+ * Is the SiS APIC rmw bug present ? 
-+ * -1 = don't know, 0 = no, 1 = yes -+ */ -+int sis_apic_bug = -1; -+ -+/* -+ * # of IRQ routing registers -+ */ -+int nr_ioapic_registers[MAX_IO_APICS]; -+ -+int disable_timer_pin_1 __initdata; -+ -+/* -+ * Rough estimation of how many shared IRQs there are, can -+ * be changed anytime. -+ */ -+#define MAX_PLUS_SHARED_IRQS NR_IRQS -+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) -+ -+/* -+ * This is performance-critical, we want to do it O(1) -+ * -+ * the indexing order of this array favors 1:1 mappings -+ * between pins and IRQs. -+ */ -+ -+static struct irq_pin_list { -+ int apic, pin, next; -+} irq_2_pin[PIN_MAP_SIZE]; -+ -+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1}; -+#ifdef CONFIG_PCI_MSI -+#define vector_to_irq(vector) \ -+ (platform_legacy_irq(vector) ? vector : vector_irq[vector]) -+#else -+#define vector_to_irq(vector) (vector) -+#endif -+ -+/* -+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are -+ * shared ISA-space IRQs, so we have to support them. We are super -+ * fast in the common case, and fast for shared ISA-space IRQs. -+ */ -+static void add_pin_to_irq(unsigned int irq, int apic, int pin) -+{ -+ static int first_free_entry = NR_IRQS; -+ struct irq_pin_list *entry = irq_2_pin + irq; -+ -+ while (entry->next) -+ entry = irq_2_pin + entry->next; -+ -+ if (entry->pin != -1) { -+ entry->next = first_free_entry; -+ entry = irq_2_pin + entry->next; -+ if (++first_free_entry >= PIN_MAP_SIZE) -+ panic("io_apic.c: whoops"); -+ } -+ entry->apic = apic; -+ entry->pin = pin; -+} -+ -+#ifdef CONFIG_XEN -+#define clear_IO_APIC() ((void)0) -+#else -+/* -+ * Reroute an IRQ to a different pin. 
-+ */ -+static void __init replace_pin_at_irq(unsigned int irq, -+ int oldapic, int oldpin, -+ int newapic, int newpin) -+{ -+ struct irq_pin_list *entry = irq_2_pin + irq; -+ -+ while (1) { -+ if (entry->apic == oldapic && entry->pin == oldpin) { -+ entry->apic = newapic; -+ entry->pin = newpin; -+ } -+ if (!entry->next) -+ break; -+ entry = irq_2_pin + entry->next; -+ } -+} -+ -+static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable) -+{ -+ struct irq_pin_list *entry = irq_2_pin + irq; -+ unsigned int pin, reg; -+ -+ for (;;) { -+ pin = entry->pin; -+ if (pin == -1) -+ break; -+ reg = io_apic_read(entry->apic, 0x10 + pin*2); -+ reg &= ~disable; -+ reg |= enable; -+ io_apic_modify(entry->apic, 0x10 + pin*2, reg); -+ if (!entry->next) -+ break; -+ entry = irq_2_pin + entry->next; -+ } -+} -+ -+/* mask = 1 */ -+static void __mask_IO_APIC_irq (unsigned int irq) -+{ -+ __modify_IO_APIC_irq(irq, 0x00010000, 0); -+} -+ -+/* mask = 0 */ -+static void __unmask_IO_APIC_irq (unsigned int irq) -+{ -+ __modify_IO_APIC_irq(irq, 0, 0x00010000); -+} -+ -+/* mask = 1, trigger = 0 */ -+static void __mask_and_edge_IO_APIC_irq (unsigned int irq) -+{ -+ __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000); -+} -+ -+/* mask = 0, trigger = 1 */ -+static void __unmask_and_level_IO_APIC_irq (unsigned int irq) -+{ -+ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000); -+} -+ -+static void mask_IO_APIC_irq (unsigned int irq) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ __mask_IO_APIC_irq(irq); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+} -+ -+static void unmask_IO_APIC_irq (unsigned int irq) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ __unmask_IO_APIC_irq(irq); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+} -+ -+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) -+{ -+ struct IO_APIC_route_entry entry; -+ unsigned long flags; -+ -+ /* Check delivery_mode to be 
sure we're not clearing an SMI pin */ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); -+ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ if (entry.delivery_mode == dest_SMI) -+ return; -+ -+ /* -+ * Disable it in the IO-APIC irq-routing table: -+ */ -+ memset(&entry, 0, sizeof(entry)); -+ entry.mask = 1; -+ spin_lock_irqsave(&ioapic_lock, flags); -+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0)); -+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1)); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+} -+ -+static void clear_IO_APIC (void) -+{ -+ int apic, pin; -+ -+ for (apic = 0; apic < nr_ioapics; apic++) -+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) -+ clear_IO_APIC_pin(apic, pin); -+} -+ -+#ifdef CONFIG_SMP -+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask) -+{ -+ unsigned long flags; -+ int pin; -+ struct irq_pin_list *entry = irq_2_pin + irq; -+ unsigned int apicid_value; -+ cpumask_t tmp; -+ -+ cpus_and(tmp, cpumask, cpu_online_map); -+ if (cpus_empty(tmp)) -+ tmp = TARGET_CPUS; -+ -+ cpus_and(cpumask, tmp, CPU_MASK_ALL); -+ -+ apicid_value = cpu_mask_to_apicid(cpumask); -+ /* Prepare to do the io_apic_write */ -+ apicid_value = apicid_value << 24; -+ spin_lock_irqsave(&ioapic_lock, flags); -+ for (;;) { -+ pin = entry->pin; -+ if (pin == -1) -+ break; -+ io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value); -+ if (!entry->next) -+ break; -+ entry = irq_2_pin + entry->next; -+ } -+ set_irq_info(irq, cpumask); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+} -+ -+#if defined(CONFIG_IRQBALANCE) -+# include /* kernel_thread() */ -+# include /* kstat */ -+# include /* kmalloc() */ -+# include /* time_after() */ -+ -+#ifdef CONFIG_BALANCED_IRQ_DEBUG -+# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0) -+# define Dprintk(x...) 
do { TDprintk(x); } while (0) -+# else -+# define TDprintk(x...) -+# define Dprintk(x...) -+# endif -+ -+#define IRQBALANCE_CHECK_ARCH -999 -+#define MAX_BALANCED_IRQ_INTERVAL (5*HZ) -+#define MIN_BALANCED_IRQ_INTERVAL (HZ/2) -+#define BALANCED_IRQ_MORE_DELTA (HZ/10) -+#define BALANCED_IRQ_LESS_DELTA (HZ) -+ -+static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH; -+static int physical_balance __read_mostly; -+static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL; -+ -+static struct irq_cpu_info { -+ unsigned long * last_irq; -+ unsigned long * irq_delta; -+ unsigned long irq; -+} irq_cpu_data[NR_CPUS]; -+ -+#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq) -+#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq]) -+#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq]) -+ -+#define IDLE_ENOUGH(cpu,now) \ -+ (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1)) -+ -+#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask) -+ -+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i])) -+ -+static cpumask_t balance_irq_affinity[NR_IRQS] = { -+ [0 ... 
NR_IRQS-1] = CPU_MASK_ALL -+}; -+ -+void set_balance_irq_affinity(unsigned int irq, cpumask_t mask) -+{ -+ balance_irq_affinity[irq] = mask; -+} -+ -+static unsigned long move(int curr_cpu, cpumask_t allowed_mask, -+ unsigned long now, int direction) -+{ -+ int search_idle = 1; -+ int cpu = curr_cpu; -+ -+ goto inside; -+ -+ do { -+ if (unlikely(cpu == curr_cpu)) -+ search_idle = 0; -+inside: -+ if (direction == 1) { -+ cpu++; -+ if (cpu >= NR_CPUS) -+ cpu = 0; -+ } else { -+ cpu--; -+ if (cpu == -1) -+ cpu = NR_CPUS-1; -+ } -+ } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) || -+ (search_idle && !IDLE_ENOUGH(cpu,now))); -+ -+ return cpu; -+} -+ -+static inline void balance_irq(int cpu, int irq) -+{ -+ unsigned long now = jiffies; -+ cpumask_t allowed_mask; -+ unsigned int new_cpu; -+ -+ if (irqbalance_disabled) -+ return; -+ -+ cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]); -+ new_cpu = move(cpu, allowed_mask, now, 1); -+ if (cpu != new_cpu) { -+ set_pending_irq(irq, cpumask_of_cpu(new_cpu)); -+ } -+} -+ -+static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) -+{ -+ int i, j; -+ Dprintk("Rotating IRQs among CPUs.\n"); -+ for_each_online_cpu(i) { -+ for (j = 0; j < NR_IRQS; j++) { -+ if (!irq_desc[j].action) -+ continue; -+ /* Is it a significant load ? 
*/ -+ if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) < -+ useful_load_threshold) -+ continue; -+ balance_irq(i, j); -+ } -+ } -+ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, -+ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); -+ return; -+} -+ -+static void do_irq_balance(void) -+{ -+ int i, j; -+ unsigned long max_cpu_irq = 0, min_cpu_irq = (~0); -+ unsigned long move_this_load = 0; -+ int max_loaded = 0, min_loaded = 0; -+ int load; -+ unsigned long useful_load_threshold = balanced_irq_interval + 10; -+ int selected_irq; -+ int tmp_loaded, first_attempt = 1; -+ unsigned long tmp_cpu_irq; -+ unsigned long imbalance = 0; -+ cpumask_t allowed_mask, target_cpu_mask, tmp; -+ -+ for_each_possible_cpu(i) { -+ int package_index; -+ CPU_IRQ(i) = 0; -+ if (!cpu_online(i)) -+ continue; -+ package_index = CPU_TO_PACKAGEINDEX(i); -+ for (j = 0; j < NR_IRQS; j++) { -+ unsigned long value_now, delta; -+ /* Is this an active IRQ? */ -+ if (!irq_desc[j].action) -+ continue; -+ if ( package_index == i ) -+ IRQ_DELTA(package_index,j) = 0; -+ /* Determine the total count per processor per IRQ */ -+ value_now = (unsigned long) kstat_cpu(i).irqs[j]; -+ -+ /* Determine the activity per processor per IRQ */ -+ delta = value_now - LAST_CPU_IRQ(i,j); -+ -+ /* Update last_cpu_irq[][] for the next time */ -+ LAST_CPU_IRQ(i,j) = value_now; -+ -+ /* Ignore IRQs whose rate is less than the clock */ -+ if (delta < useful_load_threshold) -+ continue; -+ /* update the load for the processor or package total */ -+ IRQ_DELTA(package_index,j) += delta; -+ -+ /* Keep track of the higher numbered sibling as well */ -+ if (i != package_index) -+ CPU_IRQ(i) += delta; -+ /* -+ * We have sibling A and sibling B in the package -+ * -+ * cpu_irq[A] = load for cpu A + load for cpu B -+ * cpu_irq[B] = load for cpu B -+ */ -+ CPU_IRQ(package_index) += delta; -+ } -+ } -+ /* Find the least loaded processor package */ -+ for_each_online_cpu(i) { -+ if (i != CPU_TO_PACKAGEINDEX(i)) -+ continue; -+ if 
(min_cpu_irq > CPU_IRQ(i)) { -+ min_cpu_irq = CPU_IRQ(i); -+ min_loaded = i; -+ } -+ } -+ max_cpu_irq = ULONG_MAX; -+ -+tryanothercpu: -+ /* Look for heaviest loaded processor. -+ * We may come back to get the next heaviest loaded processor. -+ * Skip processors with trivial loads. -+ */ -+ tmp_cpu_irq = 0; -+ tmp_loaded = -1; -+ for_each_online_cpu(i) { -+ if (i != CPU_TO_PACKAGEINDEX(i)) -+ continue; -+ if (max_cpu_irq <= CPU_IRQ(i)) -+ continue; -+ if (tmp_cpu_irq < CPU_IRQ(i)) { -+ tmp_cpu_irq = CPU_IRQ(i); -+ tmp_loaded = i; -+ } -+ } -+ -+ if (tmp_loaded == -1) { -+ /* In the case of small number of heavy interrupt sources, -+ * loading some of the cpus too much. We use Ingo's original -+ * approach to rotate them around. -+ */ -+ if (!first_attempt && imbalance >= useful_load_threshold) { -+ rotate_irqs_among_cpus(useful_load_threshold); -+ return; -+ } -+ goto not_worth_the_effort; -+ } -+ -+ first_attempt = 0; /* heaviest search */ -+ max_cpu_irq = tmp_cpu_irq; /* load */ -+ max_loaded = tmp_loaded; /* processor */ -+ imbalance = (max_cpu_irq - min_cpu_irq) / 2; -+ -+ Dprintk("max_loaded cpu = %d\n", max_loaded); -+ Dprintk("min_loaded cpu = %d\n", min_loaded); -+ Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq); -+ Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq); -+ Dprintk("load imbalance = %lu\n", imbalance); -+ -+ /* if imbalance is less than approx 10% of max load, then -+ * observe diminishing returns action. - quit -+ */ -+ if (imbalance < (max_cpu_irq >> 3)) { -+ Dprintk("Imbalance too trivial\n"); -+ goto not_worth_the_effort; -+ } -+ -+tryanotherirq: -+ /* if we select an IRQ to move that can't go where we want, then -+ * see if there is another one to try. -+ */ -+ move_this_load = 0; -+ selected_irq = -1; -+ for (j = 0; j < NR_IRQS; j++) { -+ /* Is this an active IRQ? 
*/ -+ if (!irq_desc[j].action) -+ continue; -+ if (imbalance <= IRQ_DELTA(max_loaded,j)) -+ continue; -+ /* Try to find the IRQ that is closest to the imbalance -+ * without going over. -+ */ -+ if (move_this_load < IRQ_DELTA(max_loaded,j)) { -+ move_this_load = IRQ_DELTA(max_loaded,j); -+ selected_irq = j; -+ } -+ } -+ if (selected_irq == -1) { -+ goto tryanothercpu; -+ } -+ -+ imbalance = move_this_load; -+ -+ /* For physical_balance case, we accumlated both load -+ * values in the one of the siblings cpu_irq[], -+ * to use the same code for physical and logical processors -+ * as much as possible. -+ * -+ * NOTE: the cpu_irq[] array holds the sum of the load for -+ * sibling A and sibling B in the slot for the lowest numbered -+ * sibling (A), _AND_ the load for sibling B in the slot for -+ * the higher numbered sibling. -+ * -+ * We seek the least loaded sibling by making the comparison -+ * (A+B)/2 vs B -+ */ -+ load = CPU_IRQ(min_loaded) >> 1; -+ for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) { -+ if (load > CPU_IRQ(j)) { -+ /* This won't change cpu_sibling_map[min_loaded] */ -+ load = CPU_IRQ(j); -+ min_loaded = j; -+ } -+ } -+ -+ cpus_and(allowed_mask, -+ cpu_online_map, -+ balance_irq_affinity[selected_irq]); -+ target_cpu_mask = cpumask_of_cpu(min_loaded); -+ cpus_and(tmp, target_cpu_mask, allowed_mask); -+ -+ if (!cpus_empty(tmp)) { -+ -+ Dprintk("irq = %d moved to cpu = %d\n", -+ selected_irq, min_loaded); -+ /* mark for change destination */ -+ set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded)); -+ -+ /* Since we made a change, come back sooner to -+ * check for more variation. 
-+ */ -+ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, -+ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); -+ return; -+ } -+ goto tryanotherirq; -+ -+not_worth_the_effort: -+ /* -+ * if we did not find an IRQ to move, then adjust the time interval -+ * upward -+ */ -+ balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL, -+ balanced_irq_interval + BALANCED_IRQ_MORE_DELTA); -+ Dprintk("IRQ worth rotating not found\n"); -+ return; -+} -+ -+static int balanced_irq(void *unused) -+{ -+ int i; -+ unsigned long prev_balance_time = jiffies; -+ long time_remaining = balanced_irq_interval; -+ -+ daemonize("kirqd"); -+ -+ /* push everything to CPU 0 to give us a starting point. */ -+ for (i = 0 ; i < NR_IRQS ; i++) { -+ irq_desc[i].pending_mask = cpumask_of_cpu(0); -+ set_pending_irq(i, cpumask_of_cpu(0)); -+ } -+ -+ for ( ; ; ) { -+ time_remaining = schedule_timeout_interruptible(time_remaining); -+ try_to_freeze(); -+ if (time_after(jiffies, -+ prev_balance_time+balanced_irq_interval)) { -+ preempt_disable(); -+ do_irq_balance(); -+ prev_balance_time = jiffies; -+ time_remaining = balanced_irq_interval; -+ preempt_enable(); -+ } -+ } -+ return 0; -+} -+ -+static int __init balanced_irq_init(void) -+{ -+ int i; -+ struct cpuinfo_x86 *c; -+ cpumask_t tmp; -+ -+ cpus_shift_right(tmp, cpu_online_map, 2); -+ c = &boot_cpu_data; -+ /* When not overwritten by the command line ask subarchitecture. 
*/ -+ if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH) -+ irqbalance_disabled = NO_BALANCE_IRQ; -+ if (irqbalance_disabled) -+ return 0; -+ -+ /* disable irqbalance completely if there is only one processor online */ -+ if (num_online_cpus() < 2) { -+ irqbalance_disabled = 1; -+ return 0; -+ } -+ /* -+ * Enable physical balance only if more than 1 physical processor -+ * is present -+ */ -+ if (smp_num_siblings > 1 && !cpus_empty(tmp)) -+ physical_balance = 1; -+ -+ for_each_online_cpu(i) { -+ irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); -+ irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); -+ if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { -+ printk(KERN_ERR "balanced_irq_init: out of memory"); -+ goto failed; -+ } -+ memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS); -+ memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS); -+ } -+ -+ printk(KERN_INFO "Starting balanced_irq\n"); -+ if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0) -+ return 0; -+ else -+ printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); -+failed: -+ for_each_possible_cpu(i) { -+ kfree(irq_cpu_data[i].irq_delta); -+ irq_cpu_data[i].irq_delta = NULL; -+ kfree(irq_cpu_data[i].last_irq); -+ irq_cpu_data[i].last_irq = NULL; -+ } -+ return 0; -+} -+ -+int __init irqbalance_disable(char *str) -+{ -+ irqbalance_disabled = 1; -+ return 1; -+} -+ -+__setup("noirqbalance", irqbalance_disable); -+ -+late_initcall(balanced_irq_init); -+#endif /* CONFIG_IRQBALANCE */ -+#endif /* CONFIG_SMP */ -+#endif -+ -+#ifndef CONFIG_SMP -+void fastcall send_IPI_self(int vector) -+{ -+#ifndef CONFIG_XEN -+ unsigned int cfg; -+ -+ /* -+ * Wait for idle. -+ */ -+ apic_wait_icr_idle(); -+ cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL; -+ /* -+ * Send the IPI. The write to APIC_ICR fires this off. 
-+ */ -+ apic_write_around(APIC_ICR, cfg); -+#endif -+} -+#endif /* !CONFIG_SMP */ -+ -+ -+/* -+ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to -+ * specific CPU-side IRQs. -+ */ -+ -+#define MAX_PIRQS 8 -+static int pirq_entries [MAX_PIRQS]; -+static int pirqs_enabled; -+int skip_ioapic_setup; -+ -+static int __init ioapic_setup(char *str) -+{ -+ skip_ioapic_setup = 1; -+ return 1; -+} -+ -+__setup("noapic", ioapic_setup); -+ -+static int __init ioapic_pirq_setup(char *str) -+{ -+ int i, max; -+ int ints[MAX_PIRQS+1]; -+ -+ get_options(str, ARRAY_SIZE(ints), ints); -+ -+ for (i = 0; i < MAX_PIRQS; i++) -+ pirq_entries[i] = -1; -+ -+ pirqs_enabled = 1; -+ apic_printk(APIC_VERBOSE, KERN_INFO -+ "PIRQ redirection, working around broken MP-BIOS.\n"); -+ max = MAX_PIRQS; -+ if (ints[0] < MAX_PIRQS) -+ max = ints[0]; -+ -+ for (i = 0; i < max; i++) { -+ apic_printk(APIC_VERBOSE, KERN_DEBUG -+ "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); -+ /* -+ * PIRQs are mapped upside down, usually. -+ */ -+ pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; -+ } -+ return 1; -+} -+ -+__setup("pirq=", ioapic_pirq_setup); -+ -+/* -+ * Find the IRQ entry number of a certain pin. 
-+ */ -+static int find_irq_entry(int apic, int pin, int type) -+{ -+ int i; -+ -+ for (i = 0; i < mp_irq_entries; i++) -+ if (mp_irqs[i].mpc_irqtype == type && -+ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid || -+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) && -+ mp_irqs[i].mpc_dstirq == pin) -+ return i; -+ -+ return -1; -+} -+ -+#ifndef CONFIG_XEN -+/* -+ * Find the pin to which IRQ[irq] (ISA) is connected -+ */ -+static int __init find_isa_irq_pin(int irq, int type) -+{ -+ int i; -+ -+ for (i = 0; i < mp_irq_entries; i++) { -+ int lbus = mp_irqs[i].mpc_srcbus; -+ -+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || -+ mp_bus_id_to_type[lbus] == MP_BUS_EISA || -+ mp_bus_id_to_type[lbus] == MP_BUS_MCA || -+ mp_bus_id_to_type[lbus] == MP_BUS_NEC98 -+ ) && -+ (mp_irqs[i].mpc_irqtype == type) && -+ (mp_irqs[i].mpc_srcbusirq == irq)) -+ -+ return mp_irqs[i].mpc_dstirq; -+ } -+ return -1; -+} -+ -+static int __init find_isa_irq_apic(int irq, int type) -+{ -+ int i; -+ -+ for (i = 0; i < mp_irq_entries; i++) { -+ int lbus = mp_irqs[i].mpc_srcbus; -+ -+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || -+ mp_bus_id_to_type[lbus] == MP_BUS_EISA || -+ mp_bus_id_to_type[lbus] == MP_BUS_MCA || -+ mp_bus_id_to_type[lbus] == MP_BUS_NEC98 -+ ) && -+ (mp_irqs[i].mpc_irqtype == type) && -+ (mp_irqs[i].mpc_srcbusirq == irq)) -+ break; -+ } -+ if (i < mp_irq_entries) { -+ int apic; -+ for(apic = 0; apic < nr_ioapics; apic++) { -+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic) -+ return apic; -+ } -+ } -+ -+ return -1; -+} -+#endif -+ -+/* -+ * Find a specific PCI IRQ entry. 
-+ * Not an __init, possibly needed by modules -+ */ -+static int pin_2_irq(int idx, int apic, int pin); -+ -+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) -+{ -+ int apic, i, best_guess = -1; -+ -+ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, " -+ "slot:%d, pin:%d.\n", bus, slot, pin); -+ if (mp_bus_id_to_pci_bus[bus] == -1) { -+ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus); -+ return -1; -+ } -+ for (i = 0; i < mp_irq_entries; i++) { -+ int lbus = mp_irqs[i].mpc_srcbus; -+ -+ for (apic = 0; apic < nr_ioapics; apic++) -+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic || -+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) -+ break; -+ -+ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) && -+ !mp_irqs[i].mpc_irqtype && -+ (bus == lbus) && -+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) { -+ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq); -+ -+ if (!(apic || IO_APIC_IRQ(irq))) -+ continue; -+ -+ if (pin == (mp_irqs[i].mpc_srcbusirq & 3)) -+ return irq; -+ /* -+ * Use the first all-but-pin matching entry as a -+ * best-guess fuzzy result for broken mptables. 
-+ */ -+ if (best_guess < 0) -+ best_guess = irq; -+ } -+ } -+ return best_guess; -+} -+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); -+ -+/* -+ * This function currently is only a helper for the i386 smp boot process where -+ * we need to reprogram the ioredtbls to cater for the cpus which have come online -+ * so mask in all cases should simply be TARGET_CPUS -+ */ -+#ifdef CONFIG_SMP -+#ifndef CONFIG_XEN -+void __init setup_ioapic_dest(void) -+{ -+ int pin, ioapic, irq, irq_entry; -+ -+ if (skip_ioapic_setup == 1) -+ return; -+ -+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) { -+ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { -+ irq_entry = find_irq_entry(ioapic, pin, mp_INT); -+ if (irq_entry == -1) -+ continue; -+ irq = pin_2_irq(irq_entry, ioapic, pin); -+ set_ioapic_affinity_irq(irq, TARGET_CPUS); -+ } -+ -+ } -+} -+#endif /* !CONFIG_XEN */ -+#endif -+ -+/* -+ * EISA Edge/Level control register, ELCR -+ */ -+static int EISA_ELCR(unsigned int irq) -+{ -+ if (irq < 16) { -+ unsigned int port = 0x4d0 + (irq >> 3); -+ return (inb(port) >> (irq & 7)) & 1; -+ } -+ apic_printk(APIC_VERBOSE, KERN_INFO -+ "Broken MPtable reports ISA irq %d\n", irq); -+ return 0; -+} -+ -+/* EISA interrupts are always polarity zero and can be edge or level -+ * trigger depending on the ELCR value. If an interrupt is listed as -+ * EISA conforming in the MP table, that means its trigger type must -+ * be read in from the ELCR */ -+ -+#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq)) -+#define default_EISA_polarity(idx) (0) -+ -+/* ISA interrupts are always polarity zero edge triggered, -+ * when listed as conforming in the MP table. */ -+ -+#define default_ISA_trigger(idx) (0) -+#define default_ISA_polarity(idx) (0) -+ -+/* PCI interrupts are always polarity one level triggered, -+ * when listed as conforming in the MP table. 
*/ -+ -+#define default_PCI_trigger(idx) (1) -+#define default_PCI_polarity(idx) (1) -+ -+/* MCA interrupts are always polarity zero level triggered, -+ * when listed as conforming in the MP table. */ -+ -+#define default_MCA_trigger(idx) (1) -+#define default_MCA_polarity(idx) (0) -+ -+/* NEC98 interrupts are always polarity zero edge triggered, -+ * when listed as conforming in the MP table. */ -+ -+#define default_NEC98_trigger(idx) (0) -+#define default_NEC98_polarity(idx) (0) -+ -+static int __init MPBIOS_polarity(int idx) -+{ -+ int bus = mp_irqs[idx].mpc_srcbus; -+ int polarity; -+ -+ /* -+ * Determine IRQ line polarity (high active or low active): -+ */ -+ switch (mp_irqs[idx].mpc_irqflag & 3) -+ { -+ case 0: /* conforms, ie. bus-type dependent polarity */ -+ { -+ switch (mp_bus_id_to_type[bus]) -+ { -+ case MP_BUS_ISA: /* ISA pin */ -+ { -+ polarity = default_ISA_polarity(idx); -+ break; -+ } -+ case MP_BUS_EISA: /* EISA pin */ -+ { -+ polarity = default_EISA_polarity(idx); -+ break; -+ } -+ case MP_BUS_PCI: /* PCI pin */ -+ { -+ polarity = default_PCI_polarity(idx); -+ break; -+ } -+ case MP_BUS_MCA: /* MCA pin */ -+ { -+ polarity = default_MCA_polarity(idx); -+ break; -+ } -+ case MP_BUS_NEC98: /* NEC 98 pin */ -+ { -+ polarity = default_NEC98_polarity(idx); -+ break; -+ } -+ default: -+ { -+ printk(KERN_WARNING "broken BIOS!!\n"); -+ polarity = 1; -+ break; -+ } -+ } -+ break; -+ } -+ case 1: /* high active */ -+ { -+ polarity = 0; -+ break; -+ } -+ case 2: /* reserved */ -+ { -+ printk(KERN_WARNING "broken BIOS!!\n"); -+ polarity = 1; -+ break; -+ } -+ case 3: /* low active */ -+ { -+ polarity = 1; -+ break; -+ } -+ default: /* invalid */ -+ { -+ printk(KERN_WARNING "broken BIOS!!\n"); -+ polarity = 1; -+ break; -+ } -+ } -+ return polarity; -+} -+ -+static int MPBIOS_trigger(int idx) -+{ -+ int bus = mp_irqs[idx].mpc_srcbus; -+ int trigger; -+ -+ /* -+ * Determine IRQ trigger mode (edge or level sensitive): -+ */ -+ switch 
((mp_irqs[idx].mpc_irqflag>>2) & 3) -+ { -+ case 0: /* conforms, ie. bus-type dependent */ -+ { -+ switch (mp_bus_id_to_type[bus]) -+ { -+ case MP_BUS_ISA: /* ISA pin */ -+ { -+ trigger = default_ISA_trigger(idx); -+ break; -+ } -+ case MP_BUS_EISA: /* EISA pin */ -+ { -+ trigger = default_EISA_trigger(idx); -+ break; -+ } -+ case MP_BUS_PCI: /* PCI pin */ -+ { -+ trigger = default_PCI_trigger(idx); -+ break; -+ } -+ case MP_BUS_MCA: /* MCA pin */ -+ { -+ trigger = default_MCA_trigger(idx); -+ break; -+ } -+ case MP_BUS_NEC98: /* NEC 98 pin */ -+ { -+ trigger = default_NEC98_trigger(idx); -+ break; -+ } -+ default: -+ { -+ printk(KERN_WARNING "broken BIOS!!\n"); -+ trigger = 1; -+ break; -+ } -+ } -+ break; -+ } -+ case 1: /* edge */ -+ { -+ trigger = 0; -+ break; -+ } -+ case 2: /* reserved */ -+ { -+ printk(KERN_WARNING "broken BIOS!!\n"); -+ trigger = 1; -+ break; -+ } -+ case 3: /* level */ -+ { -+ trigger = 1; -+ break; -+ } -+ default: /* invalid */ -+ { -+ printk(KERN_WARNING "broken BIOS!!\n"); -+ trigger = 0; -+ break; -+ } -+ } -+ return trigger; -+} -+ -+static inline int irq_polarity(int idx) -+{ -+ return MPBIOS_polarity(idx); -+} -+ -+static inline int irq_trigger(int idx) -+{ -+ return MPBIOS_trigger(idx); -+} -+ -+static int pin_2_irq(int idx, int apic, int pin) -+{ -+ int irq, i; -+ int bus = mp_irqs[idx].mpc_srcbus; -+ -+ /* -+ * Debugging check, we are in big trouble if this message pops up! 
-+ */ -+ if (mp_irqs[idx].mpc_dstirq != pin) -+ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); -+ -+ switch (mp_bus_id_to_type[bus]) -+ { -+ case MP_BUS_ISA: /* ISA pin */ -+ case MP_BUS_EISA: -+ case MP_BUS_MCA: -+ case MP_BUS_NEC98: -+ { -+ irq = mp_irqs[idx].mpc_srcbusirq; -+ break; -+ } -+ case MP_BUS_PCI: /* PCI pin */ -+ { -+ /* -+ * PCI IRQs are mapped in order -+ */ -+ i = irq = 0; -+ while (i < apic) -+ irq += nr_ioapic_registers[i++]; -+ irq += pin; -+ -+ /* -+ * For MPS mode, so far only needed by ES7000 platform -+ */ -+ if (ioapic_renumber_irq) -+ irq = ioapic_renumber_irq(apic, irq); -+ -+ break; -+ } -+ default: -+ { -+ printk(KERN_ERR "unknown bus type %d.\n",bus); -+ irq = 0; -+ break; -+ } -+ } -+ -+ /* -+ * PCI IRQ command line redirection. Yes, limits are hardcoded. -+ */ -+ if ((pin >= 16) && (pin <= 23)) { -+ if (pirq_entries[pin-16] != -1) { -+ if (!pirq_entries[pin-16]) { -+ apic_printk(APIC_VERBOSE, KERN_DEBUG -+ "disabling PIRQ%d\n", pin-16); -+ } else { -+ irq = pirq_entries[pin-16]; -+ apic_printk(APIC_VERBOSE, KERN_DEBUG -+ "using PIRQ%d -> IRQ %d\n", -+ pin-16, irq); -+ } -+ } -+ } -+ return irq; -+} -+ -+static inline int IO_APIC_irq_trigger(int irq) -+{ -+ int apic, idx, pin; -+ -+ for (apic = 0; apic < nr_ioapics; apic++) { -+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { -+ idx = find_irq_entry(apic,pin,mp_INT); -+ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin))) -+ return irq_trigger(idx); -+ } -+ } -+ /* -+ * nonexistent IRQs are edge default -+ */ -+ return 0; -+} -+ -+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. 
*/ -+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */ -+ -+int assign_irq_vector(int irq) -+{ -+ unsigned long flags; -+ int vector; -+ struct physdev_irq irq_op; -+ -+ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); -+ -+ if (irq < PIRQ_BASE || irq - PIRQ_BASE >= NR_PIRQS) -+ return -EINVAL; -+ -+ spin_lock_irqsave(&vector_lock, flags); -+ -+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) { -+ spin_unlock_irqrestore(&vector_lock, flags); -+ return IO_APIC_VECTOR(irq); -+ } -+ -+ irq_op.irq = irq; -+ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { -+ spin_unlock_irqrestore(&vector_lock, flags); -+ return -ENOSPC; -+ } -+ -+ vector = irq_op.vector; -+ vector_irq[vector] = irq; -+ if (irq != AUTO_ASSIGN) -+ IO_APIC_VECTOR(irq) = vector; -+ -+ spin_unlock_irqrestore(&vector_lock, flags); -+ -+ return vector; -+} -+ -+#ifndef CONFIG_XEN -+static struct hw_interrupt_type ioapic_level_type; -+static struct hw_interrupt_type ioapic_edge_type; -+ -+#define IOAPIC_AUTO -1 -+#define IOAPIC_EDGE 0 -+#define IOAPIC_LEVEL 1 -+ -+static void ioapic_register_intr(int irq, int vector, unsigned long trigger) -+{ -+ unsigned idx; -+ -+ idx = use_pci_vector() && !platform_legacy_irq(irq) ? 
vector : irq; -+ -+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || -+ trigger == IOAPIC_LEVEL) -+ irq_desc[idx].chip = &ioapic_level_type; -+ else -+ irq_desc[idx].chip = &ioapic_edge_type; -+ set_intr_gate(vector, interrupt[idx]); -+} -+#else -+#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq) -+#endif -+ -+static void __init setup_IO_APIC_irqs(void) -+{ -+ struct IO_APIC_route_entry entry; -+ int apic, pin, idx, irq, first_notcon = 1, vector; -+ unsigned long flags; -+ -+ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); -+ -+ for (apic = 0; apic < nr_ioapics; apic++) { -+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { -+ -+ /* -+ * add it to the IO-APIC irq-routing table: -+ */ -+ memset(&entry,0,sizeof(entry)); -+ -+ entry.delivery_mode = INT_DELIVERY_MODE; -+ entry.dest_mode = INT_DEST_MODE; -+ entry.mask = 0; /* enable IRQ */ -+ entry.dest.logical.logical_dest = -+ cpu_mask_to_apicid(TARGET_CPUS); -+ -+ idx = find_irq_entry(apic,pin,mp_INT); -+ if (idx == -1) { -+ if (first_notcon) { -+ apic_printk(APIC_VERBOSE, KERN_DEBUG -+ " IO-APIC (apicid-pin) %d-%d", -+ mp_ioapics[apic].mpc_apicid, -+ pin); -+ first_notcon = 0; -+ } else -+ apic_printk(APIC_VERBOSE, ", %d-%d", -+ mp_ioapics[apic].mpc_apicid, pin); -+ continue; -+ } -+ -+ entry.trigger = irq_trigger(idx); -+ entry.polarity = irq_polarity(idx); -+ -+ if (irq_trigger(idx)) { -+ entry.trigger = 1; -+ entry.mask = 1; -+ } -+ -+ irq = pin_2_irq(idx, apic, pin); -+ /* -+ * skip adding the timer int on secondary nodes, which causes -+ * a small but painful rift in the time-space continuum -+ */ -+ if (multi_timer_check(apic, irq)) -+ continue; -+ else -+ add_pin_to_irq(irq, apic, pin); -+ -+ if (/*!apic &&*/ !IO_APIC_IRQ(irq)) -+ continue; -+ -+ if (IO_APIC_IRQ(irq)) { -+ vector = assign_irq_vector(irq); -+ entry.vector = vector; -+ ioapic_register_intr(irq, vector, IOAPIC_AUTO); -+ -+ if (!apic && (irq < 16)) -+ disable_8259A_irq(irq); -+ } -+ 
spin_lock_irqsave(&ioapic_lock, flags); -+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); -+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); -+ set_native_irq_info(irq, TARGET_CPUS); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ } -+ } -+ -+ if (!first_notcon) -+ apic_printk(APIC_VERBOSE, " not connected.\n"); -+} -+ -+/* -+ * Set up the 8259A-master output pin: -+ */ -+#ifndef CONFIG_XEN -+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector) -+{ -+ struct IO_APIC_route_entry entry; -+ unsigned long flags; -+ -+ memset(&entry,0,sizeof(entry)); -+ -+ disable_8259A_irq(0); -+ -+ /* mask LVT0 */ -+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); -+ -+ /* -+ * We use logical delivery to get the timer IRQ -+ * to the first CPU. -+ */ -+ entry.dest_mode = INT_DEST_MODE; -+ entry.mask = 0; /* unmask IRQ now */ -+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); -+ entry.delivery_mode = INT_DELIVERY_MODE; -+ entry.polarity = 0; -+ entry.trigger = 0; -+ entry.vector = vector; -+ -+ /* -+ * The timer IRQ doesn't have to know that behind the -+ * scene we have a 8259A-master in AEOI mode ... 
-+ */ -+ irq_desc[0].chip = &ioapic_edge_type; -+ -+ /* -+ * Add it to the IO-APIC irq-routing table: -+ */ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); -+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ enable_8259A_irq(0); -+} -+ -+static inline void UNEXPECTED_IO_APIC(void) -+{ -+} -+ -+void __init print_IO_APIC(void) -+{ -+ int apic, i; -+ union IO_APIC_reg_00 reg_00; -+ union IO_APIC_reg_01 reg_01; -+ union IO_APIC_reg_02 reg_02; -+ union IO_APIC_reg_03 reg_03; -+ unsigned long flags; -+ -+ if (apic_verbosity == APIC_QUIET) -+ return; -+ -+ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); -+ for (i = 0; i < nr_ioapics; i++) -+ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", -+ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]); -+ -+ /* -+ * We are a bit conservative about what we expect. We have to -+ * know about every hardware change ASAP. -+ */ -+ printk(KERN_INFO "testing the IO APIC.......................\n"); -+ -+ for (apic = 0; apic < nr_ioapics; apic++) { -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ reg_00.raw = io_apic_read(apic, 0); -+ reg_01.raw = io_apic_read(apic, 1); -+ if (reg_01.bits.version >= 0x10) -+ reg_02.raw = io_apic_read(apic, 2); -+ if (reg_01.bits.version >= 0x20) -+ reg_03.raw = io_apic_read(apic, 3); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid); -+ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); -+ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); -+ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); -+ printk(KERN_DEBUG "....... 
: LTS : %X\n", reg_00.bits.LTS); -+ if (reg_00.bits.ID >= get_physical_broadcast()) -+ UNEXPECTED_IO_APIC(); -+ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2) -+ UNEXPECTED_IO_APIC(); -+ -+ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw); -+ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); -+ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */ -+ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */ -+ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */ -+ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */ -+ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */ -+ (reg_01.bits.entries != 0x2E) && -+ (reg_01.bits.entries != 0x3F) -+ ) -+ UNEXPECTED_IO_APIC(); -+ -+ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); -+ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); -+ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */ -+ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */ -+ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */ -+ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */ -+ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */ -+ ) -+ UNEXPECTED_IO_APIC(); -+ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2) -+ UNEXPECTED_IO_APIC(); -+ -+ /* -+ * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, -+ * but the value of reg_02 is read as the previous read register -+ * value, so ignore it if reg_02 == reg_01. -+ */ -+ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { -+ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); -+ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); -+ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2) -+ UNEXPECTED_IO_APIC(); -+ } -+ -+ /* -+ * Some Intel chipsets with IO APIC VERSION of 0x2? 
don't have reg_02 -+ * or reg_03, but the value of reg_0[23] is read as the previous read -+ * register value, so ignore it if reg_03 == reg_0[12]. -+ */ -+ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && -+ reg_03.raw != reg_01.raw) { -+ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); -+ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); -+ if (reg_03.bits.__reserved_1) -+ UNEXPECTED_IO_APIC(); -+ } -+ -+ printk(KERN_DEBUG ".... IRQ redirection table:\n"); -+ -+ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol" -+ " Stat Dest Deli Vect: \n"); -+ -+ for (i = 0; i <= reg_01.bits.entries; i++) { -+ struct IO_APIC_route_entry entry; -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2); -+ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ printk(KERN_DEBUG " %02x %03X %02X ", -+ i, -+ entry.dest.logical.logical_dest, -+ entry.dest.physical.physical_dest -+ ); -+ -+ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", -+ entry.mask, -+ entry.trigger, -+ entry.irr, -+ entry.polarity, -+ entry.delivery_status, -+ entry.dest_mode, -+ entry.delivery_mode, -+ entry.vector -+ ); -+ } -+ } -+ if (use_pci_vector()) -+ printk(KERN_INFO "Using vector-based indexing\n"); -+ printk(KERN_DEBUG "IRQ to pin mappings:\n"); -+ for (i = 0; i < NR_IRQS; i++) { -+ struct irq_pin_list *entry = irq_2_pin + i; -+ if (entry->pin < 0) -+ continue; -+ if (use_pci_vector() && !platform_legacy_irq(i)) -+ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i)); -+ else -+ printk(KERN_DEBUG "IRQ%d ", i); -+ for (;;) { -+ printk("-> %d:%d", entry->apic, entry->pin); -+ if (!entry->next) -+ break; -+ entry = irq_2_pin + entry->next; -+ } -+ printk("\n"); -+ } -+ -+ printk(KERN_INFO ".................................... 
done.\n"); -+ -+ return; -+} -+ -+static void print_APIC_bitfield (int base) -+{ -+ unsigned int v; -+ int i, j; -+ -+ if (apic_verbosity == APIC_QUIET) -+ return; -+ -+ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG); -+ for (i = 0; i < 8; i++) { -+ v = apic_read(base + i*0x10); -+ for (j = 0; j < 32; j++) { -+ if (v & (1< 3) /* Due to the Pentium erratum 3AP. */ -+ apic_write(APIC_ESR, 0); -+ v = apic_read(APIC_ESR); -+ printk(KERN_DEBUG "... APIC ESR: %08x\n", v); -+ } -+ -+ v = apic_read(APIC_ICR); -+ printk(KERN_DEBUG "... APIC ICR: %08x\n", v); -+ v = apic_read(APIC_ICR2); -+ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v); -+ -+ v = apic_read(APIC_LVTT); -+ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); -+ -+ if (maxlvt > 3) { /* PC is LVT#4. */ -+ v = apic_read(APIC_LVTPC); -+ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v); -+ } -+ v = apic_read(APIC_LVT0); -+ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v); -+ v = apic_read(APIC_LVT1); -+ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v); -+ -+ if (maxlvt > 2) { /* ERR is LVT#3. */ -+ v = apic_read(APIC_LVTERR); -+ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v); -+ } -+ -+ v = apic_read(APIC_TMICT); -+ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v); -+ v = apic_read(APIC_TMCCT); -+ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); -+ v = apic_read(APIC_TDCR); -+ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); -+ printk("\n"); -+} -+ -+void print_all_local_APICs (void) -+{ -+ on_each_cpu(print_local_APIC, NULL, 1, 1); -+} -+ -+void /*__init*/ print_PIC(void) -+{ -+ unsigned int v; -+ unsigned long flags; -+ -+ if (apic_verbosity == APIC_QUIET) -+ return; -+ -+ printk(KERN_DEBUG "\nprinting PIC contents\n"); -+ -+ spin_lock_irqsave(&i8259A_lock, flags); -+ -+ v = inb(0xa1) << 8 | inb(0x21); -+ printk(KERN_DEBUG "... PIC IMR: %04x\n", v); -+ -+ v = inb(0xa0) << 8 | inb(0x20); -+ printk(KERN_DEBUG "... 
PIC IRR: %04x\n", v); -+ -+ outb(0x0b,0xa0); -+ outb(0x0b,0x20); -+ v = inb(0xa0) << 8 | inb(0x20); -+ outb(0x0a,0xa0); -+ outb(0x0a,0x20); -+ -+ spin_unlock_irqrestore(&i8259A_lock, flags); -+ -+ printk(KERN_DEBUG "... PIC ISR: %04x\n", v); -+ -+ v = inb(0x4d1) << 8 | inb(0x4d0); -+ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); -+} -+#endif /* !CONFIG_XEN */ -+ -+static void __init enable_IO_APIC(void) -+{ -+ union IO_APIC_reg_01 reg_01; -+#ifndef CONFIG_XEN -+ int i8259_apic, i8259_pin; -+#endif -+ int i, apic; -+ unsigned long flags; -+ -+ for (i = 0; i < PIN_MAP_SIZE; i++) { -+ irq_2_pin[i].pin = -1; -+ irq_2_pin[i].next = 0; -+ } -+ if (!pirqs_enabled) -+ for (i = 0; i < MAX_PIRQS; i++) -+ pirq_entries[i] = -1; -+ -+ /* -+ * The number of IO-APIC IRQ registers (== #pins): -+ */ -+ for (apic = 0; apic < nr_ioapics; apic++) { -+ spin_lock_irqsave(&ioapic_lock, flags); -+ reg_01.raw = io_apic_read(apic, 1); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ nr_ioapic_registers[apic] = reg_01.bits.entries+1; -+ } -+#ifndef CONFIG_XEN -+ for(apic = 0; apic < nr_ioapics; apic++) { -+ int pin; -+ /* See if any of the pins is in ExtINT mode */ -+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { -+ struct IO_APIC_route_entry entry; -+ spin_lock_irqsave(&ioapic_lock, flags); -+ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); -+ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ -+ /* If the interrupt line is enabled and in ExtInt mode -+ * I have found the pin where the i8259 is connected. 
-+ */ -+ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { -+ ioapic_i8259.apic = apic; -+ ioapic_i8259.pin = pin; -+ goto found_i8259; -+ } -+ } -+ } -+ found_i8259: -+ /* Look to see what if the MP table has reported the ExtINT */ -+ /* If we could not find the appropriate pin by looking at the ioapic -+ * the i8259 probably is not connected the ioapic but give the -+ * mptable a chance anyway. -+ */ -+ i8259_pin = find_isa_irq_pin(0, mp_ExtINT); -+ i8259_apic = find_isa_irq_apic(0, mp_ExtINT); -+ /* Trust the MP table if nothing is setup in the hardware */ -+ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { -+ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); -+ ioapic_i8259.pin = i8259_pin; -+ ioapic_i8259.apic = i8259_apic; -+ } -+ /* Complain if the MP table and the hardware disagree */ -+ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && -+ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) -+ { -+ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); -+ } -+#endif -+ -+ /* -+ * Do not trust the IO-APIC being empty at bootup -+ */ -+ clear_IO_APIC(); -+} -+ -+/* -+ * Not an __init, needed by the reboot code -+ */ -+void disable_IO_APIC(void) -+{ -+ /* -+ * Clear the IO-APIC before rebooting: -+ */ -+ clear_IO_APIC(); -+ -+#ifndef CONFIG_XEN -+ /* -+ * If the i8259 is routed through an IOAPIC -+ * Put that IOAPIC in virtual wire mode -+ * so legacy interrupts can be delivered. 
-+ */ -+ if (ioapic_i8259.pin != -1) { -+ struct IO_APIC_route_entry entry; -+ unsigned long flags; -+ -+ memset(&entry, 0, sizeof(entry)); -+ entry.mask = 0; /* Enabled */ -+ entry.trigger = 0; /* Edge */ -+ entry.irr = 0; -+ entry.polarity = 0; /* High */ -+ entry.delivery_status = 0; -+ entry.dest_mode = 0; /* Physical */ -+ entry.delivery_mode = dest_ExtINT; /* ExtInt */ -+ entry.vector = 0; -+ entry.dest.physical.physical_dest = -+ GET_APIC_ID(apic_read(APIC_ID)); -+ -+ /* -+ * Add it to the IO-APIC irq-routing table: -+ */ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin, -+ *(((int *)&entry)+1)); -+ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin, -+ *(((int *)&entry)+0)); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ } -+ disconnect_bsp_APIC(ioapic_i8259.pin != -1); -+#endif -+} -+ -+/* -+ * function to set the IO-APIC physical IDs based on the -+ * values stored in the MPC table. -+ * -+ * by Matt Domsch Tue Dec 21 12:25:05 CST 1999 -+ */ -+ -+#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ) -+static void __init setup_ioapic_ids_from_mpc(void) -+{ -+ union IO_APIC_reg_00 reg_00; -+ physid_mask_t phys_id_present_map; -+ int apic; -+ int i; -+ unsigned char old_id; -+ unsigned long flags; -+ -+ /* -+ * Don't check I/O APIC IDs for xAPIC systems. They have -+ * no meaning without the serial APIC bus. -+ */ -+ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) -+ || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) -+ return; -+ /* -+ * This is broken; anything with a real cpu count has to -+ * circumvent this idiocy regardless. -+ */ -+ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map); -+ -+ /* -+ * Set the IOAPIC ID to the value stored in the MPC table. 
-+ */ -+ for (apic = 0; apic < nr_ioapics; apic++) { -+ -+ /* Read the register 0 value */ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ reg_00.raw = io_apic_read(apic, 0); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ old_id = mp_ioapics[apic].mpc_apicid; -+ -+ if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) { -+ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", -+ apic, mp_ioapics[apic].mpc_apicid); -+ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", -+ reg_00.bits.ID); -+ mp_ioapics[apic].mpc_apicid = reg_00.bits.ID; -+ } -+ -+ /* -+ * Sanity check, is the ID really free? Every APIC in a -+ * system must have a unique ID or we get lots of nice -+ * 'stuck on smp_invalidate_needed IPI wait' messages. -+ */ -+ if (check_apicid_used(phys_id_present_map, -+ mp_ioapics[apic].mpc_apicid)) { -+ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", -+ apic, mp_ioapics[apic].mpc_apicid); -+ for (i = 0; i < get_physical_broadcast(); i++) -+ if (!physid_isset(i, phys_id_present_map)) -+ break; -+ if (i >= get_physical_broadcast()) -+ panic("Max APIC ID exceeded!\n"); -+ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", -+ i); -+ physid_set(i, phys_id_present_map); -+ mp_ioapics[apic].mpc_apicid = i; -+ } else { -+ physid_mask_t tmp; -+ tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid); -+ apic_printk(APIC_VERBOSE, "Setting %d in the " -+ "phys_id_present_map\n", -+ mp_ioapics[apic].mpc_apicid); -+ physids_or(phys_id_present_map, phys_id_present_map, tmp); -+ } -+ -+ -+ /* -+ * We need to adjust the IRQ routing table -+ * if the ID changed. -+ */ -+ if (old_id != mp_ioapics[apic].mpc_apicid) -+ for (i = 0; i < mp_irq_entries; i++) -+ if (mp_irqs[i].mpc_dstapic == old_id) -+ mp_irqs[i].mpc_dstapic -+ = mp_ioapics[apic].mpc_apicid; -+ -+ /* -+ * Read the right value from the MPC table and -+ * write it into the ID register. 
-+ */ -+ apic_printk(APIC_VERBOSE, KERN_INFO -+ "...changing IO-APIC physical APIC ID to %d ...", -+ mp_ioapics[apic].mpc_apicid); -+ -+ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid; -+ spin_lock_irqsave(&ioapic_lock, flags); -+ io_apic_write(apic, 0, reg_00.raw); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ /* -+ * Sanity check -+ */ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ reg_00.raw = io_apic_read(apic, 0); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid) -+ printk("could not set ID!\n"); -+ else -+ apic_printk(APIC_VERBOSE, " ok.\n"); -+ } -+} -+#else -+static void __init setup_ioapic_ids_from_mpc(void) { } -+#endif -+ -+#ifndef CONFIG_XEN -+/* -+ * There is a nasty bug in some older SMP boards, their mptable lies -+ * about the timer IRQ. We do the following to work around the situation: -+ * -+ * - timer IRQ defaults to IO-APIC IRQ -+ * - if this function detects that timer IRQs are defunct, then we fall -+ * back to ISA timer IRQs -+ */ -+static int __init timer_irq_works(void) -+{ -+ unsigned long t1 = jiffies; -+ -+ local_irq_enable(); -+ /* Let ten ticks pass... */ -+ mdelay((10 * 1000) / HZ); -+ -+ /* -+ * Expect a few ticks at least, to be sure some possible -+ * glue logic does not lock up after one or two first -+ * ticks in a non-ExtINT mode. Also the local APIC -+ * might have cached one ExtINT interrupt. Finally, at -+ * least one tick may be lost due to delays. -+ */ -+ if (jiffies - t1 > 4) -+ return 1; -+ -+ return 0; -+} -+ -+/* -+ * In the SMP+IOAPIC case it might happen that there are an unspecified -+ * number of pending IRQ events unhandled. These cases are very rare, -+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much -+ * better to do it this way as thus we do not have to be aware of -+ * 'pending' interrupts in the IRQ path, except at this point. 
-+ */ -+/* -+ * Edge triggered needs to resend any interrupt -+ * that was delayed but this is now handled in the device -+ * independent code. -+ */ -+ -+/* -+ * Starting up a edge-triggered IO-APIC interrupt is -+ * nasty - we need to make sure that we get the edge. -+ * If it is already asserted for some reason, we need -+ * return 1 to indicate that is was pending. -+ * -+ * This is not complete - we should be able to fake -+ * an edge even if it isn't on the 8259A... -+ */ -+static unsigned int startup_edge_ioapic_irq(unsigned int irq) -+{ -+ int was_pending = 0; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ if (irq < 16) { -+ disable_8259A_irq(irq); -+ if (i8259A_irq_pending(irq)) -+ was_pending = 1; -+ } -+ __unmask_IO_APIC_irq(irq); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ return was_pending; -+} -+ -+/* -+ * Once we have recorded IRQ_PENDING already, we can mask the -+ * interrupt for real. This prevents IRQ storms from unhandled -+ * devices. -+ */ -+static void ack_edge_ioapic_irq(unsigned int irq) -+{ -+ move_irq(irq); -+ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED)) -+ == (IRQ_PENDING | IRQ_DISABLED)) -+ mask_IO_APIC_irq(irq); -+ ack_APIC_irq(); -+} -+ -+/* -+ * Level triggered interrupts can just be masked, -+ * and shutting down and starting up the interrupt -+ * is the same as enabling and disabling them -- except -+ * with a startup need to return a "was pending" value. -+ * -+ * Level triggered interrupts are special because we -+ * do not touch any IO-APIC register while handling -+ * them. We ack the APIC in the end-IRQ handler, not -+ * in the start-IRQ-handler. Protection against reentrance -+ * from the same interrupt is still provided, both by the -+ * generic IRQ layer and by the fact that an unacked local -+ * APIC does not accept IRQs. 
-+ */ -+static unsigned int startup_level_ioapic_irq (unsigned int irq) -+{ -+ unmask_IO_APIC_irq(irq); -+ -+ return 0; /* don't check for pending */ -+} -+ -+static void end_level_ioapic_irq (unsigned int irq) -+{ -+ unsigned long v; -+ int i; -+ -+ move_irq(irq); -+/* -+ * It appears there is an erratum which affects at least version 0x11 -+ * of I/O APIC (that's the 82093AA and cores integrated into various -+ * chipsets). Under certain conditions a level-triggered interrupt is -+ * erroneously delivered as edge-triggered one but the respective IRR -+ * bit gets set nevertheless. As a result the I/O unit expects an EOI -+ * message but it will never arrive and further interrupts are blocked -+ * from the source. The exact reason is so far unknown, but the -+ * phenomenon was observed when two consecutive interrupt requests -+ * from a given source get delivered to the same CPU and the source is -+ * temporarily disabled in between. -+ * -+ * A workaround is to simulate an EOI message manually. We achieve it -+ * by setting the trigger mode to edge and then to level when the edge -+ * trigger mode gets detected in the TMR of a local APIC for a -+ * level-triggered interrupt. We mask the source for the time of the -+ * operation to prevent an edge-triggered interrupt escaping meanwhile. -+ * The idea is from Manfred Spraul. 
--macro -+ */ -+ i = IO_APIC_VECTOR(irq); -+ -+ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); -+ -+ ack_APIC_irq(); -+ -+ if (!(v & (1 << (i & 0x1f)))) { -+ atomic_inc(&irq_mis_count); -+ spin_lock(&ioapic_lock); -+ __mask_and_edge_IO_APIC_irq(irq); -+ __unmask_and_level_IO_APIC_irq(irq); -+ spin_unlock(&ioapic_lock); -+ } -+} -+ -+#ifdef CONFIG_PCI_MSI -+static unsigned int startup_edge_ioapic_vector(unsigned int vector) -+{ -+ int irq = vector_to_irq(vector); -+ -+ return startup_edge_ioapic_irq(irq); -+} -+ -+static void ack_edge_ioapic_vector(unsigned int vector) -+{ -+ int irq = vector_to_irq(vector); -+ -+ move_native_irq(vector); -+ ack_edge_ioapic_irq(irq); -+} -+ -+static unsigned int startup_level_ioapic_vector (unsigned int vector) -+{ -+ int irq = vector_to_irq(vector); -+ -+ return startup_level_ioapic_irq (irq); -+} -+ -+static void end_level_ioapic_vector (unsigned int vector) -+{ -+ int irq = vector_to_irq(vector); -+ -+ move_native_irq(vector); -+ end_level_ioapic_irq(irq); -+} -+ -+static void mask_IO_APIC_vector (unsigned int vector) -+{ -+ int irq = vector_to_irq(vector); -+ -+ mask_IO_APIC_irq(irq); -+} -+ -+static void unmask_IO_APIC_vector (unsigned int vector) -+{ -+ int irq = vector_to_irq(vector); -+ -+ unmask_IO_APIC_irq(irq); -+} -+ -+#ifdef CONFIG_SMP -+static void set_ioapic_affinity_vector (unsigned int vector, -+ cpumask_t cpu_mask) -+{ -+ int irq = vector_to_irq(vector); -+ -+ set_native_irq_info(vector, cpu_mask); -+ set_ioapic_affinity_irq(irq, cpu_mask); -+} -+#endif -+#endif -+ -+static int ioapic_retrigger(unsigned int irq) -+{ -+ send_IPI_self(IO_APIC_VECTOR(irq)); -+ -+ return 1; -+} -+ -+/* -+ * Level and edge triggered IO-APIC interrupts need different handling, -+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be -+ * handled with the level-triggered descriptor, but that one has slightly -+ * more overhead. 
Level-triggered interrupts cannot be handled with the -+ * edge-triggered handler, without risking IRQ storms and other ugly -+ * races. -+ */ -+static struct hw_interrupt_type ioapic_edge_type __read_mostly = { -+ .typename = "IO-APIC-edge", -+ .startup = startup_edge_ioapic, -+ .shutdown = shutdown_edge_ioapic, -+ .enable = enable_edge_ioapic, -+ .disable = disable_edge_ioapic, -+ .ack = ack_edge_ioapic, -+ .end = end_edge_ioapic, -+#ifdef CONFIG_SMP -+ .set_affinity = set_ioapic_affinity, -+#endif -+ .retrigger = ioapic_retrigger, -+}; -+ -+static struct hw_interrupt_type ioapic_level_type __read_mostly = { -+ .typename = "IO-APIC-level", -+ .startup = startup_level_ioapic, -+ .shutdown = shutdown_level_ioapic, -+ .enable = enable_level_ioapic, -+ .disable = disable_level_ioapic, -+ .ack = mask_and_ack_level_ioapic, -+ .end = end_level_ioapic, -+#ifdef CONFIG_SMP -+ .set_affinity = set_ioapic_affinity, -+#endif -+ .retrigger = ioapic_retrigger, -+}; -+#endif /* !CONFIG_XEN */ -+ -+static inline void init_IO_APIC_traps(void) -+{ -+ int irq; -+ -+ /* -+ * NOTE! The local APIC isn't very good at handling -+ * multiple interrupts at the same interrupt level. -+ * As the interrupt level is determined by taking the -+ * vector number and shifting that right by 4, we -+ * want to spread these out a bit so that they don't -+ * all fall in the same interrupt level. -+ * -+ * Also, we've got to be careful not to trash gate -+ * 0x80, because int 0x80 is hm, kind of importantish. ;) -+ */ -+ for (irq = 0; irq < NR_IRQS ; irq++) { -+ int tmp = irq; -+ if (use_pci_vector()) { -+ if (!platform_legacy_irq(tmp)) -+ if ((tmp = vector_to_irq(tmp)) == -1) -+ continue; -+ } -+ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) { -+ /* -+ * Hmm.. We don't have an entry for this, -+ * so default to an old-fashioned 8259 -+ * interrupt if we can.. -+ */ -+ if (irq < 16) -+ make_8259A_irq(irq); -+#ifndef CONFIG_XEN -+ else -+ /* Strange. Oh, well.. 
*/ -+ irq_desc[irq].chip = &no_irq_type; -+#endif -+ } -+ } -+} -+ -+#ifndef CONFIG_XEN -+static void enable_lapic_irq (unsigned int irq) -+{ -+ unsigned long v; -+ -+ v = apic_read(APIC_LVT0); -+ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED); -+} -+ -+static void disable_lapic_irq (unsigned int irq) -+{ -+ unsigned long v; -+ -+ v = apic_read(APIC_LVT0); -+ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED); -+} -+ -+static void ack_lapic_irq (unsigned int irq) -+{ -+ ack_APIC_irq(); -+} -+ -+static void end_lapic_irq (unsigned int i) { /* nothing */ } -+ -+static struct hw_interrupt_type lapic_irq_type __read_mostly = { -+ .typename = "local-APIC-edge", -+ .startup = NULL, /* startup_irq() not used for IRQ0 */ -+ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */ -+ .enable = enable_lapic_irq, -+ .disable = disable_lapic_irq, -+ .ack = ack_lapic_irq, -+ .end = end_lapic_irq -+}; -+ -+static void setup_nmi (void) -+{ -+ /* -+ * Dirty trick to enable the NMI watchdog ... -+ * We put the 8259A master into AEOI mode and -+ * unmask on all local APICs LVT0 as NMI. -+ * -+ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') -+ * is from Maciej W. Rozycki - so we do not have to EOI from -+ * the NMI handler or the timer interrupt. -+ */ -+ apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); -+ -+ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1); -+ -+ apic_printk(APIC_VERBOSE, " done.\n"); -+} -+ -+/* -+ * This looks a bit hackish but it's about the only one way of sending -+ * a few INTA cycles to 8259As and any associated glue logic. ICR does -+ * not support the ExtINT mode, unfortunately. We need to send these -+ * cycles as some i82489DX-based boards have glue logic that keeps the -+ * 8259A interrupt line asserted until INTA. 
--macro -+ */ -+static inline void unlock_ExtINT_logic(void) -+{ -+ int apic, pin, i; -+ struct IO_APIC_route_entry entry0, entry1; -+ unsigned char save_control, save_freq_select; -+ unsigned long flags; -+ -+ pin = find_isa_irq_pin(8, mp_INT); -+ apic = find_isa_irq_apic(8, mp_INT); -+ if (pin == -1) -+ return; -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin); -+ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ clear_IO_APIC_pin(apic, pin); -+ -+ memset(&entry1, 0, sizeof(entry1)); -+ -+ entry1.dest_mode = 0; /* physical delivery */ -+ entry1.mask = 0; /* unmask IRQ now */ -+ entry1.dest.physical.physical_dest = hard_smp_processor_id(); -+ entry1.delivery_mode = dest_ExtINT; -+ entry1.polarity = entry0.polarity; -+ entry1.trigger = 0; -+ entry1.vector = 0; -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1)); -+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0)); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ save_control = CMOS_READ(RTC_CONTROL); -+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); -+ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, -+ RTC_FREQ_SELECT); -+ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); -+ -+ i = 100; -+ while (i-- > 0) { -+ mdelay(10); -+ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) -+ i -= 10; -+ } -+ -+ CMOS_WRITE(save_control, RTC_CONTROL); -+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); -+ clear_IO_APIC_pin(apic, pin); -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1)); -+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0)); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+} -+ -+int timer_uses_ioapic_pin_0; -+ -+/* -+ * This code may look a bit paranoid, but it's supposed to cooperate with -+ * a wide range of boards and BIOS bugs. 
Fortunately only the timer IRQ -+ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast -+ * fanatically on his truly buggy board. -+ */ -+static inline void check_timer(void) -+{ -+ int apic1, pin1, apic2, pin2; -+ int vector; -+ -+ /* -+ * get/set the timer IRQ vector: -+ */ -+ disable_8259A_irq(0); -+ vector = assign_irq_vector(0); -+ set_intr_gate(vector, interrupt[0]); -+ -+ /* -+ * Subtle, code in do_timer_interrupt() expects an AEOI -+ * mode for the 8259A whenever interrupts are routed -+ * through I/O APICs. Also IRQ0 has to be enabled in -+ * the 8259A which implies the virtual wire has to be -+ * disabled in the local APIC. -+ */ -+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); -+ init_8259A(1); -+ timer_ack = 1; -+ if (timer_over_8254 > 0) -+ enable_8259A_irq(0); -+ -+ pin1 = find_isa_irq_pin(0, mp_INT); -+ apic1 = find_isa_irq_apic(0, mp_INT); -+ pin2 = ioapic_i8259.pin; -+ apic2 = ioapic_i8259.apic; -+ -+ if (pin1 == 0) -+ timer_uses_ioapic_pin_0 = 1; -+ -+ printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", -+ vector, apic1, pin1, apic2, pin2); -+ -+ if (pin1 != -1) { -+ /* -+ * Ok, does IRQ0 through the IOAPIC work? -+ */ -+ unmask_IO_APIC_irq(0); -+ if (timer_irq_works()) { -+ if (nmi_watchdog == NMI_IO_APIC) { -+ disable_8259A_irq(0); -+ setup_nmi(); -+ enable_8259A_irq(0); -+ } -+ if (disable_timer_pin_1 > 0) -+ clear_IO_APIC_pin(0, pin1); -+ return; -+ } -+ clear_IO_APIC_pin(apic1, pin1); -+ printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to " -+ "IO-APIC\n"); -+ } -+ -+ printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... "); -+ if (pin2 != -1) { -+ printk("\n..... 
(found pin %d) ...", pin2); -+ /* -+ * legacy devices should be connected to IO APIC #0 -+ */ -+ setup_ExtINT_IRQ0_pin(apic2, pin2, vector); -+ if (timer_irq_works()) { -+ printk("works.\n"); -+ if (pin1 != -1) -+ replace_pin_at_irq(0, apic1, pin1, apic2, pin2); -+ else -+ add_pin_to_irq(0, apic2, pin2); -+ if (nmi_watchdog == NMI_IO_APIC) { -+ setup_nmi(); -+ } -+ return; -+ } -+ /* -+ * Cleanup, just in case ... -+ */ -+ clear_IO_APIC_pin(apic2, pin2); -+ } -+ printk(" failed.\n"); -+ -+ if (nmi_watchdog == NMI_IO_APIC) { -+ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n"); -+ nmi_watchdog = 0; -+ } -+ -+ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ..."); -+ -+ disable_8259A_irq(0); -+ irq_desc[0].chip = &lapic_irq_type; -+ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ -+ enable_8259A_irq(0); -+ -+ if (timer_irq_works()) { -+ printk(" works.\n"); -+ return; -+ } -+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector); -+ printk(" failed.\n"); -+ -+ printk(KERN_INFO "...trying to set up timer as ExtINT IRQ..."); -+ -+ timer_ack = 0; -+ init_8259A(0); -+ make_8259A_irq(0); -+ apic_write_around(APIC_LVT0, APIC_DM_EXTINT); -+ -+ unlock_ExtINT_logic(); -+ -+ if (timer_irq_works()) { -+ printk(" works.\n"); -+ return; -+ } -+ printk(" failed :(.\n"); -+ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " -+ "report. Then try booting with the 'noapic' option"); -+} -+#else -+int timer_uses_ioapic_pin_0 = 0; -+#define check_timer() ((void)0) -+#endif -+ -+/* -+ * -+ * IRQ's that are handled by the PIC in the MPS IOAPIC case. -+ * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ. -+ * Linux doesn't really care, as it's not actually used -+ * for any interrupt handling anyway. 
-+ */ -+#define PIC_IRQS (1 << PIC_CASCADE_IR) -+ -+void __init setup_IO_APIC(void) -+{ -+ enable_IO_APIC(); -+ -+ if (acpi_ioapic) -+ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */ -+ else -+ io_apic_irqs = ~PIC_IRQS; -+ -+ printk("ENABLING IO-APIC IRQs\n"); -+ -+ /* -+ * Set up IO-APIC IRQ routing. -+ */ -+ if (!acpi_ioapic) -+ setup_ioapic_ids_from_mpc(); -+#ifndef CONFIG_XEN -+ sync_Arb_IDs(); -+#endif -+ setup_IO_APIC_irqs(); -+ init_IO_APIC_traps(); -+ check_timer(); -+ if (!acpi_ioapic) -+ print_IO_APIC(); -+} -+ -+static int __init setup_disable_8254_timer(char *s) -+{ -+ timer_over_8254 = -1; -+ return 1; -+} -+static int __init setup_enable_8254_timer(char *s) -+{ -+ timer_over_8254 = 2; -+ return 1; -+} -+ -+__setup("disable_8254_timer", setup_disable_8254_timer); -+__setup("enable_8254_timer", setup_enable_8254_timer); -+ -+/* -+ * Called after all the initialization is done. If we didnt find any -+ * APIC bugs then we can allow the modify fast path -+ */ -+ -+static int __init io_apic_bug_finalize(void) -+{ -+ if(sis_apic_bug == -1) -+ sis_apic_bug = 0; -+ if (is_initial_xendomain()) { -+ struct xen_platform_op op = { .cmd = XENPF_platform_quirk }; -+ op.u.platform_quirk.quirk_id = sis_apic_bug ? 
-+ QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL; -+ VOID(HYPERVISOR_platform_op(&op)); -+ } -+ return 0; -+} -+ -+late_initcall(io_apic_bug_finalize); -+ -+#ifndef CONFIG_XEN -+ -+struct sysfs_ioapic_data { -+ struct sys_device dev; -+ struct IO_APIC_route_entry entry[0]; -+}; -+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS]; -+ -+static int ioapic_suspend(struct sys_device *dev, pm_message_t state) -+{ -+ struct IO_APIC_route_entry *entry; -+ struct sysfs_ioapic_data *data; -+ unsigned long flags; -+ int i; -+ -+ data = container_of(dev, struct sysfs_ioapic_data, dev); -+ entry = data->entry; -+ spin_lock_irqsave(&ioapic_lock, flags); -+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) { -+ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i); -+ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i); -+ } -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ return 0; -+} -+ -+static int ioapic_resume(struct sys_device *dev) -+{ -+ struct IO_APIC_route_entry *entry; -+ struct sysfs_ioapic_data *data; -+ unsigned long flags; -+ union IO_APIC_reg_00 reg_00; -+ int i; -+ -+ data = container_of(dev, struct sysfs_ioapic_data, dev); -+ entry = data->entry; -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ reg_00.raw = io_apic_read(dev->id, 0); -+ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) { -+ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid; -+ io_apic_write(dev->id, 0, reg_00.raw); -+ } -+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) { -+ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1)); -+ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0)); -+ } -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ return 0; -+} -+ -+static struct sysdev_class ioapic_sysdev_class = { -+ set_kset_name("ioapic"), -+ .suspend = ioapic_suspend, -+ .resume = ioapic_resume, -+}; -+ -+static int __init ioapic_init_sysfs(void) -+{ -+ struct sys_device * dev; -+ int i, size, error = 0; -+ -+ error = 
sysdev_class_register(&ioapic_sysdev_class); -+ if (error) -+ return error; -+ -+ for (i = 0; i < nr_ioapics; i++ ) { -+ size = sizeof(struct sys_device) + nr_ioapic_registers[i] -+ * sizeof(struct IO_APIC_route_entry); -+ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL); -+ if (!mp_ioapic_data[i]) { -+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); -+ continue; -+ } -+ memset(mp_ioapic_data[i], 0, size); -+ dev = &mp_ioapic_data[i]->dev; -+ dev->id = i; -+ dev->cls = &ioapic_sysdev_class; -+ error = sysdev_register(dev); -+ if (error) { -+ kfree(mp_ioapic_data[i]); -+ mp_ioapic_data[i] = NULL; -+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); -+ continue; -+ } -+ } -+ -+ return 0; -+} -+ -+device_initcall(ioapic_init_sysfs); -+ -+#endif /* CONFIG_XEN */ -+ -+/* -------------------------------------------------------------------------- -+ ACPI-based IOAPIC Configuration -+ -------------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_ACPI -+ -+int __init io_apic_get_unique_id (int ioapic, int apic_id) -+{ -+#ifndef CONFIG_XEN -+ union IO_APIC_reg_00 reg_00; -+ static physid_mask_t apic_id_map = PHYSID_MASK_NONE; -+ physid_mask_t tmp; -+ unsigned long flags; -+ int i = 0; -+ -+ /* -+ * The P4 platform supports up to 256 APIC IDs on two separate APIC -+ * buses (one for LAPICs, one for IOAPICs), where predecessors only -+ * supports up to 16 on one shared APIC bus. -+ * -+ * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full -+ * advantage of new APIC bus architecture. 
-+ */ -+ -+ if (physids_empty(apic_id_map)) -+ apic_id_map = ioapic_phys_id_map(phys_cpu_present_map); -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ reg_00.raw = io_apic_read(ioapic, 0); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ if (apic_id >= get_physical_broadcast()) { -+ printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " -+ "%d\n", ioapic, apic_id, reg_00.bits.ID); -+ apic_id = reg_00.bits.ID; -+ } -+ -+ /* -+ * Every APIC in a system must have a unique ID or we get lots of nice -+ * 'stuck on smp_invalidate_needed IPI wait' messages. -+ */ -+ if (check_apicid_used(apic_id_map, apic_id)) { -+ -+ for (i = 0; i < get_physical_broadcast(); i++) { -+ if (!check_apicid_used(apic_id_map, i)) -+ break; -+ } -+ -+ if (i == get_physical_broadcast()) -+ panic("Max apic_id exceeded!\n"); -+ -+ printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " -+ "trying %d\n", ioapic, apic_id, i); -+ -+ apic_id = i; -+ } -+ -+ tmp = apicid_to_cpu_present(apic_id); -+ physids_or(apic_id_map, apic_id_map, tmp); -+ -+ if (reg_00.bits.ID != apic_id) { -+ reg_00.bits.ID = apic_id; -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ io_apic_write(ioapic, 0, reg_00.raw); -+ reg_00.raw = io_apic_read(ioapic, 0); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ /* Sanity check */ -+ if (reg_00.bits.ID != apic_id) { -+ printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); -+ return -1; -+ } -+ } -+ -+ apic_printk(APIC_VERBOSE, KERN_INFO -+ "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); -+#endif /* !CONFIG_XEN */ -+ -+ return apic_id; -+} -+ -+ -+int __init io_apic_get_version (int ioapic) -+{ -+ union IO_APIC_reg_01 reg_01; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ reg_01.raw = io_apic_read(ioapic, 1); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ return reg_01.bits.version; -+} -+ -+ -+int __init io_apic_get_redir_entries (int ioapic) -+{ -+ union IO_APIC_reg_01 reg_01; -+ unsigned long flags; -+ -+ 
spin_lock_irqsave(&ioapic_lock, flags); -+ reg_01.raw = io_apic_read(ioapic, 1); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ return reg_01.bits.entries; -+} -+ -+ -+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low) -+{ -+ struct IO_APIC_route_entry entry; -+ unsigned long flags; -+ -+ if (!IO_APIC_IRQ(irq)) { -+ printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", -+ ioapic); -+ return -EINVAL; -+ } -+ -+ /* -+ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly. -+ * Note that we mask (disable) IRQs now -- these get enabled when the -+ * corresponding device driver registers for this IRQ. -+ */ -+ -+ memset(&entry,0,sizeof(entry)); -+ -+ entry.delivery_mode = INT_DELIVERY_MODE; -+ entry.dest_mode = INT_DEST_MODE; -+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); -+ entry.trigger = edge_level; -+ entry.polarity = active_high_low; -+ entry.mask = 1; -+ -+ /* -+ * IRQs < 16 are already in the irq_2_pin[] map -+ */ -+ if (irq >= 16) -+ add_pin_to_irq(irq, ioapic, pin); -+ -+ entry.vector = assign_irq_vector(irq); -+ -+ apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry " -+ "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic, -+ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq, -+ edge_level, active_high_low); -+ -+ ioapic_register_intr(irq, entry.vector, edge_level); -+ -+ if (!ioapic && (irq < 16)) -+ disable_8259A_irq(irq); -+ -+ spin_lock_irqsave(&ioapic_lock, flags); -+ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1)); -+ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0)); -+ set_native_irq_info(use_pci_vector() ? 
entry.vector : irq, TARGET_CPUS); -+ spin_unlock_irqrestore(&ioapic_lock, flags); -+ -+ return 0; -+} -+ -+#endif /* CONFIG_ACPI */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/ioport_32-xen.c 2008-01-28 12:24:19.000000000 +0100 -@@ -0,0 +1,123 @@ -+/* -+ * linux/arch/i386/kernel/ioport.c -+ * -+ * This contains the io-permission bitmap code - written by obz, with changes -+ * by Linus. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ -+static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value) -+{ -+ unsigned long mask; -+ unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG); -+ unsigned int low_index = base & (BITS_PER_LONG-1); -+ int length = low_index + extent; -+ -+ if (low_index != 0) { -+ mask = (~0UL << low_index); -+ if (length < BITS_PER_LONG) -+ mask &= ~(~0UL << length); -+ if (new_value) -+ *bitmap_base++ |= mask; -+ else -+ *bitmap_base++ &= ~mask; -+ length -= BITS_PER_LONG; -+ } -+ -+ mask = (new_value ? ~0UL : 0UL); -+ while (length >= BITS_PER_LONG) { -+ *bitmap_base++ = mask; -+ length -= BITS_PER_LONG; -+ } -+ -+ if (length > 0) { -+ mask = ~(~0UL << length); -+ if (new_value) -+ *bitmap_base++ |= mask; -+ else -+ *bitmap_base++ &= ~mask; -+ } -+} -+ -+ -+/* -+ * this changes the io permissions bitmap in the current task. -+ */ -+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) -+{ -+ struct thread_struct * t = ¤t->thread; -+ unsigned long *bitmap; -+ struct physdev_set_iobitmap set_iobitmap; -+ -+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) -+ return -EINVAL; -+ if (turn_on && !capable(CAP_SYS_RAWIO)) -+ return -EPERM; -+ -+ /* -+ * If it's the first ioperm() call in this thread's lifetime, set the -+ * IO bitmap up. 
ioperm() is much less timing critical than clone(), -+ * this is why we delay this operation until now: -+ */ -+ if (!t->io_bitmap_ptr) { -+ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); -+ if (!bitmap) -+ return -ENOMEM; -+ -+ memset(bitmap, 0xff, IO_BITMAP_BYTES); -+ t->io_bitmap_ptr = bitmap; -+ set_thread_flag(TIF_IO_BITMAP); -+ -+ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap); -+ set_iobitmap.nr_ports = IO_BITMAP_BITS; -+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, -+ &set_iobitmap)); -+ } -+ -+ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on); -+ -+ return 0; -+} -+ -+/* -+ * sys_iopl has to be used when you want to access the IO ports -+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped -+ * you'd need 8kB of bitmaps/process, which is a bit excessive. -+ * -+ * Here we just change the eflags value on the stack: we allow -+ * only the super-user to do it. This depends on the stack-layout -+ * on system-call entry - see also fork() and the signal handling -+ * code. -+ */ -+ -+asmlinkage long sys_iopl(unsigned long unused) -+{ -+ volatile struct pt_regs * regs = (struct pt_regs *) &unused; -+ unsigned int level = regs->ebx; -+ struct thread_struct *t = ¤t->thread; -+ unsigned int old = (t->iopl >> 12) & 3; -+ -+ if (level > 3) -+ return -EINVAL; -+ /* Trying to gain more privileges? 
*/ -+ if (level > old) { -+ if (!capable(CAP_SYS_RAWIO)) -+ return -EPERM; -+ } -+ t->iopl = level << 12; -+ set_iopl_mask(t->iopl); -+ return 0; -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/ldt_32-xen.c 2007-06-12 13:12:48.000000000 +0200 -@@ -0,0 +1,270 @@ -+/* -+ * linux/kernel/ldt.c -+ * -+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds -+ * Copyright (C) 1999 Ingo Molnar -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */ -+static void flush_ldt(void *null) -+{ -+ if (current->active_mm) -+ load_LDT(¤t->active_mm->context); -+} -+#endif -+ -+static int alloc_ldt(mm_context_t *pc, int mincount, int reload) -+{ -+ void *oldldt; -+ void *newldt; -+ int oldsize; -+ -+ if (mincount <= pc->size) -+ return 0; -+ oldsize = pc->size; -+ mincount = (mincount+511)&(~511); -+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE) -+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE); -+ else -+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL); -+ -+ if (!newldt) -+ return -ENOMEM; -+ -+ if (oldsize) -+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE); -+ oldldt = pc->ldt; -+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE); -+ pc->ldt = newldt; -+ wmb(); -+ pc->size = mincount; -+ wmb(); -+ -+ if (reload) { -+#ifdef CONFIG_SMP -+ cpumask_t mask; -+ preempt_disable(); -+#endif -+ make_pages_readonly( -+ pc->ldt, -+ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE, -+ XENFEAT_writable_descriptor_tables); -+ load_LDT(pc); -+#ifdef CONFIG_SMP -+ mask = cpumask_of_cpu(smp_processor_id()); -+ if (!cpus_equal(current->mm->cpu_vm_mask, mask)) -+ smp_call_function(flush_ldt, NULL, 1, 1); -+ preempt_enable(); -+#endif -+ } -+ if (oldsize) { -+ make_pages_writable( -+ oldldt, -+ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE, -+ XENFEAT_writable_descriptor_tables); -+ if 
(oldsize*LDT_ENTRY_SIZE > PAGE_SIZE) -+ vfree(oldldt); -+ else -+ kfree(oldldt); -+ } -+ return 0; -+} -+ -+static inline int copy_ldt(mm_context_t *new, mm_context_t *old) -+{ -+ int err = alloc_ldt(new, old->size, 0); -+ if (err < 0) -+ return err; -+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE); -+ make_pages_readonly( -+ new->ldt, -+ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE, -+ XENFEAT_writable_descriptor_tables); -+ return 0; -+} -+ -+/* -+ * we do not have to muck with descriptors here, that is -+ * done in switch_mm() as needed. -+ */ -+int init_new_context(struct task_struct *tsk, struct mm_struct *mm) -+{ -+ struct mm_struct * old_mm; -+ int retval = 0; -+ -+ init_MUTEX(&mm->context.sem); -+ mm->context.size = 0; -+ mm->context.has_foreign_mappings = 0; -+ old_mm = current->mm; -+ if (old_mm && old_mm->context.size > 0) { -+ down(&old_mm->context.sem); -+ retval = copy_ldt(&mm->context, &old_mm->context); -+ up(&old_mm->context.sem); -+ } -+ return retval; -+} -+ -+/* -+ * No need to lock the MM as we are the last user -+ */ -+void destroy_context(struct mm_struct *mm) -+{ -+ if (mm->context.size) { -+ if (mm == current->active_mm) -+ clear_LDT(); -+ make_pages_writable( -+ mm->context.ldt, -+ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE, -+ XENFEAT_writable_descriptor_tables); -+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE) -+ vfree(mm->context.ldt); -+ else -+ kfree(mm->context.ldt); -+ mm->context.size = 0; -+ } -+} -+ -+static int read_ldt(void __user * ptr, unsigned long bytecount) -+{ -+ int err; -+ unsigned long size; -+ struct mm_struct * mm = current->mm; -+ -+ if (!mm->context.size) -+ return 0; -+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) -+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; -+ -+ down(&mm->context.sem); -+ size = mm->context.size*LDT_ENTRY_SIZE; -+ if (size > bytecount) -+ size = bytecount; -+ -+ err = 0; -+ if (copy_to_user(ptr, mm->context.ldt, size)) -+ err = -EFAULT; -+ up(&mm->context.sem); -+ if (err < 0) -+ goto 
error_return; -+ if (size != bytecount) { -+ /* zero-fill the rest */ -+ if (clear_user(ptr+size, bytecount-size) != 0) { -+ err = -EFAULT; -+ goto error_return; -+ } -+ } -+ return bytecount; -+error_return: -+ return err; -+} -+ -+static int read_default_ldt(void __user * ptr, unsigned long bytecount) -+{ -+ int err; -+ unsigned long size; -+ void *address; -+ -+ err = 0; -+ address = &default_ldt[0]; -+ size = 5*sizeof(struct desc_struct); -+ if (size > bytecount) -+ size = bytecount; -+ -+ err = size; -+ if (copy_to_user(ptr, address, size)) -+ err = -EFAULT; -+ -+ return err; -+} -+ -+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode) -+{ -+ struct mm_struct * mm = current->mm; -+ __u32 entry_1, entry_2; -+ int error; -+ struct user_desc ldt_info; -+ -+ error = -EINVAL; -+ if (bytecount != sizeof(ldt_info)) -+ goto out; -+ error = -EFAULT; -+ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info))) -+ goto out; -+ -+ error = -EINVAL; -+ if (ldt_info.entry_number >= LDT_ENTRIES) -+ goto out; -+ if (ldt_info.contents == 3) { -+ if (oldmode) -+ goto out; -+ if (ldt_info.seg_not_present == 0) -+ goto out; -+ } -+ -+ down(&mm->context.sem); -+ if (ldt_info.entry_number >= mm->context.size) { -+ error = alloc_ldt(¤t->mm->context, ldt_info.entry_number+1, 1); -+ if (error < 0) -+ goto out_unlock; -+ } -+ -+ /* Allow LDTs to be cleared by the user. */ -+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { -+ if (oldmode || LDT_empty(&ldt_info)) { -+ entry_1 = 0; -+ entry_2 = 0; -+ goto install; -+ } -+ } -+ -+ entry_1 = LDT_entry_a(&ldt_info); -+ entry_2 = LDT_entry_b(&ldt_info); -+ if (oldmode) -+ entry_2 &= ~(1 << 20); -+ -+ /* Install the new entry ... 
*/ -+install: -+ error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number, -+ entry_1, entry_2); -+ -+out_unlock: -+ up(&mm->context.sem); -+out: -+ return error; -+} -+ -+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount) -+{ -+ int ret = -ENOSYS; -+ -+ switch (func) { -+ case 0: -+ ret = read_ldt(ptr, bytecount); -+ break; -+ case 1: -+ ret = write_ldt(ptr, bytecount, 1); -+ break; -+ case 2: -+ ret = read_default_ldt(ptr, bytecount); -+ break; -+ case 0x11: -+ ret = write_ldt(ptr, bytecount, 0); -+ break; -+ } -+ return ret; -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/microcode-xen.c 2007-06-12 13:12:48.000000000 +0200 -@@ -0,0 +1,144 @@ -+/* -+ * Intel CPU Microcode Update Driver for Linux -+ * -+ * Copyright (C) 2000-2004 Tigran Aivazian -+ * -+ * This driver allows to upgrade microcode on Intel processors -+ * belonging to IA-32 family - PentiumPro, Pentium II, -+ * Pentium III, Xeon, Pentium 4, etc. -+ * -+ * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual, -+ * Order Number 245472 or free download from: -+ * -+ * http://developer.intel.com/design/pentium4/manuals/245472.htm -+ * -+ * For more information, go to http://www.urbanmyth.org/microcode -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version -+ * 2 of the License, or (at your option) any later version. 
-+ */ -+ -+//#define DEBUG /* pr_debug */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver"); -+MODULE_AUTHOR("Tigran Aivazian "); -+MODULE_LICENSE("GPL"); -+ -+static int verbose; -+module_param(verbose, int, 0644); -+ -+#define MICROCODE_VERSION "1.14a-xen" -+ -+#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */ -+#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */ -+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */ -+ -+/* no concurrent ->write()s are allowed on /dev/cpu/microcode */ -+static DEFINE_MUTEX(microcode_mutex); -+ -+static int microcode_open (struct inode *unused1, struct file *unused2) -+{ -+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; -+} -+ -+ -+static int do_microcode_update (const void __user *ubuf, size_t len) -+{ -+ int err; -+ void *kbuf; -+ -+ kbuf = vmalloc(len); -+ if (!kbuf) -+ return -ENOMEM; -+ -+ if (copy_from_user(kbuf, ubuf, len) == 0) { -+ struct xen_platform_op op; -+ -+ op.cmd = XENPF_microcode_update; -+ set_xen_guest_handle(op.u.microcode.data, kbuf); -+ op.u.microcode.length = len; -+ err = HYPERVISOR_platform_op(&op); -+ } else -+ err = -EFAULT; -+ -+ vfree(kbuf); -+ -+ return err; -+} -+ -+static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos) -+{ -+ ssize_t ret; -+ -+ if (len < MC_HEADER_SIZE) { -+ printk(KERN_ERR "microcode: not enough data\n"); -+ return -EINVAL; -+ } -+ -+ mutex_lock(µcode_mutex); -+ -+ ret = do_microcode_update(buf, len); -+ if (!ret) -+ ret = (ssize_t)len; -+ -+ mutex_unlock(µcode_mutex); -+ -+ return ret; -+} -+ -+static struct file_operations microcode_fops = { -+ .owner = THIS_MODULE, -+ .write = microcode_write, -+ .open = microcode_open, -+}; -+ -+static struct miscdevice microcode_dev = { -+ .minor 
= MICROCODE_MINOR, -+ .name = "microcode", -+ .fops = µcode_fops, -+}; -+ -+static int __init microcode_init (void) -+{ -+ int error; -+ -+ error = misc_register(µcode_dev); -+ if (error) { -+ printk(KERN_ERR -+ "microcode: can't misc_register on minor=%d\n", -+ MICROCODE_MINOR); -+ return error; -+ } -+ -+ printk(KERN_INFO -+ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " \n"); -+ return 0; -+} -+ -+static void __exit microcode_exit (void) -+{ -+ misc_deregister(µcode_dev); -+} -+ -+module_init(microcode_init) -+module_exit(microcode_exit) -+MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/mpparse_32-xen.c 2007-06-12 13:12:48.000000000 +0200 -@@ -0,0 +1,1185 @@ -+/* -+ * Intel Multiprocessor Specification 1.1 and 1.4 -+ * compliant MP-table parsing routines. -+ * -+ * (c) 1995 Alan Cox, Building #3 -+ * (c) 1998, 1999, 2000 Ingo Molnar -+ * -+ * Fixes -+ * Erich Boleyn : MP v1.4 and additional changes. -+ * Alan Cox : Added EBDA scanning -+ * Ingo Molnar : various cleanups and rewrites -+ * Maciej W. Rozycki: Bits for default MP configurations -+ * Paul Diefenbaugh: Added full ACPI support -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+/* Have we found an MP table */ -+int smp_found_config; -+unsigned int __initdata maxcpus = NR_CPUS; -+ -+/* -+ * Various Linux-internal data structures created from the -+ * MP-table. -+ */ -+int apic_version [MAX_APICS]; -+int mp_bus_id_to_type [MAX_MP_BUSSES]; -+int mp_bus_id_to_node [MAX_MP_BUSSES]; -+int mp_bus_id_to_local [MAX_MP_BUSSES]; -+int quad_local_to_mp_bus_id [NR_CPUS/4][4]; -+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... 
MAX_MP_BUSSES-1] = -1 }; -+static int mp_current_pci_id; -+ -+/* I/O APIC entries */ -+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; -+ -+/* # of MP IRQ source entries */ -+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; -+ -+/* MP IRQ source entries */ -+int mp_irq_entries; -+ -+int nr_ioapics; -+ -+int pic_mode; -+unsigned long mp_lapic_addr; -+ -+unsigned int def_to_bigsmp = 0; -+ -+/* Processor that is doing the boot up */ -+unsigned int boot_cpu_physical_apicid = -1U; -+/* Internal processor count */ -+static unsigned int __devinitdata num_processors; -+ -+/* Bitmask of physically existing CPUs */ -+physid_mask_t phys_cpu_present_map; -+ -+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; -+ -+/* -+ * Intel MP BIOS table parsing routines: -+ */ -+ -+ -+/* -+ * Checksum an MP configuration block. -+ */ -+ -+static int __init mpf_checksum(unsigned char *mp, int len) -+{ -+ int sum = 0; -+ -+ while (len--) -+ sum += *mp++; -+ -+ return sum & 0xFF; -+} -+ -+/* -+ * Have to match translation table entries to main table entries by counter -+ * hence the mpc_record variable .... can't see a less disgusting way of -+ * doing this .... 
-+ */ -+ -+static int mpc_record; -+static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata; -+ -+#ifndef CONFIG_XEN -+static void __devinit MP_processor_info (struct mpc_config_processor *m) -+{ -+ int ver, apicid; -+ physid_mask_t phys_cpu; -+ -+ if (!(m->mpc_cpuflag & CPU_ENABLED)) -+ return; -+ -+ apicid = mpc_apic_id(m, translation_table[mpc_record]); -+ -+ if (m->mpc_featureflag&(1<<0)) -+ Dprintk(" Floating point unit present.\n"); -+ if (m->mpc_featureflag&(1<<7)) -+ Dprintk(" Machine Exception supported.\n"); -+ if (m->mpc_featureflag&(1<<8)) -+ Dprintk(" 64 bit compare & exchange supported.\n"); -+ if (m->mpc_featureflag&(1<<9)) -+ Dprintk(" Internal APIC present.\n"); -+ if (m->mpc_featureflag&(1<<11)) -+ Dprintk(" SEP present.\n"); -+ if (m->mpc_featureflag&(1<<12)) -+ Dprintk(" MTRR present.\n"); -+ if (m->mpc_featureflag&(1<<13)) -+ Dprintk(" PGE present.\n"); -+ if (m->mpc_featureflag&(1<<14)) -+ Dprintk(" MCA present.\n"); -+ if (m->mpc_featureflag&(1<<15)) -+ Dprintk(" CMOV present.\n"); -+ if (m->mpc_featureflag&(1<<16)) -+ Dprintk(" PAT present.\n"); -+ if (m->mpc_featureflag&(1<<17)) -+ Dprintk(" PSE present.\n"); -+ if (m->mpc_featureflag&(1<<18)) -+ Dprintk(" PSN present.\n"); -+ if (m->mpc_featureflag&(1<<19)) -+ Dprintk(" Cache Line Flush Instruction present.\n"); -+ /* 20 Reserved */ -+ if (m->mpc_featureflag&(1<<21)) -+ Dprintk(" Debug Trace and EMON Store present.\n"); -+ if (m->mpc_featureflag&(1<<22)) -+ Dprintk(" ACPI Thermal Throttle Registers present.\n"); -+ if (m->mpc_featureflag&(1<<23)) -+ Dprintk(" MMX present.\n"); -+ if (m->mpc_featureflag&(1<<24)) -+ Dprintk(" FXSR present.\n"); -+ if (m->mpc_featureflag&(1<<25)) -+ Dprintk(" XMM present.\n"); -+ if (m->mpc_featureflag&(1<<26)) -+ Dprintk(" Willamette New Instructions present.\n"); -+ if (m->mpc_featureflag&(1<<27)) -+ Dprintk(" Self Snoop present.\n"); -+ if (m->mpc_featureflag&(1<<28)) -+ Dprintk(" HT present.\n"); -+ if 
(m->mpc_featureflag&(1<<29)) -+ Dprintk(" Thermal Monitor present.\n"); -+ /* 30, 31 Reserved */ -+ -+ -+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { -+ Dprintk(" Bootup CPU\n"); -+ boot_cpu_physical_apicid = m->mpc_apicid; -+ } -+ -+ ver = m->mpc_apicver; -+ -+ /* -+ * Validate version -+ */ -+ if (ver == 0x0) { -+ printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " -+ "fixing up to 0x10. (tell your hw vendor)\n", -+ m->mpc_apicid); -+ ver = 0x10; -+ } -+ apic_version[m->mpc_apicid] = ver; -+ -+ phys_cpu = apicid_to_cpu_present(apicid); -+ physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu); -+ -+ if (num_processors >= NR_CPUS) { -+ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." -+ " Processor ignored.\n", NR_CPUS); -+ return; -+ } -+ -+ if (num_processors >= maxcpus) { -+ printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." -+ " Processor ignored.\n", maxcpus); -+ return; -+ } -+ -+ cpu_set(num_processors, cpu_possible_map); -+ num_processors++; -+ -+ /* -+ * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y -+ * but we need to work other dependencies like SMP_SUSPEND etc -+ * before this can be done without some confusion. 
-+ * if (CPU_HOTPLUG_ENABLED || num_processors > 8) -+ * - Ashok Raj -+ */ -+ if (num_processors > 8) { -+ switch (boot_cpu_data.x86_vendor) { -+ case X86_VENDOR_INTEL: -+ if (!APIC_XAPIC(ver)) { -+ def_to_bigsmp = 0; -+ break; -+ } -+ /* If P4 and above fall through */ -+ case X86_VENDOR_AMD: -+ def_to_bigsmp = 1; -+ } -+ } -+ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; -+} -+#else -+void __init MP_processor_info (struct mpc_config_processor *m) -+{ -+ num_processors++; -+} -+#endif /* CONFIG_XEN */ -+ -+static void __init MP_bus_info (struct mpc_config_bus *m) -+{ -+ char str[7]; -+ -+ memcpy(str, m->mpc_bustype, 6); -+ str[6] = 0; -+ -+ mpc_oem_bus_info(m, str, translation_table[mpc_record]); -+ -+ if (m->mpc_busid >= MAX_MP_BUSSES) { -+ printk(KERN_WARNING "MP table busid value (%d) for bustype %s " -+ " is too large, max. supported is %d\n", -+ m->mpc_busid, str, MAX_MP_BUSSES - 1); -+ return; -+ } -+ -+ if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { -+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; -+ } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { -+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; -+ } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { -+ mpc_oem_pci_bus(m, translation_table[mpc_record]); -+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; -+ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; -+ mp_current_pci_id++; -+ } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) { -+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; -+ } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) { -+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98; -+ } else { -+ printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); -+ } -+} -+ -+static void __init MP_ioapic_info (struct mpc_config_ioapic *m) -+{ -+ if (!(m->mpc_flags & MPC_APIC_USABLE)) -+ return; -+ -+ printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n", -+ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); -+ if 
(nr_ioapics >= MAX_IO_APICS) { -+ printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n", -+ MAX_IO_APICS, nr_ioapics); -+ panic("Recompile kernel with bigger MAX_IO_APICS!.\n"); -+ } -+ if (!m->mpc_apicaddr) { -+ printk(KERN_ERR "WARNING: bogus zero I/O APIC address" -+ " found in MP table, skipping!\n"); -+ return; -+ } -+ mp_ioapics[nr_ioapics] = *m; -+ nr_ioapics++; -+} -+ -+static void __init MP_intsrc_info (struct mpc_config_intsrc *m) -+{ -+ mp_irqs [mp_irq_entries] = *m; -+ Dprintk("Int: type %d, pol %d, trig %d, bus %d," -+ " IRQ %02x, APIC ID %x, APIC INT %02x\n", -+ m->mpc_irqtype, m->mpc_irqflag & 3, -+ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, -+ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); -+ if (++mp_irq_entries == MAX_IRQ_SOURCES) -+ panic("Max # of irq sources exceeded!!\n"); -+} -+ -+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) -+{ -+ Dprintk("Lint: type %d, pol %d, trig %d, bus %d," -+ " IRQ %02x, APIC ID %x, APIC LINT %02x\n", -+ m->mpc_irqtype, m->mpc_irqflag & 3, -+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, -+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); -+ /* -+ * Well it seems all SMP boards in existence -+ * use ExtINT/LVT1 == LINT0 and -+ * NMI/LVT2 == LINT1 - the following check -+ * will show us if this assumptions is false. -+ * Until then we do not have to add baggage. 
-+ */ -+ if ((m->mpc_irqtype == mp_ExtINT) && -+ (m->mpc_destapiclint != 0)) -+ BUG(); -+ if ((m->mpc_irqtype == mp_NMI) && -+ (m->mpc_destapiclint != 1)) -+ BUG(); -+} -+ -+#ifdef CONFIG_X86_NUMAQ -+static void __init MP_translation_info (struct mpc_config_translation *m) -+{ -+ printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local); -+ -+ if (mpc_record >= MAX_MPC_ENTRY) -+ printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); -+ else -+ translation_table[mpc_record] = m; /* stash this for later */ -+ if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) -+ node_set_online(m->trans_quad); -+} -+ -+/* -+ * Read/parse the MPC oem tables -+ */ -+ -+static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \ -+ unsigned short oemsize) -+{ -+ int count = sizeof (*oemtable); /* the header size */ -+ unsigned char *oemptr = ((unsigned char *)oemtable)+count; -+ -+ mpc_record = 0; -+ printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable); -+ if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4)) -+ { -+ printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", -+ oemtable->oem_signature[0], -+ oemtable->oem_signature[1], -+ oemtable->oem_signature[2], -+ oemtable->oem_signature[3]); -+ return; -+ } -+ if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length)) -+ { -+ printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); -+ return; -+ } -+ while (count < oemtable->oem_length) { -+ switch (*oemptr) { -+ case MP_TRANSLATION: -+ { -+ struct mpc_config_translation *m= -+ (struct mpc_config_translation *)oemptr; -+ MP_translation_info(m); -+ oemptr += sizeof(*m); -+ count += sizeof(*m); -+ ++mpc_record; -+ break; -+ } -+ default: -+ { -+ printk(KERN_WARNING "Unrecognised OEM table entry type! 
- %d\n", (int) *oemptr); -+ return; -+ } -+ } -+ } -+} -+ -+static inline void mps_oem_check(struct mp_config_table *mpc, char *oem, -+ char *productid) -+{ -+ if (strncmp(oem, "IBM NUMA", 8)) -+ printk("Warning! May not be a NUMA-Q system!\n"); -+ if (mpc->mpc_oemptr) -+ smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr, -+ mpc->mpc_oemsize); -+} -+#endif /* CONFIG_X86_NUMAQ */ -+ -+/* -+ * Read/parse the MPC -+ */ -+ -+static int __init smp_read_mpc(struct mp_config_table *mpc) -+{ -+ char str[16]; -+ char oem[10]; -+ int count=sizeof(*mpc); -+ unsigned char *mpt=((unsigned char *)mpc)+count; -+ -+ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) { -+ printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n", -+ *(u32 *)mpc->mpc_signature); -+ return 0; -+ } -+ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) { -+ printk(KERN_ERR "SMP mptable: checksum error!\n"); -+ return 0; -+ } -+ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) { -+ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n", -+ mpc->mpc_spec); -+ return 0; -+ } -+ if (!mpc->mpc_lapic) { -+ printk(KERN_ERR "SMP mptable: null local APIC address!\n"); -+ return 0; -+ } -+ memcpy(oem,mpc->mpc_oem,8); -+ oem[8]=0; -+ printk(KERN_INFO "OEM ID: %s ",oem); -+ -+ memcpy(str,mpc->mpc_productid,12); -+ str[12]=0; -+ printk("Product ID: %s ",str); -+ -+ mps_oem_check(mpc, oem, str); -+ -+ printk("APIC at: 0x%lX\n",mpc->mpc_lapic); -+ -+ /* -+ * Save the local APIC address (it might be non-default) -- but only -+ * if we're not using ACPI. -+ */ -+ if (!acpi_lapic) -+ mp_lapic_addr = mpc->mpc_lapic; -+ -+ /* -+ * Now process the configuration blocks. 
-+ */ -+ mpc_record = 0; -+ while (count < mpc->mpc_length) { -+ switch(*mpt) { -+ case MP_PROCESSOR: -+ { -+ struct mpc_config_processor *m= -+ (struct mpc_config_processor *)mpt; -+ /* ACPI may have already provided this data */ -+ if (!acpi_lapic) -+ MP_processor_info(m); -+ mpt += sizeof(*m); -+ count += sizeof(*m); -+ break; -+ } -+ case MP_BUS: -+ { -+ struct mpc_config_bus *m= -+ (struct mpc_config_bus *)mpt; -+ MP_bus_info(m); -+ mpt += sizeof(*m); -+ count += sizeof(*m); -+ break; -+ } -+ case MP_IOAPIC: -+ { -+ struct mpc_config_ioapic *m= -+ (struct mpc_config_ioapic *)mpt; -+ MP_ioapic_info(m); -+ mpt+=sizeof(*m); -+ count+=sizeof(*m); -+ break; -+ } -+ case MP_INTSRC: -+ { -+ struct mpc_config_intsrc *m= -+ (struct mpc_config_intsrc *)mpt; -+ -+ MP_intsrc_info(m); -+ mpt+=sizeof(*m); -+ count+=sizeof(*m); -+ break; -+ } -+ case MP_LINTSRC: -+ { -+ struct mpc_config_lintsrc *m= -+ (struct mpc_config_lintsrc *)mpt; -+ MP_lintsrc_info(m); -+ mpt+=sizeof(*m); -+ count+=sizeof(*m); -+ break; -+ } -+ default: -+ { -+ count = mpc->mpc_length; -+ break; -+ } -+ } -+ ++mpc_record; -+ } -+ clustered_apic_check(); -+ if (!num_processors) -+ printk(KERN_ERR "SMP mptable: no processors registered!\n"); -+ return num_processors; -+} -+ -+static int __init ELCR_trigger(unsigned int irq) -+{ -+ unsigned int port; -+ -+ port = 0x4d0 + (irq >> 3); -+ return (inb(port) >> (irq & 7)) & 1; -+} -+ -+static void __init construct_default_ioirq_mptable(int mpc_default_type) -+{ -+ struct mpc_config_intsrc intsrc; -+ int i; -+ int ELCR_fallback = 0; -+ -+ intsrc.mpc_type = MP_INTSRC; -+ intsrc.mpc_irqflag = 0; /* conforming */ -+ intsrc.mpc_srcbus = 0; -+ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; -+ -+ intsrc.mpc_irqtype = mp_INT; -+ -+ /* -+ * If true, we have an ISA/PCI system with no IRQ entries -+ * in the MP table. To prevent the PCI interrupts from being set up -+ * incorrectly, we try to use the ELCR. 
The sanity check to see if -+ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can -+ * never be level sensitive, so we simply see if the ELCR agrees. -+ * If it does, we assume it's valid. -+ */ -+ if (mpc_default_type == 5) { -+ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); -+ -+ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13)) -+ printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n"); -+ else { -+ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); -+ ELCR_fallback = 1; -+ } -+ } -+ -+ for (i = 0; i < 16; i++) { -+ switch (mpc_default_type) { -+ case 2: -+ if (i == 0 || i == 13) -+ continue; /* IRQ0 & IRQ13 not connected */ -+ /* fall through */ -+ default: -+ if (i == 2) -+ continue; /* IRQ2 is never connected */ -+ } -+ -+ if (ELCR_fallback) { -+ /* -+ * If the ELCR indicates a level-sensitive interrupt, we -+ * copy that information over to the MP table in the -+ * irqflag field (level sensitive, active high polarity). -+ */ -+ if (ELCR_trigger(i)) -+ intsrc.mpc_irqflag = 13; -+ else -+ intsrc.mpc_irqflag = 0; -+ } -+ -+ intsrc.mpc_srcbusirq = i; -+ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ -+ MP_intsrc_info(&intsrc); -+ } -+ -+ intsrc.mpc_irqtype = mp_ExtINT; -+ intsrc.mpc_srcbusirq = 0; -+ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ -+ MP_intsrc_info(&intsrc); -+} -+ -+static inline void __init construct_default_ISA_mptable(int mpc_default_type) -+{ -+ struct mpc_config_processor processor; -+ struct mpc_config_bus bus; -+ struct mpc_config_ioapic ioapic; -+ struct mpc_config_lintsrc lintsrc; -+ int linttypes[2] = { mp_ExtINT, mp_NMI }; -+ int i; -+ -+ /* -+ * local APIC has default address -+ */ -+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; -+ -+ /* -+ * 2 CPUs, numbered 0 & 1. -+ */ -+ processor.mpc_type = MP_PROCESSOR; -+ /* Either an integrated APIC or a discrete 82489DX. */ -+ processor.mpc_apicver = mpc_default_type > 4 ? 
0x10 : 0x01; -+ processor.mpc_cpuflag = CPU_ENABLED; -+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | -+ (boot_cpu_data.x86_model << 4) | -+ boot_cpu_data.x86_mask; -+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; -+ processor.mpc_reserved[0] = 0; -+ processor.mpc_reserved[1] = 0; -+ for (i = 0; i < 2; i++) { -+ processor.mpc_apicid = i; -+ MP_processor_info(&processor); -+ } -+ -+ bus.mpc_type = MP_BUS; -+ bus.mpc_busid = 0; -+ switch (mpc_default_type) { -+ default: -+ printk("???\n"); -+ printk(KERN_ERR "Unknown standard configuration %d\n", -+ mpc_default_type); -+ /* fall through */ -+ case 1: -+ case 5: -+ memcpy(bus.mpc_bustype, "ISA ", 6); -+ break; -+ case 2: -+ case 6: -+ case 3: -+ memcpy(bus.mpc_bustype, "EISA ", 6); -+ break; -+ case 4: -+ case 7: -+ memcpy(bus.mpc_bustype, "MCA ", 6); -+ } -+ MP_bus_info(&bus); -+ if (mpc_default_type > 4) { -+ bus.mpc_busid = 1; -+ memcpy(bus.mpc_bustype, "PCI ", 6); -+ MP_bus_info(&bus); -+ } -+ -+ ioapic.mpc_type = MP_IOAPIC; -+ ioapic.mpc_apicid = 2; -+ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; -+ ioapic.mpc_flags = MPC_APIC_USABLE; -+ ioapic.mpc_apicaddr = 0xFEC00000; -+ MP_ioapic_info(&ioapic); -+ -+ /* -+ * We set up most of the low 16 IO-APIC pins according to MPS rules. -+ */ -+ construct_default_ioirq_mptable(mpc_default_type); -+ -+ lintsrc.mpc_type = MP_LINTSRC; -+ lintsrc.mpc_irqflag = 0; /* conforming */ -+ lintsrc.mpc_srcbusid = 0; -+ lintsrc.mpc_srcbusirq = 0; -+ lintsrc.mpc_destapic = MP_APIC_ALL; -+ for (i = 0; i < 2; i++) { -+ lintsrc.mpc_irqtype = linttypes[i]; -+ lintsrc.mpc_destapiclint = i; -+ MP_lintsrc_info(&lintsrc); -+ } -+} -+ -+static struct intel_mp_floating *mpf_found; -+ -+/* -+ * Scan the memory blocks for an SMP configuration block. -+ */ -+void __init get_smp_config (void) -+{ -+ struct intel_mp_floating *mpf = mpf_found; -+ -+ /* -+ * ACPI supports both logical (e.g. 
Hyper-Threading) and physical -+ * processors, where MPS only supports physical. -+ */ -+ if (acpi_lapic && acpi_ioapic) { -+ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); -+ return; -+ } -+ else if (acpi_lapic) -+ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); -+ -+ printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); -+ if (mpf->mpf_feature2 & (1<<7)) { -+ printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); -+ pic_mode = 1; -+ } else { -+ printk(KERN_INFO " Virtual Wire compatibility mode.\n"); -+ pic_mode = 0; -+ } -+ -+ /* -+ * Now see if we need to read further. -+ */ -+ if (mpf->mpf_feature1 != 0) { -+ -+ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); -+ construct_default_ISA_mptable(mpf->mpf_feature1); -+ -+ } else if (mpf->mpf_physptr) { -+ -+ /* -+ * Read the physical hardware table. Anything here will -+ * override the defaults. -+ */ -+ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) { -+ smp_found_config = 0; -+ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); -+ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); -+ return; -+ } -+ /* -+ * If there are no explicit MP IRQ entries, then we are -+ * broken. We set up most of the low 16 IO-APIC pins to -+ * ISA defaults and hope it will work. -+ */ -+ if (!mp_irq_entries) { -+ struct mpc_config_bus bus; -+ -+ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); -+ -+ bus.mpc_type = MP_BUS; -+ bus.mpc_busid = 0; -+ memcpy(bus.mpc_bustype, "ISA ", 6); -+ MP_bus_info(&bus); -+ -+ construct_default_ioirq_mptable(0); -+ } -+ -+ } else -+ BUG(); -+ -+ printk(KERN_INFO "Processors: %d\n", num_processors); -+ /* -+ * Only use the first configuration found. 
-+ */ -+} -+ -+static int __init smp_scan_config (unsigned long base, unsigned long length) -+{ -+ unsigned long *bp = isa_bus_to_virt(base); -+ struct intel_mp_floating *mpf; -+ -+ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length); -+ if (sizeof(*mpf) != 16) -+ printk("Error: MPF size\n"); -+ -+ while (length > 0) { -+ mpf = (struct intel_mp_floating *)bp; -+ if ((*bp == SMP_MAGIC_IDENT) && -+ (mpf->mpf_length == 1) && -+ !mpf_checksum((unsigned char *)bp, 16) && -+ ((mpf->mpf_specification == 1) -+ || (mpf->mpf_specification == 4)) ) { -+ -+ smp_found_config = 1; -+#ifndef CONFIG_XEN -+ printk(KERN_INFO "found SMP MP-table at %08lx\n", -+ virt_to_phys(mpf)); -+ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE); -+ if (mpf->mpf_physptr) { -+ /* -+ * We cannot access to MPC table to compute -+ * table size yet, as only few megabytes from -+ * the bottom is mapped now. -+ * PC-9800's MPC table places on the very last -+ * of physical memory; so that simply reserving -+ * PAGE_SIZE from mpg->mpf_physptr yields BUG() -+ * in reserve_bootmem. -+ */ -+ unsigned long size = PAGE_SIZE; -+ unsigned long end = max_low_pfn * PAGE_SIZE; -+ if (mpf->mpf_physptr + size > end) -+ size = end - mpf->mpf_physptr; -+ reserve_bootmem(mpf->mpf_physptr, size); -+ } -+#else -+ printk(KERN_INFO "found SMP MP-table at %08lx\n", -+ ((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base); -+#endif -+ -+ mpf_found = mpf; -+ return 1; -+ } -+ bp += 4; -+ length -= 16; -+ } -+ return 0; -+} -+ -+void __init find_smp_config (void) -+{ -+#ifndef CONFIG_XEN -+ unsigned int address; -+#endif -+ -+ /* -+ * FIXME: Linux assumes you have 640K of base ram.. -+ * this continues the error... 
-+ * -+ * 1) Scan the bottom 1K for a signature -+ * 2) Scan the top 1K of base RAM -+ * 3) Scan the 64K of bios -+ */ -+ if (smp_scan_config(0x0,0x400) || -+ smp_scan_config(639*0x400,0x400) || -+ smp_scan_config(0xF0000,0x10000)) -+ return; -+ /* -+ * If it is an SMP machine we should know now, unless the -+ * configuration is in an EISA/MCA bus machine with an -+ * extended bios data area. -+ * -+ * there is a real-mode segmented pointer pointing to the -+ * 4K EBDA area at 0x40E, calculate and scan it here. -+ * -+ * NOTE! There are Linux loaders that will corrupt the EBDA -+ * area, and as such this kind of SMP config may be less -+ * trustworthy, simply because the SMP table may have been -+ * stomped on during early boot. These loaders are buggy and -+ * should be fixed. -+ * -+ * MP1.4 SPEC states to only scan first 1K of 4K EBDA. -+ */ -+ -+#ifndef CONFIG_XEN -+ address = get_bios_ebda(); -+ if (address) -+ smp_scan_config(address, 0x400); -+#endif -+} -+ -+int es7000_plat; -+ -+/* -------------------------------------------------------------------------- -+ ACPI-based MP Configuration -+ -------------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_ACPI -+ -+void __init mp_register_lapic_address ( -+ u64 address) -+{ -+#ifndef CONFIG_XEN -+ mp_lapic_addr = (unsigned long) address; -+ -+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); -+ -+ if (boot_cpu_physical_apicid == -1U) -+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); -+ -+ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); -+#endif -+} -+ -+ -+void __devinit mp_register_lapic ( -+ u8 id, -+ u8 enabled) -+{ -+ struct mpc_config_processor processor; -+ int boot_cpu = 0; -+ -+ if (MAX_APICS - id <= 0) { -+ printk(KERN_WARNING "Processor #%d invalid (max %d)\n", -+ id, MAX_APICS); -+ return; -+ } -+ -+ if (id == boot_cpu_physical_apicid) -+ boot_cpu = 1; -+ -+#ifndef CONFIG_XEN -+ processor.mpc_type = MP_PROCESSOR; -+ processor.mpc_apicid = id; -+ 
processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)); -+ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); -+ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0); -+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | -+ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; -+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; -+ processor.mpc_reserved[0] = 0; -+ processor.mpc_reserved[1] = 0; -+#endif -+ -+ MP_processor_info(&processor); -+} -+ -+#ifdef CONFIG_X86_IO_APIC -+ -+#define MP_ISA_BUS 0 -+#define MP_MAX_IOAPIC_PIN 127 -+ -+static struct mp_ioapic_routing { -+ int apic_id; -+ int gsi_base; -+ int gsi_end; -+ u32 pin_programmed[4]; -+} mp_ioapic_routing[MAX_IO_APICS]; -+ -+ -+static int mp_find_ioapic ( -+ int gsi) -+{ -+ int i = 0; -+ -+ /* Find the IOAPIC that manages this GSI. */ -+ for (i = 0; i < nr_ioapics; i++) { -+ if ((gsi >= mp_ioapic_routing[i].gsi_base) -+ && (gsi <= mp_ioapic_routing[i].gsi_end)) -+ return i; -+ } -+ -+ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); -+ -+ return -1; -+} -+ -+ -+void __init mp_register_ioapic ( -+ u8 id, -+ u32 address, -+ u32 gsi_base) -+{ -+ int idx = 0; -+ int tmpid; -+ -+ if (nr_ioapics >= MAX_IO_APICS) { -+ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " -+ "(found %d)\n", MAX_IO_APICS, nr_ioapics); -+ panic("Recompile kernel with bigger MAX_IO_APICS!\n"); -+ } -+ if (!address) { -+ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" -+ " found in MADT table, skipping!\n"); -+ return; -+ } -+ -+ idx = nr_ioapics++; -+ -+ mp_ioapics[idx].mpc_type = MP_IOAPIC; -+ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; -+ mp_ioapics[idx].mpc_apicaddr = address; -+ -+#ifndef CONFIG_XEN -+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); -+#endif -+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) -+ && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) -+ tmpid = io_apic_get_unique_id(idx, id); -+ else -+ tmpid = id; -+ if (tmpid == -1) { 
-+ nr_ioapics--; -+ return; -+ } -+ mp_ioapics[idx].mpc_apicid = tmpid; -+ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); -+ -+ /* -+ * Build basic GSI lookup table to facilitate gsi->io_apic lookups -+ * and to prevent reprogramming of IOAPIC pins (PCI GSIs). -+ */ -+ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; -+ mp_ioapic_routing[idx].gsi_base = gsi_base; -+ mp_ioapic_routing[idx].gsi_end = gsi_base + -+ io_apic_get_redir_entries(idx); -+ -+ printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, " -+ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, -+ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, -+ mp_ioapic_routing[idx].gsi_base, -+ mp_ioapic_routing[idx].gsi_end); -+ -+ return; -+} -+ -+ -+void __init mp_override_legacy_irq ( -+ u8 bus_irq, -+ u8 polarity, -+ u8 trigger, -+ u32 gsi) -+{ -+ struct mpc_config_intsrc intsrc; -+ int ioapic = -1; -+ int pin = -1; -+ -+ /* -+ * Convert 'gsi' to 'ioapic.pin'. -+ */ -+ ioapic = mp_find_ioapic(gsi); -+ if (ioapic < 0) -+ return; -+ pin = gsi - mp_ioapic_routing[ioapic].gsi_base; -+ -+ /* -+ * TBD: This check is for faulty timer entries, where the override -+ * erroneously sets the trigger to level, resulting in a HUGE -+ * increase of timer interrupts! 
-+ */ -+ if ((bus_irq == 0) && (trigger == 3)) -+ trigger = 1; -+ -+ intsrc.mpc_type = MP_INTSRC; -+ intsrc.mpc_irqtype = mp_INT; -+ intsrc.mpc_irqflag = (trigger << 2) | polarity; -+ intsrc.mpc_srcbus = MP_ISA_BUS; -+ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ -+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ -+ intsrc.mpc_dstirq = pin; /* INTIN# */ -+ -+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", -+ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, -+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, -+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); -+ -+ mp_irqs[mp_irq_entries] = intsrc; -+ if (++mp_irq_entries == MAX_IRQ_SOURCES) -+ panic("Max # of irq sources exceeded!\n"); -+ -+ return; -+} -+ -+void __init mp_config_acpi_legacy_irqs (void) -+{ -+ struct mpc_config_intsrc intsrc; -+ int i = 0; -+ int ioapic = -1; -+ -+ /* -+ * Fabricate the legacy ISA bus (bus #31). -+ */ -+ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; -+ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); -+ -+ /* -+ * Older generations of ES7000 have no legacy identity mappings -+ */ -+ if (es7000_plat == 1) -+ return; -+ -+ /* -+ * Locate the IOAPIC that manages the ISA IRQs (0-15). -+ */ -+ ioapic = mp_find_ioapic(0); -+ if (ioapic < 0) -+ return; -+ -+ intsrc.mpc_type = MP_INTSRC; -+ intsrc.mpc_irqflag = 0; /* Conforming */ -+ intsrc.mpc_srcbus = MP_ISA_BUS; -+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; -+ -+ /* -+ * Use the default configuration for the IRQs 0-15. Unless -+ * overriden by (MADT) interrupt source override entries. -+ */ -+ for (i = 0; i < 16; i++) { -+ int idx; -+ -+ for (idx = 0; idx < mp_irq_entries; idx++) { -+ struct mpc_config_intsrc *irq = mp_irqs + idx; -+ -+ /* Do we already have a mapping for this ISA IRQ? 
*/ -+ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i) -+ break; -+ -+ /* Do we already have a mapping for this IOAPIC pin */ -+ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && -+ (irq->mpc_dstirq == i)) -+ break; -+ } -+ -+ if (idx != mp_irq_entries) { -+ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); -+ continue; /* IRQ already used */ -+ } -+ -+ intsrc.mpc_irqtype = mp_INT; -+ intsrc.mpc_srcbusirq = i; /* Identity mapped */ -+ intsrc.mpc_dstirq = i; -+ -+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " -+ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, -+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, -+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, -+ intsrc.mpc_dstirq); -+ -+ mp_irqs[mp_irq_entries] = intsrc; -+ if (++mp_irq_entries == MAX_IRQ_SOURCES) -+ panic("Max # of irq sources exceeded!\n"); -+ } -+} -+ -+#define MAX_GSI_NUM 4096 -+ -+int mp_register_gsi (u32 gsi, int triggering, int polarity) -+{ -+ int ioapic = -1; -+ int ioapic_pin = 0; -+ int idx, bit = 0; -+ static int pci_irq = 16; -+ /* -+ * Mapping between Global System Interrups, which -+ * represent all possible interrupts, and IRQs -+ * assigned to actual devices. -+ */ -+ static int gsi_to_irq[MAX_GSI_NUM]; -+ -+ /* Don't set up the ACPI SCI because it's already set up */ -+ if (acpi_fadt.sci_int == gsi) -+ return gsi; -+ -+ ioapic = mp_find_ioapic(gsi); -+ if (ioapic < 0) { -+ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); -+ return gsi; -+ } -+ -+ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; -+ -+ if (ioapic_renumber_irq) -+ gsi = ioapic_renumber_irq(ioapic, gsi); -+ -+ /* -+ * Avoid pin reprogramming. PRTs typically include entries -+ * with redundant pin->gsi mappings (but unique PCI devices); -+ * we only program the IOAPIC on the first. -+ */ -+ bit = ioapic_pin % 32; -+ idx = (ioapic_pin < 32) ? 
0 : (ioapic_pin / 32); -+ if (idx > 3) { -+ printk(KERN_ERR "Invalid reference to IOAPIC pin " -+ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, -+ ioapic_pin); -+ return gsi; -+ } -+ if ((1< 15), but -+ * avoid a problem where the 8254 timer (IRQ0) is setup -+ * via an override (so it's not on pin 0 of the ioapic), -+ * and at the same time, the pin 0 interrupt is a PCI -+ * type. The gsi > 15 test could cause these two pins -+ * to be shared as IRQ0, and they are not shareable. -+ * So test for this condition, and if necessary, avoid -+ * the pin collision. -+ */ -+ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0)) -+ gsi = pci_irq++; -+ /* -+ * Don't assign IRQ used by ACPI SCI -+ */ -+ if (gsi == acpi_fadt.sci_int) -+ gsi = pci_irq++; -+ gsi_to_irq[irq] = gsi; -+ } else { -+ printk(KERN_ERR "GSI %u is too high\n", gsi); -+ return gsi; -+ } -+ } -+ -+ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, -+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, -+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1); -+ return gsi; -+} -+ -+#endif /* CONFIG_X86_IO_APIC */ -+#endif /* CONFIG_ACPI */ ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/pci-dma-xen.c 2009-11-06 10:23:23.000000000 +0100 -@@ -0,0 +1,406 @@ -+/* -+ * Dynamic DMA mapping support. -+ * -+ * On i386 there is no hardware dynamic DMA address translation, -+ * so consistent alloc/free are merely page allocation/freeing. -+ * The rest of the dynamic DMA mapping interface is implemented -+ * in asm/pci.h. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#ifdef __x86_64__ -+#include -+ -+int iommu_merge __read_mostly = 0; -+EXPORT_SYMBOL(iommu_merge); -+ -+dma_addr_t bad_dma_address __read_mostly; -+EXPORT_SYMBOL(bad_dma_address); -+ -+/* This tells the BIO block layer to assume merging. Default to off -+ because we cannot guarantee merging later. 
*/ -+int iommu_bio_merge __read_mostly = 0; -+EXPORT_SYMBOL(iommu_bio_merge); -+ -+int force_iommu __read_mostly= 0; -+ -+__init int iommu_setup(char *p) -+{ -+ return 1; -+} -+ -+void __init pci_iommu_alloc(void) -+{ -+#ifdef CONFIG_SWIOTLB -+ pci_swiotlb_init(); -+#endif -+} -+ -+static int __init pci_iommu_init(void) -+{ -+ no_iommu_init(); -+ return 0; -+} -+ -+/* Must execute after PCI subsystem */ -+fs_initcall(pci_iommu_init); -+#endif -+ -+struct dma_coherent_mem { -+ void *virt_base; -+ u32 device_base; -+ int size; -+ int flags; -+ unsigned long *bitmap; -+}; -+ -+#define IOMMU_BUG_ON(test) \ -+do { \ -+ if (unlikely(test)) { \ -+ printk(KERN_ALERT "Fatal DMA error! " \ -+ "Please use 'swiotlb=force'\n"); \ -+ BUG(); \ -+ } \ -+} while (0) -+ -+static int check_pages_physically_contiguous(unsigned long pfn, -+ unsigned int offset, -+ size_t length) -+{ -+ unsigned long next_mfn; -+ int i; -+ int nr_pages; -+ -+ next_mfn = pfn_to_mfn(pfn); -+ nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT; -+ -+ for (i = 1; i < nr_pages; i++) { -+ if (pfn_to_mfn(++pfn) != ++next_mfn) -+ return 0; -+ } -+ return 1; -+} -+ -+int range_straddles_page_boundary(paddr_t p, size_t size) -+{ -+ unsigned long pfn = p >> PAGE_SHIFT; -+ unsigned int offset = p & ~PAGE_MASK; -+ -+ return ((offset + size > PAGE_SIZE) && -+ !check_pages_physically_contiguous(pfn, offset, size)); -+} -+ -+int -+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, -+ enum dma_data_direction direction) -+{ -+ int i, rc; -+ -+ if (direction == DMA_NONE) -+ BUG(); -+ WARN_ON(nents == 0 || sg[0].length == 0); -+ -+ if (swiotlb) { -+ rc = swiotlb_map_sg(hwdev, sg, nents, direction); -+ } else { -+ for (i = 0; i < nents; i++ ) { -+ BUG_ON(!sg[i].page); -+ sg[i].dma_address = -+ gnttab_dma_map_page(sg[i].page) + sg[i].offset; -+ sg[i].dma_length = sg[i].length; -+ IOMMU_BUG_ON(address_needs_mapping( -+ hwdev, sg[i].dma_address)); -+ IOMMU_BUG_ON(range_straddles_page_boundary( -+ 
page_to_pseudophys(sg[i].page) + sg[i].offset, -+ sg[i].length)); -+ } -+ rc = nents; -+ } -+ -+ flush_write_buffers(); -+ return rc; -+} -+EXPORT_SYMBOL(dma_map_sg); -+ -+void -+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, -+ enum dma_data_direction direction) -+{ -+ int i; -+ -+ BUG_ON(direction == DMA_NONE); -+ if (swiotlb) -+ swiotlb_unmap_sg(hwdev, sg, nents, direction); -+ else { -+ for (i = 0; i < nents; i++ ) -+ gnttab_dma_unmap_page(sg[i].dma_address); -+ } -+} -+EXPORT_SYMBOL(dma_unmap_sg); -+ -+#ifdef CONFIG_HIGHMEM -+dma_addr_t -+dma_map_page(struct device *dev, struct page *page, unsigned long offset, -+ size_t size, enum dma_data_direction direction) -+{ -+ dma_addr_t dma_addr; -+ -+ BUG_ON(direction == DMA_NONE); -+ -+ if (swiotlb) { -+ dma_addr = swiotlb_map_page( -+ dev, page, offset, size, direction); -+ } else { -+ dma_addr = gnttab_dma_map_page(page) + offset; -+ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr)); -+ } -+ -+ return dma_addr; -+} -+EXPORT_SYMBOL(dma_map_page); -+ -+void -+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, -+ enum dma_data_direction direction) -+{ -+ BUG_ON(direction == DMA_NONE); -+ if (swiotlb) -+ swiotlb_unmap_page(dev, dma_address, size, direction); -+ else -+ gnttab_dma_unmap_page(dma_address); -+} -+EXPORT_SYMBOL(dma_unmap_page); -+#endif /* CONFIG_HIGHMEM */ -+ -+int -+dma_mapping_error(dma_addr_t dma_addr) -+{ -+ if (swiotlb) -+ return swiotlb_dma_mapping_error(dma_addr); -+ return 0; -+} -+EXPORT_SYMBOL(dma_mapping_error); -+ -+int -+dma_supported(struct device *dev, u64 mask) -+{ -+ if (swiotlb) -+ return swiotlb_dma_supported(dev, mask); -+ /* -+ * By default we'll BUG when an infeasible DMA is requested, and -+ * request swiotlb=force (see IOMMU_BUG_ON). 
-+ */ -+ return 1; -+} -+EXPORT_SYMBOL(dma_supported); -+ -+void *dma_alloc_coherent(struct device *dev, size_t size, -+ dma_addr_t *dma_handle, gfp_t gfp) -+{ -+ void *ret; -+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; -+ unsigned int order = get_order(size); -+ unsigned long vstart; -+ u64 mask; -+ -+ /* ignore region specifiers */ -+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); -+ -+ if (mem) { -+ int page = bitmap_find_free_region(mem->bitmap, mem->size, -+ order); -+ if (page >= 0) { -+ *dma_handle = mem->device_base + (page << PAGE_SHIFT); -+ ret = mem->virt_base + (page << PAGE_SHIFT); -+ memset(ret, 0, size); -+ return ret; -+ } -+ if (mem->flags & DMA_MEMORY_EXCLUSIVE) -+ return NULL; -+ } -+ -+ vstart = __get_free_pages(gfp, order); -+ ret = (void *)vstart; -+ -+ if (dev != NULL && dev->coherent_dma_mask) -+ mask = dev->coherent_dma_mask; -+ else -+ mask = 0xffffffff; -+ -+ if (ret != NULL) { -+ if (xen_create_contiguous_region(vstart, order, -+ fls64(mask)) != 0) { -+ free_pages(vstart, order); -+ return NULL; -+ } -+ memset(ret, 0, size); -+ *dma_handle = virt_to_bus(ret); -+ } -+ return ret; -+} -+EXPORT_SYMBOL(dma_alloc_coherent); -+ -+void dma_free_coherent(struct device *dev, size_t size, -+ void *vaddr, dma_addr_t dma_handle) -+{ -+ struct dma_coherent_mem *mem = dev ? 
dev->dma_mem : NULL; -+ int order = get_order(size); -+ -+ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { -+ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; -+ -+ bitmap_release_region(mem->bitmap, page, order); -+ } else { -+ xen_destroy_contiguous_region((unsigned long)vaddr, order); -+ free_pages((unsigned long)vaddr, order); -+ } -+} -+EXPORT_SYMBOL(dma_free_coherent); -+ -+#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY -+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, -+ dma_addr_t device_addr, size_t size, int flags) -+{ -+ void __iomem *mem_base; -+ int pages = size >> PAGE_SHIFT; -+ int bitmap_size = (pages + 31)/32; -+ -+ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) -+ goto out; -+ if (!size) -+ goto out; -+ if (dev->dma_mem) -+ goto out; -+ -+ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ -+ -+ mem_base = ioremap(bus_addr, size); -+ if (!mem_base) -+ goto out; -+ -+ dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); -+ if (!dev->dma_mem) -+ goto out; -+ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem)); -+ dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL); -+ if (!dev->dma_mem->bitmap) -+ goto free1_out; -+ memset(dev->dma_mem->bitmap, 0, bitmap_size); -+ -+ dev->dma_mem->virt_base = mem_base; -+ dev->dma_mem->device_base = device_addr; -+ dev->dma_mem->size = pages; -+ dev->dma_mem->flags = flags; -+ -+ if (flags & DMA_MEMORY_MAP) -+ return DMA_MEMORY_MAP; -+ -+ return DMA_MEMORY_IO; -+ -+ free1_out: -+ kfree(dev->dma_mem->bitmap); -+ out: -+ return 0; -+} -+EXPORT_SYMBOL(dma_declare_coherent_memory); -+ -+void dma_release_declared_memory(struct device *dev) -+{ -+ struct dma_coherent_mem *mem = dev->dma_mem; -+ -+ if(!mem) -+ return; -+ dev->dma_mem = NULL; -+ iounmap(mem->virt_base); -+ kfree(mem->bitmap); -+ kfree(mem); -+} -+EXPORT_SYMBOL(dma_release_declared_memory); -+ -+void *dma_mark_declared_memory_occupied(struct 
device *dev, -+ dma_addr_t device_addr, size_t size) -+{ -+ struct dma_coherent_mem *mem = dev->dma_mem; -+ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT; -+ int pos, err; -+ -+ if (!mem) -+ return ERR_PTR(-EINVAL); -+ -+ pos = (device_addr - mem->device_base) >> PAGE_SHIFT; -+ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); -+ if (err != 0) -+ return ERR_PTR(err); -+ return mem->virt_base + (pos << PAGE_SHIFT); -+} -+EXPORT_SYMBOL(dma_mark_declared_memory_occupied); -+#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ -+ -+dma_addr_t -+dma_map_single(struct device *dev, void *ptr, size_t size, -+ enum dma_data_direction direction) -+{ -+ dma_addr_t dma; -+ -+ if (direction == DMA_NONE) -+ BUG(); -+ WARN_ON(size == 0); -+ -+ if (swiotlb) { -+ dma = swiotlb_map_single(dev, ptr, size, direction); -+ } else { -+ dma = gnttab_dma_map_page(virt_to_page(ptr)) + -+ offset_in_page(ptr); -+ IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size)); -+ IOMMU_BUG_ON(address_needs_mapping(dev, dma)); -+ } -+ -+ flush_write_buffers(); -+ return dma; -+} -+EXPORT_SYMBOL(dma_map_single); -+ -+void -+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, -+ enum dma_data_direction direction) -+{ -+ if (direction == DMA_NONE) -+ BUG(); -+ if (swiotlb) -+ swiotlb_unmap_single(dev, dma_addr, size, direction); -+ else -+ gnttab_dma_unmap_page(dma_addr); -+} -+EXPORT_SYMBOL(dma_unmap_single); -+ -+void -+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, -+ enum dma_data_direction direction) -+{ -+ if (swiotlb) -+ swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction); -+} -+EXPORT_SYMBOL(dma_sync_single_for_cpu); -+ -+void -+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, -+ enum dma_data_direction direction) -+{ -+ if (swiotlb) -+ swiotlb_sync_single_for_device(dev, dma_handle, size, direction); -+} -+EXPORT_SYMBOL(dma_sync_single_for_device); 
---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/process_32-xen.c 2008-07-21 11:00:32.000000000 +0200 -@@ -0,0 +1,877 @@ -+/* -+ * linux/arch/i386/kernel/process.c -+ * -+ * Copyright (C) 1995 Linus Torvalds -+ * -+ * Pentium III FXSR, SSE support -+ * Gareth Hughes , May 2000 -+ */ -+ -+/* -+ * This file handles the architecture-dependent parts of process handling.. -+ */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#ifdef CONFIG_MATH_EMULATION -+#include -+#endif -+ -+#include -+#include -+#include -+ -+#include -+ -+#include -+#include -+ -+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); -+ -+static int hlt_counter; -+ -+unsigned long boot_option_idle_override = 0; -+EXPORT_SYMBOL(boot_option_idle_override); -+ -+/* -+ * Return saved PC of a blocked thread. -+ */ -+unsigned long thread_saved_pc(struct task_struct *tsk) -+{ -+ return ((unsigned long *)tsk->thread.esp)[3]; -+} -+ -+/* -+ * Powermanagement idle function, if any.. -+ */ -+void (*pm_idle)(void); -+EXPORT_SYMBOL(pm_idle); -+static DEFINE_PER_CPU(unsigned int, cpu_idle_state); -+ -+void disable_hlt(void) -+{ -+ hlt_counter++; -+} -+ -+EXPORT_SYMBOL(disable_hlt); -+ -+void enable_hlt(void) -+{ -+ hlt_counter--; -+} -+ -+EXPORT_SYMBOL(enable_hlt); -+ -+/* -+ * On SMP it's slightly faster (but much more power-consuming!) -+ * to poll the ->work.need_resched flag instead of waiting for the -+ * cross-CPU IPI to arrive. Use this option with caution. 
-+ */ -+static void poll_idle (void) -+{ -+ local_irq_enable(); -+ -+ asm volatile( -+ "2:" -+ "testl %0, %1;" -+ "rep; nop;" -+ "je 2b;" -+ : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags)); -+} -+ -+static void xen_idle(void) -+{ -+ local_irq_disable(); -+ -+ if (need_resched()) -+ local_irq_enable(); -+ else { -+ current_thread_info()->status &= ~TS_POLLING; -+ smp_mb__after_clear_bit(); -+ safe_halt(); -+ current_thread_info()->status |= TS_POLLING; -+ } -+} -+#ifdef CONFIG_APM_MODULE -+EXPORT_SYMBOL(default_idle); -+#endif -+ -+#ifdef CONFIG_HOTPLUG_CPU -+extern cpumask_t cpu_initialized; -+static inline void play_dead(void) -+{ -+ idle_task_exit(); -+ local_irq_disable(); -+ cpu_clear(smp_processor_id(), cpu_initialized); -+ preempt_enable_no_resched(); -+ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL)); -+ cpu_bringup(); -+} -+#else -+static inline void play_dead(void) -+{ -+ BUG(); -+} -+#endif /* CONFIG_HOTPLUG_CPU */ -+ -+/* -+ * The idle thread. There's no useful work to be -+ * done, so just try to conserve power and have a -+ * low exit latency (ie sit in a loop waiting for -+ * somebody to say that they'd like to reschedule) -+ */ -+void cpu_idle(void) -+{ -+ int cpu = smp_processor_id(); -+ -+ current_thread_info()->status |= TS_POLLING; -+ -+ /* endless idle loop with no priority at all */ -+ while (1) { -+ while (!need_resched()) { -+ void (*idle)(void); -+ -+ if (__get_cpu_var(cpu_idle_state)) -+ __get_cpu_var(cpu_idle_state) = 0; -+ -+ rmb(); -+ idle = xen_idle; /* no alternatives */ -+ -+ if (cpu_is_offline(cpu)) -+ play_dead(); -+ -+ __get_cpu_var(irq_stat).idle_timestamp = jiffies; -+ idle(); -+ } -+ preempt_enable_no_resched(); -+ schedule(); -+ preempt_disable(); -+ } -+} -+ -+void cpu_idle_wait(void) -+{ -+ unsigned int cpu, this_cpu = get_cpu(); -+ cpumask_t map; -+ -+ set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); -+ put_cpu(); -+ -+ cpus_clear(map); -+ for_each_online_cpu(cpu) { -+ 
per_cpu(cpu_idle_state, cpu) = 1; -+ cpu_set(cpu, map); -+ } -+ -+ __get_cpu_var(cpu_idle_state) = 0; -+ -+ wmb(); -+ do { -+ ssleep(1); -+ for_each_online_cpu(cpu) { -+ if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu)) -+ cpu_clear(cpu, map); -+ } -+ cpus_and(map, map, cpu_online_map); -+ } while (!cpus_empty(map)); -+} -+EXPORT_SYMBOL_GPL(cpu_idle_wait); -+ -+void __devinit select_idle_routine(const struct cpuinfo_x86 *c) -+{ -+} -+ -+static int __init idle_setup (char *str) -+{ -+ if (!strncmp(str, "poll", 4)) { -+ printk("using polling idle threads.\n"); -+ pm_idle = poll_idle; -+ } -+ -+ boot_option_idle_override = 1; -+ return 1; -+} -+ -+__setup("idle=", idle_setup); -+ -+void show_regs(struct pt_regs * regs) -+{ -+ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; -+ -+ printk("\n"); -+ printk("Pid: %d, comm: %20s\n", current->pid, current->comm); -+ printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); -+ print_symbol("EIP is at %s\n", regs->eip); -+ -+ if (user_mode_vm(regs)) -+ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); -+ printk(" EFLAGS: %08lx %s (%s %.*s)\n", -+ regs->eflags, print_tainted(), system_utsname.release, -+ (int)strcspn(system_utsname.version, " "), -+ system_utsname.version); -+ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", -+ regs->eax,regs->ebx,regs->ecx,regs->edx); -+ printk("ESI: %08lx EDI: %08lx EBP: %08lx", -+ regs->esi, regs->edi, regs->ebp); -+ printk(" DS: %04x ES: %04x\n", -+ 0xffff & regs->xds,0xffff & regs->xes); -+ -+ cr0 = read_cr0(); -+ cr2 = read_cr2(); -+ cr3 = read_cr3(); -+ cr4 = read_cr4_safe(); -+ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); -+ show_trace(NULL, regs, ®s->esp); -+} -+ -+/* -+ * This gets run with %ebx containing the -+ * function to call, and %edx containing -+ * the "args". 
-+ */ -+extern void kernel_thread_helper(void); -+__asm__(".section .text\n" -+ ".align 4\n" -+ "kernel_thread_helper:\n\t" -+ "movl %edx,%eax\n\t" -+ "pushl %edx\n\t" -+ "call *%ebx\n\t" -+ "pushl %eax\n\t" -+ "call do_exit\n" -+ ".previous"); -+ -+/* -+ * Create a kernel thread -+ */ -+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) -+{ -+ struct pt_regs regs; -+ -+ memset(®s, 0, sizeof(regs)); -+ -+ regs.ebx = (unsigned long) fn; -+ regs.edx = (unsigned long) arg; -+ -+ regs.xds = __USER_DS; -+ regs.xes = __USER_DS; -+ regs.orig_eax = -1; -+ regs.eip = (unsigned long) kernel_thread_helper; -+ regs.xcs = GET_KERNEL_CS(); -+ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2; -+ -+ /* Ok, create the new process.. */ -+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); -+} -+EXPORT_SYMBOL(kernel_thread); -+ -+/* -+ * Free current thread data structures etc.. -+ */ -+void exit_thread(void) -+{ -+ /* The process may have allocated an io port bitmap... nuke it. */ -+ if (unlikely(test_thread_flag(TIF_IO_BITMAP))) { -+ struct task_struct *tsk = current; -+ struct thread_struct *t = &tsk->thread; -+ struct physdev_set_iobitmap set_iobitmap; -+ memset(&set_iobitmap, 0, sizeof(set_iobitmap)); -+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, -+ &set_iobitmap)); -+ kfree(t->io_bitmap_ptr); -+ t->io_bitmap_ptr = NULL; -+ clear_thread_flag(TIF_IO_BITMAP); -+ } -+} -+ -+void flush_thread(void) -+{ -+ struct task_struct *tsk = current; -+ -+ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8); -+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); -+ clear_tsk_thread_flag(tsk, TIF_DEBUG); -+ /* -+ * Forget coprocessor state.. 
-+ */ -+ clear_fpu(tsk); -+ clear_used_math(); -+} -+ -+void release_thread(struct task_struct *dead_task) -+{ -+ BUG_ON(dead_task->mm); -+ release_vm86_irqs(dead_task); -+} -+ -+/* -+ * This gets called before we allocate a new thread and copy -+ * the current task into it. -+ */ -+void prepare_to_copy(struct task_struct *tsk) -+{ -+ unlazy_fpu(tsk); -+} -+ -+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp, -+ unsigned long unused, -+ struct task_struct * p, struct pt_regs * regs) -+{ -+ struct pt_regs * childregs; -+ struct task_struct *tsk; -+ int err; -+ -+ childregs = task_pt_regs(p); -+ *childregs = *regs; -+ childregs->eax = 0; -+ childregs->esp = esp; -+ -+ p->thread.esp = (unsigned long) childregs; -+ p->thread.esp0 = (unsigned long) (childregs+1); -+ -+ p->thread.eip = (unsigned long) ret_from_fork; -+ -+ savesegment(fs,p->thread.fs); -+ savesegment(gs,p->thread.gs); -+ -+ tsk = current; -+ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { -+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); -+ if (!p->thread.io_bitmap_ptr) { -+ p->thread.io_bitmap_max = 0; -+ return -ENOMEM; -+ } -+ memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr, -+ IO_BITMAP_BYTES); -+ set_tsk_thread_flag(p, TIF_IO_BITMAP); -+ } -+ -+ /* -+ * Set a new TLS for the child thread? 
-+ */ -+ if (clone_flags & CLONE_SETTLS) { -+ struct desc_struct *desc; -+ struct user_desc info; -+ int idx; -+ -+ err = -EFAULT; -+ if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info))) -+ goto out; -+ err = -EINVAL; -+ if (LDT_empty(&info)) -+ goto out; -+ -+ idx = info.entry_number; -+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) -+ goto out; -+ -+ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; -+ desc->a = LDT_entry_a(&info); -+ desc->b = LDT_entry_b(&info); -+ } -+ -+ p->thread.iopl = current->thread.iopl; -+ -+ err = 0; -+ out: -+ if (err && p->thread.io_bitmap_ptr) { -+ kfree(p->thread.io_bitmap_ptr); -+ p->thread.io_bitmap_max = 0; -+ } -+ return err; -+} -+ -+/* -+ * fill in the user structure for a core dump.. -+ */ -+void dump_thread(struct pt_regs * regs, struct user * dump) -+{ -+ int i; -+ -+/* changed the size calculations - should hopefully work better. lbt */ -+ dump->magic = CMAGIC; -+ dump->start_code = 0; -+ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1); -+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; -+ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; -+ dump->u_dsize -= dump->u_tsize; -+ dump->u_ssize = 0; -+ for (i = 0; i < 8; i++) -+ dump->u_debugreg[i] = current->thread.debugreg[i]; -+ -+ if (dump->start_stack < TASK_SIZE) -+ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; -+ -+ dump->regs.ebx = regs->ebx; -+ dump->regs.ecx = regs->ecx; -+ dump->regs.edx = regs->edx; -+ dump->regs.esi = regs->esi; -+ dump->regs.edi = regs->edi; -+ dump->regs.ebp = regs->ebp; -+ dump->regs.eax = regs->eax; -+ dump->regs.ds = regs->xds; -+ dump->regs.es = regs->xes; -+ savesegment(fs,dump->regs.fs); -+ savesegment(gs,dump->regs.gs); -+ dump->regs.orig_eax = regs->orig_eax; -+ dump->regs.eip = regs->eip; -+ dump->regs.cs = regs->xcs; -+ dump->regs.eflags = regs->eflags; -+ dump->regs.esp = regs->esp; -+ dump->regs.ss = 
regs->xss; -+ -+ dump->u_fpvalid = dump_fpu (regs, &dump->i387); -+} -+EXPORT_SYMBOL(dump_thread); -+ -+/* -+ * Capture the user space registers if the task is not running (in user space) -+ */ -+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) -+{ -+ struct pt_regs ptregs = *task_pt_regs(tsk); -+ ptregs.xcs &= 0xffff; -+ ptregs.xds &= 0xffff; -+ ptregs.xes &= 0xffff; -+ ptregs.xss &= 0xffff; -+ -+ elf_core_copy_regs(regs, &ptregs); -+ -+ return 1; -+} -+ -+static noinline void __switch_to_xtra(struct task_struct *next_p) -+{ -+ struct thread_struct *next; -+ -+ next = &next_p->thread; -+ -+ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { -+ set_debugreg(next->debugreg[0], 0); -+ set_debugreg(next->debugreg[1], 1); -+ set_debugreg(next->debugreg[2], 2); -+ set_debugreg(next->debugreg[3], 3); -+ /* no 4 and 5 */ -+ set_debugreg(next->debugreg[6], 6); -+ set_debugreg(next->debugreg[7], 7); -+ } -+} -+ -+/* -+ * This function selects if the context switch from prev to next -+ * has to tweak the TSC disable bit in the cr4. -+ */ -+static inline void disable_tsc(struct task_struct *prev_p, -+ struct task_struct *next_p) -+{ -+ struct thread_info *prev, *next; -+ -+ /* -+ * gcc should eliminate the ->thread_info dereference if -+ * has_secure_computing returns 0 at compile time (SECCOMP=n). -+ */ -+ prev = task_thread_info(prev_p); -+ next = task_thread_info(next_p); -+ -+ if (has_secure_computing(prev) || has_secure_computing(next)) { -+ /* slow path here */ -+ if (has_secure_computing(prev) && -+ !has_secure_computing(next)) { -+ write_cr4(read_cr4() & ~X86_CR4_TSD); -+ } else if (!has_secure_computing(prev) && -+ has_secure_computing(next)) -+ write_cr4(read_cr4() | X86_CR4_TSD); -+ } -+} -+ -+/* -+ * switch_to(x,yn) should switch tasks from x to y. -+ * -+ * We fsave/fwait so that an exception goes off at the right time -+ * (as a call from the fsave or fwait in effect) rather than to -+ * the wrong process. 
Lazy FP saving no longer makes any sense -+ * with modern CPU's, and this simplifies a lot of things (SMP -+ * and UP become the same). -+ * -+ * NOTE! We used to use the x86 hardware context switching. The -+ * reason for not using it any more becomes apparent when you -+ * try to recover gracefully from saved state that is no longer -+ * valid (stale segment register values in particular). With the -+ * hardware task-switch, there is no way to fix up bad state in -+ * a reasonable manner. -+ * -+ * The fact that Intel documents the hardware task-switching to -+ * be slow is a fairly red herring - this code is not noticeably -+ * faster. However, there _is_ some room for improvement here, -+ * so the performance issues may eventually be a valid point. -+ * More important, however, is the fact that this allows us much -+ * more flexibility. -+ * -+ * The return value (in %eax) will be the "prev" task after -+ * the task-switch, and shows up in ret_from_fork in entry.S, -+ * for example. -+ */ -+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) -+{ -+ struct thread_struct *prev = &prev_p->thread, -+ *next = &next_p->thread; -+ int cpu = smp_processor_id(); -+#ifndef CONFIG_X86_NO_TSS -+ struct tss_struct *tss = &per_cpu(init_tss, cpu); -+#endif -+#if CONFIG_XEN_COMPAT > 0x030002 -+ struct physdev_set_iopl iopl_op; -+ struct physdev_set_iobitmap iobmp_op; -+#else -+ struct physdev_op _pdo[2], *pdo = _pdo; -+#define iopl_op pdo->u.set_iopl -+#define iobmp_op pdo->u.set_iobitmap -+#endif -+ multicall_entry_t _mcl[8], *mcl = _mcl; -+ -+ /* XEN NOTE: FS/GS saved in switch_mm(), not here. */ -+ -+ /* -+ * This is basically '__unlazy_fpu', except that we queue a -+ * multicall to indicate FPU task switch, rather than -+ * synchronously trapping to Xen. 
-+ */ -+ if (prev_p->thread_info->status & TS_USEDFPU) { -+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */ -+ mcl->op = __HYPERVISOR_fpu_taskswitch; -+ mcl->args[0] = 1; -+ mcl++; -+ } -+#if 0 /* lazy fpu sanity check */ -+ else BUG_ON(!(read_cr0() & 8)); -+#endif -+ -+ /* -+ * Reload esp0. -+ * This is load_esp0(tss, next) with a multicall. -+ */ -+ mcl->op = __HYPERVISOR_stack_switch; -+ mcl->args[0] = __KERNEL_DS; -+ mcl->args[1] = next->esp0; -+ mcl++; -+ -+ /* -+ * Load the per-thread Thread-Local Storage descriptor. -+ * This is load_TLS(next, cpu) with multicalls. -+ */ -+#define C(i) do { \ -+ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \ -+ next->tls_array[i].b != prev->tls_array[i].b)) { \ -+ mcl->op = __HYPERVISOR_update_descriptor; \ -+ *(u64 *)&mcl->args[0] = virt_to_machine( \ -+ &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\ -+ *(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i]; \ -+ mcl++; \ -+ } \ -+} while (0) -+ C(0); C(1); C(2); -+#undef C -+ -+ if (unlikely(prev->iopl != next->iopl)) { -+ iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3; -+#if CONFIG_XEN_COMPAT > 0x030002 -+ mcl->op = __HYPERVISOR_physdev_op; -+ mcl->args[0] = PHYSDEVOP_set_iopl; -+ mcl->args[1] = (unsigned long)&iopl_op; -+#else -+ mcl->op = __HYPERVISOR_physdev_op_compat; -+ pdo->cmd = PHYSDEVOP_set_iopl; -+ mcl->args[0] = (unsigned long)pdo++; -+#endif -+ mcl++; -+ } -+ -+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) { -+ set_xen_guest_handle(iobmp_op.bitmap, -+ (char *)next->io_bitmap_ptr); -+ iobmp_op.nr_ports = next->io_bitmap_ptr ? 
IO_BITMAP_BITS : 0; -+#if CONFIG_XEN_COMPAT > 0x030002 -+ mcl->op = __HYPERVISOR_physdev_op; -+ mcl->args[0] = PHYSDEVOP_set_iobitmap; -+ mcl->args[1] = (unsigned long)&iobmp_op; -+#else -+ mcl->op = __HYPERVISOR_physdev_op_compat; -+ pdo->cmd = PHYSDEVOP_set_iobitmap; -+ mcl->args[0] = (unsigned long)pdo++; -+#endif -+ mcl++; -+ } -+ -+#if CONFIG_XEN_COMPAT <= 0x030002 -+ BUG_ON(pdo > _pdo + ARRAY_SIZE(_pdo)); -+#endif -+ BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl)); -+ if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL))) -+ BUG(); -+ -+ /* -+ * Restore %fs and %gs if needed. -+ * -+ * Glibc normally makes %fs be zero, and %gs is one of -+ * the TLS segments. -+ */ -+ if (unlikely(next->fs)) -+ loadsegment(fs, next->fs); -+ -+ if (next->gs) -+ loadsegment(gs, next->gs); -+ -+ /* -+ * Now maybe handle debug registers -+ */ -+ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)) -+ __switch_to_xtra(next_p); -+ -+ disable_tsc(prev_p, next_p); -+ -+ return prev_p; -+} -+ -+asmlinkage int sys_fork(struct pt_regs regs) -+{ -+ return do_fork(SIGCHLD, regs.esp, ®s, 0, NULL, NULL); -+} -+ -+asmlinkage int sys_clone(struct pt_regs regs) -+{ -+ unsigned long clone_flags; -+ unsigned long newsp; -+ int __user *parent_tidptr, *child_tidptr; -+ -+ clone_flags = regs.ebx; -+ newsp = regs.ecx; -+ parent_tidptr = (int __user *)regs.edx; -+ child_tidptr = (int __user *)regs.edi; -+ if (!newsp) -+ newsp = regs.esp; -+ return do_fork(clone_flags, newsp, ®s, 0, parent_tidptr, child_tidptr); -+} -+ -+/* -+ * This is trivial, and on the face of it looks like it -+ * could equally well be done in user mode. -+ * -+ * Not so, for quite unobvious reasons - register pressure. -+ * In user mode vfork() cannot have a stack frame, and if -+ * done by calling the "clone()" system call directly, you -+ * do not have enough call-clobbered registers to hold all -+ * the information you need. 
-+ */ -+asmlinkage int sys_vfork(struct pt_regs regs) -+{ -+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, ®s, 0, NULL, NULL); -+} -+ -+/* -+ * sys_execve() executes a new program. -+ */ -+asmlinkage int sys_execve(struct pt_regs regs) -+{ -+ int error; -+ char * filename; -+ -+ filename = getname((char __user *) regs.ebx); -+ error = PTR_ERR(filename); -+ if (IS_ERR(filename)) -+ goto out; -+ error = do_execve(filename, -+ (char __user * __user *) regs.ecx, -+ (char __user * __user *) regs.edx, -+ ®s); -+ if (error == 0) { -+ task_lock(current); -+ current->ptrace &= ~PT_DTRACE; -+ task_unlock(current); -+ /* Make sure we don't return using sysenter.. */ -+ set_thread_flag(TIF_IRET); -+ } -+ putname(filename); -+out: -+ return error; -+} -+ -+#define top_esp (THREAD_SIZE - sizeof(unsigned long)) -+#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) -+ -+unsigned long get_wchan(struct task_struct *p) -+{ -+ unsigned long ebp, esp, eip; -+ unsigned long stack_page; -+ int count = 0; -+ if (!p || p == current || p->state == TASK_RUNNING) -+ return 0; -+ stack_page = (unsigned long)task_stack_page(p); -+ esp = p->thread.esp; -+ if (!stack_page || esp < stack_page || esp > top_esp+stack_page) -+ return 0; -+ /* include/asm-i386/system.h:switch_to() pushes ebp last. */ -+ ebp = *(unsigned long *) esp; -+ do { -+ if (ebp < stack_page || ebp > top_ebp+stack_page) -+ return 0; -+ eip = *(unsigned long *) (ebp+4); -+ if (!in_sched_functions(eip)) -+ return eip; -+ ebp = *(unsigned long *) ebp; -+ } while (count++ < 16); -+ return 0; -+} -+ -+/* -+ * sys_alloc_thread_area: get a yet unused TLS descriptor index. 
-+ */ -+static int get_free_idx(void) -+{ -+ struct thread_struct *t = ¤t->thread; -+ int idx; -+ -+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) -+ if (desc_empty(t->tls_array + idx)) -+ return idx + GDT_ENTRY_TLS_MIN; -+ return -ESRCH; -+} -+ -+/* -+ * Set a given TLS descriptor: -+ */ -+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info) -+{ -+ struct thread_struct *t = ¤t->thread; -+ struct user_desc info; -+ struct desc_struct *desc; -+ int cpu, idx; -+ -+ if (copy_from_user(&info, u_info, sizeof(info))) -+ return -EFAULT; -+ idx = info.entry_number; -+ -+ /* -+ * index -1 means the kernel should try to find and -+ * allocate an empty descriptor: -+ */ -+ if (idx == -1) { -+ idx = get_free_idx(); -+ if (idx < 0) -+ return idx; -+ if (put_user(idx, &u_info->entry_number)) -+ return -EFAULT; -+ } -+ -+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) -+ return -EINVAL; -+ -+ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN; -+ -+ /* -+ * We must not get preempted while modifying the TLS. 
-+ */ -+ cpu = get_cpu(); -+ -+ if (LDT_empty(&info)) { -+ desc->a = 0; -+ desc->b = 0; -+ } else { -+ desc->a = LDT_entry_a(&info); -+ desc->b = LDT_entry_b(&info); -+ } -+ load_TLS(t, cpu); -+ -+ put_cpu(); -+ -+ return 0; -+} -+ -+/* -+ * Get the current Thread-Local Storage area: -+ */ -+ -+#define GET_BASE(desc) ( \ -+ (((desc)->a >> 16) & 0x0000ffff) | \ -+ (((desc)->b << 16) & 0x00ff0000) | \ -+ ( (desc)->b & 0xff000000) ) -+ -+#define GET_LIMIT(desc) ( \ -+ ((desc)->a & 0x0ffff) | \ -+ ((desc)->b & 0xf0000) ) -+ -+#define GET_32BIT(desc) (((desc)->b >> 22) & 1) -+#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3) -+#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1) -+#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1) -+#define GET_PRESENT(desc) (((desc)->b >> 15) & 1) -+#define GET_USEABLE(desc) (((desc)->b >> 20) & 1) -+ -+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info) -+{ -+ struct user_desc info; -+ struct desc_struct *desc; -+ int idx; -+ -+ if (get_user(idx, &u_info->entry_number)) -+ return -EFAULT; -+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) -+ return -EINVAL; -+ -+ memset(&info, 0, sizeof(info)); -+ -+ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; -+ -+ info.entry_number = idx; -+ info.base_addr = GET_BASE(desc); -+ info.limit = GET_LIMIT(desc); -+ info.seg_32bit = GET_32BIT(desc); -+ info.contents = GET_CONTENTS(desc); -+ info.read_exec_only = !GET_WRITABLE(desc); -+ info.limit_in_pages = GET_LIMIT_PAGES(desc); -+ info.seg_not_present = !GET_PRESENT(desc); -+ info.useable = GET_USEABLE(desc); -+ -+ if (copy_to_user(u_info, &info, sizeof(info))) -+ return -EFAULT; -+ return 0; -+} -+ -+unsigned long arch_align_stack(unsigned long sp) -+{ -+ if (randomize_va_space) -+ sp -= get_random_int() % 8192; -+ return sp & ~0xf; -+} ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ b/arch/x86/kernel/setup_32-xen.c 2008-04-22 15:41:51.000000000 +0200 -@@ -0,0 +1,1919 @@ -+/* -+ * 
linux/arch/i386/kernel/setup.c -+ * -+ * Copyright (C) 1995 Linus Torvalds -+ * -+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 -+ * -+ * Memory region support -+ * David Parsons , July-August 1999 -+ * -+ * Added E820 sanitization routine (removes overlapping memory regions); -+ * Brian Moyle , February 2001 -+ * -+ * Moved CPU detection code to cpu/${cpu}.c -+ * Patrick Mochel , March 2002 -+ * -+ * Provisions for empty E820 memory regions (reported by certain BIOSes). -+ * Alex Achenbach , December 2002. -+ * -+ */ -+ -+/* -+ * This file handles the architecture-dependent parts of initialization -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include